metadata (dict) | text (string, length 60 to 3.49M)
---|---
{
"source": "JHFarrant/cheekybanjos",
"score": 3
} |
#### File: cheekybanjos/cheekybanjos/user.py
```python
import logging
from .core import BanjosAPIObject
from .exceptions import CheekyException
logger = logging.getLogger(__name__)
class AddressBook(BanjosAPIObject):
default = None
addresses = None
def __init__(self, _client, vals):
super(AddressBook, self).__init__(_client, vals)
self.addresses = [Address(self._client, address) for address in vals.get("addresses", [])]
@property
def default_addresses(self):
if self.default is None:
raise CheekyException("No default address is set. Go online to set a address.")
default_addresses = [address for address in self.addresses if address.id == self.default]
assert len(default_addresses) == 1
return default_addresses[0]
class Address(BanjosAPIObject):
id = None
line1 = None
line2 = None
city = None
postCode = None
county = None
location = None
class Wallet(BanjosAPIObject):
default = None
wallet = None
def __init__(self, _client, vals):
super(Wallet, self).__init__(_client, vals)
self.wallet = [BankCard(self._client, card) for card in vals.get("wallet", [])]
@property
def default_card(self):
if self.default is None:
raise CheekyException("No default card set. Go online to set a default card.")
default_card = [card for card in self.wallet if card.id == self.default]
assert len(default_card) == 1
return default_card[0]
class BankCard(BanjosAPIObject):
id = None
expiryMonth = None
expiryYear = None
last4Digits = None
cardType = None
nickname = None
temporary = None
``` |
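A minimal usage sketch (not from the repository): the client object and payload shape are hypothetical, and it assumes `BanjosAPIObject.__init__` copies the `vals` keys onto attributes (its implementation is not shown above).

```python
# Hypothetical client and payload, for illustration only.
client = None  # would normally be an authenticated Banjos API client
vals = {
    "default": 1,
    "addresses": [
        {"id": 1, "line1": "1 High St", "city": "London", "postCode": "N1 1AA"},
    ],
}

book = AddressBook(client, vals)
print(book.default_addresses.line1)  # -> "1 High St", assuming the base class maps vals onto attributes
```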
{
"source": "jhfatehi/brewpc",
"score": 2
} |
#### File: input2/source/dbwrite.py
```python
import sqlite3
def test(db_path, brew_num, batch_num, data1, data2, data3):
conn = sqlite3.connect(db_path)
c = conn.cursor()
c.execute('''UPDATE test
SET data1 = ?, data2 = ?, data3 = ?
WHERE brew_num = ? AND batch = ?''',
(data1, data2, data3, brew_num, batch_num))
conn.commit()
conn.close()
def add_brew(db_path, batches, brew_num, brew_size, brand):
n = 'none'
brew_data = []
mash_data = []
for ii in range(int(batches)):
brew_data.append((brew_num, str(ii+1), brew_size, brand, n,n,n)) # for the test sheet; remove later
mash_data.append((brew_num, str(ii+1), brew_size, brand, n,n,n,n,n,n,n,n,n,n,n,n,n,n,n))
conn = sqlite3.connect(db_path)
c = conn.cursor()
c.executemany('insert into test values (?,?,?,?,?,?,?)', brew_data) # for the test sheet; remove later
c.executemany('insert into mash values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)', mash_data)
conn.commit()
conn.close()
def mash(db_path,
brew_num,
batch_num,
dGRStemp,
dSTKtemp,
dMSHvol,
dMSHtemp,
dMSHtime,
dBREWsig,
dRNCvol,
dVLFtime,
dMASHph,
d1RNvol,
dSPGvol,
dROFtime,
dRACKcnt,
dFILLtime,
dFILLvol):
conn = sqlite3.connect(db_path)
c = conn.cursor()
c.execute('''UPDATE mash
SET dGRStemp = ?,
dSTKtemp = ?,
dMSHvol = ?,
dMSHtemp = ?,
dMSHtime = ?,
dBREWsig = ?,
dRNCvol = ?,
dVLFtime = ?,
dMASHph = ?,
d1RNvol = ?,
dSPGvol = ?,
dROFtime = ?,
dRACKcnt = ?,
dFILLtime = ?,
dFILLvol = ?
WHERE brew_num = ? AND batch = ?''',
(dGRStemp,
dSTKtemp,
dMSHvol,
dMSHtemp,
dMSHtime,
dBREWsig,
dRNCvol,
dVLFtime,
dMASHph,
d1RNvol,
dSPGvol,
dROFtime,
dRACKcnt,
dFILLtime,
dFILLvol,
brew_num,
batch_num))
conn.commit()
conn.close()
```
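A minimal usage sketch (assumptions: a throwaway SQLite file, and `test`/`mash` tables created here only so the column counts match the INSERT statements above; the project's real schema lives elsewhere).

```python
import sqlite3

db = "demo_brew.db"  # throwaway database file

conn = sqlite3.connect(db)
# Column names are assumptions; only the column counts must match the INSERTs above.
conn.execute("CREATE TABLE IF NOT EXISTS test (brew_num, batch, brew_size, brand, data1, data2, data3)")
conn.execute("CREATE TABLE IF NOT EXISTS mash (brew_num, batch, brew_size, brand, "
             "dGRStemp, dSTKtemp, dMSHvol, dMSHtemp, dMSHtime, dBREWsig, dRNCvol, dVLFtime, "
             "dMASHph, d1RNvol, dSPGvol, dROFtime, dRACKcnt, dFILLtime, dFILLvol)")
conn.commit()
conn.close()

add_brew(db, batches="2", brew_num="B001", brew_size="10bbl", brand="IPA")
test(db, "B001", "1", "1.050", "1.012", "5.2")  # update data1-3 for brew B001, batch 1
```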
#### File: input4/source/inval.py
```python
import mysql.connector
def check_brew_batch(db_path, brew_num, batch):
query = '''SELECT count(1)
from mash
where brew_num = %s and batch_num = %s'''
conn = mysql.connector.connect(
user=db_path.get('mysql', 'usr'),
password=db_path.get('mysql', 'pw'),
host='127.0.0.1',
database=db_path.get('mysql', 'db'),
port=db_path.get('mysql', 'local_bind_port'))
cur = conn.cursor()
cur.execute(query, (brew_num, batch))
rows = cur.fetchall()
conn.close()
return rows[0][0]
def check_brew(db_path, brew_num):
query = '''SELECT count(1)
from brews
where brew_num = %s'''
conn = mysql.connector.connect(
user=db_path.get('mysql', 'usr'),
password=db_path.get('mysql', 'pw'),
host='127.0.0.1',
database=db_path.get('mysql', 'db'),
port=db_path.get('mysql', 'local_bind_port'))
cur = conn.cursor()
cur.execute(query, (brew_num,))
rows = cur.fetchall()
conn.close()
return rows[0][0]
def check_int(x):
try:
int(x)
except (TypeError, ValueError):
return 0
return 1
def check_brand(db_path, brand):
query = '''SELECT count(1)
from process
where brand = %s'''
conn = mysql.connector.connect(
user=db_path.get('mysql', 'usr'),
password=db_path.get('mysql', 'pw'),
host='127.0.0.1',
database=db_path.get('mysql', 'db'),
port=db_path.get('mysql', 'local_bind_port'))
cur = conn.cursor()
cur.execute(query, (brand,))
rows = cur.fetchall()
conn.close()
return rows[0][0]
``` |
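A minimal usage sketch (assumptions: despite its name, `db_path` is a `configparser.ConfigParser` holding a `[mysql]` section, and a MySQL server is reachable on the configured local port).

```python
import configparser

cfg = configparser.ConfigParser()
cfg.read_string("""
[mysql]
usr = brewer
pw = secret
db = brewpc
local_bind_port = 3306
""")

if check_int("42") and check_brand(cfg, "IPA"):
    print(check_brew_batch(cfg, 1001, 2))  # count of matching mash rows
```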
{
"source": "jhflorey/djangoSalesforceMaster",
"score": 3
} |
#### File: salesforce/backend/test_helpers.py
```python
from unittest import skip, skipUnless, expectedFailure # NOQA
import sys
import uuid
import django
from django.conf import settings
from salesforce import router
# uid strings for tests that may accidentally run concurrently
uid_random = '-' + str(uuid.uuid4())[:7]
# this is the same as the name of tox test environment, e.g. 'py35-dj110'
uid_version = 'py{0}{1}-dj{2}{3}'.format(*(sys.version_info[:2] + django.VERSION[:2]))
sf_alias = getattr(settings, 'SALESFORCE_DB_ALIAS', 'salesforce')
default_is_sf = router.is_sf_database(sf_alias)
current_user = settings.DATABASES[sf_alias]['USER']
def expectedFailureIf(condition):
"""Conditional 'expectedFailure' decorator for TestCase"""
if condition:
return expectedFailure
else:
return lambda func: func
```
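A minimal usage sketch of `expectedFailureIf` (the test class and the condition are illustrative, not taken from the project's test suite).

```python
import unittest

class QuirkTest(unittest.TestCase):
    # Marked as an expected failure only when the default database is not Salesforce.
    @expectedFailureIf(not default_is_sf)
    def test_salesforce_specific_behaviour(self):
        self.assertTrue(default_is_sf)

if __name__ == "__main__":
    unittest.main()
```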
#### File: testrunner/example/views.py
```python
import logging
from django.conf import settings
from django import template, shortcuts, http
from salesforce.testrunner.example import models, forms
log = logging.getLogger(__name__)
def list_accounts(request):
accounts = models.Account.objects.all()[0:5]
return shortcuts.render_to_response('list-accounts.html', dict(
title = "List First 5 Accounts",
accounts = accounts,
))
def search_accounts(request):
accounts = []
if request.method == 'POST':
form = forms.SearchForm(request.POST)
if form.is_valid():
accounts = models.Account.objects.filter(Name__icontains=form.cleaned_data['query'])
else:
form = forms.SearchForm()
return shortcuts.render_to_response('search-accounts.html', dict(
title = "Search Accounts by Email",
accounts = accounts,
form = form,
))
``` |
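A minimal URLconf sketch showing how these views could be wired up (the routes and names are assumptions, and it presumes a Django version where `django.urls.path` is available).

```python
from django.urls import path

from salesforce.testrunner.example import views

urlpatterns = [
    path("accounts/", views.list_accounts, name="list_accounts"),
    path("accounts/search/", views.search_accounts, name="search_accounts"),
]
```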
{
"source": "jhfong/NeMo",
"score": 2
} |
#### File: text_normalization/taggers/roman.py
```python
from nemo_text_processing.text_normalization.data_loader_utils import load_labels
from nemo_text_processing.text_normalization.graph_utils import GraphFst, get_abs_path, insert_space
from nemo_text_processing.text_normalization.taggers.cardinal import CardinalFst
try:
import pynini
from pynini.lib import pynutil
PYNINI_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
PYNINI_AVAILABLE = False
class RomanFst(GraphFst):
"""
Finite state transducer for classifying roman numerals,
e.g. "IV" -> roman { integer: "4" }
Args:
deterministic: if True will provide a single transduction option,
if False multiple transductions are generated (used for audio-based normalization)
"""
def __init__(self, deterministic: bool = True):
super().__init__(name="roman", kind="classify", deterministic=deterministic)
def _load_roman(file: str):
roman = load_labels(get_abs_path(file))
roman_numerals = [(x, y) for x, y in roman] + [(x.upper(), y) for x, y in roman]
return pynini.string_map(roman_numerals)
cardinal_graph = CardinalFst(deterministic=True).graph
digit_teen = _load_roman("data/roman/digit_teen.tsv") @ cardinal_graph
ties = _load_roman("data/roman/ties.tsv") @ cardinal_graph
hundreds = _load_roman("data/roman/hundreds.tsv") @ cardinal_graph
graph = (
(ties | digit_teen | hundreds)
| (ties + insert_space + digit_teen)
| (hundreds + pynini.closure(insert_space + ties, 0, 1) + pynini.closure(insert_space + digit_teen, 0, 1))
).optimize()
graph = pynutil.insert("integer: \"") + graph + pynutil.insert("\"")
graph = self.add_tokens(graph)
self.fst = graph.optimize()
``` |
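A minimal usage sketch (assumptions: `pynini` is installed and the NeMo data files referenced above are present; `top_rewrite` simply applies the tagger to one input string).

```python
from pynini.lib import rewrite

# Build the roman-numeral tagger and apply it to a single input.
roman_fst = RomanFst(deterministic=True).fst
print(rewrite.top_rewrite("IV", roman_fst))  # expected output along the lines of: roman { integer: "4" }
```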
{
"source": "jhfoxliu/iMVP",
"score": 4
} |
#### File: iMVP_utils/iMVP_utils/embedding.py
```python
def onehot_encoder_df(df, column="seq", enc_bases="ATCGN"):
"""This function is used for generate One-Hot encoding sequences from a DataFrame.
Parameters
----------
df: pd.DataFrame
A DataFrame.
column: str or tuple
The column containing the sequences
enc_bases: str
The encoding bases. Default="ATCGN".
Returns
-------
sequences_onehot: list
A list of one-hot encoded sequences.
"""
from sklearn.preprocessing import OneHotEncoder
import numpy as np
enc = OneHotEncoder(dtype=np.int8)
enc.fit([[i] for i in enc_bases])
sequences_onehot = []
for idx, row in df.iterrows():
seq = [[i] for i in str(row[column]).upper()]
sequences_onehot.append(enc.transform(seq).toarray().reshape(-1))
return sequences_onehot
def onehot_encoder_iterable(iter_obj, enc_bases="ATCGN"):
"""This function is used for generate One-Hot encoding sequences from a iterable object.
Parameters
----------
iter_obj: iterable
An iterable object containing the sequences.
enc_bases: str
The encoding bases. Default="ATCGN".
Returns
-------
sequences_onehot: list
A list of one-hot encoded sequences.
"""
from sklearn.preprocessing import OneHotEncoder
import numpy as np
enc = OneHotEncoder(dtype=np.int8)
enc.fit([[i] for i in enc_bases])
sequences_onehot = []
for item in iter_obj:
seq = [[i] for i in item.upper()]
sequences_onehot.append(enc.transform(seq).toarray().reshape(-1))
return sequences_onehot
def run_UMAP(onehot_input, df=None, init="random", random_state=42, min_dist=0.01, n_neighbors=20, densmap=False, verbose=True, n_jobs=6):
"""An implement of UMAP (CPU version).
Parameters
----------
onehot_input: iterable.
A list of one-hot encoded sequences.
df: pd.DataFrame
A DataFrame to process. If given, a DataFrame with X and Y columns is returned; if not, X and Y are returned separately.
init: str.
init value for UMAP.
random_state: int
random seed.
min_dist: float
min_dist for UMAP
n_neighbors: int
n_neighbors for UMAP
densmap: boolean
Whether to use DensMAP.
verbose: boolean
verbose level
Returns
-------
A DataFrame or [X and Y]
"""
import umap
model = umap.UMAP(init=init, random_state=random_state, n_components=2, min_dist=min_dist, n_neighbors=n_neighbors, verbose=verbose, densmap=densmap, n_jobs=n_jobs)
umap_output = model.fit_transform(onehot_input)
if df is not None:
df = df.copy()
df["X"] = umap_output[:, 0]
df["Y"] = umap_output[:, 1]
del model
return df
else:
del model
return umap_output[:, 0], umap_output[:, 1]
def run_UMAP_GPU(onehot_input, df=None, init="random", random_state=42, min_dist=0.01, n_neighbors=20, densmap=False, verbose=True):
"""An implement of UMAP (GPU version).
Parameters
----------
onehot_input: iterable.
A list of one-hot encoded sequences.
df: pd.DataFrame
A DataFrame to process. If given, a DataFrame with X and Y columns is returned; if not, X and Y are returned separately.
init: str.
init value for UMAP.
random_state: int
random seed.
min_dist: float
min_dist for UMAP
n_neighbors: int
n_neighbors for UMAP
densmap: boolean
Whether to use DensMAP.
verbose: boolean
verbose level
Returns
-------
A DataFrame or [X and Y]
"""
import cuml
model = cuml.UMAP(init=init, random_state=random_state, n_components=2, min_dist=min_dist, n_neighbors=n_neighbors, verbose=verbose, densmap=densmap)
umap_output = model.fit_transform(onehot_input)
if df is not None:
df = df.copy()
df["X"] = umap_output[:, 0]
df["Y"] = umap_output[:, 1]
del model
return df
else:
del model
return umap_output[:, 0], umap_output[:, 1]
def get_sparse_matrix_from_indices_distances_umap(
knn_indices, knn_dists, n_obs, n_neighbors
):
"""A helper function for Louvain and Leiden. Adopted from Scanpy.
Parameters
----------
knn_indices: object
knn_dists: object
n_obs: int
n_neighbors: int
"""
import numpy as np
from scipy.sparse import issparse, coo_matrix, csr_matrix
rows = np.zeros((n_obs * n_neighbors), dtype=np.int64)
cols = np.zeros((n_obs * n_neighbors), dtype=np.int64)
vals = np.zeros((n_obs * n_neighbors), dtype=np.float64)
for i in range(knn_indices.shape[0]):
for j in range(n_neighbors):
if knn_indices[i, j] == -1:
continue # We didn't get the full knn for i
if knn_indices[i, j] == i:
val = 0.0
else:
val = knn_dists[i, j]
rows[i * n_neighbors + j] = i
cols[i * n_neighbors + j] = knn_indices[i, j]
vals[i * n_neighbors + j] = val
result = coo_matrix((vals, (rows, cols)), shape=(n_obs, n_obs))
result.eliminate_zeros()
return result.tocsr()
def compute_connectivities_umap(
knn_indices,
knn_dists,
n_obs,
n_neighbors,
set_op_mix_ratio=1.0,
local_connectivity=1.0,
):
"""A helper function for Louvain and Leiden. Adopted from Scanpy.
Parameters
----------
knn_indices: object
knn_dists: object
n_obs: int
n_neighbors: int
set_op_mix_ratio: float
local_connectivity: float
"""
from scipy.sparse import issparse, coo_matrix, csr_matrix
from umap.umap_ import fuzzy_simplicial_set
X = coo_matrix(([], ([], [])), shape=(n_obs, 1))
connectivities = fuzzy_simplicial_set(
X,
n_neighbors,
None,
None,
knn_indices=knn_indices,
knn_dists=knn_dists,
set_op_mix_ratio=set_op_mix_ratio,
local_connectivity=local_connectivity,
)
if isinstance(connectivities, tuple):
# In umap-learn 0.4, this returns (result, sigmas, rhos)
connectivities = connectivities[0]
distances = get_sparse_matrix_from_indices_distances_umap(
knn_indices, knn_dists, n_obs, n_neighbors
)
return distances, connectivities.tocsr()
def get_igraph_from_adjacency(adjacency, directed=None):
"""A helper function for Louvain and Leiden. Adopted from Scanpy.
Parameters
----------
adjacency: object
Generated by compute_connectivities_umap
Returns
-------
iGraph object
"""
import numpy as np
import igraph as ig
sources, targets = adjacency.nonzero()
weights = adjacency[sources, targets]
if isinstance(weights, np.matrix):
weights = weights.A1
g = ig.Graph(directed=directed)
g.add_vertices(adjacency.shape[0]) # this adds adjacency.shape[0] vertices
g.add_edges(list(zip(sources, targets)))
try:
g.es['weight'] = weights
except KeyError:
pass
return g
def get_igraph(onehot_input, random_state=42, metric="euclidean", n_neighbors=20, metric_kwds={}, n_jobs=6, angular=False, verbose=False):
"""Prepare iGraph object for Louvain and Leiden
Parameters
----------
onehot_input: np.array
The one-hot encoded sequences.
random_state: int
Random seed.
metric: str
The distance metric, same as used for UMAP.
n_neighbors: int
Same as UMAP.
metric_kwds: dict
angular: boolean
verbose: boolean
Returns
-------
iGraph object
"""
from umap.umap_ import nearest_neighbors
n_obs = onehot_input.shape[0]
knn_indices, knn_dists, forest = nearest_neighbors(
onehot_input,
n_neighbors,
random_state=random_state,
metric=metric,
metric_kwds=metric_kwds,
angular=angular,
verbose=verbose,
n_jobs=n_jobs,
)
distances, connectivities = compute_connectivities_umap(knn_indices, knn_dists, n_obs, n_neighbors, set_op_mix_ratio=1.0, local_connectivity=1.0)
g = get_igraph_from_adjacency(connectivities)
return g
if __name__ == "__main__":
pass
```
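A minimal end-to-end sketch (assumptions: the module is importable as `iMVP_utils.embedding`, and `umap-learn`, `pandas`, and `scikit-learn` are installed; the sequences are toy data restricted to the default `ATCGN` alphabet).

```python
import numpy as np
import pandas as pd

from iMVP_utils import embedding  # assumed import path for the functions above

df = pd.DataFrame({"seq": [
    "ATCGATCGATCGATCGATCGA",
    "TTTTAAAACCCCGGGGATCGA",
    "GGGGCCCCAAAATTTTGCTAG",
    "ATCGATCGATCGATCGATCGT",
]})

# One-hot encode, then embed into 2D with UMAP; X/Y columns are appended to the DataFrame.
onehot = np.array(embedding.onehot_encoder_df(df, column="seq"))
df_xy = embedding.run_UMAP(onehot, df=df, n_neighbors=2, n_jobs=1, verbose=False)
print(df_xy[["X", "Y"]])
```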
#### File: iMVP_utils/iMVP_utils/interactive.py
```python
import os
import io
from pydoc import classname
from Bio import SeqIO
import time
import iMVP_utils
from iMVP_utils import interactive_functions
# import interactive_functions
import base64
import PIL.Image as Image
import pandas as pd
import numpy as np
import dash
from dash import dcc
from dash import html
from dash.dependencies import Input, Output,State
from dash import callback_context
from flask import Flask
import plotly.express as px
import plotly.graph_objects as go
def launch_backend(output_path="./output/"):
"""The function to launch the backend for interactive
Parameters
---------
output_path: str
The output directory of the files.
Returns
---------
A Dash App object.
"""
iMVP_finished = False
input_validated = False
first_time = True
assets_path = os.path.dirname(iMVP_utils.__file__) + "/assets/"
server= Flask(__name__)
app = dash.Dash(name="app1", server=server, assets_folder=assets_path)
"""Run this app with 'python app.py -p port_number and visit http://127.0.0.1:prot_number/ in your web browser.
(Press CTRL+C to quit)
"""
if os.path.exists (output_path) == False:
os.mkdir(output_path)
def run_iMVP(content, input_parameters):
"""Clustering upload data in fasta format with UMAP and HDBSCAN.
Parameters
---------
content: string
A comma-separated string including the type and content of the uploaded file.
input_parameters: dict
A list of reserved parameters for HDBSCAN.
---------
Returns
---------
A Div component: children
An html div of 'processing' information.
HTML style: dict
A style of 'submit-data' button.
HDBSCAN_dict: dict
The results of HDBSCAN.
"""
nonlocal output_path
time_start = time.time()
_, content_string = content.split(',')
decoded = base64.b64decode(content_string)
style_submit_button = {'width': '40%'}
df = pd.read_csv(
io.StringIO(decoded.decode('utf-8').replace("\r\n", "\n").replace("\r", "\n").replace(">", "")),sep = "\n", header=None)
fa_data = pd.concat([df[::2].reset_index(drop=True),df[1::2].reset_index(drop=True)],axis=1)
fa_data.columns = ['sites','seq']
# run HDBSCAN
df_HDBSCAN = interactive_functions.run_cluster(fa_data, output_path, input_parameters)
png = interactive_functions.draw_logo("{path}/init.fa".format(path=output_path), input_parameters)
image = Image.open(io.BytesIO(png))
img_file = '{path}/weblogo.png'.format(path=output_path)
image.save(img_file)
HDBSCAN_dict = df_HDBSCAN.to_dict()
time_end = time.time()
# used_time = round((time_end - time_start)/60,2)
used_time = time_end - time_start
if used_time >= 3600:
used_time = time.strftime("%H hr %M min %S sec", time.gmtime(used_time))
elif used_time >= 60:
used_time = time.strftime("%M min %S sec", time.gmtime(used_time))
else:
used_time = time.strftime("%S sec", time.gmtime(used_time))
# except Exception as e :
# return html.Div([
# 'There was an error processing this file.'
# ]), {'display':'none'}
df_groupby = df_HDBSCAN.groupby("Cluster")[["Cluster"]].count()
df_groupby.columns = ["Count"]
df_groupby["Cluster ID"] = df_groupby.index
df_groupby = df_groupby[["Cluster ID", "Count"]]
print(df_groupby)
# html.Div([
# dash.dash_table.DataTable(df_groupby.to_dict('records'), [{"name": i, "id": i} for i in df_groupby.columns])
# ], style={"width": "400px", "margin-left":"50px"}
# ),
return html.Div([html.H3("{} inputs were analyzed. Finished in {}!".format(df_HDBSCAN.shape[0], used_time)), html.H3('Done!'),]), \
style_submit_button , HDBSCAN_dict
def check_fasta_input(content):
basespace = {i:1 for i in "ATCGUNRDEQHILKMFPSWYV"}
_, content_string = content.split(',')
decoded = base64.b64decode(content_string)
N = 0
is_fasta = True
try:
for line in decoded.decode('utf-8').replace("\r\n", "\n").replace("\r", "\n").split("\n"):
try:
# print(line)
if not line:
continue
elif N % 2 == 0:
if line.startswith(">") == False:
is_fasta = False
break
elif N % 2 == 1:
for b in line:
if b.upper() not in basespace:
is_fasta = False
break
N += 1
except Exception as e:
is_fasta = False
break
except Exception as e:
is_fasta = False
return is_fasta
@app.callback(
Output('upload_data','children'),
Input('upload_data','filename'),
State('upload_data', 'contents')
)
def upload_info(filenames, contents):
nonlocal input_validated
input_validated = False
if filenames is None:
return html.Div([
'Drag and Drop or ',html.A('Click to upload your FASTA file here')
], style={"line-height": "300px", "width": "600px"}) # style={"padding":"50px"}
else:
for file, c in zip(filenames, contents):
fasta_status = check_fasta_input(c)
if fasta_status == True:
input_validated = True
return html.Div([html.B('{filename}'.format(filename = file),style={'color':'#ff4777'}),' has been uploaded'], style={"line-height": "300px", "width": "600px"})
else:
return html.Div([html.B('Warning: {filename} is not a valid FASTA file!'.format(filename = file),style={'color':'#ff4777'}),], style={"line-height": "300px", "width": "600px"})
@app.callback(
Output('hdbscan-parameter-list','data'),
Output('processing_1', 'children'),
Output('processing_3','children'),
Output('upload_data', 'style'),
Output('para-list-div', 'style'),
Output('upload-div', 'style'),
Input('submit-button-state', 'n_clicks'),
[
State('upload_data', 'contents'),
State('exp_len', 'value'),
State('n_neighbors', 'value'),
State('min_dist', 'value'),
State('random_state', 'value'),
State('umap_jobs', 'value'),
State('umap_init', 'value'),
State('densmap', 'value'),
State('min_cluster_size', 'value'),
State('min_samples', 'value'),
State('cluster_selection_method', 'value'),
State('cluster_selection_epsilon', 'value'),
State('hdbscan_jobs', 'value'),
State('softclustering', 'value'),
State('weblogo_unit', 'value'),
State('weblogo_first_index', 'value'),
State('weblogo_base_type', 'value'),
],
prevent_initial_call=True)
def infomation_hide_div(n_clicks,list_of_contents, exp_len, n_neighbors, min_dist, random_state, umap_jobs, umap_init, densmap ,min_cluster_size, min_samples, cluster_selection_method, cluster_selection_epsilon, hdbscan_jobs, softclustering, weblogo_unit, weblogo_first_index, weblogo_base_type):
"""
Parameters
---------
n_clicks: int
The number of clicks to trigger the clustering.
list_of_contents: list
Contents of upload data.
exp_len: int
The expected lengths for the sequences.
n_neighbors: int
n_neighbors for UMAP.
min_dist: int
min_dist for UMAP.
random_state: int
random_state for UMAP.
umap_jobs: int
n_jobs for UMAP.
umap_init: str
init for UMAP.
densmap: boolean
densmap for UMAP.
min_cluster_size: int
The min_cluster_size parameter of HDBSCAN from the user.
min_samples: int
The min_samples parameter of HDBSCAN from the user.
cluster_selection_method: string
The cluster_selection_method parameter of HDBSCAN from the user.
cluster_selection_epsilon: float
The cluster_selection_epsilon parameter of HDBSCAN from the user.
hdbscan_jobs: int
The number of parallel jobs for HDBSCAN from the user.
softclustering: bool
Whether HDBSCAN soft clustering is used.
weblogo_unit: str
The unit parameter for WebLogo.
weblogo_first_index: str
The first-index parameter for WebLogo.
weblogo_base_type: str
The base type (As input/DNA/RNA) for WebLogo and FASTA output.
---------
Return
---------
hdbscan-parameter-list: dict
The parameters of HDBSCAN from user.
processing_1: div object
The information about data processing.
processing_3: div object
The information about data processing.
upload_data: dict
A Div style of 'upload_data' to hide the div object.
para-list-div: dict
A Div style of 'para-list-div' to hide the div.
upload-div: dict
A Div style of 'upload-div' to hide the div.
"""
nonlocal input_validated
dict_parameters = {
"exp_len": exp_len,
"n_neighbors": n_neighbors,
"min_dist": min_dist,
"random_state": random_state,
"umap_jobs": umap_jobs,
"umap_init": umap_init,
"densmap": densmap,
"min_cluster_size": min_cluster_size,
"min_samples": min_samples,
"cluster_selection_method": cluster_selection_method,
"cluster_selection_epsilon": cluster_selection_epsilon,
"hdbscan_jobs": hdbscan_jobs,
"softclustering": softclustering,
"weblogo_unit": weblogo_unit,
"weblogo_first_index": weblogo_first_index,
"weblogo_base_type": weblogo_base_type,
}
if input_validated == True:
hide_div_style_1 = {'display': 'none'}
hide_div_style_2 = {'display': 'none'}
hide_div_style_3 = {'display': 'none'}
if list_of_contents is not None:
for c in list_of_contents:
return dict_parameters, [html.H3(time.asctime( time.localtime(time.time())))],html.H3('Processing ......',id='process'),hide_div_style_1,hide_div_style_2,hide_div_style_3
else:
return dash.no_update
@app.callback(
Output('processing_2', 'children'),
Output('submit-data', 'style'),
Output('cluster-data', 'data'),
Output('processing_3','style'),
Output('horizontal_line','style'),
Input('hdbscan-parameter-list','data'),
[State('upload_data', 'contents')],
prevent_initial_call=True)
def upload_file(parameter_list, list_of_contents):
"""
Parameters
---------
parameter_list: list
The parameters of HDBSCAN from user.
list_of_contents: list
A content of upload file.
---------
Returns
---------
processing_2: div object
The information about data processing.
style: dict
A style to hide 'submit-data' div object.
parameters_dict: dict
The results of clustering with HDBSCAN.
hide_info_style: dict
A style to hide 'processing_2' div object.
display_hr: dict
A style to display horizontal line.
"""
nonlocal iMVP_finished
hide_info_style = {'display':'none'}
display_hr = {'display':'inline-block'}
if list_of_contents is not None:
for c in list_of_contents:
iMVP_out = run_iMVP(c, parameter_list)
if len(iMVP_out) == 2:
processing_2, style = iMVP_out
parameters_dict = None
else:
processing_2, style, parameters_dict = iMVP_out
iMVP_finished = True
return processing_2, style, parameters_dict, hide_info_style, display_hr
@app.callback(
Output('cluster_figure', 'figure'),
Output('my-checklist','options'),
Output('type', 'data'),
Output('hidden_data','style'),
Output('submit-button','style'),
[Input('submit-data','n_clicks'),
State('cluster-data', 'data'),
Input("scatter-slider", "value"),
],prevent_initial_call=True
)
def cluster_figure(n_clicks, cluster_data, markersize):
"""
Parameters
---------
n_clicks: int
The number of clicks to trigger clustering.
cluster_data: dict
The results of clustering with HDBSCAN.
markersize:
The size of markers
---------
Returns
---------
cluster_figure: graph object in plotly
A graph to display the clusters of HBDSCAN.
my-checklist: list
The clusters that the user selected.
type: list
Types of clusters.
hidden_data: dict
A style to hide the div object.
submit-button: button object
A style to show the div of button object.
"""
dff = pd.DataFrame(cluster_data)
df = dff.sort_values(by="Cluster", ascending=True)
type = range(1,max(df['Cluster']) + 1)
df['Cluster'] = df['Cluster'].astype(str)
available_type = list(map(str, type))
df['customdata'] = df.index.values.tolist()
options = [{'label': '{:>3}'.format(i), 'value':i } for i in available_type]
fig = px.scatter(df, x="X", y="Y", color="Cluster", custom_data=["customdata"])
fig.update_traces(marker_size=markersize) # , selector=dict(mode='markers')
fig.update_layout(dragmode='lasso', hovermode=False, width=600, height=600)
return fig, options,available_type,{'display':'inline-block', 'min-width': "1200px"},{'display':'none'} #
@app.callback(
Output("my-checklist", "value"),
Output("all-or-none", "value"),
Output("select-data","data"),
[Input("type", 'data'),
Input("all-or-none", "value"),
Input("my-checklist", 'value')],
)
def select_all_none(option,all_selected, my_selected):
"""
Parameters
---------
option: list
Types of all clusters.
all_selected: list
Set when the user selects all clusters via "Select All".
my_selected: list
Clusters that the user selected.
---------
Returns
---------
my-checklist: list
Clusters that the user selected, shown in the checklist component.
all-or-none: list
All cluster types, shown in the "Select All" checklist component.
select-data: list
Clusters that the user selected, stored in a dcc.Store component.
"""
ctx = callback_context
input_id = ctx.triggered[0]["prop_id"].split(".")[0]
if input_id == "my-checklist":
all_selected = ["Select All"] if set(my_selected) == set(option) else []
else:
my_selected = option if all_selected else []
if all_selected != []:
select_data = option
else:
select_data = my_selected
return my_selected, all_selected, select_data
@app.callback(
Output('weblogo','src'),
Input('cluster_figure', 'selectedData'),
Input('cluster-data', 'data'),
Input('select-data','data'),
Input('hdbscan-parameter-list','data'),
prevent_initial_call=True
)
def draw_weblogo(selected_data,cluster_data, clusters_select, parameter_list):
"""
Parameters
---------
selected_data: dict
Data selected with lasso or checklist
cluster_data: dict
The results of clustering with HDBSCAN.
clusters_select: list
Clusters selected by the user.
---------
Return
---------
weblogo: png
Weblogo picture in png format.
---------
"""
nonlocal output_path, iMVP_finished, first_time
if iMVP_finished == True and first_time == True:
# and os.path.isfile("{path}/selected_data.fa".format(path=output_path)) == False
img_file = '{path}/weblogo.png'.format(path=output_path)
encode_img = base64.b64encode(open(img_file,'rb').read())
iMVP_finished = True
first_time = False
return 'data:image/png;base64,{}'.format(encode_img.decode())
elif clusters_select == [] and selected_data is None:
return dash.no_update
else:
df = pd.DataFrame(cluster_data)
df['Cluster'] = df['Cluster'].astype(str)
fa_index = []
if selected_data is None:
custom = []
selected_data = {}
for i in df[df['Cluster'].isin(clusters_select) ].index.values.tolist():
custom.append({'customdata':[i]})
selected_data['points'] = custom
for points in selected_data['points']:
fa_index.extend(points.get('customdata'))
fasta_name = "{path}/selected_data.fa".format(path=output_path)
base_type = parameter_list["weblogo_base_type"]
with open(fasta_name, "w") as fasta_out:
for idx, row in df.loc[fa_index].iterrows():
if base_type == "DNA":
seq_out = str(row["seq"]).upper().replace("U", "T")
elif base_type == "RNA":
seq_out = str(row["seq"]).upper().replace("T", "U")
else:
seq_out = str(row["seq"])
fasta_out.write(">{}\n{}\n".format(idx, seq_out))
png = interactive_functions.draw_logo(fasta_name, parameter_list)
image = Image.open(io.BytesIO(png))
img_file = '{path}/weblogo.png'.format(path=output_path)
image.save(img_file)
encode_img = base64.b64encode(open(img_file,'rb').read())
return 'data:image/png;base64,{}'.format(encode_img.decode())
@app.callback(
Output("download-text", "data"),
Input("btn-download-txt", "n_clicks"),
prevent_initial_call=True)
def download_fasta(n_clicks):
"""
Parameters
---------
n_clicks: int
The number of clicks to trigger download file in fasta format.
---------
Return
---------
download-text: string
A fasta format file.
---------
"""
nonlocal output_path
with open('{path}/selected_data.fa'.format(path=output_path)) as f:
contents = f.read()
return dict(content=contents, filename="seleted_data.fa")
@app.callback(
Output("download-csv", "data"),
Input("btn-download-csv", "n_clicks"),
prevent_initial_call=True)
def download_csv(n_clicks):
"""
Parameters
---------
n_clicks: int
The number of clicks to trigger download CSV file.
---------
Return
---------
download-csv: string
A CSV file.
---------
"""
nonlocal output_path
with open('{path}/all_clusters.csv'.format(path=output_path)) as f:
contents = f.read()
return dict(content=contents, filename="all_clusters.csv")
@app.callback(
Output("download-png", "data"),
Input("btn-download-png", "n_clicks"),
prevent_initial_call=True
)
def download_weblogo(n_clicks):
nonlocal output_path
"""
Parameters
---------
n_clicks: int
The number of clicks to trigger download weblogo picture.
---------
Return
---------
download-png: png
A file in png format of weblogo.
---------
"""
return dash.dcc.send_file("{path}/weblogo.png".format(path=output_path))
app.layout = html.Div([
html.Div([
html.Div([
html.H1(
"iMVP Motif Viewer",
style = {'textAlign':'center'}), # , 'margin-left':'20%'
html.H3(
"Version: 0.2.3; Contributed by <NAME>, <NAME> @ Zhang Lab (SYSU).",
style = {'textAlign':'center'}), # , 'margin-left': '10%'
html.H3("Documents: https://readthedocs.org/iMVP/", style = {'textAlign':'center'}),
html.Div([
# html.Br(),
# html.Br(),
html.Div("Tips #1: To go back to the parameters page, please refresh the page."),
html.Div("Tips #2: Use Ctrl+C in command lines to terminate the backend."),
], style={"horizonal-align":"center", 'text-align':'center'}),
], style={"width":"1000px"}),
html.Div([
html.Div([
html.Div([
html.Div([
html.Div([
html.H4('I. Upload data'),
dcc.Upload(
id = 'upload_data',
multiple=True,
style={"line-height": "300px"} # , "min-width": "100%
),
], className = "upload",id = 'upload-div', style={'display':'inline-block', "width": "600px"}, # "line-height: 20%;"
),
html.Div([
html.Br(),
html.Br(),
html.Div("If you have confirmed your input and parameters, click the button to run."),
html.Br(),
html.Button(id='submit-button-state', n_clicks=0, children='Submit'),
],
className="input", style={"horizonal-align":"center", 'text-align':'center'},
),
], className="two columns", style={'display':'inline-block', "width": "600px", "vertical-align":"top", "margin-right":"5%", "margin-left":"2.5%"}
),
# html.Div([
# ], className="three columns", style={'display':'inline-block'}
# ),
html.Div([
html.Div([
html.H4('II. Quality control'),
html.Div([
"1. Expected lengths of the sequences =",
dcc.Input(id='exp_len', type='number', value='21', min='0'),
]),
], style={'display':'inline-block', "vertical-align":"top"}),
html.H4('III. UMAP parameters'),
html.Div([
"1. n_neighbors =",
dcc.Input(id='n_neighbors', type='number', value='20', min='0'),
]),
html.Div([
"2. min_dist =",
dcc.Input(id='min_dist', type='number', value='0.01', step='any', min='0', max='1'),
]),
html.Div([
"3. random_state =",
dcc.Input(id='random_state', type='number', value='42', min='-2147483648', max='2147483647'),
]),
html.Div([
"4. jobs =",
dcc.Input(id='umap_jobs', type='number', value='6'),
]),
html.Div([
html.Div(["5. init ="], className="two columns", style={"display": "inline-block"}), # style={"display": "inline-block"}
html.Div([dcc.RadioItems(['random', 'spectral'], 'random', id='umap_init')], className="two columns", style={"display": "inline-block"}),
], className="row"# style={"width":"30%"}
),
html.Div([
html.Div(["6. DensMAP ="], className="two columns", style={"display": "inline-block"}), # style={"display": "inline-block"}
html.Div([dcc.RadioItems(['True', 'False'], 'False', id='densmap')], className="two columns", style={"display": "inline-block"}),
], className="row"# style={"width":"30%"}
),
html.H4('IV. HBDSCAN parameters'),
html.Div([
"1. min_cluster_size =",
dcc.Input(id='min_cluster_size', type='number', value='100'),
]),
html.Div([
"2. min_samples =",
dcc.Input(id='min_samples', type='number', value='100'),
]),
html.Div([
"3. cluster_selection_epsilon =",
dcc.Input(id='cluster_selection_epsilon', type='number', step="any", value='0.0'),
]),
html.Div([
html.Div(["4. cluster_selection_method ="], className="two columns", style={"display": "inline-block"}), # style={"display": "inline-block"}
html.Div([dcc.RadioItems(['eom', 'leaf'], 'eom', id='cluster_selection_method')], className="two columns", style={"display": "inline-block"}),
], className="row"# style={"width":"30%"}
),
html.Div([
html.Div(["5. soft clustering ="], className="two columns", style={"display": "inline-block"}), # style={"display": "inline-block"}
html.Div([dcc.RadioItems(['True', 'False'], 'True', id='softclustering')], className="two columns", style={"display": "inline-block"}),
], className="row"# style={"width":"30%"}
),
html.Div([
"6. jobs =",
dcc.Input(id='hdbscan_jobs', type='number', value='6'),
]),
html.H4('V. Weblogo'),
html.Div([
html.Div(["1. Unit ="], className="two columns", style={"display": "inline-block"}), # style={"display": "inline-block"}
html.Div([dcc.RadioItems(['probability', 'bits'], 'probability', id='weblogo_unit')], className="two columns", style={"display": "inline-block"}),
], className="row"# style={"width":"30%"}
),
html.Div([
html.Div(["2. Base type (LOGO and FASTA output) ="], className="two columns", style={"display": "inline-block"}), # style={"display": "inline-block"}
html.Div([dcc.RadioItems(['As input', 'DNA', "RNA"], 'As input', id='weblogo_base_type')], className="two columns", style={"display": "block"}), # inline-
], className="row"# style={"width":"30%"}
),
html.Div(["3. First index =",
dcc.Input(id='weblogo_first_index', type='number', value='-10'),
]),
], className="two columns", style={'display':'inline-block'}
),
], style={'display':'inline-block', "margin-left":"0%", "width":"100%"}, # , "width":"40%"
),
], className="row", style={'display':'inline-block', "width":"100%"}
),
], id="para-list-div", style={"width":"auto"})
], className="section1",id = 'section1', style={"width":"1600px"}),
html.Hr(id = "horizontal_line",style={'display':'none'}),
html.Div([
html.Div([
html.Div([
html.Div(
dcc.Graph(id = 'cluster_figure'),
style={'display': 'inline-block'} # 'width': '40%',
),
html.Div(
html.H4("Marker size:"),
style={"margin-left": "10%"}
),
html.Div(
dcc.Slider(id='scatter-slider', value=10, min=1, max=30,),
style={"margin-left": "10%", "margin-right": "10%"}
),
], className="two columns", style={"min-width":"600px", "display": "inline-block", "max-width": "40%", "margin-right": "5%"} # , "margin-right": "%"
),
html.Div([
html.Div([
html.Div(
html.Img(id = "weblogo", style={'max-width':"600px",'display': 'inline-block'}),
style={'display': 'inline-block'} # , 'verticalAlign':'top'
),
# style={'width':"200px"}), # 'width': '50%','display': 'inline-block', 'position':'relative','bottom':'150px'
html.Div([
html.Div([
html.Div(html.Button("Download FASTA", id = "btn-download-txt"), # , style = {'width': '99%'}
),
dcc.Download(id = "download-text")
],
style={'display':'inline-block', "margin-left": "10%", "margin-right":"5%"},
className='three columns'),
html.Div([
html.Div(html.Button("Download CSV", id="btn-download-csv"), # , style={'width': '99%'}
),
dcc.Download(id = "download-csv"),
],
style={'display':'inline-block', "margin-left":"5%", "margin-right":"5%"},
className='three columns'),
html.Div([
html.Div(html.Button("Download LOGO", id="btn-download-png"), # , style={'width': '99%'}
),
dcc.Download(id = "download-png"),
],
style={'display':'inline-block', "margin-left":"5%", "margin-right": "10%"},
className='three columns'),
], className="row", style={"horizonal-align": "middle", "margin-left": "5%", "margin-top": "5%", "margin-right": "5%", "margin-bottom": "5%"} # 'display': 'inline-block',
),
html.Br(),
html.Br(),
html.Div([
html.Div(
html.H4("Select clusters:", style={'display': 'inline-block'}),
),
html.Div([
dcc.Checklist(
id="all-or-none",
options=[{"label": "Select All", "value": "Select All"}],
value=[],
# labelStyle={"display": "inline-block", "position": "relative", "vertical-align":"middle"},
),
], # style={'display': 'inline-block'}
),
html.Div([
dcc.Checklist(
id="my-checklist",
#options=[{"label": x, "value": x} for x in option],
value=["1"],
# labelStyle={"display": "inline-block", "position": "relative", "vertical-align":"middle"}, # "text-": "relative"
),
], # style={'display': 'inline-block'}
),
], style={'display': 'inline-block'}),
], style={'display': 'inline-block'}
),
], className="two columns", style={"margin-top":"5%", "max-width": "40%", "display": "inline-block",'vertical-align': 'top'}
),
], className="row", style={"display": "inline-block", "min-width":"1000px"}
),
html.Br(),
html.Br(),
html.Br(),
], id = "hidden_data", style={'display':'none', 'min-width': '1000px'},
),
# html.Div(
# dcc.Markdown('''
# *Usage:*
# The software encodes the data using one-hot encoding and the dimensionality reduction using **UMAP**. Then, the matrix is clustered using **HDBSCAN** to get all the clusters from the fasta file.Firstly, upload the fasta data of the same length. Then, input the parameters for the clustering with HDBSCAN. After submitting the data, it will take a few minutes for the background to process the data.Finally, you can select clusters by tick checklist, or use a lasso to circle the parts of interest on the cluster plot. That part of data would display by weblogo of base enrichment.
# ''')
# )
html.Div(id = "processing_1"),
html.Div(id = "processing_2"),
html.Div(id = "processing_3"),
html.Div(
html.Button(
id = 'submit-data',
n_clicks=0,
children='Draw the Figures',
style = {'display':'none'}),
id = 'submit-button'),
dcc.Store(id = "cluster-data"),
dcc.Store(id = "hdbscan-parameter-list"),
dcc.Store(id = "select-data"),
dcc.Store(id = "type")
], style={"margin-left":"0%", "width":"1000px"})
return app
if __name__ == "__main__":
pass
``` |
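A minimal launch sketch (assumptions: the package is installed as `iMVP_utils` and the installed Dash version still provides `run_server`; host and port are arbitrary).

```python
from iMVP_utils.interactive import launch_backend

app = launch_backend(output_path="./output/")
app.run_server(host="127.0.0.1", port=8050, debug=False)
```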
{
"source": "JHFVR/cleanco",
"score": 3
} |
#### File: JHFVR/cleanco/cleanco.py
```python
from collections import OrderedDict
import re
from termdata import terms_by_country as country_dict, terms_by_type as type_dict
# Sorted business types / abbreviation by length of business type
sorted_types = []
for business_type in type_dict:
for item in type_dict[business_type]:
temp_tuple = [business_type, item]
sorted_types.append(temp_tuple)
sorted_types = sorted(sorted_types, key=lambda part: len(part[1]), reverse=True)
# Sorted business countries / type abbreviations by length of business type abbreviations
sorted_countries = []
for country in country_dict:
for item in country_dict[country]:
temp_tuple = [country, item]
sorted_countries.append(temp_tuple)
sorted_countries = sorted(sorted_countries, key=lambda part: len(part[1]), reverse=True)
# All of the suffixes sorted by length
all_sorted = sorted_types + sorted_countries
suffix_sort = []
for item in all_sorted:
suffix_sort.append(item[1])
suffix_sort = sorted(suffix_sort, key=lambda part: len(part), reverse=True)
class cleanco(object):
def __init__(self, business_name):
# always do non-visible cleanup, but store the original just in case
self.business_name = ' '.join(business_name.split())
self._original = business_name
def string_stripper(self, business_name):
# Get rid of extra prefix-, suffix- & in-between spaces
business_name = " ".join(business_name.split())
# Get rid of all trailing non-letter symbols except '.'
match = re.search(r'[^\.\w]+$', business_name, flags=re.UNICODE)
if match is not None:
business_name = business_name[:match.span()[0]]
return business_name
def end_strip(self, a_set):
end_set = []
business_name = self.business_name
business_name = self.string_stripper(business_name)
for key, suffix in a_set:
if ((business_name.lower()).endswith(" " + suffix)):
end_set.append(key)
end_set = list(OrderedDict.fromkeys(end_set))
if end_set != []:
return end_set
else:
return None
def clean_name(self, suffix=True, prefix=False, middle=False, multi=False):
"return cleared version of the business name"
name = self.business_name
# Run it through the string_stripper once more
name = self.string_stripper(name)
loname = name.lower()
# return name without suffixed/prefixed/middle type term(s)
for item in suffix_sort:
if suffix:
if loname.endswith(" " + item):
start = loname.find(item)
end = len(item)
name = name[0:-end-1]
name = self.string_stripper(name)
loname = name.lower()
if multi==False:
break
if prefix:
if loname.startswith(item+' '):
name = name[len(item)+1:]
loname = name.lower()
if multi==False:
break
if middle:
term = ' ' + item + ' '
if term in loname:
start = loname.find(term)
end = start + len(term)
name = name[:start] + " " + name[end:]
loname = name.lower()
if multi==False:
break
return self.string_stripper(name)
def type(self):
self.type = self.end_strip(sorted_types)
return self.type
def country(self):
self.country = self.end_strip(sorted_countries)
return self.country
``` |
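A minimal usage sketch; the exact labels returned by `type()` and `country()` depend on the `termdata` tables shipped with the package.

```python
name = "Some Big Pharma, LLC"
cleaned = cleanco(name)

print(cleaned.clean_name())  # -> "Some Big Pharma"
print(cleaned.type())        # e.g. the business types matching the "LLC" suffix
print(cleaned.country())     # e.g. the countries where the "LLC" suffix is used
```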
{
"source": "jhfwb/utils_xhr",
"score": 3
} |
#### File: utils_xhr/utils_xhr/IndexTool.py
```python
import os
import re
import time
from _xhr_tool._utils.RR_Comments import ChinaWordTool
# Index database
class indexObject:
def __init__(self):
self.letterTable=['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z','OTHERS']
self.A=0
self.lines=[]
self.letterSite={}
def getDatasByLetter(self,letter=''): # get the entries stored under the given letter; returns a list
datas=self.getDatasAndStatusByLetter(letter=letter)
if datas==None:
return None
return list(map(lambda x:x[0],datas))
def getDatasAndStatusByLetter(self,letter=''):
index1 = self.letterSite.get(letter)
index2 = self.letterSite.get(self._getNextLetter(letter))
if index1 + 1 == index2:
return None
newLines=[]
for line in self.lines[index1+1:index2]:
entry=line.split('=')
key = entry[0].replace('\n','')
if len(entry)==2:
value=entry[1].replace('\n','')
elif len(entry)==1:
value=None
else:
raise ValueError("存在多个等号。有且只允许每行只有一个等号"+line)
newLines.append((key,value))
return newLines
def _getNextLetter(self,letter=""):
if letter=="OTHERS":
return None
for i in range(len(self.letterTable)):
if self.letterTable[i]==letter:
return self.letterTable[i+1]
class IndexDatabase:
def __init__(self,path=""):
self.path=path
if not os.path.exists(path):
fp=open(mode='w',encoding='utf-8',file=path)
fp.writelines(['IndexDatabase:索引数据库\n','IndexDatabase:'+time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())+'\n',"IndexDatabase:A=3,B=4,C=5,D=6,E=7,F=8,G=9,H=10,I=11,J=12,K=13,L=14,M=15,N=16,O=17,P=18,Q=19,R=20,S=21,T=22,U=23,V=24,W=25,X=26,Y=27,Z=28,OTHERS=29\n",
'[A]\n','[B]\n','[C]\n','[D]\n','[E]\n','[F]\n','[G]\n','[H]\n','[I]\n',
'[J]\n','[K]\n','[L]\n','[M]\n','[N]\n','[O]\n','[P]\n','[Q]\n','[R]\n',
'[S]\n','[T]\n','[U]\n','[V]\n','[W]\n''[X]\n','[Y]\n','[Z]\n','[OTHERS]\n'])
fp.close()
needReload = False
try:
self.indexObject = self._loadIndexObject()
except:
needReload=True
if needReload==True:
self._reBuildIndexFile()
else:
for key in self.indexObject.letterSite.keys():
if '['+key+']\n'!=self.indexObject.lines[self.indexObject.letterSite[key]]:
needReload=True
break
if needReload:
self._reBuildIndexFile()
def _reBuildIndexFile(self):
datas = []
# collect the existing data lines
fp2 = open(mode='r', encoding='utf-8', file=self.path)
for line in fp2.readlines():
if not line.startswith('IndexDatabase:') and not line.startswith('['):
datas.append(line[:len(line) - 1])
fp2.close()
try:
os.rename(self.path, self.path.replace('.txt', '_need_delete.txt'))
except FileExistsError:
os.remove(self.path.replace('.txt', '_need_delete.txt'))
os.rename(self.path, self.path.replace('.txt', '_need_delete.txt'))
fp = open(mode='w', encoding='utf-8', file=self.path)
fp.writelines(
['IndexDatabase:索引数据库\n', 'IndexDatabase:' + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + '\n',
"IndexDatabase:A=3,B=4,C=5,D=6,E=7,F=8,G=9,H=10,I=11,J=12,K=13,L=14,M=15,N=16,O=17,P=18,Q=19,R=20,S=21,T=22,U=23,V=24,W=25,X=26,Y=27,Z=28,OTHERS=29\n",
'[A]\n', '[B]\n', '[C]\n', '[D]\n', '[E]\n', '[F]\n', '[G]\n', '[H]\n', '[I]\n',
'[J]\n', '[K]\n', '[L]\n', '[M]\n', '[N]\n', '[O]\n', '[P]\n', '[Q]\n', '[R]\n',
'[S]\n', '[T]\n', '[U]\n', '[V]\n', '[W]\n''[X]\n', '[Y]\n', '[Z]\n', '[OTHERS]\n'])
fp.close()
self.indexObject = self._loadIndexObject()
for data in datas:
self.addKeys(data)
def _saveIndexObjectToFile(self):
self.indexObject.letterSite
self.indexObject.lines
def _updataLetterLines(self):
line=""
for key in self.indexObject.letterSite.keys():
self.indexObject.letterSite[key]
line+=key+"="+str(self.indexObject.letterSite[key])+","
line=line[0:len(line)-1]
self.indexObject.lines[2]="IndexDatabase:"+line+'\n'
def _loadIndexObject(self):
"""
Load the letter-to-line-offset table from the third line of the file.
"""
fp = open(mode='r', encoding='utf-8', file=self.path)
indexObj = indexObject()
indexObj.lines=fp.readlines()
fp.close()
indexObj.letterSite={}
for entry in indexObj.lines[2][0:len(indexObj.lines[2])-1].replace('IndexDatabase:','').split(','):
keys=entry.split('=')
indexObj.letterSite.setdefault(keys[0],int(keys[1]))
return indexObj
# def _saveIndexObject(self):
def isContainKeyName(self,keyName):
"""
Check whether the index contains the given key.
"""
datas=self.indexObject.getDatasByLetter(self._getFirstLetter(keyName))
if datas==None:
return False
return keyName in datas
def setStatuses(self,keyNames,status):
for keyName in keyNames:
self._setSingleStatus(keyName,status)
def _setSingleStatus(self,keyName,status):
if not self.isContainKeyName(keyName):
raise ValueError("不存在该keyName:"+keyName)
self.deleteKeys(keyName)
if not status=="":
self.addKeys(keyName + "=" + str(status))
else:
self.addKeys(keyName)
def _deleteSingleKey(self,keyName):
"""
Delete a single index entry.
"""
if not self.isContainKeyName(keyName):
raise ValueError("无法删除数据:keyName。因为,不存在该数据")
letter=self._getFirstLetter(keyName)
startIndex=self._getFirstLetterSite(letter)
deleteInde=-1
for i in range(startIndex,len(self.indexObject.lines)):
if self.indexObject.lines[i].split('=')[0].replace('\n','')==keyName:
deleteInde=i
break
deleteData=self.indexObject.lines[i][:len(self.indexObject.lines[i])-1]
del self.indexObject.lines[deleteInde]
# shift the recorded offsets for subsequent letters
removeSign = 0
for key in self.indexObject.letterSite.keys():
if removeSign == 1:
self.indexObject.letterSite[key] = self.indexObject.letterSite[key] - 1 # could be generalized to shift by more than one later
if key == letter:
removeSign = 1
return deleteData
def deleteKeys(self,keyNames):
if type(keyNames) == type(""):
keyNames = [keyNames]
if type(keyNames) != type([]):
raise ValueError("KeyName必须是list类型," + str(keyNames) + "不是list类型")
deleteDatas=[]
for keyName in keyNames:
deleteData=self._deleteSingleKey(keyName)
if deleteData!=None:
deleteDatas.append(deleteData)
self._updataLetterLines()
fp = open(mode='w', encoding='utf-8', file=self.path)
self.indexObject.lines[1] = 'IndexDatabase:' + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + '\n'
fp.writelines(self.indexObject.lines)
fp.close()
return deleteDatas
def _getFirstLetterSite(self,letter):
"""
Get the line index of the given letter's section header.
"""
return self.indexObject.letterSite.get(letter)
def getKeyNamesByLetters(self,letter="A"):
"""
e.g:getKeyNamesByLetters(letter="A")
Get all entries whose first letter (pinyin initial) is the given letter.
@param letter |str allowed values: A,B,C.....X,Y,Z,OTHERS (must be uppercase)
"""
return self.indexObject.getDatasByLetter(letter)
def getKeyNamesAndStatusByLetters(self, letter="A"):
"""
e.g:getKeyNamesByLetters(letter="A")
获得所有以A为首拼音的数据集合
@param letter |str letter的可选参数:A,B,C.....X,Y,Z,OHTERS (务必注意必须是大写)
"""
return self.indexObject.getDatasAndStatusByLetter(letter)
def getAllKeyNamesAndStatus(self):
datas = []
for letter in self.indexObject.letterTable:
letterDatas = self.getKeyNamesAndStatusByLetters(letter)
if letterDatas != None:
datas = datas + letterDatas
return datas
def getAllKeyNames(self):
"""
Get all index keys.
"""
datas = []
for letter in self.indexObject.letterTable:
letterDatas = self.getKeyNamesByLetters(letter)
if letterDatas != None:
datas = datas + letterDatas
return datas
def getStatusByKeyNames(self,keyName):
"""
Get the status of the given key.
"""
if self.isContainKeyName(keyName):
items=self.getKeyNamesAndStatusByLetters(self._getFirstLetter(keyName))
for item in items:
if item[0]==keyName:
return item[1]
else:
raise ValueError('keyName: '+keyName+' does not exist')
def addKeys(self,keyNames,status=""):
"""
Add index entries. On success the list of added keyNames is returned (a single item if only one key was given).
If nothing was added, None is returned.
"""
if keyNames=="":
return
if type(keyNames) == type(""):
keyNames = [keyNames]
if type(keyNames) != type([]):
raise ValueError("KeyName必须是list类型," + str(keyNames) + "不是list类型")
bos=[]
for keyName in keyNames:
bo=self._addSingleKey(keyName,status)
if bo:
bos.append(keyName)
if len(bos)>0:
self._updataLetterLines()
fp = open(mode='w', encoding='utf-8', file=self.path)
self.indexObject.lines[1]='IndexDatabase:'+time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())+'\n'
fp.writelines(self.indexObject.lines)
fp.close()
return bos
else:
return None
def _getFirstLetter(self,keyName):
letter = ChinaWordTool.getStrFirstAplha(keyName) # get the first letter (pinyin initial)
try:
re.match(r'[A-Z]', letter).group()
except:
letter = 'OTHERS'
return letter
def _addSingleKey(self,keyName,status=""):
if self.isContainKeyName(keyName):
return False
if keyName==None:
raise ValueError("keyName must not be None")
if type(keyName)!=type(""):
raise ValueError("keyName must be a string; "+str(keyName)+" is not a string")
if keyName=="":
raise ValueError("keyName must not be empty")
if '\n' in keyName:
raise ValueError("keyName must not contain \\n")
if str(status)!="":
keyName=keyName+'='+str(status)
letter=self._getFirstLetter(keyName)
letterDataArr=self.indexObject.getDatasByLetter(letter) # entries currently stored under this letter
if letterDataArr==None:
letterDataArr=[]
sign=1
for letterData in letterDataArr:
if keyName+'\n'==letterData:
sign=0
if sign==1:
self.indexObject.lines.insert(self._getFirstLetterSite(letter)+1,keyName+'\n')
# shift the recorded offsets for subsequent letters
addSign=0
for key in self.indexObject.letterSite.keys():
if addSign==1:
self.indexObject.letterSite[key] = self.indexObject.letterSite[key]+1 # could be generalized to shift by more than one later
if key==letter:
addSign=1
return True
if __name__ == '__main__':
# arr=[1,2,3]
# arr.insert(2,233)
# print(arr)
a=IndexDatabase(
'/clientScrapySystem/DatabaseSystem/database/indexData.txt')
# a.addKeys(['我1','我2'],-1)
# a.addKeys('你好1')
# a.addKeys('你好2','132132112321')
c=a.addKeys(['你好23211','21321'])
print(c)
print(a.getAllKeyNamesAndStatus())
```
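A minimal usage sketch (assumptions: the `_xhr_tool` dependency that provides `ChinaWordTool` is importable, and the index file is a throwaway local path; non-A-Z initials simply fall back to the OTHERS section).

```python
db = IndexDatabase(path="index_demo.txt")  # the file is created with letter sections on first use

db.addKeys(["alpha", "beta"], status="pending")
print(db.getAllKeyNamesAndStatus())      # e.g. [("alpha", "pending"), ("beta", "pending")]
db.setStatuses(["alpha"], "done")
print(db.getStatusByKeyNames("alpha"))   # -> "done"
db.deleteKeys("beta")
```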
#### File: utils_xhr/io/excelTool.py
```python
import os
import re
import openpyxl
from openpyxl.styles import Font
# from _xhr_tool._utils.CsvTool import CsvTool
# from _xhr_tool._utils.excelTool.csvFileOptionTool import csvFileTool
from utils_xhr.err import ExcelToolException
def _checkExcel(path):
"""检查路径是否是checkExcel"""
from utils_xhr.io import _checkFile
_checkFile(path=path,suffix='.xlsx')
def _createNewWorkbook():
"""创建workbook对象"""
return openpyxl.Workbook()
def _createNewSheet(workbook,sheetName):
"""根据workbook创建sheet对象"""
workbook.get_sheet_names
return workbook.create_sheet(sheetName)
def _checkSheetNumber(path,workbook):
"""
Check that the workbook contains exactly one sheet; raise an error otherwise.
"""
if len(workbook.sheetnames) != 1:
raise ExcelToolException('The Excel file must contain exactly one sheet. The file {} has more than one sheet; please remove the extra sheets.'.format(path))
def _getSheetNameByIndex(workbook,index=0):
return workbook.sheetnames[index]
def _changeCellToStr(cell):
"""????"""
if cell.hyperlink:
cell.value="<href='"+cell.hyperlink.target+"'>"+cell.value
return cell.value
def _changeStrToCell(cell):
"""????"""
attrss=re.findall(r'<(.*)>',cell.value)
if len(attrss)>0:
arr=[]
attrs=attrss[0].split(' ')
for attr in attrs:
if attr.strip()!='':
arr.append(attr)
for ar in arr:
try:
tu=ar.split('=')
if tu[1].startswith("'") and tu[1].endswith("'"):
pass
elif tu[1].startswith("\"") and tu[1].endswith("\""):
pass
else:
raise ValueError()
except:
raise ValueError("语法错误"+ar+":无法处理该语句,请确保属性中有等号")
if tu[0]=='href': # extend here to support more attributes
cell.hyperlink=tu[1][1:len(tu[1])-1]
cell.value=re.sub(r'<.*>','',cell.value)
cell.font=Font(bold=False,italic=False,underline="single",color='0000FF')
else:
raise ValueError('Syntax error: unknown attribute '+tu[0]+'; only the following attributes are supported: href')
return cell.value
def _changeStyle(cell,fontStyle={}):
"""
Change the font style of a cell according to fontStyle.
underline: single | none _underline the text
fontColor: 0000FF _blue
size: 11
bold: True _whether to use bold
name: 宋体 _font family (e.g. SimSun)
italic: True _whether to use italics
strike: False _whether to add a strikethrough
:param cell:
:param fontStyle:
:return:
"""
# apply the requested style, falling back to defaults for any missing keys
cell.font = Font(size=fontStyle.get('size', 11),
bold=fontStyle.get('bold', False),
italic=fontStyle.get('italic', False),
underline=fontStyle.get('underline'),
color=fontStyle.get('fontColor', '000000'),
name=fontStyle.get('name'),
strike=fontStyle.get('strike', False))
return cell
def optionExecl(path='',datas=[],mode="r" or "w" or "a",styleRemain=True,isCreateFile=False):
"""操作excel文件。包括读(r) 写(w) 添加(a)
:param path: 文件路径
:param datas: 需要写入的数据(w)(a)写与添加模式会用到。数据格式:datas:[{'name':'张三','age':2},{'name':'李四','age':4}]
:param mode: 模式,目前有3中。r 读模式-用于读取excel文件;w 写模式用于写入文档;a 添加模式用于为文档添加信息。
:param styleRemain: 是否保留excel的格式。
"""
if isCreateFile:
if not path.endswith('.xlsx'):
raise FileExistsError(path+': wrong file extension, expected .xlsx')
else:
_checkExcel(path=path)
if mode=='w':
wb=_createNewWorkbook()
sheetName=_getSheetNameByIndex(wb)
sheet=wb[sheetName]
# write the header row
if len(datas)==0:
# del wb['Sheet1'] # remove the default sheet
# wb.save(path)
wb.close()
return
firstArr=list(datas[0].keys())
for i in range(0,len(firstArr)):
sheet.cell(row=1,column=i+1,value=firstArr[i])
# write the body rows
for j in range(0,len(datas)):
for i in range(0,len(firstArr)):
cell=sheet.cell(row=j + 2, column=i + 1, value=datas[j].get(firstArr[i]))
# post-process the value (e.g. hyperlink markup)
if type(cell.value)==str:
cell=_changeStrToCell(cell)
wb.save(path)
wb.close()
elif mode=='r':
wb = openpyxl.load_workbook(path)
_checkSheetNumber(path=path ,workbook=wb)
sh=wb[wb.sheetnames[0]]
rows_data = list(sh.rows)
headLine=[] # header row
datas=[]
for hc in rows_data.pop(0):
headLine.append(hc.value)
for row in rows_data:
data={}
for i in range(0,len(row)):
if styleRemain:
data.setdefault(headLine[i], _changeCellToStr(row[i]))
else:
data.setdefault(headLine[i],row[i].value)
datas.append(data)
wb.close()
return datas
elif mode=='a':
if not os.path.exists(path):
optionExecl(path=path, datas=datas, mode="w",isCreateFile=True)
return
try:
oldDatas=optionExecl(path=path, mode="r", styleRemain=True)
except IndexError:
optionExecl(path=path, datas=datas, mode="w")
return
newDatas = oldDatas + datas
optionExecl(path=path, datas=newDatas, mode="w")
def getHeader(path=''):
"""
Get the header row (first row) of the Excel file.
"""
_checkExcel(path)
wb = openpyxl.load_workbook(path)
_checkSheetNumber(path=path, workbook=wb)
sh=wb[wb.sheetnames[0]]
rows_data = list(sh.rows)
headLine=[]
for hc in rows_data.pop(0):
headLine.append(hc.value)
return headLine
def changeExeclToCsvFile(path="",encoding='utf-8'):
"""
Convert an Excel file to a CSV file. Currently only the first sheet is converted.
:param path: path of the Excel file to convert
:param encoding: encoding used when writing the CSV file
:return: None
"""
datas=optionExecl(path=path, mode='r')
from utils_xhr.io.csvTool import optionCsv
optionCsv(path=path.replace('.xlsx','.csv'), encoding=encoding, mode='w',datas=datas,isCreateFile=True)
def changeCsvToExcelFile(path="",encoding='utf-8'):
"""
Convert a CSV file to an Excel (.xlsx) file.
:param path: path of the CSV file to convert
:param encoding: encoding used when reading the CSV file
:return:
"""
from utils_xhr.io.csvTool import optionCsv
datas = optionCsv(path=path, mode='r',encoding=encoding)
optionExecl(path=path.replace('.csv','.xlsx'),mode='w',datas=datas,isCreateFile=True)
def filter(self,path="",attr="",conditionFunction="",sheetName="Sheet1"):
filterItems=[]
items=self.optionExecl(path=path,sheetName=sheetName,mode='r')
for item in items:
if conditionFunction(item[attr]):
filterItems.append(item)
return filterItems
if __name__ == '__main__':
# datas=optionExecl(path='test1.xlsx',mode='r',isCreateFile=True)
# print(datas)
# changeCsvToExcelFile(path='test.csv')
changeExeclToCsvFile(path='test.xlsx')
```
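A hedged usage sketch of `optionExecl` and `getHeader`, written as if appended to the module above (the file name is made up for illustration):
```python
# Illustrative round-trip; 'people.xlsx' is a made-up file name.
rows = [{'name': 'Tom', 'age': 2}, {'name': 'Jerry', 'age': 4}]
optionExecl(path='people.xlsx', datas=rows, mode='w', isCreateFile=True)        # write
optionExecl(path='people.xlsx', datas=[{'name': 'Ann', 'age': 7}], mode='a')    # append
print(optionExecl(path='people.xlsx', mode='r', styleRemain=False))             # read back as a list of dicts
print(getHeader(path='people.xlsx'))                                            # -> ['name', 'age']
```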
#### File: utils_xhr/pool/objsPool.py
```python
from queue import Queue
from _xhr_tool._utils.pool._abstract.objsPool_abstract import ObjsPool_abstract
"""
This class is intended to be subclassed.
"""
class ObjsPool(ObjsPool_abstract):
def __init__(self,obj_class,args=[],initNum=10,addNum=10):
"""
在子类的__init__方法中必须传入obj_class。
super().__init__(类名,num=10)
:param obj_class:
:param args:
:param num:
"""
self.pool = Queue()
self._obj_class = obj_class
self._initNum=initNum
self._addNum=addNum
self._args=args
self._poolAdd(initNum)
def _poolAdd(self,num=10):
for i in range(num):
self.pool.put(self._obj_class(*self._args))
def get(self):
if self.pool.qsize()==0:
self._poolAdd(self._addNum)
return self.pool.get(block=False)
def back(self,obj):
if type(obj)!=self._obj_class:
            raise TypeError('Wrong type returned to the pool: ' + str(type(obj)) + ' does not match the pooled type ' + str(self._obj_class))
self.pool.put(obj)
```
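A minimal usage sketch of the pool; the pooled `Connection` class below is a placeholder invented for the example:
```python
# Placeholder class just to have something to pool.
class Connection:
    def __init__(self, host):
        self.host = host

pool = ObjsPool(Connection, args=['localhost'], initNum=2, addNum=2)
conn = pool.get()        # the pool refills itself by addNum when it runs empty
pool.back(conn)          # returning an object of a different type raises TypeError
```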
#### File: utils_xhr/reflex/reflexUtils.py
```python
import inspect
import re
import threading
import sys
from annotate_xhr import threadingRun
class ReflexUtils:
"最好不要使用单例模式,否则会对服务器造成负担"
def __init__(self):
pass
def excuteAllDecorator(self, obj={}, decoratorName="", args=[]):
funcs = self.getFuncByDecoratorName(className=obj.__class__, decoratorName=decoratorName)
results=None
for func in funcs:
results = func(obj, *args)
return results
def excuteDecorator(self,obj={},decoratorName="",args=[]):
func=self.getFuncByDecoratorName(className=obj.__class__,decoratorName=decoratorName)[0]
try:
result=func(obj,*args)
except TypeError:
            raise SyntaxError('Syntax error: the class ' + str(obj.__class__) + ' has no method decorated with ' + decoratorName + ', so a method '
                              'under ' + decoratorName + ' cannot be executed. Consider adding a method to the class and decorating it with ' + decoratorName)
return result
def getSource(self,any):
"""获得对象的源数据
:param object any: 此处存放module, class, method, function, traceback, frame, or code object.
"""
return inspect.getsource(any)
def getSourceRemoveNotes(self,any):
"""
获得文件源数据。该源数据会剔除掉所有注释
:param object any: 此处存放module, class, method, function, traceback, frame, or code object.
"""
s=self.getSource(any)
s=self._removeNotes(s)
return s
def _removeNotes(self,s):
"""
(此方法有bug,难以修补。当比如字段名下方的注释无法删除。当注释与等号中的单词一样的时候会被误删除。)
去除掉字符串中的所有注释。包括头部注释。类注释。方法注释
e.g:
s='
@threadingRun # 我是注解
def haha(self,s,dd):
print('你好'+s)
# 我是注解
'
去除后:
s='
@threadingRun
def haha(self,s,dd):
print('你好'+s)
'
:param s: 字符串
:return:
"""
        #remove triple-quoted docstrings
        classNotes=re.findall(r'class\s+\w+:\n\s+(\"\"\"[\s\S]*?\"\"\")',s)#docstrings under class
        functionNotes=re.findall(r'def\s+\w+\(.*\):\n\s+(\"\"\"[\s\S]*?\"\"\")',s)#docstrings under def
for classNote in classNotes:
s=s.replace(classNote,'')
for functionNote in functionNotes:
s = s.replace(functionNote, '')
        # remove docstrings written with a single pair of quotes
        classNotes2 = re.findall(r'class\s+\w+:\n\s+(\"[\s\S]*?\")', s) # docstrings under class
        functionNotes2 = re.findall(r'def\s+\w+\(.*\):\n\s+(\"[\s\S]*?\")', s) # docstrings under def
for classNote in classNotes2:
s = s.replace(classNote, '')
for functionNote in functionNotes2:
s = s.replace(functionNote, '')
        # s=re.sub(r'class\s+\w+:\n+\s+(\"\"\"[\s\S]*?\"\"\")','',s)#replace docstrings under class
        # s=re.sub(r'class\s+\w+:\n\s+(\"\"\"[\s\S]*?\"\"\")','',s)#replace docstrings under def
        s=re.sub(r'#.*','',s) #drop everything after a '#'
        s=s.strip()
        s = re.sub(r'^\"\"\"[\s\S]*?\"\"\"', '', s) # drop a triple-quoted header docstring
        s = re.sub(r'^\"[\s\S]*?\"', '', s) # drop a single-quoted header docstring
        s=re.sub('\n+(\s+)?\n+','\n',s) #drop unnecessary blank lines
return s
# def excuate
def getFuncByDecoratorName(self,className,decoratorName=""):
"""
module, class, method, function, traceback, frame, or code object.
根据装饰器名称,获得被该装饰器修饰的方法。
注意!由于该方法内部使用到inspect.getsource()。为了避免装饰器修改了类。因此需要在方法中添加
"""
try:
if hasattr(self,'_'+str(className)+'_src'):
pass
else:
                # strip the comments first
s=self.getSourceRemoveNotes(className)
setattr(self, '_'+str(className) + '_src',s)
s=getattr(self,'_'+str(className)+'_src')
except TypeError:
            raise TypeError('Type error: ' + str(className) + " must be a class object")
arr=[]
_index=0
while(_index!=-1):
_index=s.find(decoratorName,_index+1)
if _index==-1:
break
defIndex_start=s.find('def',_index)+3
defIndex_end=s.find('(',defIndex_start)
line=s[defIndex_start:defIndex_end].strip()
arr.append(getattr(className, line))
return arr
class A:
def haha(self,s,dd):
print('你好'+s)
def haha2(self):
print('你好')
if __name__ == '__main__':
pass
ReflexUtils()
# print(ReflexUtils().getFuncByDecoratorName(reflexUtils,'@threadingRun'))
# print(ReflexUtils().getFuncByDecoratorName(A(),'@threadingRun','我不好','woow'))
```
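A hedged sketch of the decorator lookup; `Worker` is invented for the example and `@threadingRun` is the decorator imported at the top of the module:
```python
class Worker:
    @threadingRun
    def run(self):
        print('working')

r = ReflexUtils()
# Scans the class source for '@threadingRun' and returns the methods defined right below it.
print(r.getFuncByDecoratorName(Worker, decoratorName='@threadingRun'))
```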
#### File: utils_xhr/utils_xhr/RUtils.py
```python
import os
import sys
import threading
import time
from selenium.webdriver.support.color import Color, Colors
import os
class tool:
"""
    A collection of custom basic utility helpers.
"""
_instance = None
def __init__(self):
pass
def __new__(cls, *args, **kwargs):
if tool._instance==None:
            tool._instance=super().__new__(cls, *args, **kwargs)#call the parent class's __new__
return tool._instance
@staticmethod
def bug():
Colors.pop()
assert 1/0
@staticmethod
def startNewThread(fun):
        t1 = threading.Thread(target=fun, args=[]) # start the server-side listener in a new thread
t1.start()
return t1
def printColor(self,s="",fontColor='black',end="\n"):
"""打印出有颜色的字体。默认为黑色。打印后返回打印的值。
:param str s: 需要打印的内容
:param str fontColor: 颜色可以是以下几种 red | green | yellow | pink | blue | gray | black | cyan
:param end: 末尾的字符。(一般是\n或者空字符)
:return: 返回s的值
"""
glock=threading.Lock()
glock.acquire()
fontColorArr = { 'black': 30,'red': 31, 'green': 32, 'yellow': 33, 'blue': 34, 'pink': 35, 'cyan':36 ,'gray': 37}
line=str(fontColorArr.get(fontColor))
if fontColorArr.get(fontColor) == None:
raise ValueError("传入的fontColor有问题!找不到该字体颜色:" + fontColor)
print('\033[0;' + line + 'm', s,end=end)
glock.release()
        return s  # return the printed value, as documented
def print(self,s,fontColor='blue',timeStrColor="red",siteColor="pink",path=None):
"""
默认字体为红色。背景色为白色
能够按照颜色在控制台打印出来。可以自定义背景色和字体颜色。下划线等
:param s:打印的内容
:param fontColor: (str) red | green | yellow | pink | blue| gray | black
:param timeStrColor: (str) red | green | yellow | blue | black
:param siteColor: (int) 0 普通模式 |
1 字体加粗 |
4 下划线 |
:return: None
"""
# print(sys._getframe(1).f_lineno)
# print(sys._getframe(1).f_code.co_filename)
# print(sys._getframe(1).f_code.co_name)
# print(sys._getframe(1).f_lineno)
        # 1. print the timestamp
        # 2. print the content
        # 3. print the call site
line=""
# line = "------------FILE:" + str(sys._getframe(1).f_code.co_filename) + "_____MODULE:" + str(
# sys._getframe(1).f_code.co_name) + "_____LINE:" + str(sys._getframe(1).f_lineno)
        # 1. print the timestamp
        self.printColor(s='[' + time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()) + ']',fontColor=timeStrColor,end="")
        # 2. print the content
self.printColor(s=s, fontColor=fontColor,
end="")
# print(sys._getframe(1).f_code.co_name)
# print(sys._getframe(2).f_code.co_name)
# print(sys._getframe(3).f_code.co_name)
# print(sys._getframe(4).f_code.co_name)
line = "------------FILE:" + str(sys._getframe(1).f_code.co_filename) + "_____MODULE:" + str(
sys._getframe(1).f_code.co_name) + "_____LINE:" + str(sys._getframe(1).f_lineno)
        # 3. print the call site
self.printColor(s=line,fontColor=siteColor,end="")
print('\033[0m')
# self.printColor()
if path!=None:
if os.path.isfile(path):
pass
else:
                raise ValueError('Invalid save path: '+str(path)+'. The file does not exist!')
@staticmethod
def isBaseType(variate):
"""
        Check whether the variable is one of the basic built-in types.
:param variate:
:return:
"""
type1 = ""
if type(variate) == type(1):
type1 = "int"
return True
elif type(variate) == type("str"):
type1 = "str"
return True
elif type(variate) == type(12.3):
type1 = "float"
return True
elif type(variate) == type([1]):
type1 = "list"
return True
elif type(variate) == type(()):
type1 = "tuple"
return True
elif type(variate) == type({"key1": "123"}):
type1 = "dict"
return True
elif type(variate) == type({"key1"}):
type1 = "set"
return True
return False
@staticmethod
def getType(data):
"""
        Return the kind of the given data. Two kinds are currently supported:
        1. json - the text parses as a dict
        2. text - anything else
        :param data:
        :return: one of the kinds above
"""
data=str(data)
        if data.startswith('{') and data.endswith("}"):
try:
data=eval(data)
if type(data)==type({}):
return "json"
else:
return "text"
except:
return "text"
else:
return "text"
if __name__ == '__main__':
tool().print("你好哦")
print(222)
tool().print("你好哦")
tool().print("你好哦")
print(111)
tool().print("你好哦")
```
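A short illustrative run of the helpers above:
```python
t = tool()                                   # singleton: repeated calls return the same instance
t.printColor('all good', fontColor='green')
t.print('something happened', fontColor='yellow')
print(tool.getType('{"a": 1}'))              # -> json
print(tool.getType('plain words'))           # -> text
print(tool.isBaseType(3.14))                 # -> True
```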
#### File: utils_xhr/utils_xhr/xhr_logger.py
```python
import logging
from logging import handlers
from _xhr_tool._annotate import singleObj
@singleObj
class Logger(object):
DEBUG=logging.DEBUG
INFO=logging.INFO
WARNING=logging.WARNING
ERROR=logging.ERROR
CRITICAL=logging.CRITICAL
level_relations = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'crit': logging.CRITICAL
    } # mapping from level names to logging levels
def __init__(self,savePath="save.log"):
self.logger = logging.getLogger()
self.logger.setLevel(logging.INFO)
        # 2.1 create a handler that writes to the log file
        fileHandler = logging.FileHandler(savePath)
        # 2.2 create a handler that prints to the console
        streamHandler = logging.StreamHandler()
        # 2.3 create a handler that rotates the log file on a time schedule
        th = handlers.TimedRotatingFileHandler(filename=savePath, when='D',interval=2, backupCount=3)
        """class logging.handlers.TimedRotatingFileHandler(filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False)
        `when` selects the type of time interval and `interval` its length; e.g. when='D', interval=2 rotates every two days.
        backupCount is how many old log files are kept - older ones are discarded."""
        # 3. define the output format shared by all handlers
        formatter = logging.Formatter('%(asctime)s - %(levelname)s: %(message)s - %(pathname)s[line:%(lineno)d]')
        th.setFormatter(formatter)
        fileHandler.setFormatter(formatter)
        streamHandler.setFormatter(formatter)
        # 4. attach the message handlers to the logger
self.logger.addHandler(fileHandler)
self.logger.addHandler(streamHandler)
self.logger.addHandler(th)
def getLogger(self):
return self.logger
def setLevel(self,level=logging.INFO):
self.logger.setLevel(level)
def debug(self,message=''):
return self.logger.debug(message)
def info(self,message=''):
return self.logger.info(message)
def warning(self,message=''):
return self.logger.warning(message)
if __name__ == '__main__':
FORMAT = '%(asctime)-15s %(clientip)s %(user)-8s %(message)s'
logging.basicConfig(format=FORMAT)
d = {'clientip': '192.168.0.1', 'user': 'fbloggs'}
logging.warning('Protocol problem: %s', 'connection reset', extra=d)
# l=Logger()
# l.setLevel(level=l.DEBUG)
# l.logger.debug('你好,我是初始信息')
``` |
{
"source": "jhfwb/xhr",
"score": 3
} |
#### File: xhr/xhr/test.py
```python
import abc
class Car(metaclass=abc.ABCMeta):
@staticmethod
def run():
pass
@staticmethod
def turn():
pass
@staticmethod
def stop():
pass
class BenTianCar(Car):
def run(self):
print('本田跑')
def turn(self):
print('本田转')
def stop(self):
print('本田停')
class FoldCar(Car):
def run(self):
print('福特跑')
def turn(self):
print('福特转')
def stop(self):
print('福特停')
class AutoSystem:
def __init__(self,car):
self.car=car
def run_car(self):
self.car.run()
def stop_car(self):
self.car.stop()
def turn_car(self):
self.car.turn()
if __name__ == '__main__':
AutoSystem(FoldCar()).run_car()
``` |
{
"source": "jhgalino/MPv2",
"score": 3
} |
#### File: MPv2/include/MPA.py
```python
class reversor:
def __init__(self, obj):
self.obj = obj
def __eq__(self, other):
return other.obj == self.obj
def __lt__(self, other):
return other.obj < self.obj
# processing
def process(personalScore):
tempStorage = list(
map(lambda x: [x[0], x[1], abs(personalScore - int(x[1]))], storage)
)
tempStorage = sorted(
tempStorage, key=lambda x: (x[2], x[1], len(x[0]), reversor(x[0]))
)
return tempStorage
def extra(
scoreGap, barkada: list
): # scoreGap is the gap between your score and your closest
extraPerson = storage[barkadaSize]
modifiedStorage = process(int(extraPerson[1]))
personWithCloseness = list(filter(lambda x: x[2] == scoreGap, modifiedStorage))
personWithCloseness = sorted(
personWithCloseness, key=lambda x: (x[2], x[1], len(x[0]), reversor(x[0]))
)
if len(personWithCloseness) == 0:
return "{}, almost".format(extraPerson[0])
else:
if personWithCloseness[0][0] in barkada:
return extraPerson[0]
else:
return "{}, {}".format(extraPerson[0], personWithCloseness[0][0])
# input
storage = []
personalScore = int(input())
barkadaSize = int(input())
numberOfNamesToFollow = int(input())
for n in range(numberOfNamesToFollow):
name, score = [str(x) for x in input().split()]
storage.append([name, score])
storage = process(personalScore)
barkada = [storage[number][0] for number in range(barkadaSize)]
lastLine = extra(storage[0][2], barkada)
# output
for i in barkada:
print(i)
print(lastLine)
```
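The `reversor` wrapper above flips comparisons so a single `sorted()` call can mix ascending and descending keys; a tiny sketch with made-up data:
```python
# Score ascending, then name descending for ties.
people = [("ana", 3), ("bob", 3), ("cy", 1)]
print(sorted(people, key=lambda p: (p[1], reversor(p[0]))))
# -> [('cy', 1), ('bob', 3), ('ana', 3)]
```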
#### File: MPv2/include/MPB.py
```python
def filterCommonName(listOfNames: list, expectedNames: int):
listOfUnique = []
for name in listOfNames:
if listOfNames.count(name) == 1:
listOfUnique.append(name)
if len(listOfUnique) == expectedNames:
break
if len(listOfUnique) < expectedNames:
listOfUnique.append("Rumpelstiltskin")
return listOfUnique
# input
listOfNames = []
expectedNames = int(input())
numberOfNames = int(input())
for n in range(numberOfNames):
listOfNames.append(input())
listOfUnique = filterCommonName(listOfNames, expectedNames)
# output
for name in listOfUnique:
print(name)
```
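A tiny illustrative run of `filterCommonName`, written as if the function is in scope (input data made up):
```python
names = ["ana", "bob", "ana", "cy"]
print(filterCommonName(names, expectedNames=3))
# Only 'bob' and 'cy' appear exactly once, so the result is padded:
# -> ['bob', 'cy', 'Rumpelstiltskin']
```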
#### File: MPv2/include/MPE3.py
```python
def differentiate(fxn: str) -> str:
if fxn == "x":
return "1"
dividedFxn = getFirstLevel(fxn)
coeffOrTrig: str = dividedFxn[0]
exponent: str = dividedFxn[2]
insideParentheses: str = dividedFxn[1]
if coeffOrTrig.isalpha():
ans = computeTrig(coeffOrTrig, insideParentheses)
ans = ans + "*" + differentiate(insideParentheses)
if ans.endswith("*1"):
ans = list(ans)
ans.pop()
ans.pop()
ans = "".join(ans)
return ans
if len(exponent) != 0:
if len(coeffOrTrig) != 0 and coeffOrTrig.isnumeric():
ans = computeExpWithCoeff(coeffOrTrig, insideParentheses, exponent)
ans = ans + "*" + differentiate(insideParentheses)
ans = ans.replace("^1", "")
if ans.endswith("*1"):
ans = list(ans)
ans.pop()
ans.pop()
ans = "".join(ans)
return ans
else:
ans = computeExpWithoutCoeff(insideParentheses, exponent)
ans = ans + "*" + differentiate(insideParentheses)
ans = ans.replace("^1", "")
if ans.endswith("*1"):
ans = list(ans)
ans.pop()
ans.pop()
ans = "".join(ans)
return ans
if len(coeffOrTrig) == 0 and len(exponent) == 0:
ans = "1" + "*" + differentiate(insideParentheses)
ans = ans.replace("^1", "")
if ans.endswith("*1"):
ans = list(ans)
ans.pop()
ans.pop()
ans = "".join(ans)
return ans
def getFirstLevel(function: str) -> list:
indexOfOpen = function.find("(")
indexOfClose = function.rfind(")")
function = list(function)
function[indexOfOpen] = "|"
function[indexOfClose] = "|"
function = "".join(function)
assert function.count("|") == 2, "| != 2" # assert division by 2
return function.split("|")
def computeTrig(trig: str, inside: str) -> str:
if trig == "sin":
return "(cos({}))".format(inside)
elif trig == "cos":
return "(-sin({}))".format(inside)
elif trig == "tan":
return "(sec({})^2)".format(inside)
if trig == "sec":
return "(sec({})tan({}))".format(inside, inside)
if trig == "csc":
return "(-csc({})cot({}))".format(inside, inside)
if trig == "cot":
return "(-csc({})^2)".format(inside)
def computeExpWithCoeff(coeff: str, inside: str, exp: str) -> str:
cf = int(coeff)
expnt = int(exp.replace("^", ""))
cf = cf * expnt
expnt -= 1
return "{}({})^{}".format(cf, inside, expnt)
def computeExpWithoutCoeff(inside: str, exp: str) -> str:
expnt = int(exp.replace("^", ""))
cf = int(exp.replace("^", ""))
expnt -= 1
return "{}({})^{}".format(cf, inside, expnt)
OTHER_RECURSIVE_FUNCTIONS = [
"getFirstLevel",
"computeTrig",
"computeExpWithCoeff",
"computeExpWithoutCoeff",
]
print(differentiate("3(x)^3"))
```
#### File: MPv2/include/MPEv2.py
```python
import copy
def separateParts(function: str):
assert function.count("~") == 2, "function.count('~') == 2"
functionList = function.split("~")
return functionList
def getParentheses(function: str):
assert type(function) == str, "type(function) == str"
assert function.count("(") >= 1, "function.count('(') >= 1"
assert function.count(")") >= 1, "function.count(')') >= 1"
startCounter = 0
endCounter = 0
functionList = list(function)
for n in range(len(functionList)):
if startCounter < 1 and functionList[n] == "(":
functionList[n] = "~"
startCounter += 1
for n in range(len(functionList) - 1, -1, -1):
if endCounter < 1 and functionList[n] == ")":
functionList[n] = "~"
endCounter += 1
functionList = "".join(functionList)
return functionList
def trig(func: list):
assert len(func) == 3, "len(func) == 3"
assert len(func[2]) == 0, "len(func[2]) == 0"
assert len(func[0]) == 3, "len(func[0]) == 3"
assert type(func[1]) == str, "type(func[1]) == str"
assert type(func[0]) == str, "type(func[0]) == str"
ans = ""
if func[0] == "sin":
ans = "(cos({}))".format(func[1])
elif func[0] == "cos":
ans = "(-sin({}))".format(func[1])
elif func[0] == "sec":
ans = "(sec({})tan({}))".format(func[1], func[1])
elif func[0] == "csc":
ans = "(-csc({})cot({}))".format(func[1], func[1])
elif func[0] == "tan":
ans = "(sec({})^2)".format(func[1])
elif func[0] == "cot":
ans = "(-csc({})^2)".format(func[1])
return ans, func[1]
def coeff(func: list):
assert len(func) == 3, "len(func) == 3"
assert len(func[0]) >= 1, "len(func[0]) >= 1"
exponent = 1
if len(func[2]) == 2:
func[2] = list(func[2])
func[2].pop(0)
func[2] = int("".join(func[2]))
exponent = copy.deepcopy(func[2])
func[2] -= 1
if func[2] in [1, 0]:
func[2] = ""
else:
func[2] = "^" + str(func[2])
func[0] = int(func[0]) * exponent
ans = "{}({}){}".format(func[0], func[1], func[2])
return ans, func[1]
def exp(func: list):
assert len(func[0]) == 0, "len(func[0]) == 0"
    assert len(func[2]) in [2, 0], "len(func[2]) in [2, 0]"
if len(func[2]) == 2:
func[2] = list(func[2])
func[2].pop(0)
func[2] = int("".join(func[2]))
coefficient = copy.deepcopy(func[2])
func[2] -= 1
if func[2] in [1, 0]:
func[2] = ""
else:
func[2] = "^" + str(func[2])
ans = "{}({}){}".format(coefficient, func[1], func[2])
return ans, func[1]
def chooseMethod(fxnList: list):
assert len(fxnList) == 3, "len(fxnList) == 3"
assert type(fxnList[0]) == str, "type(fxnList[0]) == str"
assert type(fxnList[2]) == str, "type(fxnList[2]) == str"
assert len(fxnList[0]) in [3, 1, 0], "len(fxnList[0]) in [3,1,0]"
assert len(fxnList[2]) in [2, 0], "len(fxnList[2]) in [2, 0]"
if len(fxnList[0]) == 3:
return trig(fxnList)
elif len(fxnList[0]) == 1:
return coeff(fxnList)
elif len(fxnList[0]) == 0:
return exp(fxnList)
def differentiate(fxn: str):
assert type(fxn) == str, "type(fxn) == str"
if fxn == "(x)":
return "1"
else:
answer, nextTerm = chooseMethod(separateParts(getParentheses(fxn)))
assert type(answer) == str, "type(answer) == str"
assert type(nextTerm) == str, "type(nextTerm) == str"
if nextTerm == "x":
return "{}".format(answer)
else:
return "'{}'".format(answer + "*" + differentiate(nextTerm))
OTHER_RECURSIVE_FUNCTIONS = [
"chooseMethod",
"exp",
"coeff",
"trig",
"getParentheses",
"separateParts",
]
``` |
{
"source": "jhgan00/mirae-dashboard",
"score": 3
} |
#### File: mirae-dashboard/app/plots.py
```python
from plotly.offline import plot
import plotly.express as px
import pandas as pd
import numpy as np
import os
import io
import urllib
import base64
import matplotlib.pyplot as plt
from Prediction.apps import PredictionConfig
import shap
plt.rcParams["font.family"] = "NanumGothic"
plt.rcParams['axes.unicode_minus'] = False
def plot_class_prob(labels, prob):
data = pd.DataFrame(dict(labels=labels, prob=prob))
fig = px.bar(
data,
x="labels",
y="prob",
color="labels",
color_discrete_map={"자동지급": "#5cb85c", "조사": "#d9534f", "심사": "#f0ad4e"},
category_orders = dict(labels=["자동지급", '심사', "조사"]),
text=[f"{p}%" for p in prob],
template="plotly_white"
)
fig.update_traces(textfont=dict(size=12))
fig.update_yaxes(title="확률", range=[0, 100])
fig.update_xaxes(title=None)
fig.update_layout(showlegend=False, title_font_size=20, autosize=False, height=340)
div = plot(fig, output_type="div", auto_open=False, config=dict(displayModeBar=False))
return div
def plot_classification(classification):
"""
    Expects this month's classification data.
:return:
"""
fig = px.pie(
classification,
values = "cnt",
names = "index",
color = "index",
color_discrete_map={"자동지급": "#5cb85c", "조사": "#d9534f", "심사": "#f0ad4e"},
template = "plotly_dark"
)
fig.update_layout(
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)',
legend_title=None,
)
fig.update_traces(
textfont=dict(size=17, color="white"),
textinfo='label+percent+value'
)
fig.update_xaxes(title=None)
div = plot(fig, output_type="div", auto_open=False, config=dict(displayModeBar=False))
return div
def plot_force(data):
dropcols = ["ID","자동지급", "심사", "조사", "conf", "pred", "target", "sampling_method", "base_ym"]
labels = dict(자동지급=0, 심사=1, 조사=2)
pred = labels[data["pred"]]
X = data.drop(dropcols)
if X.보험료구간 == 99:
X = X.drop(["가입금액구간", "보험료구간"])
explainer = PredictionConfig.explainer_na
else:
explainer = PredictionConfig.explainer_normal
shap_values = explainer.shap_values(X.values.reshape((1,-1)))
fplot = shap.force_plot(
explainer.expected_value[pred],
shap_values[pred],
X.values.reshape((1,-1)).astype(float).round(2),
feature_names = X.index,
matplotlib=True, show=False, text_rotation=25
)
buffer = io.BytesIO()
fplot.savefig(buffer, bbox_inches="tight", format="png")
buffer.seek(0)
string = base64.b64encode(buffer.read())
uri = urllib.parse.quote(string)
return uri
def plot_threshold(df, fpath):
df = df.assign(base_cost=float(os.environ["BASE_COST"]), automation="AUTOMATION", base="BASE")
fig = px.line(
df,
x="threshold",
y="total_cost",
template="plotly_dark",
line_shape='spline',
color="automation"
)
fig.update_layout(
title_font_size=20,
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)',
legend_title=None
)
fig2 = px.line(
df,
x="threshold",
y="base_cost",
color="base"
)
fig2.update_traces(line=dict(color="#FF0000", dash="dash"))
fig.add_trace(fig2.data[0])
fig.update_traces(line=dict(width=5))
fig.update_xaxes(title="THRESHOLD", range = [0.45, 1.05], showgrid=False)
fig.update_yaxes(title="COST", showgrid=True)
div = plot(fig, output_type="div", auto_open=False, config=dict(displayModeBar=False))
with open(fpath, "w") as html:
html.write(div)
return div
def plot_performance(performance):
    # performance: the raw dataframe is passed in as-is
performance = performance.assign(
month = pd.to_datetime(performance.base_ym, format="%Y%m")
)
performance = performance.set_index("month").drop(["base_ym"], axis=1).stack().reset_index()
performance.columns = ["month", "performance", "value"]
fig = px.line(
performance,
x="month",
y="value",
color="performance",
template="plotly_dark"
)
fig.data[0].update(mode='markers+lines')
fig.data[1].update(mode='markers+lines')
fig.update_layout(
title_font_size=20,
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)',
legend_title=None,
xaxis_tickformat = '%b<br>%Y'
)
fig.update_traces(line=dict(width=5), marker=dict(size=15))
fig.update_xaxes(title="MONTH", showgrid=False)
fig.update_yaxes(title="", range = [0.6, 0.9], showgrid=True)
div = plot(fig, output_type="div", auto_open=False, config=dict(displayModeBar=False))
return div
``` |
{
"source": "jhgarcia0/search_engine",
"score": 4
} |
#### File: jhgarcia0/search_engine/cosdis.py
```python
def cosdis(a,b):
from math import sqrt
letters_a = []
letters_b = []
letters_ab = []
dict_a = []
dict_b = []
dis = 0
letters_a = list(a)
letters_b = list(b)
letters_ab = letters_a + letters_b
letters_ab = set(letters_ab)
letters_ab = list(letters_ab)
for l in letters_ab:
dis += (letters_a.count(l) - letters_b.count(l))**2
try:
dis = 1/sqrt(dis)
except ZeroDivisionError:
if a == b:
return 1.0
return dis
if __name__ == '__main__':
print(cosdis('liquidificador','liquidificador'))
``` |
{
"source": "jhgarcia0/Twitterbot",
"score": 3
} |
#### File: jhgarcia0/Twitterbot/main.py
```python
import moedas
from datetime import datetime
import tweepy
from time import sleep
def authentication():
import tweepy
auth = tweepy.OAuthHandler('y6fukObkpSjzBxmvjC6SGmuS8', '<KEY>')
auth.set_access_token('<KEY>',
'<KEY>')
api = tweepy.API(auth, wait_on_rate_limit=False)
return api
def return_time(only_minute=False, only_hour=False):
hour = datetime.now().hour
minute = datetime.now().minute
if not only_hour and not only_minute:
if minute == 0:
minute = str(minute) + '0'
return str(hour) + ':' + str(minute)
elif only_hour:
return hour
elif only_minute:
return minute
api = authentication()
contador = 0
try:
dolar_antigo = moedas.cotacao_dolar()
euro_antigo = moedas.cotacao_euro()
libra_antigo = 'x'
except Exception as e:
    print('An error occurred: ', e)
while True:
sleep(2)
minute = return_time(only_minute=True)
hour = return_time(only_hour=True)
if minute == 0:
minute = str(minute) + '0'
hmin = return_time()
if int(minute) % 15 == 0:
        print(f'checking for changes at {hmin}')
print(f'Dolar = {moedas.cotacao_dolar()} Euro = {moedas.cotacao_euro()}')
try:
dolar = moedas.cotacao_dolar()
euro = moedas.cotacao_euro()
libra = moedas.cotacao_libra()
except Exception as e:
            print('An error occurred:', e)
try:
# Dolar
if dolar_antigo == dolar:
post_dolar = ''
elif dolar_antigo > dolar:
post_dolar = f"Dólar caiu :) -> R${dolar} "
dolar_antigo = dolar
else:
post_dolar = f"Dólar subiu :( -> R${dolar}"
dolar_antigo = dolar
# Euro
if euro_antigo == euro:
post_euro = ''
elif euro_antigo > euro:
post_euro = f"\nEuro caiu :) -> R${euro}"
euro_antigo = euro
else:
post_euro = f"\nEuro subiu :( -> R${euro}"
euro_antigo = euro
# Libra
if libra_antigo == libra:
post_libra = ''
elif libra_antigo > libra:
post_libra = f'\nLibra Caiu :) -> R${libra}'
libra_antigo = libra
else:
post_libra = f'\nLibra subiu :( -> R${libra}'
libra_antigo = libra
if post_dolar == '' and post_euro == '' and post_libra == '':
                print('Nothing was posted.')
sleep(61)
else:
api.update_status(f'{post_dolar}{post_euro}{post_libra}\nàs {hmin}'.strip())
                print('Posted.')
sleep(61)
except tweepy.error.TweepError as e:
            print(f'An error occurred: {e}')
pass
except Exception as error:
            print(f'An error happened --> {error}')
pass
finally:
sleep(30)
contador += 1
print('end of loop num', contador)
print(20 * '=-')
``` |
{
"source": "jhgdike/books",
"score": 3
} |
#### File: common/db_utils/base.py
```python
from sqlalchemy.engine import create_engine
from sqlalchemy.orm import sessionmaker, scoped_session
from .extention import *
from sqlalchemy.ext.declarative import declarative_base, DeclarativeMeta
class Model(object):
"""Baseclass for custom user models."""
#: the query class used. The :attr:`query` attribute is an instance
#: of this class. By default a :class:`BaseQuery` is used.
# query_class = BaseQuery
#: an instance of :attr:`query_class`. Can be used to query the
#: database for instances of this model.
pass
def make_declarative_base():
"""Creates the declarative base."""
base = declarative_base(cls=Model, name='Model', metaclass=_BoundDeclarativeMeta)
return base
class _BoundDeclarativeMeta(DeclarativeMeta):
def __init__(self, name, bases, d):
bind_key = d.pop('__bind_key__', None)
DeclarativeMeta.__init__(self, name, bases, d)
if bind_key is not None:
self.__table__.info['bind_key'] = bind_key
Base = make_declarative_base()
Base.to_dict = to_dict
AdLabReadSession = scoped_session(sessionmaker(class_=SignallingSession))
AdLabWriteSession = scoped_session(sessionmaker(class_=SignallingSession))
def _create_engine(user, password, host, port, db, pool_recycle=60, charset='utf8'):
engine = create_engine('mysql://%s:%s@%s:%s/%s?charset=%s&use_unicode=1' % (
user, password, host, port, db, charset),
pool_size=10,
max_overflow=-1,
pool_recycle=pool_recycle,
connect_args={'connect_timeout': 3, 'autocommit': 0},
)
return engine
```
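A hedged sketch of binding the scoped sessions to engines built with the helper above (all connection parameters are placeholders):
```python
# Placeholder credentials; SignallingSession comes from the .extention module imported above.
read_engine = _create_engine('user', 'password', '127.0.0.1', 3306, 'books')
write_engine = _create_engine('user', 'password', '127.0.0.1', 3306, 'books')
AdLabReadSession.configure(bind=read_engine)
AdLabWriteSession.configure(bind=write_engine)
Base.metadata.create_all(write_engine)   # create tables for models declared on Base
```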
#### File: books/common/response.py
```python
from __future__ import unicode_literals
from flask import jsonify
def json_success(data=''):
resp_json = {
'code': 0,
'data': data,
}
return jsonify(**resp_json)
def json_err(err, msg=''):
resp_json = {
'code': err,
'msg': msg or err.label,
}
return jsonify(**resp_json)
``` |
{
"source": "jhgdike/python-envcfg",
"score": 2
} |
#### File: python-envcfg/envcfg/__init__.py
```python
from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
import re
import json
__version__ = '0.0.1'
class Envcfg(object):
re_name = re.compile(r'[a-z][a-z0-9_]*')
def get_config(self, config_name):
if not self.re_name.match(config_name):
error_msg = ('No module named {0}\n\nThe name of envvar module '
'should matched {1.pattern}')
raise ImportError(error_msg.format(config_name, self.re_name))
if config_name in sys.modules:
return sys.modules[config_name]
config = dict()
sys.modules[config_name] = config
config_name += '_'
for raw_name, raw_value in os.environ.items():
if raw_name.startswith(config_name) and raw_name != config_name:
config[raw_name[len(config_name):]] = json.loads(raw_value)
return config
def clear_config(self, config_name):
if not self.re_name.match(config_name):
error_msg = ('No module named {0}\n\nThe name of envvar module '
'should matched {1.pattern}')
raise ImportError(error_msg.format(config_name, self.re_name))
if config_name in sys.modules:
sys.modules.pop(config_name)
config_name += '_'
for raw_name, raw_value in os.environ.items():
if raw_name.startswith(config_name):
os.environ.pop(raw_name)
envcfg = Envcfg()
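
# Usage sketch (the environment variables below are assumptions for illustration):
#   os.environ['myapp_DEBUG'] = 'true'
#   os.environ['myapp_PORT'] = '8080'
#   envcfg.get_config('myapp')   # -> {'DEBUG': True, 'PORT': 8080}, values parsed with json.loads
#   envcfg.clear_config('myapp') # drops the cached entry and the matching environment variables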
``` |
{
"source": "jhgdike/rate_limiter",
"score": 3
} |
#### File: rate_limiter/rate_limiter/limiter.py
```python
from __future__ import unicode_literals
import time
from datetime import datetime
from queue import Queue, Full, Empty
from threading import Thread
class BaseRateLimiter(object):
def __init__(self, rate):
self.rate = rate
def acquire(self, count=1):
raise NotImplementedError()
class ThreadingRateLimiter(BaseRateLimiter):
def __init__(self, rate):
super(ThreadingRateLimiter, self).__init__(rate)
self.queue = Queue(rate)
Thread(target=self._clear_queue).start()
def acquire(self, count=1):
try:
self.queue.put(1, block=False)
except Full:
return False
return True
def _clear_queue(self):
while True:
time.sleep(1.0 / self.rate)
try:
self.queue.get(block=False)
except Empty:
pass
class DistributeRateLimiter(BaseRateLimiter):
def __init__(self, rate, cache):
super(DistributeRateLimiter, self).__init__(rate)
self.cache = cache
def acquire(self, count=1, expires=3, key=None, callback=None):
try:
if isinstance(self.cache, Cache):
return self.cache.fetch_token(rate=self.rate, count=count, expires=expires, key=key)
except Exception as ex:
return True
class Cache(object):
def __init__(self):
self.key = 'default'
self.namespace = 'ratelimiter'
def fetch_token(self, *args, **kwargs):
raise NotImplementedError()
class RedisTokenCache(Cache):
def __init__(self, redis_instance):
super(RedisTokenCache, self).__init__()
self.redis = redis_instance
def fetch_token(self, rate, count=1, expires=3, key=None):
date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
key = ":".join([self.namespace, key if key else self.key, date])
try:
current = self.redis.get(key)
if int(current if current else 0) > rate:
return False
else:
with self.redis.pipeline() as p:
p.multi()
p.incr(key, count)
p.expire(key, int(expires if expires else 3))
p.execute()
return True
except Exception as ex:
return False
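
# Usage sketch (illustrative only):
#   limiter = ThreadingRateLimiter(rate=100)                  # ~100 acquisitions per second, in-process
#   if limiter.acquire():
#       pass  # handle the request
#   cache = RedisTokenCache(redis_instance)                   # redis_instance is a placeholder redis.Redis()
#   distributed = DistributeRateLimiter(rate=100, cache=cache)
#   distributed.acquire(key='api:create')                     # tokens counted in Redis per second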
``` |
{
"source": "jhgeeyang/EvoDCinv",
"score": 2
} |
#### File: EvoDCinv/evodcinv/layered_model.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from stochopy import Evolutionary
from .dispersion_curve import DispersionCurve
from .thomson_haskell import ThomsonHaskell
from ._lay2vel import lay2vel as l2vf
try:
import cPickle as pickle
except ImportError:
import pickle
__all__ = [ "LayeredModel", "params2lay", "params2vel" ]
class LayeredModel:
"""
Layered velocity model
This class inverts for a layered medium given different modes of observed
dispersion curves.
Parameters
----------
model : ndarray
Layered velocity model.
"""
def __init__(self, model = None):
if model is not None and not isinstance(model, np.ndarray) and model.ndim != 2:
raise ValueError("model must be a 2-D ndarray")
if model is not None and model.shape[1] != 4:
raise ValueError("model must have 4 columns")
self._model = model
def __str__(self):
model = "%s: %s" % ("model".rjust(13), self._print_attr("model"))
misfit = "%s: %s" % ("misfit".rjust(13), self._print_attr("misfit"))
n_iter = "%s: %s" % ("n_iter".rjust(13), self._print_attr("n_iter"))
n_eval = "%s: %s" % ("n_eval".rjust(13), self._print_attr("n_eval"))
return "\n".join((model, misfit, n_iter, n_eval)) + "\n"
def _print_attr(self, attr):
if attr not in [ "model", "misfit", "n_iter", "n_eval" ]:
raise ValueError("attr should be 'model', 'misfit', 'n_iter' or 'n_eval'")
else:
if self._model is not None and attr == "model":
n_lay = len(self._model) // 3
model = self._model.reshape((n_lay, 3), order = "F")
param = "\n\t\tVP (m/s)\tVS (m/s)\tThickness (m)\n"
for i in range(n_lay):
param += "\t\t%.2f\t\t%.2f\t\t%.2f\n" % (model[i,0]*model[i,2], model[i,0], model[i,1])
return param[:-2]
elif hasattr(self, "_misfit") and attr == "misfit":
return "%.2f" % self._misfit
elif hasattr(self, "_n_iter") and attr == "n_iter":
return "%s" % self._n_iter
elif hasattr(self, "_n_eval") and attr == "n_eval":
return "%s" % self._n_eval
else:
return None
def invert(self, dcurves, beta, thickness, ny = 100, dtype = "float32", n_threads = 1,
evo_kws = dict(popsize = 10, max_iter = 100, constrain = True),
opt_kws = dict(solver = "cpso")):
"""
Invert the different modes of the dispersion curve for a layered
velocity model. Layers' P-wave velocities are determined by the Vp/Vs
ratio ranging in [ 1.5, 2.2 ]. High uncertainties should be expected
for P-wave velocities as surface waves are not much sensitive to Vp.
Layers' densities are determined using the Nafe-Drake's equation as
they only affect the amplitudes of the dispersion, not the location of
the zero-crossing.
Parameters
----------
dcurves : list of DispersionCurve
Dispersion curves to invert.
beta : ndarray (beta_min, beta_max)
S-wave velocity boundaries in m/s.
thickness : ndarray (d_min, d_max)
Layer thickness boundaries in m.
ny : int, default 100
Number of samples on the Y axis.
dtype : {'float32', 'float64'}, default 'float32'
Models data type.
n_threads : int, default 1
Number of threads to pass to OpenMP for forward modelling.
evo_kws : dict
Keywords to pass to evolutionary algorithm initialization.
opt_kws : dict
Keywords to pass to evolutionary algorithm optimizer.
"""
# Check inputs
if not isinstance(dcurves, (list, tuple)) or not np.all([ isinstance(dcurve, DispersionCurve) for dcurve in dcurves ]):
raise ValueError("dcurves must be a list of DispersionCurve objects")
else:
self._dcurves = dcurves
if not isinstance(beta, np.ndarray) or beta.ndim != 2:
raise ValueError("beta must be a 2-D ndarray")
else:
self._n_layers = beta.shape[0]
if np.any(beta[:,1] < beta[:,0]):
raise ValueError("elements in beta_max must be greater than beta_min")
if not isinstance(thickness, np.ndarray) or thickness.ndim != 2:
raise ValueError("thickness must be a 2-D ndarray")
if thickness.shape[0] != self._n_layers:
raise ValueError("inconsistent number of layers in thickness, got %d instead of %d" \
% (thickness.shape[0], self._n_layers))
if np.any(thickness[:,1] < thickness[:,0]):
raise ValueError("elements in d_max must be greater than d_min")
if not isinstance(ny, int) or ny < 1:
raise ValueError("ny must be a positive integer")
if not isinstance(n_threads, int) or n_threads < 1:
raise ValueError("n_threads must be a positive integer")
if not isinstance(opt_kws, dict):
raise ValueError("opt_kws must be a dictionary")
if not isinstance(evo_kws, dict):
raise ValueError("evo_kws must be a dictionary")
if "constrain" not in evo_kws:
evo_kws.update(constrain = True)
else:
evo_kws["constrain"] = True
if "eps2" not in evo_kws:
evo_kws.update(eps2 = -1e30)
else:
evo_kws["eps2"] = -1e30
if "snap" not in evo_kws:
evo_kws.update(snap = True)
else:
evo_kws["snap"] = True
args = ( ny, n_threads )
# get the lowest, highest value from index 0,1
lower = np.concatenate((beta[:,0], thickness[:,0], np.full(self._n_layers, 1.51)))
upper = np.concatenate((beta[:,1], thickness[:,1], np.full(self._n_layers, 2.19)))
ea = Evolutionary(self._costfunc, lower, upper, args = args, **evo_kws)
# no starting model
xopt, gfit = ea.optimize(**opt_kws)
self._misfit = gfit
# output model is 1D list
self._model = np.array(xopt, dtype = dtype)
self._misfits = np.array(ea.energy, dtype = dtype)
self._models = np.array(ea.models, dtype = dtype)
self._n_iter = ea.n_iter
self._n_eval = ea.n_eval
return self
def _costfunc(self, x, *args):
ny, n_threads = args
# didn't work..
# anyhow: get x after passed as a param in Evol Class
# get the unstandardized model as a input in Evol for Evaluation
print("This is x "+str(x))
# parameter to velocity layer
vel = params2lay(x)
misfit = 0.
count = 0
for i, dcurve in enumerate(self._dcurves):
# velocity + dcurve -> thomsen ->
th = ThomsonHaskell(vel, dcurve.wtype)
th.propagate(dcurve.faxis, ny = ny, domain = "fc", n_threads = n_threads)
if np.any([ np.isnan(sec) for sec in th._panel.ravel() ]):
return np.Inf
else:
dc_calc = th.pick([ dcurve.mode ])
# if the picking has non-trivial result
if dc_calc[0].npts > 0:
dc_obs = np.interp(dc_calc[0].faxis, dcurve.faxis, dcurve.phase_velocity)
misfit += np.sum(np.square(dc_obs - dc_calc[0].phase_velocity))
count += dcurve.npts
else:
misfit += np.Inf
break
if count != 0:
return np.sqrt(misfit / count)
else:
return np.Inf
# overriding function exists below params2lay(x)
def params2lay(self):
"""
Convert parameters to a layered velocity model usable by ThomsonHaskell
object.
Returns
-------
vel : ndarray
Layered velocity model. Each row defines the layer parameters in
the following order: P-wave velocity (m/s), S-wave velocity (m/s),
density (kg/m3) and thickness (m).
"""
return params2lay(self._model)
def params2vel(self, vtype = "s", nz = 100, zmax = None):
"""
Convert parameters to a continuous layered velocity model.
Parameters
----------
vtypes : {'s', 'p'}, default 's'
Velocity model type.
nz : int, default 100
Number of depth discretization points.
zmax : float or None, default None
Maximum depth.
Returns
-------
vel : ndarray
Layered velocity model. Each row defines the layer parameters in
the following order: P-wave velocity (m/s), S-wave velocity (m/s),
density (kg/m3) and thickness (m).
"""
return params2vel(self._model, vtype, nz, zmax)
def panel(self, wtype = "rayleigh", nf = 200,
th_kws = dict(ny = 200, domain = "fc", n_threads = 1)):
"""
Compute the Thomson-Haskell panel.
Parameters
----------
wtype : {'rayleigh', 'love'}, default 'rayleigh'
Surface wave type.
nf : int, default 200
Number of frequency samples.
th_kws : dict
Keyworded arguments passed to ThomsonHaskell propagate method.
Returns
-------
th : ThomsonHaskell
Dispersion curve panel.
"""
faxis = [ dcurve.faxis for dcurve in self._dcurves ]
faxis_full = np.unique(np.concatenate([ f for f in faxis ]))
faxis_new = np.linspace(faxis_full.min(), faxis_full.max(), nf)
vel = self.params2lay()
th = ThomsonHaskell(vel, wtype)
th.propagate(faxis_new, **th_kws)
return th
def pick(self, modes = [ 0 ], wtype = "rayleigh", nf = 200,
th_kws = dict(ny = 200, domain = "fc", n_threads = 1)):
"""
Parameters
----------
modes : list of int, default [ 0 ]
Modes number to pick (0 if fundamental).
wtype : {'rayleigh', 'love'}, default 'rayleigh'
Surface wave type.
nf : int, default 200
Number of frequency samples.
th_kws : dict
Keyworded arguments passed to ThomsonHaskell propagate method.
Returns
-------
picks : list of DispersionCurve
Picked dispersion curves.
"""
th = self.panel(wtype, nf, th_kws)
return th.pick(modes)
def save(self, filename):
"""
Pickle the dispersion curve to a file.
Parameters
----------
filename: str
Pickle filename.
"""
# just pickle dump
with open(filename, "wb") as f:
pickle.dump(self, f, protocol = pickle.HIGHEST_PROTOCOL)
@property
def model(self):
if hasattr(self, "_model"):
return self._model
else:
raise AttributeError("no inversion performed yet")
@property
def misfit(self):
if hasattr(self, "_misfit"):
return self._misfit
else:
raise AttributeError("no inversion performed yet")
@property
def models(self):
if hasattr(self, "_models"):
return self._models
else:
raise AttributeError("no inversion performed yet")
@property
def misfits(self):
if hasattr(self, "_misfits"):
return self._misfits
else:
raise AttributeError("no inversion performed yet")
@property
def energy(self):
energy = np.min(self.misfits, axis = 0)
return np.array([ np.min(energy[:i+1]) for i in range(len(energy)) ])
@property
def n_iter(self):
if hasattr(self, "_n_iter"):
return self._n_iter
else:
raise AttributeError("no inversion performed yet")
@property
def n_eval(self):
if hasattr(self, "_n_eval"):
return self._n_eval
else:
raise AttributeError("no inversion performed yet")
def _betanu2alpha(beta, nu):
return beta * np.sqrt( np.abs( ( 1.-nu ) / ( 0.5 - nu ) ) )
def _nafe_drake(alpha):
coeff = np.array([ 1.6612, -0.4712, 0.0671, -0.0043, 0.000106 ])
alpha_pow = np.array([ alpha*1e-3, (alpha* 1e-3)**2, (alpha*1e-3)**3,
(alpha*1e-3)**4, (alpha*1e-3)**5 ])
return np.dot(coeff, alpha_pow) * 1e3
def params2lay(x):
"""
Convert parameters to a layered velocity model usable by ThomsonHaskell
object.
Parameters
----------
x : ndarray
Array of parameters.
Returns
-------
vel : ndarray
Layered velocity model. Each row defines the layer parameters in
the following order: P-wave velocity (m/s), S-wave velocity (m/s),
density (kg/m3) and thickness (m).
"""
# How to read x
n_layers = len(x) // 3
beta = x[:n_layers]
alpha = beta * x[2*n_layers:]
rho = _nafe_drake(alpha)
d = x[n_layers:2*n_layers]
vel = np.concatenate((alpha[:,None], beta[:,None], rho[:,None], d[:,None]), axis = 1)
return vel
def params2vel(x, vtype = "s", nz = 100, zmax = None):
"""
Convert parameters to a continuous layered velocity model.
Parameters
----------
x : ndarray
Array of parameters.
vtypes : {'s', 'p'}, default 's'
Velocity model type.
nz : int, default 100
Number of depth discretization points.
zmax : float or None, default None
Maximum depth.
Returns
-------
vel : ndarray
Layered velocity model. Each row defines the layer parameters in
the following order: P-wave velocity (m/s), S-wave velocity (m/s),
density (kg/m3) and thickness (m).
"""
lay = params2lay(x)
zint = np.cumsum(lay[:,-1])
if zmax is None:
thick_min = lay[:,-1].min()
zmax = zint[-2] + thick_min
zint[-1] = zmax
dz = zmax / nz
az = dz * np.arange(nz)
if vtype == "s":
layz = np.stack((lay[:,1], zint)).transpose()
elif vtype == "p":
layz = np.stack((lay[:,0], zint)).transpose()
else:
raise ValueError("unknown velocity type '%s'" % vtype)
vel = l2vf.lay2vel1(layz, dz, nz)
return vel, az
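
# Usage sketch: the parameter vector is laid out as
#   [Vs of each layer, thickness of each layer, Vp/Vs ratio of each layer]
# e.g. for three layers (values are made up):
#   x = np.array([200., 500., 1000.,  10., 20., 30.,  1.7, 1.8, 1.9])
#   lay = params2lay(x)                       # rows of (Vp, Vs, density, thickness)
#   vel, az = params2vel(x, vtype='s', nz=50)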
``` |
{
"source": "jhgg/discord.py",
"score": 3
} |
#### File: discord.py/examples/background_task.py
```python
import discord
import asyncio
client = discord.Client()
async def my_background_task():
await client.wait_until_ready()
counter = 0
channel = discord.Object(id='channel_id_here')
while not client.is_closed:
counter += 1
await client.send_message(channel, counter)
await asyncio.sleep(60) # task runs every 60 seconds
@client.event
async def on_ready():
print('Logged in as')
print(client.user.name)
print(client.user.id)
print('------')
loop = asyncio.get_event_loop()
try:
loop.create_task(my_background_task())
loop.run_until_complete(client.login('token'))
loop.run_until_complete(client.connect())
except Exception:
loop.run_until_complete(client.close())
finally:
loop.close()
``` |
{
"source": "jhgg/dissonance",
"score": 2
} |
#### File: dissonance/client/client.py
```python
from .api_client import APIClient
from .gateway_socket import GatewaySocket
from ..lib.event_emitter import EventEmitter
from ..stores import Stores, autodiscover
autodiscover()
class Client(object):
def __init__(self):
self._gateway_socket = None
self.config = {}
self.events = EventEmitter()
self.api_client = APIClient(self)
self.stores = Stores(self)
self.stores.dispatcher.link_events(self.events)
self.me = None
self.scheduled_greenlets = set()
def login(self, email, password):
self.api_client.login(email, password)
return self
def start(self):
gateway = self.api_client.discover_gateway()
self._gateway_socket = GatewaySocket(gateway, self)
self._gateway_socket.start()
return self
def stop(self):
if self._gateway_socket:
self._gateway_socket.kill()
self._gateway_socket = None
for greenlet in self.scheduled_greenlets:
greenlet.kill()
self.scheduled_greenlets.clear()
def join(self):
if self._gateway_socket:
self._gateway_socket.join()
def handle_packet(self, packet):
event = packet['t']
data = packet['d']
import json
print(json.dumps(data, sort_keys=True, indent=2))
self.stores.dispatch(event, data)
handler_name = 'handle_%s' % event.lower()
handler_fn = getattr(self, handler_name, None)
if handler_fn:
handler_fn(data)
def handle_ready(self, data):
print('ready', data.keys())
self.me = self.stores.users.with_id(data['user']['id'])
self.emit('ready', ready_data=data)
def handle_message_create(self, message):
message = self.stores.messages.with_id(message['id'])
self.emit('message-create', message=message)
def emit(self, event, **kwargs):
self.events.emit(event, client=self, **kwargs)
def call_later(self, callback, *args, **kwargs):
pass
def cancel_call_later(self, greenlet):
self.scheduled_greenlets.discard(greenlet)
greenlet.kill()
def send_message(self, channel, message):
"""
Convenience function to send a message to a channel.
"""
return self.api_client.create_message(channel.id, message)
```
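A minimal driver sketch using only the methods defined above; the credentials are placeholders:
```python
# Placeholder credentials; login() and start() both return the client, so calls can be chained.
client = Client()
client.login('user@example.com', 'secret')   # authenticates through APIClient
client.start()                               # discovers the gateway and opens the websocket
client.join()                                # block until the gateway socket terminates
```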
#### File: dissonance/lib/functional.py
```python
import functools
from gevent.lock import RLock
def once(wrapped):
"""
Decorates a function that takes no arguments, ensuring it's only called once & that the result
is memoized. This function is greenlet safe.
"""
wrapped._once_called = False
wrapped._once_retval = None
lock = RLock()
@functools.wraps(wrapped)
def wrapper():
if wrapped._once_called:
return wrapped._once_retval
with lock:
if wrapped._once_called:
return wrapped._once_retval
wrapped._once_retval = wrapped()
wrapped._once_called = True
return wrapped._once_retval
return wrapper
```
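A quick sketch of the `once` decorator in use (the decorated function is made up):
```python
@once
def load_config():
    print('loading...')      # executed only on the first call
    return {'debug': True}

load_config()   # prints 'loading...' and memoizes the dict
load_config()   # returns the memoized dict without printing
```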
#### File: dissonance/lib/store.py
```python
from .dispatcher import Dispatcher
def wait_for(*store_names):
def wrapper(wrapped):
wrapped._dependencies = getattr(wrapper, '_dependencies', set()) | set(store_names)
return wrapped
return wrapper
def handler(*events):
def wrapper(wrapped):
wrapped._events = getattr(wrapper, '_events', set()) | set(events)
return wrapped
return wrapper
class BaseStore(object):
dispatch_token = None
def __init__(self, stores):
self._stores = stores
self._dispatcher = stores.dispatcher
def discover_handlers(self):
for name in dir(self):
handler_fn = getattr(self, name)
events = getattr(handler_fn, '_events', None)
if events and callable(handler_fn):
for event in events:
self.add_handler(event, handler_fn)
def add_handler(self, event, handler):
self._dispatcher.on(self.dispatch_token, event, handler)
def initialize(self):
pass
class BaseStores(object):
_known_stores = {}
@classmethod
def register(cls, name):
        def registry(store_cls):
            cls._known_stores[name] = store_cls
            return store_cls  # hand the class back so @register(...) does not rebind it to None
        return registry
def __init__(self, *args, **kwargs):
self.dispatcher = Dispatcher()
self.dispatch = self.dispatcher.dispatch
self.stores = []
for name, store_class in self._known_stores.items():
store = store_class(self, *args, **kwargs)
store.dispatch_token = name
setattr(self, name, store)
self.stores.append(store)
for store in self.stores:
store.discover_handlers()
store.initialize()
```
#### File: dissonance/models/message.py
```python
import random
class Message(object):
attachments = None
author_id = None
content = None
mentions = None
embeds = None
mention_everyone = None
timestamp = None
edited_timestamp = None
tokens = None
is_direct_message = False
def __init__(self, stores, id, channel_id):
self._stores = stores
self.id = int(id)
self.channel_id = int(channel_id)
@property
def channel(self):
return self._stores.channels.with_id(self.channel_id)
@property
def author(self):
return self._stores.users.with_id(self.author_id)
def update(self, message_data):
self.content = message_data.get('content', self.content)
self.tokens = self.content.split()
self.mentions = message_data.get('mentions', self.mentions)
self.mention_everyone = bool(message_data.get('mention_everyone', self.mention_everyone))
self.embeds = message_data.get('embeds', self.embeds)
self.edited_timestamp = message_data.get('edited_timestamp', self.edited_timestamp)
self.timestamp = message_data.get('timestamp', self.timestamp)
if 'author' in message_data:
self.author_id = int(message_data['author']['id'])
def __repr__(self):
return u'<Message author: %r, channel: %r, content: %r>' % (self.author, self.channel, self.content)
def reply_to_user(self, message):
if not self.is_direct_message:
message = '%s: %s' % (self.author.username, message)
return self.reply(message)
def reply(self, message):
return self._stores.client.send_message(self.channel, message)
def reply_random(self, choices):
return self.reply_to_user(random.choice(choices))
def reply_with_one_of(self, *choices):
return self.reply_random(choices)
```
#### File: dissonance/stores/guilds.py
```python
from . import register, ObjectHolder, handler, wait_for
from ..client import events
from ..models.guild import Guild
@register('guilds')
class GuildStore(ObjectHolder):
object_class = Guild
@handler(events.READY)
@wait_for('channels', 'users')
def handle_ready(self, ready_packet):
for guild in ready_packet['guilds']:
self.add(Guild.from_ready_packet(self._stores, **guild))
print('pg', guild)
def add(self, guild):
self._objects[guild.id] = guild
def __repr__(self):
return u'Guilds[%s]' % (', '.join(repr(g) for g in self._objects.values()))
```
#### File: dissonance/stores/typing.py
```python
from collections import defaultdict
from . import register, handler, Store
from ..client import events
@register('typing')
class TypingStore(Store):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.typing_users_by_channel = defaultdict(set)
@handler(events.READY)
def handle_ready(self, ready_packet):
self.typing_users_by_channel = defaultdict(set)
@handler(events.TYPING_START)
def handle_typing_start(self, typing_event):
self.client.call_later(None)
pass
@handler(events.TYPING_STOP)
def handle_typing_stop(self, typing_event):
pass
@handler(events.MESSAGE_CREATE)
def handle_message_create(self, message):
pass
def get_typing_users(self, channel_id):
if channel_id in self.typing_users_by_channel:
return self.typing_users_by_channel[channel_id]
return set()
def is_user_typing(self, channel_id, user_id):
return user_id in self.get_typing_users(channel_id)
```
#### File: doot/modules/dinner.py
```python
from lxml.html import fromstring
import requests
from werkzeug.utils import escape
import module
@module.respond('what should i eat')
@module.async(timeout=5)
def dinner(message):
response = requests.get('http://whatthefuckshouldimakefordinner.com/')
if response.status_code == requests.codes.ok:
e = fromstring(response.text)
dl = e.cssselect('dl')
a, b = [(t.text_content()).strip() for t in dl[:2]]
link = dl[1].xpath('dt/a')[0].attrib['href']
message.reply_to_user(
'%s %s (%s).' % (a, b, link)
)
```
#### File: jhgg/dissonance/setup.py
```python
import os
import dissonance
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="dissonance",
version=dissonance.version.split('-')[0] + 'b0',
author="<NAME>",
author_email="<EMAIL>",
description="Discord python client & bot framework.",
license="MIT",
keywords="chat discord bot irc jeev",
url="https://github.com/jhgg/dissonance",
packages=find_packages(exclude=['modules']),
install_requires=[
'certifi==14.5.14',
'coloredlogs==1.0.1',
'Flask==0.10.1',
'gevent==1.1rc5',
'greenlet==0.4.9',
'requests==2.9.1',
'six==1.10.0',
'websocket-client==0.35.0',
'wheel==0.24.0',
],
include_package_data=True,
zip_safe=False,
scripts=['bin/dissonance'],
long_description=read('README.md'),
classifiers=[
"Development Status :: 4 - Beta",
"Topic :: Communications :: Chat",
"Topic :: Utilities",
"Framework :: Flask",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 2 :: Only",
"License :: OSI Approved :: MIT License",
],
)
``` |
{
"source": "jhgg/epoxy",
"score": 2
} |
#### File: epoxy/bases/class_type_creator.py
```python
from ..utils.thunk import ResolveThunk, ThunkList
class ClassTypeCreator(object):
def __init__(self, registry, class_type_creator):
self._registry = registry
self._class_type_creator = class_type_creator
def __getattr__(self, item):
return self[item]
def __getitem__(self, item):
if isinstance(item, tuple):
type_thunk = ThunkList([ResolveThunk(self._registry._resolve_type, i) for i in item])
else:
type_thunk = ThunkList([ResolveThunk(self._registry._resolve_type, item)])
return self._class_type_creator(type_thunk)
```
#### File: epoxy/metaclasses/interface.py
```python
from collections import OrderedDict
from functools import partial
from graphql.core.type.definition import GraphQLInterfaceType
from ..utils.get_declared_fields import get_declared_fields
from ..utils.make_default_resolver import make_default_resolver
from ..utils.weak_ref_holder import WeakRefHolder
from ..utils.yank_potential_fields import yank_potential_fields
class InterfaceMeta(type):
def __new__(mcs, name, bases, attrs):
if attrs.pop('abstract', False):
return super(InterfaceMeta, mcs).__new__(mcs, name, bases, attrs)
class_ref = WeakRefHolder()
declared_fields = get_declared_fields(name, yank_potential_fields(attrs, bases))
interface = GraphQLInterfaceType(
name,
fields=partial(mcs._build_field_map, class_ref, declared_fields),
description=attrs.get('__doc__'),
)
mcs._register(interface, declared_fields)
cls = super(InterfaceMeta, mcs).__new__(mcs, name, bases, attrs)
cls.T = interface
cls._registry = mcs._get_registry()
class_ref.set(cls)
return cls
@staticmethod
def _register(object_type, declared_fields):
raise NotImplementedError('_register must be implemented in the sub-metaclass')
@staticmethod
def _get_registry():
raise NotImplementedError('_get_registry must be implemented in the sub-metaclass')
@staticmethod
def _build_field_map(class_ref, fields):
cls = class_ref.get()
if not cls:
return
instance = cls()
registry = cls._registry
field_map = OrderedDict()
for field_attr_name, field in fields:
interface_resolve_fn = (
field.resolver or
getattr(instance, 'resolve_{}'.format(field_attr_name), None)
)
if interface_resolve_fn:
field._interface_resolver = interface_resolve_fn
resolve_fn = interface_resolve_fn or make_default_resolver(field_attr_name)
field_map[field.name] = field.to_field(registry, resolve_fn)
return field_map
```
#### File: epoxy/metaclasses/mutation.py
```python
import functools
from graphql.core.type import GraphQLField, GraphQLNonNull
from graphql.core.type.definition import GraphQLArgument
class MutationMeta(type):
def __new__(mcs, name, bases, attrs):
if attrs.pop('abstract', False):
return super(MutationMeta, mcs).__new__(mcs, name, bases, attrs)
registry = mcs._get_registry()
input = attrs.pop('Input')
output = attrs.pop('Output')
assert input and not hasattr(input, 'T'), 'A mutation must define a class named "Input" inside of it that ' \
'does not subclass an R.InputType'
assert output and not hasattr(output, 'T'), 'A mutation must define a class named "Output" inside of it that ' \
'does not subclass an R.ObjectType'
input_attrs = mcs._process_input_attrs(registry, dict(vars(input)))
output_attrs = mcs._process_output_attrs(registry, dict(vars(output)))
Input = type(name + 'Input', (registry.InputType,), input_attrs)
Output = type(name + 'Payload', (registry.ObjectType,), output_attrs)
attrs['Input'] = Input
attrs['Output'] = Output
cls = super(MutationMeta, mcs).__new__(mcs, name, bases, attrs)
cls._registry = registry
instance = cls()
resolver = getattr(instance, 'execute')
assert resolver and callable(resolver), 'A mutation must define a function named "execute" that will execute ' \
'the mutation.'
mutation_name = name[0].lower() + name[1:]
mcs._register(mutation_name, registry.with_resolved_types(lambda R: GraphQLField(
type=R[Output],
args={
'input': GraphQLArgument(GraphQLNonNull(R[Input]))
},
resolver=functools.partial(mcs._process_resolver, resolver, Input),
description=attrs.get('__doc__', None)
)))
@staticmethod
def _register(mutation_name, mutation):
raise NotImplementedError('_register must be implemented in the sub-metaclass')
@staticmethod
def _get_registry():
raise NotImplementedError('_get_registry must be implemented in the sub-metaclass')
@staticmethod
def _process_input_attrs(registry, input_attrs):
return input_attrs
@staticmethod
def _process_output_attrs(registry, output_attrs):
return output_attrs
@staticmethod
def _process_resolver(resolver, input_class, obj, args, info):
return resolver(obj, input_class(args.get('input')), info)
```
#### File: epoxy/epoxy/registry.py
```python
from collections import OrderedDict, defaultdict
from enum import Enum
from functools import partial
from operator import itemgetter
from graphql.core.type import (
GraphQLBoolean,
GraphQLEnumType,
GraphQLFloat,
GraphQLID,
GraphQLInputObjectType,
GraphQLInt,
GraphQLInterfaceType,
GraphQLObjectType,
GraphQLScalarType,
GraphQLSchema,
GraphQLString,
GraphQLUnionType
)
from graphql.core.type.definition import GraphQLType, get_named_type
import six
from .bases.class_type_creator import ClassTypeCreator
from .bases.input_type import InputTypeBase
from .bases.mutation import MutationBase
from .bases.object_type import ObjectTypeBase
from .bases.scalar import ScalarBase
from .metaclasses.input_type import InputTypeMeta
from .metaclasses.interface import InterfaceMeta
from .metaclasses.mutation import MutationMeta
from .metaclasses.object_type import ObjectTypeMeta
from .metaclasses.scalar import ScalarMeta
from .metaclasses.union import UnionMeta
from .types.argument import Argument
from .types.field import Field, InputField
from .utils.enum_to_graphql_enum import enum_to_graphql_enum
from .utils.maybe_t import maybe_t
from .utils.method_dispatch import method_dispatch
from .utils.thunk import AttributeTypeThunk, IdentityTypeThunk, RootTypeThunk, ThunkList, TransformThunkList
builtin_scalars = [
GraphQLBoolean,
GraphQLFloat,
GraphQLID,
GraphQLInt,
GraphQLString
]
class TypeRegistry(object):
_reserved_names = frozenset([
# Types
        'ObjectType', 'InputType', 'Union', 'Interface', 'Implements', 'Scalar',
# Functions
'Schema', 'Register', 'Mixin',
# Mutations
'Mutation', 'Mutations',
# Fields
'Field', 'InputField', 'Argument'
])
Field = Field
InputField = InputField
Argument = Argument
def __init__(self):
self._registered_types = OrderedDict()
self._added_impl_types = set()
self._interface_declared_fields = {}
self._registered_types_can_be = defaultdict(set)
self._pending_types_can_be = defaultdict(set)
self._proxy = ResolvedRegistryProxy(self)
self._mutations = OrderedDict()
self.ObjectType = self._create_object_type_class()
self.InputType = self._create_input_type_class()
self.Implements = ClassTypeCreator(self, self._create_object_type_class)
self.Union = ClassTypeCreator(self, self._create_union_type_class)
self.Interface = self._create_interface_type_class()
self.Mutation = self._create_mutation_type_class()
self.Scalar = self._create_scalar_type_class()
for type in builtin_scalars:
self.Register(type)
@method_dispatch
def Register(self, t):
# Can't use dispatch, as it's not an instance, but a subclass of.
if issubclass(t, Enum):
self.Register(enum_to_graphql_enum(t))
return t
raise NotImplementedError('Unable to register {}.'.format(t))
@Register.register(GraphQLObjectType)
@Register.register(GraphQLUnionType)
@Register.register(GraphQLEnumType)
@Register.register(GraphQLInterfaceType)
@Register.register(GraphQLInputObjectType)
@Register.register(GraphQLScalarType)
def register_(self, t):
if t.name in self._registered_types and self._registered_types[t.name] is t:
return t
assert not t.name.startswith('_'), \
'Registered type name cannot start with an "_".'
assert t.name not in self._reserved_names, \
'You cannot register a type named "{}".'.format(t.name)
assert t.name not in self._registered_types, \
'There is already a registered type named "{}".'.format(t.name)
self._registered_types[t.name] = t
return t
def _resolve_type(self, item):
if item is None:
return None
if not isinstance(item, str):
item = maybe_t(item)
assert isinstance(item, GraphQLType), \
'Attempted to resolve an item "{}" that is not a GraphQLType'.format(item)
named_type = get_named_type(item)
known_type = self._registered_types.get(named_type.name)
# Happens when we attempt to resolve an un-registered type.
assert known_type and known_type not in self._reserved_names, \
'Attempted to resolve a type "{}" that is not registered with this Registry.'.format(item)
# Happens when we attempt to resolve a type that is already registered, but isn't the same type.
assert known_type is named_type, \
'Attempted to resolve a type "{}" that does not match the already registered type.'.format(item)
return item
value = self._registered_types.get(item)
assert value, 'Type "{}" was requested, but was not registered.'.format(item)
return value
def __getattr__(self, item):
if item.startswith('_'):
raise AttributeError(item)
return RootTypeThunk(self, self._resolve_type, item)
def __getitem__(self, item):
if isinstance(item, tuple):
return ThunkList([AttributeTypeThunk(self._resolve_type, i) for i in item])
return RootTypeThunk(self, self._resolve_type, item)
def __call__(self, t):
return self.Register(t)
def _create_object_type_class(self, interface_thunk=None):
registry = self
class RegistryObjectTypeMeta(ObjectTypeMeta):
@staticmethod
def _register(object_type, type_class):
registry.Register(object_type)
registry._registered_types_can_be[object_type].add(type_class)
@staticmethod
def _get_registry():
return registry
@staticmethod
def _get_interfaces():
if interface_thunk is not None:
return TransformThunkList(interface_thunk, get_named_type)
return None
@six.add_metaclass(RegistryObjectTypeMeta)
class ObjectType(ObjectTypeBase):
abstract = True
return ObjectType
def _create_interface_type_class(self):
registry = self
class RegistryInterfaceMeta(InterfaceMeta):
@staticmethod
def _register(interface, declared_fields):
registry.Register(interface)
registry._add_interface_declared_fields(interface, declared_fields)
@staticmethod
def _get_registry():
return registry
class Interface(six.with_metaclass(RegistryInterfaceMeta)):
abstract = True
return Interface
def _create_union_type_class(self, types_thunk):
registry = self
class RegistryUnionMeta(UnionMeta):
@staticmethod
def _register(union):
registry.Register(union)
@staticmethod
def _get_registry():
return registry
@staticmethod
def _get_types():
return TransformThunkList(types_thunk, get_named_type)
class Union(six.with_metaclass(RegistryUnionMeta)):
abstract = True
return Union
def _create_input_type_class(self):
registry = self
class RegistryInputTypeMeta(InputTypeMeta):
@staticmethod
def _register(input_type):
registry.Register(input_type)
@staticmethod
def _get_registry():
return registry
@six.add_metaclass(RegistryInputTypeMeta)
class InputType(InputTypeBase):
abstract = True
return InputType
def _create_scalar_type_class(self):
registry = self
class RegistryScalarMeta(ScalarMeta):
@staticmethod
def _register(scalar):
registry.Register(scalar)
@staticmethod
def _get_registry():
return registry
@six.add_metaclass(RegistryScalarMeta)
class Scalar(ScalarBase):
abstract = True
return Scalar
def _create_mutation_type_class(self):
registry = self
class RegistryMutationMeta(MutationMeta):
@staticmethod
def _register(mutation_name, mutation):
registry._register_mutation(mutation_name, mutation)
@staticmethod
def _get_registry():
return registry
@six.add_metaclass(RegistryMutationMeta)
class Mutation(MutationBase):
abstract = True
return Mutation
def _register_mutation(self, mutation_name, mutation):
assert mutation_name not in self._mutations, \
'There is already a registered mutation named "{}".'.format(mutation_name)
self._mutations[mutation_name] = mutation
@property
def Mutations(self):
if not self._mutations:
raise TypeError("No mutations have been registered.")
existing_mutation_type = self._registered_types.get('Mutations')
if existing_mutation_type:
return IdentityTypeThunk(existing_mutation_type)
mutations = GraphQLObjectType(
name='Mutations',
fields=lambda: OrderedDict([(k, v()) for k, v in sorted(self._mutations.items(), key=itemgetter(0))])
)
self._registered_types[mutations.name] = mutations
return IdentityTypeThunk(mutations)
def _create_is_type_of(self, type):
return partial(self._is_type_of, type)
def _is_type_of(self, type, obj, info):
return obj.__class__ in self._registered_types_can_be[type]
def _add_interface_declared_fields(self, interface, attrs):
self._interface_declared_fields[interface] = attrs
def _get_interface_declared_fields(self, interface):
return self._interface_declared_fields.get(interface, {})
def _register_possible_type_for(self, type_name, klass):
type = self._registered_types.get(type_name)
if type:
self._registered_types_can_be[type].add(klass)
else:
self._pending_types_can_be[type_name].add(klass)
def _add_impl_to_interfaces(self):
for type in self._registered_types.values():
if not isinstance(type, GraphQLObjectType):
continue
if type.name in self._pending_types_can_be:
self._registered_types_can_be[type] |= self._pending_types_can_be.pop(type.name)
if type in self._added_impl_types:
continue
self._added_impl_types.add(type)
for interface in type.get_interfaces():
if type in interface._impls:
continue
interface._impls.append(type)
def Schema(self, query, mutation=None, subscription=None):
query = self[query]()
mutation = self[mutation]()
subscription = self[subscription]()
self._add_impl_to_interfaces()
return GraphQLSchema(query=query, mutation=mutation, subscription=subscription)
def Mixin(self, mixin_cls, *args, **kwargs):
mixin = mixin_cls(self, *args, **kwargs)
mixin.register_types()
return mixin
def type(self, name):
return self[name]()
def types(self, *names):
return self[names]
def with_resolved_types(self, thunk):
assert callable(thunk)
return partial(thunk, self._proxy)
class ResolvedRegistryProxy(object):
def __init__(self, registry):
self._registry = registry
def __getitem__(self, item):
return self._registry[item]()
def __getattr__(self, item):
if item.startswith('_'):
raise AttributeError(item)
return self._registry[item]()
__all__ = ['TypeRegistry']
```
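To make the registry's moving parts concrete, here is a small usage sketch in the style of the test files further down in this dump (the `Dog` and `Query` names are illustrative, not part of registry.py):
```python
from epoxy.registry import TypeRegistry

R = TypeRegistry()

class Dog(R.ObjectType):
    name = R.String
    friend = R.Dog        # forward/self references resolve lazily through thunks

class Query(R.ObjectType):
    dog = R.Dog

    def resolve_dog(self, obj, args, info):
        return Dog(name='Clifford')

schema = R.Schema(R.Query)   # resolves the thunked types and builds a GraphQLSchema
```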
#### File: epoxy/utils/first_of.py
```python
from .maybe_callable import maybe_callable
def first_of(*args):
for arg in args:
arg = maybe_callable(arg)
if arg:
return arg
```
#### File: epoxy/utils/yank_potential_fields.py
```python
from ..types.field import Field
from .thunk import TypeThunk
def yank_potential_fields(attrs, bases, field_class=Field):
field_attrs = {}
potential_types = (field_class, TypeThunk)
for klass in reversed(bases):
for field_attr_name, obj in klass.__dict__.items():
if field_attr_name == 'T':
continue
if isinstance(obj, potential_types):
field_attrs[field_attr_name] = obj
for field_attr_name, obj in list(attrs.items()):
if field_attr_name == 'T':
continue
if isinstance(obj, potential_types):
field_attrs[field_attr_name] = attrs.pop(field_attr_name)
return field_attrs
```
#### File: epoxy/tests/test_arguments.py
```python
from collections import OrderedDict
from pytest import raises
from graphql.core import graphql
from graphql.core.type import GraphQLString, GraphQLInt, GraphQLID, GraphQLNonNull
from epoxy.registry import TypeRegistry
from epoxy.types.argument import Argument
make_args = lambda R: {
'a': R.Int,
'b_cool': R.String,
'c': R.ID.NonNull,
'd': Argument(R.String, default_value="hello world"),
'z': R.String(default_value="hello again", description="This is a description"),
'x': R.Int(default_value=7),
'y': Argument(GraphQLString),
'q': R.TestInputType,
'w': Argument(R.TestInputType)
}
make_ordered_dict_args = lambda R: OrderedDict([
('a', R.Int),
('b_cool', R.String),
('c', R.ID.NonNull),
('d', Argument(R.String, default_value="hello world")),
('z', R.String(default_value="hello again", description="This is a description")),
('x', R.Int(default_value=7)),
('y', Argument(GraphQLString)),
('q', R.TestInputType),
('w', Argument(R.TestInputType)),
])
def check_args(test_input_type, args):
expected_keys = ['a', 'bCool', 'c', 'd', 'z', 'x', 'y', 'q', 'w']
keys = [a.name for a in args]
assert keys == expected_keys
a, b, c, d, z, x, y, q, w = args
assert a.type is GraphQLInt
assert b.type is GraphQLString
assert isinstance(c.type, GraphQLNonNull)
assert c.type.of_type is GraphQLID
assert d.type is GraphQLString
assert d.default_value == "hello world"
assert z.type is GraphQLString
assert z.default_value == "hello again"
assert z.description == "This is a description"
assert x.type is GraphQLInt
assert x.default_value == 7
assert y.type is GraphQLString
assert q.type is test_input_type
assert w.type is test_input_type
def test_args_will_magically_order():
R = TypeRegistry()
class TestInputType(R.InputType):
a = R.Int
b = R.Int
class Query(R.ObjectType):
int = R.Int(
args=make_args(R)
)
int_from_field = R.Field(R.Int, args=make_args(R))
query_type = R.Query()
check_args(TestInputType.T, query_type.get_fields()['int'].args)
check_args(TestInputType.T, query_type.get_fields()['intFromField'].args)
def test_args_can_also_use_ordered_dict():
R = TypeRegistry()
class TestInputType(R.InputType):
a = R.Int
b = R.Int
class Query(R.ObjectType):
int = R.Int(
args=make_ordered_dict_args(R)
)
int_from_field = R.Field(R.Int, args=make_ordered_dict_args(R))
query_type = R.Query()
check_args(TestInputType.T, query_type.get_fields()['int'].args)
check_args(TestInputType.T, query_type.get_fields()['intFromField'].args)
def test_resolved_args_will_be_translated_to_original_casing():
R = TypeRegistry()
class Query(R.ObjectType):
argument_keys = R.String.List(args={
'foo': R.String,
'foo_bar': R.String
})
def resolve_argument_keys(self, obj, args, info):
return list(sorted(args.keys()))
Schema = R.Schema(R.Query)
result = graphql(Schema, '''
{
argumentKeys(foo: "Hello", fooBar: "World")
}
''')
assert not result.errors
assert result.data == {
'argumentKeys': ['foo', 'foo_bar']
}
def test_will_recognize_casing_conversion_conflicts():
R = TypeRegistry()
class Query(R.ObjectType):
argument_keys = R.String.List(args={
'foo_bar': R.String,
'fooBar': R.String
})
def resolve_argument_keys(self, obj, args, info):
return list(sorted(args.keys()))
with raises(ValueError) as excinfo:
Schema = R.Schema(R.Query)
assert str(excinfo.value) in (
'Argument foo_bar already exists as fooBar',
'Argument fooBar already exists as foo_bar',
)
```
#### File: epoxy/tests/test_declarative_definition.py
```python
from graphql.core.type.definition import GraphQLObjectType, GraphQLNonNull, GraphQLList, GraphQLField
from graphql.core.type.scalars import GraphQLString
from epoxy.registry import TypeRegistry
from pytest import raises
def check_dog(R, Dog):
assert isinstance(Dog.T, GraphQLObjectType)
assert R.type('Dog') is Dog.T
fields = Dog.T.get_fields()
assert list(fields.keys()) == ['name']
assert fields['name'].type == GraphQLString
assert fields['name'].name == 'name'
def test_register_single_type():
R = TypeRegistry()
class Dog(R.ObjectType):
name = R.Field(R.String)
check_dog(R, Dog)
def test_register_single_type_using_string():
R = TypeRegistry()
class Dog(R.ObjectType):
name = R.Field('String')
check_dog(R, Dog)
def test_register_type_can_declare_builtin_scalar_types_directly():
R = TypeRegistry()
class Dog(R.ObjectType):
name = R.String
check_dog(R, Dog)
def test_register_type_can_use_builtin_graphql_types_in_field():
R = TypeRegistry()
class Dog(R.ObjectType):
name = R.Field(GraphQLString)
check_dog(R, Dog)
def test_can_use_mixins():
R = TypeRegistry()
class DogMixin():
name = R.String
class Dog(R.ObjectType, DogMixin):
pass
check_dog(R, Dog)
def test_register_type_can_declare_builtin_scalar_type_as_non_null():
R = TypeRegistry()
class Dog(R.ObjectType):
name = R.String.NonNull
fields = Dog.T.get_fields()
assert list(fields.keys()) == ['name']
assert str(fields['name'].type) == 'String!'
def test_register_type_can_declare_other_registered_types_directly():
R = TypeRegistry()
class Dog(R.ObjectType):
friend = R.Dog
fields = Dog.T.get_fields()
assert list(fields.keys()) == ['friend']
assert fields['friend'].type == Dog.T
assert fields['friend'].name == 'friend'
def test_register_type_can_declare_other_registered_types_directly_as_non_null():
R = TypeRegistry()
class Dog(R.ObjectType):
friend = R.Dog.NonNull
fields = Dog.T.get_fields()
assert list(fields.keys()) == ['friend']
type = fields['friend'].type
assert isinstance(type, GraphQLNonNull)
assert type.of_type == Dog.T
assert fields['friend'].name == 'friend'
assert str(type) == 'Dog!'
def test_register_type_can_declare_other_registered_types_directly_as_list():
R = TypeRegistry()
class Dog(R.ObjectType):
friend = R.Dog.List
fields = Dog.T.get_fields()
assert list(fields.keys()) == ['friend']
type = fields['friend'].type
assert isinstance(type, GraphQLList)
assert type.of_type == Dog.T
assert fields['friend'].name == 'friend'
assert str(type) == '[Dog]'
def test_register_type_can_declare_other_registered_types_directly_as_list_of_non_null():
R = TypeRegistry()
class Dog(R.ObjectType):
friend = R.Dog.NonNull.List
fields = Dog.T.get_fields()
assert list(fields.keys()) == ['friend']
assert fields['friend'].name == 'friend'
type = fields['friend'].type
assert str(type) == '[Dog!]'
assert isinstance(type, GraphQLList)
type = type.of_type
assert isinstance(type, GraphQLNonNull)
assert type.of_type == Dog.T
def test_register_type_can_declare_other_registered_types_directly_as_non_null_list_of_non_null():
R = TypeRegistry()
class Dog(R.ObjectType):
friend = R.Dog.NonNull.List.NonNull
fields = Dog.T.get_fields()
assert list(fields.keys()) == ['friend']
assert fields['friend'].name == 'friend'
type = fields['friend'].type
assert str(type) == '[Dog!]!'
assert isinstance(type, GraphQLNonNull)
type = type.of_type
assert isinstance(type, GraphQLList)
type = type.of_type
assert isinstance(type, GraphQLNonNull)
assert type.of_type == Dog.T
def test_rejects_object_type_definition_with_duplicated_field_names():
R = TypeRegistry()
with raises(AssertionError) as excinfo:
class Dog(R.ObjectType):
friend = R.Dog.NonNull
friend_aliased = R.Field(R.Dog, name='friend')
assert str(excinfo.value) == 'Duplicate field definition for name "friend" in type "Dog.friend_aliased".'
def test_rejects_interface_type_definition_with_duplicated_field_names():
R = TypeRegistry()
with raises(AssertionError) as excinfo:
class Dog(R.Interface):
friend = R.Dog.NonNull
friend_aliased = R.Field(R.Dog, name='friend')
assert str(excinfo.value) == 'Duplicate field definition for name "friend" in type "Dog.friend_aliased".'
def test_orders_fields_in_order_declared():
R = TypeRegistry()
class Dog(R.ObjectType):
id = R.ID
name = R.Field('String')
dog = R.Dog
some_other_field = R.Field(R.Int)
some_other_dog = R.Field('Dog')
foo = R.String
bar = R.String
aaa = R.String
field_order = list(Dog.T.get_fields().keys())
assert field_order == ['id', 'name', 'dog', 'someOtherField', 'someOtherDog', 'foo', 'bar', 'aaa']
def test_cannot_resolve_unregistered_type():
R = TypeRegistry()
Dog = GraphQLObjectType(
name='Dog',
fields={
'a': GraphQLField(GraphQLString)
}
)
with raises(AssertionError) as excinfo:
R[Dog]()
assert str(excinfo.value) == 'Attempted to resolve a type "Dog" that is not registered with this Registry.'
R(Dog)
assert R[Dog]() is Dog
def test_cannot_resolve_type_of_same_name_that_is_mismatched():
R = TypeRegistry()
class Dog(R.ObjectType):
a = R.String
SomeOtherDog = GraphQLObjectType(
name='Dog',
fields={
'a': GraphQLField(GraphQLString)
}
)
with raises(AssertionError) as excinfo:
R[SomeOtherDog]()
assert str(excinfo.value) == 'Attempted to resolve a type "Dog" that does not match the already registered type.'
```
#### File: epoxy/tests/test_interfaces.py
```python
from graphql.core import graphql
from epoxy.registry import TypeRegistry
def test_register_interface():
R = TypeRegistry()
class Character(R.Interface):
id = R.ID
name = R.String
friends_with = R.Character.List
lives_remaining = R.Field(R.Int)
character = Character.T
fields = character.get_fields()
assert list(fields.keys()) == ['id', 'name', 'friendsWith', 'livesRemaining']
def test_register_single_type():
R = TypeRegistry()
class Character(R.Interface):
id = R.ID
name = R.String
friends_with = R.Character.List
lives_remaining = R.Field(R.Int)
class Human(R.Implements.Character):
home_planet = R.String
human = Human.T
fields = human.get_fields()
assert list(fields.keys()) == ['id', 'name', 'friendsWith', 'livesRemaining', 'homePlanet']
def test_implements_multiple_interfaces_via_r():
R = TypeRegistry()
class Character(R.Interface):
id = R.ID
name = R.String
friends_with = R.Character.List
lives_remaining = R.Field(R.Int)
class Bean(R.Interface):
real = R.Boolean
class Human(R.Implements[R.Character, R.Bean]):
home_planet = R.String
human = Human.T
fields = human.get_fields()
assert list(fields.keys()) == ['id', 'name', 'friendsWith', 'livesRemaining', 'real', 'homePlanet']
def test_implements_multiple_interfaces_via_class():
R = TypeRegistry()
class Character(R.Interface):
id = R.ID
name = R.String
friends_with = R.Character.List
lives_remaining = R.Field(R.Int)
class Bean(R.Interface):
real = R.Boolean
class Human(R.Implements[Character, Bean]):
home_planet = R.String
human = Human.T
fields = human.get_fields()
assert list(fields.keys()) == ['id', 'name', 'friendsWith', 'livesRemaining', 'real', 'homePlanet']
def test_implements_multiple_interfaces_via_string():
R = TypeRegistry()
class Character(R.Interface):
id = R.ID
name = R.String
friends_with = R.Character.List
lives_remaining = R.Field(R.Int)
class Bean(R.Interface):
real = R.Boolean
class Human(R.Implements['Character', 'Bean']):
home_planet = R.String
human = Human.T
fields = human.get_fields()
assert list(fields.keys()) == ['id', 'name', 'friendsWith', 'livesRemaining', 'real', 'homePlanet']
def test_is_sensitive_to_implementation_order():
R = TypeRegistry()
class Character(R.Interface):
id = R.ID
name = R.String
friends_with = R.Character.List
lives_remaining = R.Field(R.Int)
class Bean(R.Interface):
real = R.Boolean
hero = R.Boolean
class Human(R.Implements[R.Bean, R.Character]):
home_planet = R.String
human = Human.T
fields = human.get_fields()
assert list(fields.keys()) == ['real', 'hero', 'id', 'name', 'friendsWith', 'livesRemaining', 'homePlanet']
def test_definition_order_wont_affect_field_order():
R = TypeRegistry()
class Bean(R.Interface):
real = R.Boolean
hero = R.Boolean
class Character(R.Interface):
id = R.ID
name = R.String
friends_with = R.Character.List
lives_remaining = R.Field(R.Int)
class Human(R.Implements[R.Character, Bean]):
home_planet = R.String
human = Human.T
fields = human.get_fields()
assert list(fields.keys()) == ['id', 'name', 'friendsWith', 'livesRemaining', 'real', 'hero', 'homePlanet']
def test_runtime_type_resolution():
R = TypeRegistry()
class Pet(R.Interface):
name = R.String
class Dog(R.Implements.Pet):
bark = R.String
class Cat(R.Implements.Pet):
meow = R.String
class Query(R.ObjectType):
pets = R.Pet.List
schema = R.Schema(Query)
data = Query(pets=[
Dog(name='Clifford', bark='Really big bark, because it\'s a really big dog.'),
Cat(name='Garfield', meow='Lasagna')
])
result = graphql(schema, '''
{
pets {
name
__typename
... on Dog {
bark
}
... on Cat {
meow
}
}
}
''', data)
assert not result.errors
assert result.data == {
'pets': [{'__typename': 'Dog', 'bark': "Really big bark, because it's a really big dog.", 'name': 'Clifford'},
{'__typename': 'Cat', 'meow': 'Lasagna', 'name': 'Garfield'}]
}
```
#### File: epoxy/tests/test_registration.py
```python
from graphql.core.type.definition import GraphQLObjectType, GraphQLField
from graphql.core.type.scalars import GraphQLString
from pytest import raises
from epoxy import TypeRegistry
def test_will_disallow_duplicate_type_names_from_being_registered():
type = GraphQLObjectType(name='Query', fields={
'a': GraphQLField(GraphQLString)
})
type_duplicated = GraphQLObjectType(name='Query', fields={
'a': GraphQLField(GraphQLString)
})
R = TypeRegistry()
R(type)
with raises(AssertionError) as excinfo:
R(type_duplicated)
assert str(excinfo.value) == 'There is already a registered type named "Query".'
def test_will_allow_the_same_type_to_be_registered_more_than_once():
type = GraphQLObjectType(name='Query', fields={
'a': GraphQLField(GraphQLString)
})
R = TypeRegistry()
assert R(type)
assert R(type)
def test_cannot_register_type_starting_with_an_underscore():
type = GraphQLObjectType(name='_Query', fields={
'a': GraphQLField(GraphQLString)
})
R = TypeRegistry()
with raises(AssertionError) as excinfo:
R(type)
assert str(excinfo.value) == 'Registered type name cannot start with an "_".'
def test_cannot_register_type_thats_using_reserved_name():
R = TypeRegistry()
for name in TypeRegistry._reserved_names:
type = GraphQLObjectType(name=name, fields={
'a': GraphQLField(GraphQLString)
})
with raises(AssertionError) as excinfo:
R(type)
assert str(excinfo.value) == 'You cannot register a type named "{}".'.format(name)
``` |
{
"source": "jhgg/graphene",
"score": 2
} |
#### File: contrib/django/views.py
```python
import json
from django.http import JsonResponse
from django.views.generic import View
from django.conf import settings
from graphql.core.error import GraphQLError, format_error
def form_error(error):
if isinstance(error, GraphQLError):
return format_error(error)
return error
class GraphQLView(View):
schema = None
@staticmethod
def format_result(result):
data = {'data': result.data}
if result.errors:
data['errors'] = list(map(form_error, result.errors))
return data
def response_errors(self, *errors):
return JsonResponse({
"errors": [{
"message": str(e)
} for e in errors]
})
def execute_query(self, request, query):
if not query:
return self.response_errors(Exception("Must provide query string."))
else:
try:
result = self.schema.execute(query, root=object())
data = self.format_result(result)
except Exception as e:
if settings.DEBUG:
raise e
return self.response_errors(e)
return JsonResponse(data)
def get(self, request, *args, **kwargs):
query = request.GET.get('query')
return self.execute_query(request, query or '')
@staticmethod
def get_content_type(request):
meta = request.META
return meta.get('CONTENT_TYPE', meta.get('HTTP_CONTENT_TYPE', ''))
def post(self, request, *args, **kwargs):
content_type = self.get_content_type(request)
if content_type == 'application/json':
try:
received_json_data = json.loads(request.body.decode())
query = received_json_data.get('query')
except ValueError:
return self.response_errors(ValueError("Malformed json body in the post data"))
else:
query = request.POST.get('query') or request.GET.get('query')
return self.execute_query(request, query or '')
```
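Because `schema` is a plain class attribute, the view can be mounted per endpoint through `as_view`. A hypothetical URLconf sketch (the import paths and the `schema` module are assumptions, not taken from this repo):
```python
from django.conf.urls import url

from graphene.contrib.django.views import GraphQLView  # assumed module path
from myproject.schema import schema                     # assumed graphene.Schema instance

urlpatterns = [
    url(r'^graphql$', GraphQLView.as_view(schema=schema)),
]
```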
#### File: graphene/graphene/decorators.py
```python
from functools import wraps
def resolve_only_args(func):
@wraps(func)
def inner(self, args, info):
return func(self, **args)
return inner
```
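A minimal usage sketch in the early-graphene style this repo targets (the `StringField` field API is assumed from that era, not verified against this commit): without the decorator the resolver would be called as `(self, args, info)`; with it, the GraphQL arguments arrive unpacked as keyword arguments.
```python
import graphene
from graphene.decorators import resolve_only_args

class Query(graphene.ObjectType):
    hello = graphene.StringField()

    @resolve_only_args
    def resolve_hello(self):
        # Without the decorator: def resolve_hello(self, args, info): ...
        return 'World'
```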
#### File: graphene/relay/fields.py
```python
from collections import Iterable, OrderedDict
from graphql_relay.connection.arrayconnection import (
connection_from_list
)
from graphql_relay.connection.connection import (
connectionArgs
)
from graphql_relay.node.node import (
global_id_field,
from_global_id
)
from graphene.core.fields import Field, LazyNativeField, LazyField
from graphene.utils import cached_property
from graphene.utils import memoize
class ConnectionField(Field):
def __init__(self, field_type, resolve=None, description=''):
super(ConnectionField, self).__init__(field_type, resolve=resolve,
args=connectionArgs, description=description)
def wrap_resolved(self, value, instance, args, info):
return value
def resolve(self, instance, args, info):
resolved = super(ConnectionField, self).resolve(instance, args, info)
if resolved:
resolved = self.wrap_resolved(resolved, instance, args, info)
assert isinstance(
            resolved, Iterable), 'Resolved value from the connection field has to be iterable'
return connection_from_list(resolved, args)
@memoize
def internal_type(self, schema):
from graphene.relay.types import BaseNode
object_type = self.get_object_type(schema)
assert issubclass(
object_type, BaseNode), 'Only nodes have connections.'
return object_type.get_connection(schema)
class NodeField(LazyNativeField):
def __init__(self, object_type=None, *args, **kwargs):
super(NodeField, self).__init__(*args, **kwargs)
self.field_object_type = object_type
def get_field(self, schema):
if self.field_object_type:
field = NodeTypeField(self.field_object_type)
field.contribute_to_class(self.object_type, self.field_name)
return field.internal_field(schema)
from graphene.relay.types import BaseNode
return BaseNode.get_definitions(schema).node_field
class NodeTypeField(LazyField):
def __init__(self, object_type, *args, **kwargs):
super(NodeTypeField, self).__init__(None, *args, **kwargs)
self.field_object_type = object_type
def inner_field(self, schema):
from graphene.relay.types import BaseNode
node_field = BaseNode.get_definitions(schema).node_field
def resolver(instance, args, info):
global_id = args.get('id')
resolved_global_id = from_global_id(global_id)
if resolved_global_id.type == self.field_object_type._meta.type_name:
return node_field.resolver(instance, args, info)
args = OrderedDict([(a.name, a) for a in node_field.args])
field = Field(self.field_object_type, id=args['id'], resolve=resolver)
field.contribute_to_class(self.object_type, self.field_name)
return field
class NodeIDField(LazyNativeField):
def get_field(self, schema):
return global_id_field(self.object_type._meta.type_name)
```
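As the Django tests later in this dump illustrate, `ConnectionField.resolve` only needs the wrapped resolver to return an iterable; `connection_from_list` then slices it according to the relay connection arguments. A hedged sketch (the `Ship` data object and the type names are assumptions, and the field API follows the early graphene style):
```python
import collections
import graphene
from graphene import relay

# Plain data objects wrapped by the node type (an assumption for illustration).
Ship = collections.namedtuple('Ship', 'id name')

class ShipNode(relay.Node):
    name = graphene.StringField()

    @classmethod
    def get_node(cls, id):
        return ShipNode(Ship(id=id, name='Ship #{}'.format(id)))

class Faction(graphene.ObjectType):
    ships = relay.ConnectionField(ShipNode)

    def resolve_ships(self, *args, **kwargs):
        # ConnectionField wraps this list via connection_from_list.
        return [ShipNode(Ship(id=1, name='X-Wing')), ShipNode(Ship(id=2, name='Y-Wing'))]
```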
#### File: graphene/relay/utils.py
```python
from graphene.relay.types import BaseNode
def is_node(object_type):
return issubclass(object_type, BaseNode) and not is_node_type(object_type)
def is_node_type(object_type):
return BaseNode in object_type.__bases__
```
#### File: tests/contrib_django/models.py
```python
from __future__ import absolute_import
from django.db import models
class Pet(models.Model):
name = models.CharField(max_length=30)
class Reporter(models.Model):
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
email = models.EmailField()
pets = models.ManyToManyField('self')
def __str__(self): # __unicode__ on Python 2
return "%s %s" % (self.first_name, self.last_name)
class Meta:
app_label = 'contrib_django'
class Article(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateField()
reporter = models.ForeignKey(Reporter, related_name='articles')
def __str__(self): # __unicode__ on Python 2
return self.headline
class Meta:
ordering = ('headline',)
app_label = 'contrib_django'
```
#### File: tests/contrib_django/test_schema.py
```python
from pytest import raises
import graphene
from graphene import relay
from graphene.contrib.django import (
DjangoObjectType,
DjangoNode
)
from .models import Reporter, Article
from tests.utils import assert_equal_lists
def test_should_raise_if_no_model():
with raises(Exception) as excinfo:
class Character1(DjangoObjectType):
pass
assert 'model in the Meta' in str(excinfo.value)
def test_should_raise_if_model_is_invalid():
with raises(Exception) as excinfo:
class Character2(DjangoObjectType):
class Meta:
model = 1
assert 'not a Django model' in str(excinfo.value)
def test_should_query_only_fields():
with raises(Exception) as excinfo:
class ReporterTypeError(DjangoObjectType):
class Meta:
model = Reporter
only_fields = ('articles', )
schema = graphene.Schema(query=ReporterTypeError)
query = '''
query ReporterQuery {
articles
}
'''
result = schema.execute(query)
assert not result.errors
def test_should_map_fields_correctly():
class ReporterType2(DjangoObjectType):
class Meta:
model = Reporter
assert_equal_lists(
ReporterType2._meta.fields_map.keys(),
['articles', 'first_name', 'last_name', 'email', 'pets', 'id']
)
def test_should_map_fields():
class ReporterType(DjangoObjectType):
class Meta:
model = Reporter
class Query2(graphene.ObjectType):
reporter = graphene.Field(ReporterType)
def resolve_reporter(self, *args, **kwargs):
return ReporterType(Reporter(first_name='ABA', last_name='X'))
query = '''
query ReporterQuery {
reporter {
firstName,
lastName,
email
}
}
'''
expected = {
'reporter': {
'firstName': 'ABA',
'lastName': 'X',
'email': ''
}
}
Schema = graphene.Schema(query=Query2)
result = Schema.execute(query)
assert not result.errors
assert result.data == expected
def test_should_map_only_few_fields():
class Reporter2(DjangoObjectType):
class Meta:
model = Reporter
only_fields = ('id', 'email')
assert_equal_lists(
Reporter2._meta.fields_map.keys(),
['id', 'email']
)
def test_should_node():
class ReporterNodeType(DjangoNode):
class Meta:
model = Reporter
@classmethod
def get_node(cls, id):
return ReporterNodeType(Reporter(id=2, first_name='<NAME>'))
def resolve_articles(self, *args, **kwargs):
return [ArticleNodeType(Article(headline='Hi!'))]
class ArticleNodeType(DjangoNode):
class Meta:
model = Article
@classmethod
def get_node(cls, id):
return ArticleNodeType(Article(id=1, headline='Article node'))
class Query1(graphene.ObjectType):
node = relay.NodeField()
reporter = graphene.Field(ReporterNodeType)
article = graphene.Field(ArticleNodeType)
def resolve_reporter(self, *args, **kwargs):
return ReporterNodeType(Reporter(id=1, first_name='ABA', last_name='X'))
query = '''
query ReporterQuery {
reporter {
id,
firstName,
articles {
edges {
node {
headline
}
}
}
lastName,
email
}
myArticle: node(id:"QXJ0aWNsZU5vZGVUeXBlOjE=") {
id
... on ReporterNodeType {
firstName
}
... on ArticleNodeType {
headline
}
}
}
'''
expected = {
'reporter': {
'id': 'UmVwb3J0ZXJOb2RlVHlwZTox',
'firstName': 'ABA',
'lastName': 'X',
'email': '',
'articles': {
'edges': [{
'node': {
'headline': 'Hi!'
}
}]
},
},
'myArticle': {
'id': 'QXJ0aWNsZU5vZGVUeXBlOjE=',
'headline': 'Article node'
}
}
Schema = graphene.Schema(query=Query1)
result = Schema.execute(query)
assert not result.errors
assert result.data == expected
```
#### File: tests/contrib_django/test_types.py
```python
from pytest import raises
from graphene.core.fields import (
Field,
StringField,
)
from graphql.core.type import (
GraphQLObjectType,
GraphQLInterfaceType
)
from graphene import Schema
from graphene.contrib.django.types import (
DjangoNode,
DjangoInterface
)
from .models import Reporter, Article
from tests.utils import assert_equal_lists
class Character(DjangoInterface):
'''Character description'''
class Meta:
model = Reporter
class Human(DjangoNode):
'''Human description'''
def get_node(self, id):
pass
class Meta:
model = Article
schema = Schema()
def test_django_interface():
assert DjangoNode._meta.interface is True
def test_pseudo_interface():
object_type = Character.internal_type(schema)
assert Character._meta.interface is True
assert isinstance(object_type, GraphQLInterfaceType)
assert Character._meta.model == Reporter
assert_equal_lists(
object_type.get_fields().keys(),
['articles', 'firstName', 'lastName', 'email', 'pets', 'id']
)
def test_interface_resolve_type():
resolve_type = Character.resolve_type(schema, Human(object()))
assert isinstance(resolve_type, GraphQLObjectType)
def test_object_type():
object_type = Human.internal_type(schema)
fields_map = Human._meta.fields_map
assert Human._meta.interface is False
assert isinstance(object_type, GraphQLObjectType)
assert object_type.get_fields() == {
'headline': fields_map['headline'].internal_field(schema),
'id': fields_map['id'].internal_field(schema),
'reporter': fields_map['reporter'].internal_field(schema),
'pubDate': fields_map['pub_date'].internal_field(schema),
}
assert object_type.get_interfaces() == [DjangoNode.internal_type(schema)]
```
#### File: graphene/tests/utils.py
```python
def assert_equal_lists(l1, l2):
assert sorted(l1) == sorted(l2)
``` |
{
"source": "jhgg/graphql-core",
"score": 2
} |
#### File: tests/utilities/test_print_schema.py
```python
from typing import cast, Any, Dict
from graphql.language import DirectiveLocation
from graphql.type import (
GraphQLArgument,
GraphQLBoolean,
GraphQLEnumType,
GraphQLField,
GraphQLFloat,
GraphQLInputObjectType,
GraphQLInt,
GraphQLInterfaceType,
GraphQLList,
GraphQLNonNull,
GraphQLObjectType,
GraphQLScalarType,
GraphQLSchema,
GraphQLString,
GraphQLUnionType,
GraphQLInputField,
GraphQLDirective,
)
from graphql.utilities import (
build_schema,
print_schema,
print_introspection_schema,
print_value,
)
from ..utils import dedent
def expect_printed_schema(schema: GraphQLSchema) -> str:
schema_text = print_schema(schema)
# keep print_schema and build_schema in sync
assert print_schema(build_schema(schema_text)) == schema_text
return schema_text
def build_single_field_schema(field: GraphQLField):
query = GraphQLObjectType(name="Query", fields={"singleField": field})
return GraphQLSchema(query=query)
def describe_type_system_printer():
def prints_string_field():
schema = build_single_field_schema(GraphQLField(GraphQLString))
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField: String
}
"""
)
def prints_list_of_string_field():
schema = build_single_field_schema(GraphQLField(GraphQLList(GraphQLString)))
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField: [String]
}
"""
)
def prints_non_null_string_field():
schema = build_single_field_schema(GraphQLField(GraphQLNonNull(GraphQLString)))
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField: String!
}
"""
)
def prints_non_null_list_of_string_field():
schema = build_single_field_schema(
GraphQLField(GraphQLNonNull(GraphQLList(GraphQLString)))
)
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField: [String]!
}
"""
)
def prints_list_of_non_null_string_field():
schema = build_single_field_schema(
GraphQLField((GraphQLList(GraphQLNonNull(GraphQLString))))
)
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField: [String!]
}
"""
)
def prints_non_null_list_of_non_null_string_field():
schema = build_single_field_schema(
GraphQLField(GraphQLNonNull(GraphQLList(GraphQLNonNull(GraphQLString))))
)
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField: [String!]!
}
"""
)
def prints_object_field():
foo_type = GraphQLObjectType(
name="Foo", fields={"str": GraphQLField(GraphQLString)}
)
schema = GraphQLSchema(types=[foo_type])
assert expect_printed_schema(schema) == dedent(
"""
type Foo {
str: String
}
"""
)
def prints_string_field_with_int_arg():
schema = build_single_field_schema(
GraphQLField(
type_=GraphQLString, args={"argOne": GraphQLArgument(GraphQLInt)}
)
)
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField(argOne: Int): String
}
"""
)
def prints_string_field_with_int_arg_with_default():
schema = build_single_field_schema(
GraphQLField(
type_=GraphQLString,
args={"argOne": GraphQLArgument(GraphQLInt, default_value=2)},
)
)
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField(argOne: Int = 2): String
}
"""
)
def prints_string_field_with_string_arg_with_default():
schema = build_single_field_schema(
GraphQLField(
type_=GraphQLString,
args={
"argOne": GraphQLArgument(
GraphQLString, default_value="tes\t de\fault"
)
},
)
)
assert expect_printed_schema(schema) == dedent(
r"""
type Query {
singleField(argOne: String = "tes\t de\fault"): String
}
"""
)
def prints_string_field_with_int_arg_with_default_null():
schema = build_single_field_schema(
GraphQLField(
type_=GraphQLString,
args={"argOne": GraphQLArgument(GraphQLInt, default_value=None)},
)
)
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField(argOne: Int = null): String
}
"""
)
def prints_string_field_with_non_null_int_arg():
schema = build_single_field_schema(
GraphQLField(
type_=GraphQLString,
args={"argOne": GraphQLArgument(GraphQLNonNull(GraphQLInt))},
)
)
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField(argOne: Int!): String
}
"""
)
def prints_string_field_with_multiple_args():
schema = build_single_field_schema(
GraphQLField(
type_=GraphQLString,
args={
"argOne": GraphQLArgument(GraphQLInt),
"argTwo": GraphQLArgument(GraphQLString),
},
)
)
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField(argOne: Int, argTwo: String): String
}
"""
)
def prints_string_field_with_multiple_args_first_is_default():
schema = build_single_field_schema(
GraphQLField(
type_=GraphQLString,
args={
"argOne": GraphQLArgument(GraphQLInt, default_value=1),
"argTwo": GraphQLArgument(GraphQLString),
"argThree": GraphQLArgument(GraphQLBoolean),
},
)
)
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField(argOne: Int = 1, argTwo: String, argThree: Boolean): String
}
"""
)
def prints_string_field_with_multiple_args_second_is_default():
schema = build_single_field_schema(
GraphQLField(
type_=GraphQLString,
args={
"argOne": GraphQLArgument(GraphQLInt),
"argTwo": GraphQLArgument(GraphQLString, default_value="foo"),
"argThree": GraphQLArgument(GraphQLBoolean),
},
)
)
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField(argOne: Int, argTwo: String = "foo", argThree: Boolean): String
}
""" # noqa: E501
)
def prints_string_field_with_multiple_args_last_is_default():
schema = build_single_field_schema(
GraphQLField(
type_=GraphQLString,
args={
"argOne": GraphQLArgument(GraphQLInt),
"argTwo": GraphQLArgument(GraphQLString),
"argThree": GraphQLArgument(GraphQLBoolean, default_value=False),
},
)
)
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField(argOne: Int, argTwo: String, argThree: Boolean = false): String
}
""" # noqa: E501
)
def prints_schema_with_description():
schema = GraphQLSchema(
description="Schema description.", query=GraphQLObjectType("Query", {})
)
assert expect_printed_schema(schema) == dedent(
'''
"""Schema description."""
schema {
query: Query
}
type Query
'''
)
def prints_custom_query_root_types():
schema = GraphQLSchema(query=GraphQLObjectType("CustomType", {}))
assert expect_printed_schema(schema) == dedent(
"""
schema {
query: CustomType
}
type CustomType
"""
)
def prints_custom_mutation_root_types():
schema = GraphQLSchema(mutation=GraphQLObjectType("CustomType", {}))
assert expect_printed_schema(schema) == dedent(
"""
schema {
mutation: CustomType
}
type CustomType
"""
)
def prints_custom_subscription_root_types():
schema = GraphQLSchema(subscription=GraphQLObjectType("CustomType", {}))
assert expect_printed_schema(schema) == dedent(
"""
schema {
subscription: CustomType
}
type CustomType
"""
)
def prints_interface():
foo_type = GraphQLInterfaceType(
name="Foo", fields={"str": GraphQLField(GraphQLString)}
)
bar_type = GraphQLObjectType(
name="Bar",
fields={"str": GraphQLField(GraphQLString)},
interfaces=[foo_type],
)
schema = GraphQLSchema(types=[bar_type])
assert expect_printed_schema(schema) == dedent(
"""
type Bar implements Foo {
str: String
}
interface Foo {
str: String
}
"""
)
def prints_multiple_interfaces():
foo_type = GraphQLInterfaceType(
name="Foo", fields={"str": GraphQLField(GraphQLString)}
)
baz_type = GraphQLInterfaceType(
name="Baz", fields={"int": GraphQLField(GraphQLInt)}
)
bar_type = GraphQLObjectType(
name="Bar",
fields={
"str": GraphQLField(GraphQLString),
"int": GraphQLField(GraphQLInt),
},
interfaces=[foo_type, baz_type],
)
schema = GraphQLSchema(types=[bar_type])
assert expect_printed_schema(schema) == dedent(
"""
type Bar implements Foo & Baz {
str: String
int: Int
}
interface Foo {
str: String
}
interface Baz {
int: Int
}
"""
)
def prints_hierarchical_interface():
foo_type = GraphQLInterfaceType(
name="Foo", fields={"str": GraphQLField(GraphQLString)}
)
baz_type = GraphQLInterfaceType(
name="Baz",
interfaces=[foo_type],
fields={
"int": GraphQLField(GraphQLInt),
"str": GraphQLField(GraphQLString),
},
)
bar_type = GraphQLObjectType(
name="Bar",
fields={
"str": GraphQLField(GraphQLString),
"int": GraphQLField(GraphQLInt),
},
interfaces=[foo_type, baz_type],
)
query = GraphQLObjectType(name="Query", fields={"bar": GraphQLField(bar_type)})
schema = GraphQLSchema(query, types=[bar_type])
assert expect_printed_schema(schema) == dedent(
"""
type Bar implements Foo & Baz {
str: String
int: Int
}
interface Foo {
str: String
}
interface Baz implements Foo {
int: Int
str: String
}
type Query {
bar: Bar
}
"""
)
def prints_unions():
foo_type = GraphQLObjectType(
name="Foo", fields={"bool": GraphQLField(GraphQLBoolean)}
)
bar_type = GraphQLObjectType(
name="Bar", fields={"str": GraphQLField(GraphQLString)}
)
single_union = GraphQLUnionType(name="SingleUnion", types=[foo_type])
multiple_union = GraphQLUnionType(
name="MultipleUnion", types=[foo_type, bar_type]
)
schema = GraphQLSchema(types=[single_union, multiple_union])
assert expect_printed_schema(schema) == dedent(
"""
union SingleUnion = Foo
type Foo {
bool: Boolean
}
union MultipleUnion = Foo | Bar
type Bar {
str: String
}
"""
)
def prints_input_type():
input_type = GraphQLInputObjectType(
name="InputType", fields={"int": GraphQLInputField(GraphQLInt)}
)
schema = GraphQLSchema(types=[input_type])
assert expect_printed_schema(schema) == dedent(
"""
input InputType {
int: Int
}
"""
)
def prints_custom_scalar():
odd_type = GraphQLScalarType(name="Odd")
schema = GraphQLSchema(types=[odd_type])
assert expect_printed_schema(schema) == dedent(
"""
scalar Odd
"""
)
    def prints_custom_scalar_with_specified_by_url():
foo_type = GraphQLScalarType(
name="Foo", specified_by_url="https://example.com/foo_spec"
)
schema = GraphQLSchema(types=[foo_type])
assert expect_printed_schema(schema) == dedent(
"""
scalar Foo @specifiedBy(url: "https://example.com/foo_spec")
"""
)
def prints_enum():
rgb_type = GraphQLEnumType(
name="RGB", values=dict.fromkeys(("RED", "GREEN", "BLUE"))
)
schema = GraphQLSchema(types=[rgb_type])
assert expect_printed_schema(schema) == dedent(
"""
enum RGB {
RED
GREEN
BLUE
}
"""
)
def prints_empty_types():
schema = GraphQLSchema(
types=[
GraphQLEnumType("SomeEnum", cast(Dict[str, Any], {})),
GraphQLInputObjectType("SomeInputObject", {}),
GraphQLInterfaceType("SomeInterface", {}),
GraphQLObjectType("SomeObject", {}),
GraphQLUnionType("SomeUnion", []),
]
)
assert expect_printed_schema(schema) == dedent(
"""
enum SomeEnum
input SomeInputObject
interface SomeInterface
type SomeObject
union SomeUnion
"""
)
def prints_custom_directives():
simple_directive = GraphQLDirective(
"simpleDirective", [DirectiveLocation.FIELD]
)
complex_directive = GraphQLDirective(
"complexDirective",
[DirectiveLocation.FIELD, DirectiveLocation.QUERY],
description="Complex Directive",
args={
"stringArg": GraphQLArgument(GraphQLString),
"intArg": GraphQLArgument(GraphQLInt, default_value=-1),
},
is_repeatable=True,
)
schema = GraphQLSchema(directives=[simple_directive, complex_directive])
assert expect_printed_schema(schema) == dedent(
'''
directive @simpleDirective on FIELD
"""Complex Directive"""
directive @complexDirective(stringArg: String, intArg: Int = -1) repeatable on FIELD | QUERY
''' # noqa: E501
)
def prints_an_empty_description():
schema = build_single_field_schema(GraphQLField(GraphQLString, description=""))
assert expect_printed_schema(schema) == dedent(
'''
type Query {
""""""
singleField: String
}
'''
)
def one_line_prints_a_short_description():
schema = build_single_field_schema(
GraphQLField(GraphQLString, description="This field is awesome")
)
assert expect_printed_schema(schema) == dedent(
'''
type Query {
"""This field is awesome"""
singleField: String
}
'''
)
def prints_introspection_schema():
schema = GraphQLSchema()
output = print_introspection_schema(schema)
assert output == dedent(
'''
"""
Directs the executor to include this field or fragment only when the `if` argument is true.
"""
directive @include(
"""Included when true."""
if: Boolean!
) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT
"""
Directs the executor to skip this field or fragment when the `if` argument is true.
"""
directive @skip(
"""Skipped when true."""
if: Boolean!
) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT
"""Marks an element of a GraphQL schema as no longer supported."""
directive @deprecated(
"""
Explains why this element was deprecated, usually also including a suggestion for how to access supported similar data. Formatted using the Markdown syntax, as specified by [CommonMark](https://commonmark.org/).
"""
reason: String = "No longer supported"
) on FIELD_DEFINITION | ARGUMENT_DEFINITION | INPUT_FIELD_DEFINITION | ENUM_VALUE
"""Exposes a URL that specifies the behaviour of this scalar."""
directive @specifiedBy(
"""The URL that specifies the behaviour of this scalar."""
url: String!
) on SCALAR
"""
A GraphQL Schema defines the capabilities of a GraphQL server. It exposes all available types and directives on the server, as well as the entry points for query, mutation, and subscription operations.
"""
type __Schema {
description: String
"""A list of all types supported by this server."""
types: [__Type!]!
"""The type that query operations will be rooted at."""
queryType: __Type!
"""
If this server supports mutation, the type that mutation operations will be rooted at.
"""
mutationType: __Type
"""
If this server support subscription, the type that subscription operations will be rooted at.
"""
subscriptionType: __Type
"""A list of all directives supported by this server."""
directives: [__Directive!]!
}
"""
The fundamental unit of any GraphQL Schema is the type. There are many kinds of types in GraphQL as represented by the `__TypeKind` enum.
Depending on the kind of a type, certain fields describe information about that type. Scalar types provide no information beyond a name, description and optional `specifiedByUrl`, while Enum types provide their values. Object and Interface types provide the fields they describe. Abstract types, Union and Interface, provide the Object types possible at runtime. List and NonNull types compose other types.
"""
type __Type {
kind: __TypeKind!
name: String
description: String
specifiedByUrl: String
fields(includeDeprecated: Boolean = false): [__Field!]
interfaces: [__Type!]
possibleTypes: [__Type!]
enumValues(includeDeprecated: Boolean = false): [__EnumValue!]
inputFields(includeDeprecated: Boolean = false): [__InputValue!]
ofType: __Type
}
"""An enum describing what kind of type a given `__Type` is."""
enum __TypeKind {
"""Indicates this type is a scalar."""
SCALAR
"""
Indicates this type is an object. `fields` and `interfaces` are valid fields.
"""
OBJECT
"""
Indicates this type is an interface. `fields`, `interfaces`, and `possibleTypes` are valid fields.
"""
INTERFACE
"""Indicates this type is a union. `possibleTypes` is a valid field."""
UNION
"""Indicates this type is an enum. `enumValues` is a valid field."""
ENUM
"""
Indicates this type is an input object. `inputFields` is a valid field.
"""
INPUT_OBJECT
"""Indicates this type is a list. `ofType` is a valid field."""
LIST
"""Indicates this type is a non-null. `ofType` is a valid field."""
NON_NULL
}
"""
Object and Interface types are described by a list of Fields, each of which has a name, potentially a list of arguments, and a return type.
"""
type __Field {
name: String!
description: String
args(includeDeprecated: Boolean = false): [__InputValue!]!
type: __Type!
isDeprecated: Boolean!
deprecationReason: String
}
"""
Arguments provided to Fields or Directives and the input fields of an InputObject are represented as Input Values which describe their type and optionally a default value.
"""
type __InputValue {
name: String!
description: String
type: __Type!
"""
A GraphQL-formatted string representing the default value for this input value.
"""
defaultValue: String
isDeprecated: Boolean!
deprecationReason: String
}
"""
One possible value for a given Enum. Enum values are unique values, not a placeholder for a string or numeric value. However an Enum value is returned in a JSON response as a string.
"""
type __EnumValue {
name: String!
description: String
isDeprecated: Boolean!
deprecationReason: String
}
"""
A Directive provides a way to describe alternate runtime execution and type validation behavior in a GraphQL document.
In some cases, you need to provide options to alter GraphQL's execution behavior in ways field arguments will not suffice, such as conditionally including or skipping a field. Directives provide this by describing additional information to the executor.
"""
type __Directive {
name: String!
description: String
isRepeatable: Boolean!
locations: [__DirectiveLocation!]!
args: [__InputValue!]!
}
"""
A Directive can be adjacent to many parts of the GraphQL language, a __DirectiveLocation describes one such possible adjacencies.
"""
enum __DirectiveLocation {
"""Location adjacent to a query operation."""
QUERY
"""Location adjacent to a mutation operation."""
MUTATION
"""Location adjacent to a subscription operation."""
SUBSCRIPTION
"""Location adjacent to a field."""
FIELD
"""Location adjacent to a fragment definition."""
FRAGMENT_DEFINITION
"""Location adjacent to a fragment spread."""
FRAGMENT_SPREAD
"""Location adjacent to an inline fragment."""
INLINE_FRAGMENT
"""Location adjacent to a variable definition."""
VARIABLE_DEFINITION
"""Location adjacent to a schema definition."""
SCHEMA
"""Location adjacent to a scalar definition."""
SCALAR
"""Location adjacent to an object type definition."""
OBJECT
"""Location adjacent to a field definition."""
FIELD_DEFINITION
"""Location adjacent to an argument definition."""
ARGUMENT_DEFINITION
"""Location adjacent to an interface definition."""
INTERFACE
"""Location adjacent to a union definition."""
UNION
"""Location adjacent to an enum definition."""
ENUM
"""Location adjacent to an enum value definition."""
ENUM_VALUE
"""Location adjacent to an input object type definition."""
INPUT_OBJECT
"""Location adjacent to an input object field definition."""
INPUT_FIELD_DEFINITION
}
''' # noqa: E501
)
def describe_print_value():
def print_value_convenience_function():
assert print_value(1.5, GraphQLFloat) == "1.5"
assert print_value("foo", GraphQLString) == '"foo"'
``` |
{
"source": "jhgg/graphql-py",
"score": 2
} |
#### File: core/execution/__init__.py
```python
import collections
from ..error import GraphQLError, format_error
from ..utils import type_from_ast, is_nullish
from ..language import ast
from .values import get_variable_values, get_argument_values
from ..type.definition import (
GraphQLScalarType,
GraphQLObjectType,
GraphQLInterfaceType,
GraphQLUnionType,
GraphQLEnumType,
GraphQLList,
GraphQLNonNull,
)
from ..type.introspection import (
SchemaMetaFieldDef,
TypeMetaFieldDef,
TypeNameMetaFieldDef,
)
from ..type.directives import (
GraphQLIncludeDirective,
GraphQLSkipDirective,
)
Undefined = object()
"""
Terminology
"Definitions" are the generic name for top-level statements in the document.
Examples of this include:
1) Operations (such as a query)
2) Fragments
"Operations" are a generic name for requests in the document.
Examples of this include:
1) query,
2) mutation
"Selections" are the statements that can appear legally and at
single level of the query. These include:
1) field references e.g "a"
2) fragment "spreads" e.g. "...c"
3) inline fragment "spreads" e.g. "...on Type { a }"
"""
class ExecutionContext(object):
"""Data that must be available at all points during query execution.
Namely, schema of the type system that is currently executing,
and the fragments defined in the query document"""
__slots__ = ['schema', 'fragments', 'root', 'operation', 'variables', 'errors']
def __init__(self, schema, root, document_ast, operation_name, args):
"""Constructs a ExecutionContext object from the arguments passed
to execute, which we will pass throughout the other execution
methods."""
errors = []
operations = {}
fragments = {}
for statement in document_ast.definitions:
if isinstance(statement, ast.OperationDefinition):
name = ''
if statement.name:
name = statement.name.value
operations[name] = statement
elif isinstance(statement, ast.FragmentDefinition):
fragments[statement.name.value] = statement
if not operation_name and len(operations) != 1:
raise GraphQLError(
'Must provide operation name '
'if query contains multiple operations')
op_name = operation_name or next(iter(operations.keys()))
operation = operations.get(op_name)
if not operation:
raise GraphQLError('Unknown operation name: {}'.format(op_name))
variables = get_variable_values(schema, operation.variable_definitions or [], args)
self.schema = schema
self.fragments = fragments
self.root = root
self.operation = operation
self.variables = variables
self.errors = errors
class ExecutionResult(object):
"""The result of execution. `data` is the result of executing the
query, `errors` is null if no errors occurred, and is a
non-empty array if an error occurred."""
__slots__ = ['data', 'errors']
def __init__(self, data, errors=None):
self.data = data
self.errors = errors
def execute(schema, root, ast, operation_name='', args=None):
"""Implements the "Evaluating requests" section of the spec."""
assert schema, 'Must provide schema'
ctx = ExecutionContext(schema, root, ast, operation_name, args)
try:
data = execute_operation(ctx, root, ctx.operation)
except GraphQLError as e:
ctx.errors.append(e)
data = None
if not ctx.errors:
return ExecutionResult(data)
formatted_errors = list(map(format_error, ctx.errors))
return ExecutionResult(data, formatted_errors)
def execute_operation(ctx, root, operation):
"""Implements the "Evaluating operations" section of the spec."""
type = get_operation_root_type(ctx.schema, operation)
fields = collect_fields(ctx, type, operation.selection_set, {}, set())
if operation.operation == 'mutation':
return execute_fields_serially(ctx, type, root, fields)
return execute_fields(ctx, type, root, fields)
def get_operation_root_type(schema, operation):
op = operation.operation
if op == 'query':
return schema.get_query_type()
elif op == 'mutation':
mutation_type = schema.get_mutation_type()
if not mutation_type:
raise GraphQLError(
'Schema is not configured for mutations',
[operation]
)
return mutation_type
raise GraphQLError(
'Can only execute queries and mutations',
[operation]
)
def execute_fields_serially(ctx, parent_type, source, fields):
"""Implements the "Evaluating selection sets" section of the spec
for "write" mode."""
results = {}
for response_name, field_asts in fields.items():
result = resolve_field(ctx, parent_type, source, field_asts)
if result is not Undefined:
results[response_name] = result
return results
def execute_fields(ctx, parent_type, source, fields):
"""Implements the "Evaluating selection sets" section of the spec
for "read" mode."""
# FIXME: just fallback to serial execution for now.
return execute_fields_serially(ctx, parent_type, source, fields)
def collect_fields(ctx, type, selection_set, fields, prev_fragment_names):
for selection in selection_set.selections:
directives = selection.directives
if isinstance(selection, ast.Field):
if not should_include_node(ctx, directives):
continue
name = get_field_entry_key(selection)
if name not in fields:
fields[name] = []
fields[name].append(selection)
elif isinstance(selection, ast.InlineFragment):
if not should_include_node(ctx, directives) or \
not does_fragment_condition_match(ctx, selection, type):
continue
collect_fields(
ctx, type, selection.selection_set,
fields, prev_fragment_names)
elif isinstance(selection, ast.FragmentSpread):
frag_name = selection.name.value
if frag_name in prev_fragment_names or \
not should_include_node(ctx, directives):
continue
prev_fragment_names.add(frag_name)
fragment = ctx.fragments.get(frag_name)
frag_directives = fragment.directives
if not fragment or \
not should_include_node(ctx, frag_directives) or \
not does_fragment_condition_match(ctx, fragment, type):
continue
collect_fields(
ctx, type, fragment.selection_set,
fields, prev_fragment_names)
return fields
def should_include_node(ctx, directives):
"""Determines if a field should be included based on the @include and
@skip directives, where @skip has higher precidence than @include."""
if directives:
skip_ast = None
for directive in directives:
if directive.name.value == GraphQLSkipDirective.name:
skip_ast = directive
break
if skip_ast:
args = get_argument_values(
GraphQLSkipDirective.args,
skip_ast.arguments,
ctx.variables,
)
return not args.get('if')
include_ast = None
for directive in directives:
if directive.name.value == GraphQLIncludeDirective.name:
include_ast = directive
break
if include_ast:
args = get_argument_values(
GraphQLIncludeDirective.args,
include_ast.arguments,
ctx.variables,
)
return bool(args.get('if'))
return True
def does_fragment_condition_match(ctx, fragment, type_):
conditional_type = type_from_ast(ctx.schema, fragment.type_condition)
if type(conditional_type) == type(type_):
return True
if isinstance(conditional_type, (GraphQLInterfaceType, GraphQLUnionType)):
return conditional_type.is_possible_type(type_)
return False
def get_field_entry_key(node):
"""Implements the logic to compute the key of a given field’s entry"""
if node.alias:
return node.alias.value
return node.name.value
class ResolveInfo(object):
__slots__ = ['field_name', 'field_ast', 'return_type', 'parent_type', 'context']
def __init__(self, field_name, field_asts, return_type, parent_type, context):
self.field_name = field_name
self.field_ast = field_asts
self.return_type = return_type
self.parent_type = parent_type
self.context = context
@property
def schema(self):
return self.context.schema
@property
def fragments(self):
return self.context.fragments
@property
def root_value(self):
return self.context.root_value
@property
def operation(self):
return self.context.operation
@property
def variable_values(self):
return self.context.variables
def resolve_field(ctx, parent_type, source, field_asts):
"""A wrapper function for resolving the field, that catches the error
and adds it to the context's global if the error is not rethrowable."""
field_ast = field_asts[0]
field_name = field_ast.name.value
field_def = get_field_def(ctx.schema, parent_type, field_name)
if not field_def:
return Undefined
return_type = field_def.type
resolve_fn = field_def.resolver or default_resolve_fn
# Build a dict of arguments from the field.arguments AST, using the variables scope to fulfill any variable references.
# TODO: find a way to memoize, in case this field is within a list type.
args = get_argument_values(
field_def.args, field_ast.arguments, ctx.variables
)
# The resolve function's optional third argument is a collection of
# information about the current execution state.
info = ResolveInfo(
field_name,
field_asts,
return_type,
parent_type,
ctx
)
# If an error occurs while calling the field `resolve` function, ensure that it is wrapped as a GraphQLError with locations.
# Log this error and return null if allowed, otherwise throw the error so the parent field can handle it.
try:
result = resolve_fn(source, args, info)
except Exception as e:
reported_error = GraphQLError(str(e), [field_ast], e)
if isinstance(return_type, GraphQLNonNull):
raise reported_error
ctx.errors.append(reported_error)
return None
return complete_value_catching_error(
ctx, return_type, field_asts, info, result
)
def complete_value_catching_error(ctx, return_type, field_asts, info, result):
# If the field type is non-nullable, then it is resolved without any
# protection from errors.
if isinstance(return_type, GraphQLNonNull):
return complete_value(ctx, return_type, field_asts, info, result)
# Otherwise, error protection is applied, logging the error and
# resolving a null value for this field if one is encountered.
try:
return complete_value(ctx, return_type, field_asts, info, result)
except Exception as e:
ctx.errors.append(e)
return None
def complete_value(ctx, return_type, field_asts, info, result):
"""Implements the instructions for completeValue as defined in the
"Field entries" section of the spec.
If the field type is Non-Null, then this recursively completes the value for the inner type. It throws a field error
if that completion returns null, as per the "Nullability" section of the spec.
If the field type is a List, then this recursively completes the value for the inner type on each item in the list.
If the field type is a Scalar or Enum, ensures the completed value is a legal value of the type by calling the `serialize`
method of GraphQL type definition.
Otherwise, the field type expects a sub-selection set, and will complete the value by evaluating all sub-selections."""
# If field type is NonNull, complete for inner type, and throw field error if result is null.
if isinstance(return_type, GraphQLNonNull):
completed = complete_value(
ctx, return_type.of_type, field_asts, info, result
)
if completed is None:
raise GraphQLError(
'Cannot return null for non-nullable type.',
field_asts
)
return completed
# If result is null-like, return null.
if is_nullish(result):
return None
# If field type is List, complete each item in the list with the inner type
if isinstance(return_type, GraphQLList):
assert isinstance(result, collections.Iterable), \
'User Error: expected iterable, but did not find one.'
item_type = return_type.of_type
return [complete_value_catching_error(
ctx, item_type, field_asts, info, item
) for item in result]
# If field type is Scalar or Enum, serialize to a valid value, returning null if coercion is not possible.
if isinstance(return_type, (GraphQLScalarType, GraphQLEnumType)):
serialized_result = return_type.serialize(result)
if is_nullish(serialized_result):
return None
return serialized_result
# Field type must be Object, Interface or Union and expect sub-selections.
if isinstance(return_type, GraphQLObjectType):
object_type = return_type
elif isinstance(return_type, (GraphQLInterfaceType, GraphQLUnionType)):
object_type = return_type.resolve_type(result)
else:
object_type = None
if not object_type:
return None
# Collect sub-fields to execute to complete this value.
subfield_asts = {}
visited_fragment_names = set()
for field_ast in field_asts:
selection_set = field_ast.selection_set
if selection_set:
subfield_asts = collect_fields(
ctx, object_type, selection_set,
subfield_asts, visited_fragment_names)
return execute_fields(ctx, object_type, result, subfield_asts)
def default_resolve_fn(source, args, info):
"""If a resolve function is not given, then a default resolve behavior is used which takes the property of the source object
of the same name as the field and returns it as the result, or if it's a function, returns the result of calling that function."""
name = info.field_name
property = getattr(source, name, None)
if callable(property):
return property()
return property
def get_field_def(schema, parent_type, field_name):
"""This method looks up the field on the given type defintion.
It has special casing for the two introspection fields, __schema
and __typename. __typename is special because it can always be
queried as a field, even in situations where no other fields
are allowed, like on a Union. __schema could get automatically
added to the query type, but that would require mutating type
definitions, which would cause issues."""
if field_name == SchemaMetaFieldDef.name and schema.get_query_type() == parent_type:
return SchemaMetaFieldDef
elif field_name == TypeMetaFieldDef.name and schema.get_query_type() == parent_type:
return TypeMetaFieldDef
elif field_name == TypeNameMetaFieldDef.name:
return TypeNameMetaFieldDef
return parent_type.get_fields().get(field_name)
```
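For orientation, the sketch below wires the executor into a one-field schema and runs a query through it. It is a minimal, hypothetical example: the import paths and the `graphql(schema, query)` helper are taken from the test files elsewhere in this dump (for example `tests/node/test_node.py`), not from this module itself.
```python
# Minimal sketch: drive the executor through the public graphql() helper.
# Import paths are assumed from the test files in this dump (graphql.core.*).
from graphql.core import graphql
from graphql.core.type import (
    GraphQLSchema,
    GraphQLObjectType,
    GraphQLField,
    GraphQLString,
)

# A single-field query type; the resolver receives (source, args, info),
# matching the resolve_fn call in resolve_field above.
query_type = GraphQLObjectType(
    'Query',
    fields=lambda: {
        'hello': GraphQLField(
            GraphQLString,
            resolver=lambda source, args, info: 'world',
        ),
    },
)

schema = GraphQLSchema(query=query_type)

# graphql() parses the request and ends up in execute()/ExecutionResult.
result = graphql(schema, '{ hello }')
assert result.errors is None
assert result.data == {'hello': 'world'}
```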
#### File: tests/core_language/test_lexer.py
```python
from pytest import raises
from graphql.core.language.error import LanguageError
from graphql.core.language.source import Source
from graphql.core.language.lexer import Lexer, Token, TokenKind
def lex_one(s):
return Lexer(Source(s)).next_token()
def test_skips_whitespace():
assert lex_one("""
foo
""") == Token(TokenKind.NAME, 6, 9, 'foo')
assert lex_one("""
#comment
foo#comment
""") == Token(TokenKind.NAME, 18, 21, 'foo')
assert lex_one(""",,,foo,,,""") == Token(TokenKind.NAME, 3, 6, 'foo')
def test_errors_respect_whitespace():
with raises(LanguageError) as excinfo:
lex_one("""
?
""")
assert str(excinfo.value) == \
'Syntax Error GraphQL (3:5) Unexpected character "?"\n' \
'\n' \
'2: \n' \
'3: ?\n' \
' ^\n' \
'4: \n'
def test_lexes_strings():
assert lex_one('"simple"') == Token(TokenKind.STRING, 0, 8, 'simple')
assert lex_one('" white space "') == Token(TokenKind.STRING, 0, 15, ' white space ')
assert lex_one('"quote \\""') == Token(TokenKind.STRING, 0, 10, 'quote "')
assert lex_one('"escaped \\n\\r\\b\\t\\f"') == Token(TokenKind.STRING, 0, 20, 'escaped \n\r\b\t\f')
assert lex_one('"slashes \\\\ \\/"') == Token( TokenKind.STRING, 0, 15, 'slashes \\ /')
assert lex_one(u'"unicode \\u1234\\u5678\\u90AB\\uCDEF"') == Token(TokenKind.STRING, 0, 34, u'unicode \u1234\u5678\u90AB\uCDEF')
def test_lex_reports_useful_string_errors():
with raises(LanguageError) as excinfo:
lex_one('"no end quote')
assert 'Syntax Error GraphQL (1:14) Unterminated string' in str(excinfo.value)
with raises(LanguageError) as excinfo:
lex_one('"multi\nline"')
assert 'Syntax Error GraphQL (1:7) Unterminated string' in str(excinfo.value)
with raises(LanguageError) as excinfo:
lex_one('"multi\rline"')
assert 'Syntax Error GraphQL (1:7) Unterminated string' in str(excinfo.value)
with raises(LanguageError) as excinfo:
lex_one(u'"multi\u2028line"')
assert 'Syntax Error GraphQL (1:7) Unterminated string' in str(excinfo.value)
with raises(LanguageError) as excinfo:
lex_one(u'"multi\u2029line"')
assert 'Syntax Error GraphQL (1:7) Unterminated string' in str(excinfo.value)
with raises(LanguageError) as excinfo:
lex_one('"bad \\z esc"')
assert 'Syntax Error GraphQL (1:7) Bad character escape sequence' in str(excinfo.value)
with raises(LanguageError) as excinfo:
lex_one('"bad \\x esc"')
assert 'Syntax Error GraphQL (1:7) Bad character escape sequence' in str(excinfo.value)
with raises(LanguageError) as excinfo:
lex_one('"bad \\u1 esc"')
assert 'Syntax Error GraphQL (1:7) Bad character escape sequence' in str(excinfo.value)
with raises(LanguageError) as excinfo:
lex_one('"bad \\u0XX1 esc"')
assert 'Syntax Error GraphQL (1:7) Bad character escape sequence' in str(excinfo.value)
with raises(LanguageError) as excinfo:
lex_one('"bad \\uXXXX esc"')
assert 'Syntax Error GraphQL (1:7) Bad character escape sequence' in str(excinfo.value)
with raises(LanguageError) as excinfo:
lex_one('"bad \\uFXXX esc"')
assert 'Syntax Error GraphQL (1:7) Bad character escape sequence' in str(excinfo.value)
with raises(LanguageError) as excinfo:
lex_one('"bad \\uXXXF esc"')
assert 'Syntax Error GraphQL (1:7) Bad character escape sequence' in str(excinfo.value)
def test_lexes_numbers():
assert lex_one('4') == Token(TokenKind.INT, 0, 1, '4')
assert lex_one('4.123') == Token(TokenKind.FLOAT, 0, 5, '4.123')
assert lex_one('-4') == Token(TokenKind.INT, 0, 2, '-4')
assert lex_one('9') == Token(TokenKind.INT, 0, 1, '9')
assert lex_one('0') == Token(TokenKind.INT, 0, 1, '0')
assert lex_one('00') == Token(TokenKind.INT, 0, 1, '0')
assert lex_one('-4.123') == Token(TokenKind.FLOAT, 0, 6, '-4.123')
assert lex_one('0.123') == Token(TokenKind.FLOAT, 0, 5, '0.123')
assert lex_one('-1.123e4') == Token(TokenKind.FLOAT, 0, 8, '-1.123e4')
assert lex_one('-1.123e-4') == Token(TokenKind.FLOAT, 0, 9, '-1.123e-4')
assert lex_one('-1.123e4567') == Token(TokenKind.FLOAT, 0, 11, '-1.123e4567')
def test_lex_reports_useful_number_errors():
with raises(LanguageError) as excinfo:
lex_one('+1')
assert 'Syntax Error GraphQL (1:1) Unexpected character "+"' in str(excinfo.value)
with raises(LanguageError) as excinfo:
lex_one('1.')
assert 'Syntax Error GraphQL (1:3) Invalid number' in str(excinfo.value)
with raises(LanguageError) as excinfo:
lex_one('1.A')
assert 'Syntax Error GraphQL (1:3) Invalid number' in str(excinfo.value)
with raises(LanguageError) as excinfo:
lex_one('-A')
assert 'Syntax Error GraphQL (1:2) Invalid number' in str(excinfo.value)
with raises(LanguageError) as excinfo:
lex_one('1.0e+4')
assert 'Syntax Error GraphQL (1:5) Invalid number' in str(excinfo.value)
with raises(LanguageError) as excinfo:
lex_one('1.0e')
assert 'Syntax Error GraphQL (1:5) Invalid number' in str(excinfo.value)
with raises(LanguageError) as excinfo:
lex_one('1.0eA')
assert 'Syntax Error GraphQL (1:5) Invalid number' in str(excinfo.value)
def test_lexes_punctuation():
assert lex_one('!') == Token(TokenKind.BANG, 0, 1)
assert lex_one('$') == Token(TokenKind.DOLLAR, 0, 1)
assert lex_one('(') == Token(TokenKind.PAREN_L, 0, 1)
assert lex_one(')') == Token(TokenKind.PAREN_R, 0, 1)
assert lex_one('...') == Token(TokenKind.SPREAD, 0, 3)
assert lex_one(':') == Token(TokenKind.COLON, 0, 1)
assert lex_one('=') == Token(TokenKind.EQUALS, 0, 1)
assert lex_one('@') == Token(TokenKind.AT, 0, 1)
assert lex_one('[') == Token(TokenKind.BRACKET_L, 0, 1)
assert lex_one(']') == Token(TokenKind.BRACKET_R, 0, 1)
assert lex_one('{') == Token(TokenKind.BRACE_L, 0, 1)
assert lex_one('|') == Token(TokenKind.PIPE, 0, 1)
assert lex_one('}') == Token(TokenKind.BRACE_R, 0, 1)
def test_lex_reports_useful_unknown_character_error():
with raises(LanguageError) as excinfo:
lex_one('..')
assert 'Syntax Error GraphQL (1:1) Unexpected character "."' in str(excinfo.value)
with raises(LanguageError) as excinfo:
lex_one('?')
assert 'Syntax Error GraphQL (1:1) Unexpected character "?"' in str(excinfo.value)
with raises(LanguageError) as excinfo:
lex_one(u'\u203B')
assert r'Syntax Error GraphQL (1:1) Unexpected character "\u203b"' in str(excinfo.value)
```
#### File: tests/core_language/test_source.py
```python
from graphql.core import Source
def test_source_eq():
s1 = Source('foo', 'bar')
s2 = Source('foo', 'bar')
assert s1 == s2
s3 = Source('bar', 'baz')
assert s1 != s3
s4 = 'not a source'
assert s1 != s4
``` |
{
"source": "jhgg/graphql-relay-py",
"score": 3
} |
#### File: graphql_relay/connection/arrayconnection.py
```python
from graphql_relay.utils import base64, unbase64
from .connectiontypes import Connection, PageInfo, Edge
def connectionFromArray(data, args={}, **kwargs):
'''
A simple function that accepts an array and connection arguments, and returns
a connection object for use in GraphQL. It uses array offsets as pagination,
so pagination will only work if the array is static.
'''
full_args = dict(args, **kwargs)
before = full_args.get('before')
after = full_args.get('after')
first = full_args.get('first')
last = full_args.get('last')
count = len(data)
# Slice with cursors
begin = max(getOffset(after, -1), -1) + 1
end = min(getOffset(before, count + 1), count)
if begin >= count or begin>=end:
return emptyConnection()
# Save the pre-slice cursors
firstPresliceCursor = offsetToCursor(begin)
lastPresliceCursor = offsetToCursor(min(end, count)-1)
# Slice with limits
if first != None:
end = min(begin+first, end)
if last != None:
begin = max(end-last, begin)
if begin >= count or begin>=end:
return emptyConnection()
sliced_data = data[begin:end]
edges = [
Edge(node, cursor=offsetToCursor(i+begin))
for i, node in enumerate(sliced_data)
]
# Construct the connection
firstEdge = edges[0]
lastEdge = edges[len(edges) - 1]
return Connection(
edges,
PageInfo(
startCursor=firstEdge.cursor,
endCursor=lastEdge.cursor,
hasPreviousPage= (firstEdge.cursor != firstPresliceCursor),
hasNextPage= (lastEdge.cursor != lastPresliceCursor)
)
)
def connectionFromPromisedArray(dataPromise, args={}, **kwargs):
'''
A version of the above that takes a promised array, and returns a promised
connection.
'''
# TODO: Promises not implemented
raise Exception('connectionFromPromisedArray is not implemented yet')
# return dataPromise.then(lambda data:connectionFromArray(data, args))
def emptyConnection():
'''
Helper to get an empty connection.
'''
return Connection(
[],
PageInfo(
startCursor=None,
endCursor=None,
hasPreviousPage=False,
hasNextPage=False,
)
)
PREFIX = 'arrayconnection:'
def offsetToCursor(offset):
'''
Creates the cursor string from an offset.
'''
return base64(PREFIX + str(offset))
def cursorToOffset(cursor):
'''
Rederives the offset from the cursor string.
'''
try:
return int(unbase64(cursor)[len(PREFIX):len(PREFIX)+10])
except:
return None
def cursorForObjectInConnection(data, _object):
'''
Return the cursor associated with an object in an array.
'''
if _object not in data:
return None
offset = data.index(_object)
return offsetToCursor(offset)
def getOffset(cursor, defaultOffset=0):
'''
Given an optional cursor and a default offset, returns the offset
to use; if the cursor contains a valid offset, that will be used,
otherwise it will be the default.
'''
if cursor == None:
return defaultOffset
offset = cursorToOffset(cursor)
try:
return int(offset)
except:
return defaultOffset
```
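A small, hypothetical round trip through `connectionFromArray` may help. It assumes the `Connection`, `Edge`, and `PageInfo` objects from `connectiontypes` expose attributes named after the constructor keywords used above (`edges`, `node`, `cursor`, `pageInfo`, `endCursor`, `hasNextPage`); treat it as a sketch of the intended behaviour rather than a guaranteed API.
```python
# Sketch: paginate a static list two items at a time using array-offset cursors.
# Attribute names on the connection objects are assumed, as noted above.
from graphql_relay.connection.arrayconnection import connectionFromArray

data = ['A', 'B', 'C', 'D', 'E']

first_page = connectionFromArray(data, first=2)
assert [edge.node for edge in first_page.edges] == ['A', 'B']

# Resume after the last cursor of the previous page.
end_cursor = first_page.pageInfo.endCursor
second_page = connectionFromArray(data, first=2, after=end_cursor)
assert [edge.node for edge in second_page.edges] == ['C', 'D']
assert second_page.pageInfo.hasNextPage
```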
#### File: graphql_relay/connection/connection.py
```python
from graphql.core.type import (
GraphQLArgument,
GraphQLBoolean,
GraphQLInt,
GraphQLNonNull,
GraphQLList,
GraphQLObjectType,
GraphQLString,
GraphQLField
)
class ConnectionConfig(object):
    '''
    Bundles the configuration used by connectionDefinitions: the connection
    name, the node type, and optional extra edge/connection fields.
    '''
def __init__(self, name, nodeType, edgeFields=None, connectionFields=None):
self.name = name
self.nodeType = nodeType
self.edgeFields = edgeFields
self.connectionFields = connectionFields
class GraphQLConnection(object):
def __init__(self, edgeType, connectionType):
self.edgeType = edgeType
self.connectionType = connectionType
connectionArgs = {
'before': GraphQLArgument(GraphQLString),
'after': GraphQLArgument(GraphQLString),
'first': GraphQLArgument(GraphQLInt),
'last': GraphQLArgument(GraphQLInt),
}
def resolveMaybeThunk(f):
if hasattr(f, '__call__'):
return f()
return f
def connectionDefinitions(*args, **kwargs):
if len(args) == 1 and not kwargs and isinstance(args[0], ConnectionConfig):
config = args[0]
else:
config = ConnectionConfig(*args, **kwargs)
name, nodeType = config.name, config.nodeType
edgeFields = config.edgeFields or {}
connectionFields = config.connectionFields or {}
edgeType = GraphQLObjectType(
name+'Edge',
description='An edge in a connection.',
fields=lambda: dict({
'node': GraphQLField(
nodeType,
description='The item at the end of the edge',
),
'cursor': GraphQLField(
GraphQLNonNull(GraphQLString),
description='A cursor for use in pagination',
)
}, **resolveMaybeThunk(edgeFields))
)
connectionType = GraphQLObjectType(
name+'Connection',
description='A connection to a list of items.',
fields=lambda: dict({
'pageInfo': GraphQLField(
GraphQLNonNull(pageInfoType),
description='The Information to aid in pagination',
),
'edges': GraphQLField(
GraphQLList(edgeType),
description='Information to aid in pagination.',
)
}, **resolveMaybeThunk(connectionFields))
)
return GraphQLConnection(edgeType, connectionType)
# The common page info type used by all connections.
pageInfoType = GraphQLObjectType(
'PageInfo',
description='Information about pagination in a connection.',
fields=lambda:{
'hasNextPage': GraphQLField(
GraphQLNonNull(GraphQLBoolean),
description='When paginating forwards, are there more items?',
),
'hasPreviousPage': GraphQLField(
GraphQLNonNull(GraphQLBoolean),
description='When paginating backwards, are there more items?',
),
'startCursor': GraphQLField(
GraphQLString,
description='When paginating backwards, the cursor to continue.',
),
'endCursor': GraphQLField(
GraphQLString,
description='When paginating forwards, the cursor to continue.',
),
}
)
```
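As a hypothetical usage sketch, the helpers above are typically combined like this: build a node type, derive edge and connection types from it with `connectionDefinitions`, and attach `connectionArgs` to the field that returns the connection. The `Ship` naming is illustrative only.
```python
# Sketch: derive Ship{Edge,Connection} types and expose them on a field.
from graphql.core.type import GraphQLObjectType, GraphQLField, GraphQLString
from graphql_relay.connection.connection import (
    connectionDefinitions,
    connectionArgs,
)

shipType = GraphQLObjectType(
    'Ship',
    fields=lambda: {
        'name': GraphQLField(GraphQLString),
    },
)

# connectionDefinitions returns a GraphQLConnection holding edgeType and
# connectionType, per the class defined above.
shipConnection = connectionDefinitions('Ship', shipType)

factionFields = {
    'ships': GraphQLField(
        shipConnection.connectionType,
        args=connectionArgs,
        # A real resolver would typically return connectionFromArray(ships, args).
    ),
}
```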
#### File: graphql_relay/mutation/mutation.py
```python
from graphql.core.type import (
GraphQLArgument,
GraphQLInputObjectType,
GraphQLNonNull,
GraphQLObjectType,
GraphQLString,
GraphQLField,
)
from graphql.core.error import GraphQLError
def mutationWithClientMutationId(name, inputFields, outputFields, mutateAndGetPayload):
augmentedInputFields = dict(inputFields,
clientMutationId=GraphQLField(GraphQLNonNull(GraphQLString))
)
augmentedOutputFields = dict(outputFields,
clientMutationId=GraphQLField(GraphQLNonNull(GraphQLString))
)
inputType = GraphQLInputObjectType(
name+'Input',
fields=augmentedInputFields,
)
outputType = GraphQLObjectType(
name+'Payload',
fields=augmentedOutputFields,
)
def resolver(__, args, info, *_):
input = args.get('input')
if not input:
# TODO: Should be raised by Graphql
raise GraphQLError('Input not provided')
payload = mutateAndGetPayload(input, info)
try:
payload.clientMutationId = input['clientMutationId']
except:
raise GraphQLError('Cannot set clientMutationId in the payload object %s'%repr(payload))
return payload
return GraphQLField(
outputType,
args={
'input': GraphQLArgument(GraphQLNonNull(inputType)),
},
resolver=resolver
)
```
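A hypothetical sketch of wiring a mutation with this helper: the payload object only needs a settable `clientMutationId` attribute, since `resolver()` assigns it after `mutateAndGetPayload` returns. All names below are illustrative.
```python
# Sketch: an 'Echo' mutation built with mutationWithClientMutationId.
from graphql.core.type import GraphQLObjectType, GraphQLField, GraphQLString
from graphql_relay.mutation.mutation import mutationWithClientMutationId


class EchoResult(object):
    def __init__(self, text):
        self.text = text
        self.clientMutationId = None  # filled in by resolver()


def mutate_and_get_payload(input, info):
    # Any real side effect of the mutation would happen here.
    return EchoResult(text=input.get('text'))


echoMutation = mutationWithClientMutationId(
    name='Echo',
    inputFields={'text': GraphQLField(GraphQLString)},
    outputFields={'text': GraphQLField(GraphQLString)},
    mutateAndGetPayload=mutate_and_get_payload,
)

# echoMutation is a GraphQLField, so it slots straight into a mutation type,
# which would typically then be installed on the schema's mutation root.
mutationType = GraphQLObjectType(
    'Mutation',
    fields=lambda: {'echo': echoMutation},
)
```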
#### File: tests/node/test_node.py
```python
from collections import namedtuple
from pytest import raises
from graphql.core import graphql
from graphql.core.type import (
GraphQLSchema,
GraphQLObjectType,
GraphQLField,
GraphQLArgument,
GraphQLList,
GraphQLNonNull,
GraphQLInt,
GraphQLString,
GraphQLBoolean,
GraphQLID,
)
from graphql_relay.node.node import nodeDefinitions
User = namedtuple('User', ['id', 'name'])
Photo = namedtuple('Photo', ['id', 'width'])
userData = {
'1': User(id=1, name='<NAME>'),
'2': User(id=2, name='<NAME>'),
}
photoData = {
'3': Photo(id=3, width=300),
'4': Photo(id=4, width=400),
}
def getNode(id, info):
assert info.schema == schema
if id in userData:
return userData[id]
else:
return photoData[id]
def getNodeType(obj):
if obj.id in userData:
return userType
else:
return photoType
_nodeDefinitions = nodeDefinitions(getNode, getNodeType)
nodeField, nodeInterface = _nodeDefinitions.nodeField, _nodeDefinitions.nodeInterface
userType = GraphQLObjectType(
'User',
fields= lambda: {
'id': GraphQLField(GraphQLNonNull(GraphQLID)),
'name': GraphQLField(GraphQLString),
},
interfaces= [nodeInterface]
)
photoType = GraphQLObjectType(
'Photo',
fields= lambda: {
'id': GraphQLField(GraphQLNonNull(GraphQLID)),
'width': GraphQLField(GraphQLInt),
},
interfaces= [nodeInterface]
)
queryType = GraphQLObjectType(
'Query',
fields= lambda: {
'node': nodeField,
}
)
schema = GraphQLSchema(query=queryType)
def test_include_connections_and_edge_types():
query = '''
{
node(id: "1") {
id
}
}
'''
expected = {
'node': {
'id': '1',
}
}
result = graphql(schema, query)
assert result.errors == None
assert result.data == expected
``` |
{
"source": "jhgg/jeev",
"score": 2
} |
#### File: jeev/adapter/__init__.py
```python
from ..utils import importing
def get_adapter_by_name(name):
if '.' not in name:
name = 'jeev.adapter.%s.adapter' % name
return importing.import_dotted_path(name)
``` |
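A short hypothetical example of the name expansion: bare names are rewritten to `jeev.adapter.<name>.adapter` before the dotted-path import, so both spellings below resolve to the same object (assuming a `console` adapter module exists in the package).
```python
# Sketch: bare adapter names expand to 'jeev.adapter.<name>.adapter'.
# The 'console' adapter is an assumed example module.
from jeev.adapter import get_adapter_by_name

short_form = get_adapter_by_name('console')
long_form = get_adapter_by_name('jeev.adapter.console.adapter')
assert short_form is long_form
```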
{
"source": "jhgg/relaypy",
"score": 2
} |
#### File: relaypy/relaypy/server.py
```python
from wsgi_graphql import wsgi_graphql
from wsgiref.simple_server import make_server
from webob.dec import wsgify
from webob.exc import HTTPNotFound
from webob.static import DirectoryApp, FileApp
from .schema import StarWarsSchema
def server():
graphql = wsgi_graphql(StarWarsSchema)
static = DirectoryApp('build', index_page=None)
index = FileApp('index.html')
graphiql = FileApp('graphiql.html')
@wsgify
def mount_graphql(request):
if request.path_info_peek() == '':
return request.get_response(index)
if request.path_info_peek() == 'graphiql':
return request.get_response(graphiql)
popped = request.path_info_pop()
if popped == 'graphql':
return request.get_response(graphql)
elif popped == 'static':
return request.get_response(static)
raise HTTPNotFound()
server = make_server('127.0.0.1', 5000, mount_graphql)
print "Python GraphQL server running on http://127.0.0.1:5000/graphql"
print "React with Relay UI available on http://127.0.0.1:5000"
server.serve_forever()
``` |
{
"source": "jhgoebbert/BSTIM-Covid19",
"score": 2
} |
#### File: BSTIM-Covid19/src/plot_utils.py
```python
import matplotlib
from matplotlib.collections import PatchCollection
import matplotlib.cm
from matplotlib.axes import Axes
import matplotlib.transforms as transforms
from matplotlib import pyplot as plt
from matplotlib import rc
from shapely.geometry import Polygon
from collections import OrderedDict, defaultdict
import numpy as np
from shapely.ops import cascaded_union
from descartes import PolygonPatch
import matplotlib.patheffects as PathEffects
from matplotlib.gridspec import SubplotSpec, GridSpec, GridSpecFromSubplotSpec
import pymc3 as pm
import seaborn as sns
from itertools import product
import re
plt.style.use('ggplot')
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['text.latex.unicode'] = True
matplotlib.rcParams["font.family"] = "Bitstream Charter"
matplotlib.rcParams['xtick.labelsize'] = 16
matplotlib.rcParams['ytick.labelsize'] = 16
matplotlib.rcParams['axes.labelsize'] = 22
matplotlib.rcParams['axes.titlesize'] = 22
def plot_counties(
ax,
counties,
values=None,
edgecolors=None,
contourcolor="white",
hatch_surround=None,
xlim=None,
ylim=None,
background=True,
xticks=True,
yticks=True,
grid=True,
frame=True,
xlabel="Longitude [in dec. degrees]",
ylabel="Latitude [in dec. degrees]",
lw=1):
polygons = [r["shape"] for r in counties.values()]
# extend german borders :S and then shrink them again after unifying
# gets rid of many holes at the county boundaries
contour = cascaded_union([pol.buffer(0.01)
for pol in polygons]).buffer(-0.01)
xmin, ymin, xmax, ymax = contour.bounds
if xlim is None:
xlim = [xmin, xmax]
if ylim is None:
ylim = [ymin, ymax]
surround = PolygonPatch(Polygon([(xlim[0], ylim[0]), (xlim[0], ylim[1]), (
xlim[1], ylim[1]), (xlim[1], ylim[0])]).difference(contour))
contour = PolygonPatch(contour, lw=lw)
pc = PatchCollection([PolygonPatch(p, lw=lw)
for p in polygons], cmap=matplotlib.cm.viridis, alpha=1.0)
if values is not None:
if isinstance(values, (dict, OrderedDict)):
values = np.array([values.setdefault(r, np.nan)
for r in counties.keys()])
elif isinstance(values, str):
values = np.array([r.setdefault(values, np.nan)
for r in counties.values()])
else:
assert np.size(values) == len(counties), "Number of values ({}) doesn't match number of counties ({})!".format(
np.size(values), len(counties))
#pc.set_clim(np.min(values), np.max(values))
nans = np.isnan(values)
values[nans] = 0
values = np.ma.MaskedArray(values, mask=nans)
pc.set(array=values, cmap='viridis')
else:
pc.set_facecolors("none")
if edgecolors is not None:
if isinstance(edgecolors, (dict, OrderedDict)):
edgecolors = np.array([edgecolors.setdefault(r, "none")
for r in counties.keys()])
elif isinstance(edgecolors, str):
edgecolors = np.array([r.setdefault(edgecolors, "none")
for r in counties.values()])
pc.set_edgecolors(edgecolors)
else:
pc.set_edgecolors("none")
if hatch_surround is not None:
surround.set_hatch(hatch_surround)
surround.set_facecolor("none")
ax.add_patch(surround)
ax.add_collection(pc)
if contourcolor is not None:
contour.set_edgecolor(contourcolor)
contour.set_facecolor("none")
ax.add_patch(contour)
if isinstance(background, bool):
ax.patch.set_visible(background)
else:
ax.patch.set_color(background)
ax.grid(grid)
ax.set_frame_on(frame)
ax.set_xlim(*xlim)
ax.set_ylim(*ylim)
ax.set_aspect(1.43)
if xlabel:
ax.set_xlabel(xlabel, fontsize=14)
if ylabel:
ax.set_ylabel(ylabel, fontsize=14)
ax.tick_params(axis="x", which="both", bottom=xticks, labelbottom=xticks)
ax.tick_params(axis="y", which="both", left=yticks, labelleft=yticks)
return pc, contour, surround
def pairplot(
df,
labels={},
diagonal_kind="kde",
lower_kind="kde",
upper_kind="empty",
spec=GridSpec(
1,
1)[0],
xlabelrotation=0,
ylabelrotation=90,
ylabels=True,
xlabels=True,
xtickrotation=60,
fig=plt.gcf(),
lower_kwargs={},
diagonal_kwargs={},
upper_kwargs={},
rasterized=False,
tick_args={}):
N = len(df.columns)
axes = np.empty((N, N), dtype=object)
g = GridSpecFromSubplotSpec(N, N, subplot_spec=spec)
fake_axes = {}
for y in range(N):
fake_axes[(y, 0)] = plt.Subplot(fig, g[y, 0])
fake_axes[(y, 0)].set_visible(False)
for x in range(1, N):
fake_axes[(0, x)] = plt.Subplot(fig, g[0, x])
fake_axes[(0, x)].set_visible(False)
for y, v2 in enumerate(df.columns):
for x, v1 in enumerate(df.columns):
if np.all(np.isnan(df[v1])) or np.all(np.isnan(df[v2])):
                # share_args is not defined yet on this branch; use a bare subplot.
                axes[y, x] = plt.Subplot(fig, g[y, x])
kind = "noframe"
else:
if y < x: # upper triangle
kind = upper_kind
kwargs = upper_kwargs
elif y == x: # diagonal
kind = diagonal_kind
kwargs = diagonal_kwargs
else: # lower triangle
kind = lower_kind
kwargs = lower_kwargs
if x == y and kind == "kde":
share_args = {"sharex": fake_axes[(0, x)]}
tick_args_default = {
"left": False,
"labelleft": False,
"bottom": (
y == N - 1),
"labelbottom": (
y == N - 1),
"labelsize": 18,
"length": 6}
else:
share_args = {"sharex": fake_axes[(
0, x)], "sharey": fake_axes[(y, 0)]}
tick_args_default = {
"labelleft": (
x == 0),
"labelright": (
x == N - 1),
"labelbottom": (
y == N - 1),
"left": (
x == 0),
"right": (
x == N - 1),
"bottom": (
y == N - 1),
"labelsize": 18,
"length": 6}
tick_args_default.update(tick_args)
tick_args = tick_args_default
axes[y, x] = plt.Subplot(fig, g[y, x], **share_args)
axes[y, x].tick_params(
axis="x", labelrotation=xtickrotation, **tick_args)
if kind == "noframe":
axes[y, x].set_frame_on(False)
axes[y, x].set_xticks([])
axes[y, x].set_yticks([])
elif kind == "empty":
axes[y, x].set_visible(False)
elif kind == "scatter":
axes[y, x].scatter(df[v1], df[v2], **kwargs)
elif kind == "reg":
sns.regplot(df[v1], df[v2], ax=axes[y, x], **kwargs)
elif kind == "kde":
if x == y:
sns.kdeplot(df[v1], shade=True,
ax=axes[y, x], legend=False, **kwargs)
axes[y, x].grid(False)
else:
sns.kdeplot(df[v1], df[v2], shade=True, shade_lowest=False,
ax=axes[y, x], legend=False, **kwargs)
# kde
else:
raise NotImplementedError(
"Subplot kind must be 'empty', 'scatter', 'reg' or 'kde'.")
axes[y, x].set_rasterized(rasterized)
if x == 0 and ylabels:
axes[y, x].set_ylabel(labels.setdefault(
v2, v2), rotation=ylabelrotation, ha='right', va="center", fontsize=18)
axes[y, x].tick_params(**tick_args)
else:
axes[y, x].set_ylabel("")
axes[y, x].tick_params(**tick_args)
if y == N - 1 and xlabels:
axes[y, x].set_xlabel(labels.setdefault(
v1, v1), rotation=xlabelrotation, ha='center', va="top", fontsize=18)
else:
axes[y, x].set_xlabel("")
fig.add_subplot(axes[y, x])
positive = np.all(df.values >= 0)
for y in range(N):
if np.all(np.isnan(df.iloc[:, y])):
continue
μ = df.iloc[:, y].mean()
σ = df.iloc[:, y].std()
if positive:
fake_axes[(y, 0)].set_yticks((0, μ, μ + 3 * σ))
fake_axes[(y, 0)].set_ylim((0, μ + 4 * σ))
else:
fake_axes[(y, 0)].set_yticks((μ - 3 * σ, μ, μ + 3 * σ))
fake_axes[(y, 0)].set_ylim((μ - 4 * σ, μ + 4 * σ))
fake_axes[(y, 0)].yaxis.set_major_formatter(
matplotlib.ticker.FormatStrFormatter('%.1f'))
for x in range(N):
            if np.all(np.isnan(df.iloc[:, x])):
continue
μ = df.iloc[:, x].mean()
σ = df.iloc[:, x].std()
if positive:
fake_axes[(0, x)].set_xticks((0, μ, μ + 3 * σ))
fake_axes[(0, x)].set_xlim((0, μ + 4 * σ))
else:
fake_axes[(0, x)].set_xticks((μ - 3 * σ, μ, μ + 3 * σ))
fake_axes[(0, x)].set_xlim((μ - 4 * σ, μ + 4 * σ))
fake_axes[(0, x)].xaxis.set_major_formatter(
matplotlib.ticker.FormatStrFormatter('%.1f'))
return np.array(axes)
def rhatplot(trace,
var_names=None,
var_args={},
fig=plt.gcf(),
sp=GridSpec(1,
1)[:,
:],
bound=None,
ylabels=True,
yticks=True,
yticklabels=True,
title=r"$\hat R$",
labelsize=22):
if var_names is None:
var_names = trace.varnames
var_args = defaultdict(
lambda: {"color": "C1", "label": None, "markersize": 1}, **var_args)
num_groups = len(var_names)
tp = trace.point(0)
rhat = pm.gelman_rubin(trace, varnames=var_names)
minval = np.min([np.min(rhat[name])
for name in var_names if len(rhat[name]) > 0])
maxval = np.max([np.max(rhat[name])
for name in var_names if len(rhat[name]) > 0])
if bound is None:
bound = maxval
bound_label = str(bound)
gl, gz, gt = re.match(r"([0-9]+\.)(0*)(.*)", bound_label).groups()
gt = str(round(int(gt) / 10**(len(gt) - 1)))[0]
bound_label = gl + gz + gt
grid = GridSpecFromSubplotSpec(
num_groups,
1,
sp,
height_ratios=[
np.prod(
tp[name].shape) +
2 for name in var_names])
axes = []
for j, name in enumerate(var_names):
if len(tp[name]) == 0:
continue
ax = fig.add_subplot(grid[j], sharex=axes[0]
if len(axes) > 0 else None)
args = var_args[name]
yticks_ = []
yticklabels_ = []
for i, idx in enumerate(product(*(range(s) for s in tp[name].shape))):
yticks_.append(-i)
yticklabels_.append("{}".format(np.squeeze(idx)))
if name in rhat:
ax.plot(rhat[name], yticks_, "o", markersize=args["markersize"])
ax.set_ylim([yticks_[-1] - 1, 1])
if not yticklabels:
ax.set_yticklabels([])
elif yticklabels:
ax.set_yticklabels(yticklabels_)
else:
ax.set_yticklabels(yticklabels)
if not yticks:
ax.set_yticks([])
elif yticks:
ax.set_yticks(yticks_)
else:
ax.set_yticks(yticks)
if ylabels:
bbox = ax.get_position()
if ylabels:
label = args["label"]
else:
label = ylabels[j]
if label is None:
label = name
fig.text(bbox.x0 - 0.01, bbox.y0 + bbox.height / 2, label,
ha="right", va="center", fontsize=labelsize)
# ax.set_ylabel(label, rotation=0)
axes.append(ax)
axes[-1].set_xticks([1.0, bound])
axes[-1].set_xticklabels(["1.0", bound_label])
axes[-1].set_xlim([min(minval, 1.0) - 0.01, max(bound, maxval) + 0.01])
for ax in axes[:-1]:
for tick in ax.get_xticklabels():
tick.set_visible(False)
axes[0].set_title(title)
return axes, grid
# because the trace loading doesn't load energy stats properly...
def energyplot(
energies, fill_color=(
"C0", "C1"), fill_alpha=(
1, 0.5), fig=plt.gcf(), sp=GridSpec(
1, 1)[
:, :]):
for i, energy in enumerate(energies):
mean_energy, trans_energy = energy - energy.mean(), np.diff(energy)
ax = fig.add_subplot(sp)
pm.kdeplot(mean_energy, label="Marginal Energy", ax=ax,
shade=fill_alpha[0], kwargs_shade={"color": fill_color[0]})
pm.kdeplot(trans_energy, label="Energy Transition", ax=ax,
shade=fill_alpha[1], kwargs_shade={"color": fill_color[1]})
ax.plot([], label="chain {:>2} BFMI = {:.2f}".format(
i, pm.bfmi({"energy": energy})), alpha=0)
ax.legend()
ax.set_xticks([])
ax.set_yticks([])
# because the default forest plot is not flexible enough #sad
def forestplot(trace, var_labels=None, var_args={}, fig=plt.gcf(),
sp=GridSpec(1, 1)[:, :], combine=False, credible_interval=0.95):
if var_labels is None:
var_labels = trace.varnames
var_args = defaultdict(
lambda: {
"color": "C1",
"label": None,
"interquartile_linewidth": 2,
"credible_linewidth": 1},
**var_args)
num_groups = len(var_labels)
tp = trace.point(0)
# create indices
for i, var_label in enumerate(var_labels):
name = var_label if isinstance(var_label, str) else var_label[0]
cart = product(*(range(s) for s in tp[name].shape))
if isinstance(var_label, str):
var_labels[i] = (var_label, map(np.squeeze, cart), cart)
else:
var_labels[i] = tuple(var_label) + (cart,)
def plot_var_trace(ax, y, var_trace, credible_interval=0.95, **args):
endpoint = (1 - credible_interval) / 2
qs = np.quantile(var_trace, [endpoint, 1.0 - endpoint, 0.25, 0.75])
ax.plot(qs[:2], [y, y], color=args["color"],
linewidth=args["credible_linewidth"])
ax.plot(qs[2:], [y, y], color=args["color"],
linewidth=args["interquartile_linewidth"])
ax.plot([np.mean(var_trace)], [y], "o",
color=args["color"], markersize=args["markersize"])
grid = GridSpecFromSubplotSpec(
num_groups,
1,
sp,
height_ratios=[
np.prod(
tp[name].shape) +
2 for (
name,
idxs,
carts) in var_labels])
axes = []
for j, (name, idxs, carts) in enumerate(var_labels):
if len(tp[name]) == 0:
continue
ax = fig.add_subplot(grid[j])
args = var_args[name]
yticks = []
yticklabels = []
# plot label
# plot variable stats
for i, (idx, cart) in enumerate(zip(idxs, carts)):
yticks.append(-i)
yticklabels.append("{}".format(idx))
if combine:
var_trace = trace[name][(slice(-1),) + cart]
plot_var_trace(ax, -i, var_trace,
credible_interval=credible_interval, **args)
else:
for c, chain in enumerate(trace.chains):
var_trace = trace.get_values(name, chains=chain)[
(slice(-1),) + cart]
plot_var_trace(ax, -i + 0.25 - c / (trace.nchains - 1) * 0.5,
var_trace, credible_interval=credible_interval, **args)
ax.set_yticks(yticks)
ax.set_ylim([yticks[-1] - 1, 1])
ax.set_yticklabels(yticklabels)
label = args["label"]
if label is None:
label = name
ax.set_ylabel(label)
# ax.set_frame_on(False)
axes.append(ax)
return axes, grid
``` |
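To make the `plot_counties` contract concrete, here is a minimal, made-up example. It only relies on what the function reads: a mapping whose values carry a shapely shape under the `"shape"` key, plus an optional `values` dict keyed the same way. It assumes `plot_utils` imports cleanly in your environment (the module flips `text.usetex` on, which needs a LaTeX installation) and that shapely and descartes are available.
```python
# Sketch: two made-up square "counties" coloured by value.
from collections import OrderedDict

import matplotlib.pyplot as plt
from shapely.geometry import Polygon

from plot_utils import plot_counties  # assumes src/ is on the path

counties = OrderedDict([
    ('01001', {'shape': Polygon([(6.0, 50.0), (7.0, 50.0), (7.0, 51.0), (6.0, 51.0)])}),
    ('01002', {'shape': Polygon([(7.0, 50.0), (8.0, 50.0), (8.0, 51.0), (7.0, 51.0)])}),
])

fig, ax = plt.subplots(figsize=(4, 5))
pc, contour, surround = plot_counties(
    ax,
    counties,
    values={'01001': 1.0, '01002': 3.0},  # keyed like `counties`
    contourcolor='black',
    xlim=[5.5, 8.5],   # widen the view so the surround patch is non-empty
    ylim=[49.5, 51.5],
)
fig.colorbar(pc, ax=ax)
fig.savefig('counties_sketch.png')
```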
{
"source": "jhgoebbert/jupyter-jsc-notebooks",
"score": 2
} |
#### File: Interactive Widgets/soln/on_trait_change.py
```python
from ipywidgets import *
w = Text(placeholder='Search')
def handle_submit(args):
print(args['new'])
w.observe(handle_submit, names='value')
w
```
#### File: exercises/soln/remote_iter.py
```python
import ipyparallel
def remote_iterator(view, name):
"""Return an iterator on an object living on a remote engine."""
it_name = '_%s_iter' % name
view.execute('%s = iter(%s)' % (it_name,name), block=True)
ref = parallel.Reference(it_name)
while True:
try:
yield view.apply_sync(lambda x: x.next(), ref)
# This causes the StopIteration exception to be raised.
except parallel.RemoteError as e:
if e.ename == 'StopIteration':
raise StopIteration
else:
raise e
```
#### File: exercises/soln/remote_iter_slightly_better.py
```python
import ipyparallel
class remote_iterator:
"""Return an iterator on an object living on a remote engine."""
def __init__(self, view, name):
self.view = view
self.name = name
def __iter__(self):
return self
def __next__(self):
it_name = '_%s_iter' % self.name
self.view.execute('%s = iter(%s)' % (it_name, self.name), block=True)
next_ref = ipyparallel.Reference(it_name + '.next')
while True:
try:
yield self.view.apply_sync(next_ref)
# This causes the StopIteration exception to be raised.
except ipyparallel.RemoteError as e:
if e.ename == 'StopIteration':
raise StopIteration
else:
raise e
```
#### File: 001-Jupyter/002-JupyterExtensions/test.py
```python
def add(a, b):
return a+b
print(add(2,3))
``` |
{
"source": "jhgoebbert/jupyter-libertem-proxy",
"score": 2
} |
#### File: jupyter-libertem-proxy/test/test_setupcall.py
```python
def test_setupcall():
"""
Test the call of the setup function
"""
import jupyter_libertem_proxy as jx
print("\nRunning test_setupcall...")
print(jx.setup_libertem())
``` |
{
"source": "jhgoh/pylhe",
"score": 2
} |
#### File: pylhe/tests/test_api.py
```python
from sys import version_info
import pytest
import pylhe
python37plus_only = pytest.mark.skipif(
version_info < (3, 7), reason="requires Python3.7+"
)
@python37plus_only
def test_top_level_api():
assert dir(pylhe) == [
"LHEEvent",
"LHEEventInfo",
"LHEFile",
"LHEInit",
"LHEParticle",
"LHEProcInfo",
"__version__",
"loads",
"read_lhe",
"read_lhe_init",
"read_lhe_with_attributes",
"read_num_events",
"register_awkward",
"to_awkward",
"visualize",
]
@python37plus_only
def test_awkward_api():
assert dir(pylhe.awkward) == ["register_awkward", "to_awkward"]
def test_load_version():
assert pylhe.__version__
```
#### File: pylhe/tests/test_lhe_reader.py
```python
import gzip
import os
import shutil
from pathlib import Path
from tempfile import NamedTemporaryFile
import pytest
import skhep_testdata
import pylhe
TEST_FILE = skhep_testdata.data_path("pylhe-testfile-pr29.lhe")
@pytest.fixture(scope="session")
def testdata_gzip_file():
test_data = skhep_testdata.data_path("pylhe-testfile-pr29.lhe")
tmp_path = Path(NamedTemporaryFile().name)
# create what is basically pylhe-testfile-pr29.lhe.gz
with open(test_data, "rb") as readfile:
with gzip.open(tmp_path, "wb") as writefile:
shutil.copyfileobj(readfile, writefile)
yield tmp_path
# teardown
os.remove(tmp_path)
def test_gzip_open(tmpdir, testdata_gzip_file):
assert pylhe._extract_fileobj(TEST_FILE)
assert pylhe._extract_fileobj(testdata_gzip_file)
# Needs path-like object, not a fileobj
with pytest.raises(TypeError):
with open(TEST_FILE, "rb") as fileobj:
pylhe._extract_fileobj(fileobj)
with open(TEST_FILE, "rb") as fileobj:
assert isinstance(pylhe._extract_fileobj(TEST_FILE), type(fileobj))
assert isinstance(pylhe._extract_fileobj(Path(TEST_FILE)), type(fileobj))
assert isinstance(pylhe._extract_fileobj(testdata_gzip_file), gzip.GzipFile)
assert isinstance(pylhe._extract_fileobj(Path(testdata_gzip_file)), gzip.GzipFile)
def test_read_num_events(testdata_gzip_file):
assert pylhe.read_num_events(TEST_FILE) == 791
assert pylhe.read_num_events(TEST_FILE) == pylhe.read_num_events(testdata_gzip_file)
def test_lhe_init(testdata_gzip_file):
assert pylhe.read_lhe_init(TEST_FILE) == pylhe.read_lhe_init(testdata_gzip_file)
init_data = pylhe.read_lhe_init(TEST_FILE)
init_info = init_data["initInfo"]
assert init_info["beamA"] == pytest.approx(1.0)
assert init_info["beamB"] == pytest.approx(2.0)
assert init_info["energyA"] == pytest.approx(1.234567)
assert init_info["energyB"] == pytest.approx(2.345678)
assert init_info["PDFgroupA"] == pytest.approx(3.0)
assert init_info["PDFgroupB"] == pytest.approx(4.0)
assert init_info["PDFsetA"] == pytest.approx(5.0)
assert init_info["PDFsetB"] == pytest.approx(6.0)
assert init_info["weightingStrategy"] == pytest.approx(7.0)
assert init_info["numProcesses"] == pytest.approx(8.0)
def test_read_lhe(testdata_gzip_file):
assert pylhe.read_lhe(TEST_FILE)
assert pylhe.read_lhe(testdata_gzip_file)
```
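For reference, a short hypothetical snippet showing how the reader functions exercised above are used outside the tests; it sticks to the calls already demonstrated here (`read_num_events`, `read_lhe`) and the same scikit-hep test file.
```python
# Sketch: count and lazily iterate events from the scikit-hep test file.
import itertools

import pylhe
import skhep_testdata

lhe_path = skhep_testdata.data_path("pylhe-testfile-pr29.lhe")

print(pylhe.read_num_events(lhe_path))  # 791 for this file

events = pylhe.read_lhe(lhe_path)       # iterable of LHEEvent objects
first_three = list(itertools.islice(events, 3))
```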
#### File: pylhe/tests/test_visualize.py
```python
import itertools
import skhep_testdata
import pylhe
def test_visualize(tmpdir):
lhe_file = skhep_testdata.data_path("pylhe-testfile-pr29.lhe")
events = pylhe.read_lhe_with_attributes(lhe_file)
start_event = 1
stop_event = 2
filename = tmpdir.join(f"event{start_event}.pdf")
for idx, event in enumerate(itertools.islice(events, start_event, stop_event)):
pylhe.visualize(event, filename)
``` |
{
"source": "jhgorrell/jenkins-job-wrecker",
"score": 3
} |
#### File: jenkins-job-wrecker/jenkins_job_wrecker/helpers.py
```python
import xml.etree.ElementTree as ET
def get_bool(txt):
trues = ['true', 'True', 'Yes', 'yes', '1']
return txt in trues
def gen_raw(xml, parent):
raw = {}
raw['xml'] = ET.tostring(xml)
parent.append({'raw': raw})
``` |
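A tiny hypothetical example of the two helpers: `get_bool` normalises Jenkins-style boolean strings, and `gen_raw` serialises an XML element and appends it to a parent list as a `{'raw': {'xml': ...}}` entry.
```python
# Sketch: exercise get_bool and gen_raw on a hand-written XML snippet.
import xml.etree.ElementTree as ET

from jenkins_job_wrecker.helpers import gen_raw, get_bool

assert get_bool('true') and get_bool('Yes') and not get_bool('no')

node = ET.fromstring(
    '<hudson.tasks.Shell><command>make</command></hudson.tasks.Shell>'
)
parent = []
gen_raw(node, parent)
# parent == [{'raw': {'xml': b'<hudson.tasks.Shell>...</hudson.tasks.Shell>'}}]
print(parent[0]['raw']['xml'])
```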
{
"source": "jhh67/chapel",
"score": 3
} |
#### File: TOML/BurntSushi/test_compare.py
```python
import compare
import json
import os
def main():
json_files = [j for j in os.listdir('.') if j.endswith('.json')]
for json_file in json_files:
print('testing ', json_file)
with open(json_file, 'r') as handle:
json_data = json.load(handle)
json_string = json.dumps(json_data)
json_recycled = json.loads(json_string)
if compare.json_diff(json_data, json_recycled):
print('failed to parse json: ', json_file)
if __name__ == '__main__':
main()
```
#### File: studies/amr/regressions.py
```python
def approximatelyEqual(x,y,tolerance):
if x==y:
return True
try:
if abs(float(x) - float(y)) < tolerance:
return True
except:
return False
def approximateDiff(file_name_1, file_name_2, tolerance):
file_1 = open(file_name_1, 'r')
file_2 = open(file_name_2, 'r')
lines_1 = file_1.readlines()
lines_2 = file_2.readlines()
difference = False
any_difference = False
if not(len(lines_1) == len(lines_2)):
print "Files are of different length"
difference = True
any_difference = True
if not(difference):
for i in range(len(lines_1)):
s1 = lines_1[i].split();
s2 = lines_2[i].split();
difference = False
#==== Compare line lengths ====
if not(len(s1) == len(s2)):
print "Line ", i, ": number of elements differs"
print file_name_1, ": ", lines_1[i]
print file_name_2, ": ", lines_2[i]
difference = True
any_difference = True
#==== Compare individual elements ====
if not(difference):
for j in range(len(s1)):
difference = not(approximatelyEqual(s1[j],s2[j],tolerance))
if difference:
print "Line ", i, ", element ", j, ": elements differ"
print file_name_1, ": ", s1[j]
print file_name_2, ": ", s2[j]
return any_difference
import os
tolerance = 1e-4
base_path = os.getcwd()
advection_grid_dir = base_path + "/advection/grid"
os.chdir(advection_grid_dir)
os.system("make output")
output_file = "_output/fort.q0020"
regression_file = "regression_data/fort.q0020"
advection_grid_diff = approximateDiff(output_file, regression_file, tolerance)
os.system("make clean")
os.chdir(base_path)
advection_level_dir = base_path + "/advection/level"
os.chdir(advection_level_dir)
os.system("make output")
output_file = "_output/fort.q0020"
regression_file = "regression_data/fort.q0020"
advection_level_diff = approximateDiff(output_file, regression_file, tolerance)
os.system("make clean")
os.chdir(base_path)
advection_amr_dir = base_path + "/advection/amr"
os.chdir(advection_amr_dir)
os.system("make output")
output_file = "_output/fort.q0015"
regression_file = "regression_data/fort.q0015"
advection_amr_diff = approximateDiff(output_file, regression_file, tolerance)
os.system("make clean")
os.chdir(base_path)
diffusion_grid_dir = base_path + "/diffusion/grid"
os.chdir(diffusion_grid_dir)
os.system("make output")
output_file = "_output/fort.q0020"
regression_file = "regression_data/fort.q0020"
diffusion_grid_diff = approximateDiff(output_file, regression_file, tolerance)
os.system("make clean")
os.chdir(base_path)
diffusion_level_dir = base_path + "/diffusion/level"
os.chdir(diffusion_level_dir)
os.system("make output")
output_file = "_output/fort.q0025"
regression_file = "regression_data/fort.q0025"
diffusion_level_diff = approximateDiff(output_file, regression_file, tolerance)
os.system("make clean")
os.chdir(base_path)
if advection_grid_diff:
print "Warning: Change in advection/grid"
else:
print "No change in advection/grid"
if advection_level_diff:
print "Warning: Change in advection/level"
else:
print "No change in advection/level"
if advection_amr_diff:
print "Warning: Change in advection/amr"
else:
print "No change in advection/amr"
if diffusion_grid_diff:
print "Warning: Change in diffusion/grid"
else:
print "No change in diffusion/grid"
if diffusion_level_diff:
print "Warning: Change in diffusion/level"
else:
print "No change in diffusion/level"
```
#### File: utils/analyzer/entrypoint.py
```python
import argparse
import os
import sys
from subprocess import call, check_call, CalledProcessError
from time import sleep
from typing import List, Tuple
def main():
settings, rest = parse_arguments()
if settings.wait:
wait()
if settings.build_llvm or settings.build_llvm_only:
build_llvm()
if settings.build_llvm_only:
return
sys.exit(test(rest))
def wait():
# It is an easy on CPU way of keeping the docker container running
# while the user has a terminal session in that container.
while True:
sleep(3600)
def parse_arguments() -> Tuple[argparse.Namespace, List[str]]:
parser = argparse.ArgumentParser()
parser.add_argument('--wait', action='store_true')
parser.add_argument('--build-llvm', action='store_true')
parser.add_argument('--build-llvm-only', action='store_true')
return parser.parse_known_args()
def build_llvm():
os.chdir('/build')
try:
if is_cmake_needed():
cmake()
ninja()
except CalledProcessError:
print("Build failed!")
sys.exit(1)
def is_cmake_needed():
return "build.ninja" not in os.listdir()
CMAKE_COMMAND = "cmake -G Ninja -DCMAKE_BUILD_TYPE=Release " \
"-DCMAKE_INSTALL_PREFIX=/analyzer -DLLVM_TARGETS_TO_BUILD=X86 " \
"-DLLVM_ENABLE_PROJECTS=\"clang;openmp\" -DLLVM_BUILD_RUNTIME=OFF " \
"-DLLVM_ENABLE_TERMINFO=OFF -DCLANG_ENABLE_ARCMT=OFF " \
"-DCLANG_ENABLE_STATIC_ANALYZER=ON"
def cmake():
check_call(CMAKE_COMMAND + ' /llvm-project/llvm', shell=True)
def ninja():
check_call("ninja install", shell=True)
def test(args: List[str]) -> int:
os.chdir("/projects")
return call("/scripts/SATest.py " + " ".join(args), shell=True)
if __name__ == '__main__':
main()
```
#### File: scripts/llvm_checksum/llvm_checksum.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import logging
import re
import sys
from argparse import ArgumentParser
from project_tree import *
SVN_DATES_REGEX = re.compile(r"\$(Date|LastChangedDate)[^\$]+\$")
def main():
parser = ArgumentParser()
parser.add_argument(
"-v", "--verbose", action="store_true", help="enable debug logging")
parser.add_argument(
"-c",
"--check",
metavar="reference_file",
help="read checksums from reference_file and " +
"check they match checksums of llvm_path.")
parser.add_argument(
"--partial",
action="store_true",
help="ignore projects from reference_file " +
"that are not checked out in llvm_path.")
parser.add_argument(
"--multi_dir",
action="store_true",
help="indicates llvm_path contains llvm, checked out " +
"into multiple directories, as opposed to a " +
"typical single source tree checkout.")
parser.add_argument("llvm_path")
args = parser.parse_args()
if args.check is not None:
with open(args.check, "r") as f:
reference_checksums = ReadLLVMChecksums(f)
else:
reference_checksums = None
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
llvm_projects = CreateLLVMProjects(not args.multi_dir)
checksums = ComputeLLVMChecksums(args.llvm_path, llvm_projects)
if reference_checksums is None:
WriteLLVMChecksums(checksums, sys.stdout)
sys.exit(0)
if not ValidateChecksums(reference_checksums, checksums, args.partial):
sys.stdout.write("Checksums differ.\nNew checksums:\n")
WriteLLVMChecksums(checksums, sys.stdout)
sys.stdout.write("Reference checksums:\n")
WriteLLVMChecksums(reference_checksums, sys.stdout)
sys.exit(1)
else:
sys.stdout.write("Checksums match.")
def ComputeLLVMChecksums(root_path, projects):
"""Compute checksums for LLVM sources checked out using svn.
Args:
root_path: a directory of llvm checkout.
projects: a list of LLVMProject instances, which describe checkout paths,
relative to root_path.
Returns:
A dict mapping from project name to project checksum.
"""
hash_algo = hashlib.sha256
def collapse_svn_substitutions(contents):
# Replace svn substitutions for $Date$ and $LastChangedDate$.
# Unfortunately, these are locale-specific.
return SVN_DATES_REGEX.sub("$\1$", contents)
def read_and_collapse_svn_subsitutions(file_path):
with open(file_path, "rb") as f:
contents = f.read()
new_contents = collapse_svn_substitutions(contents)
if contents != new_contents:
logging.debug("Replaced svn keyword substitutions in %s", file_path)
logging.debug("\n\tBefore\n%s\n\tAfter\n%s", contents, new_contents)
return new_contents
project_checksums = dict()
# Hash each project.
for proj in projects:
project_root = os.path.join(root_path, proj.relpath)
if not os.path.exists(project_root):
logging.info("Folder %s doesn't exist, skipping project %s", proj.relpath,
proj.name)
continue
files = list()
def add_file_hash(file_path):
if os.path.islink(file_path) and not os.path.exists(file_path):
content = os.readlink(file_path)
else:
content = read_and_collapse_svn_subsitutions(file_path)
hasher = hash_algo()
hasher.update(content)
file_digest = hasher.hexdigest()
logging.debug("Checksum %s for file %s", file_digest, file_path)
files.append((file_path, file_digest))
logging.info("Computing checksum for %s", proj.name)
WalkProjectFiles(root_path, projects, proj, add_file_hash)
# Compute final checksum.
files.sort(key=lambda x: x[0])
hasher = hash_algo()
for file_path, file_digest in files:
file_path = os.path.relpath(file_path, project_root)
hasher.update(file_path)
hasher.update(file_digest)
project_checksums[proj.name] = hasher.hexdigest()
return project_checksums
def WriteLLVMChecksums(checksums, f):
"""Writes checksums to a text file.
Args:
checksums: a dict mapping from project name to project checksum (result of
ComputeLLVMChecksums).
f: a file object to write into.
"""
for proj in sorted(checksums.keys()):
f.write("{} {}\n".format(checksums[proj], proj))
def ReadLLVMChecksums(f):
"""Reads checksums from a text file, produced by WriteLLVMChecksums.
Returns:
A dict, mapping from project name to project checksum.
"""
checksums = {}
while True:
line = f.readline()
if line == "":
break
checksum, proj = line.split()
checksums[proj] = checksum
return checksums
def ValidateChecksums(reference_checksums,
new_checksums,
allow_missing_projects=False):
"""Validates that reference_checksums and new_checksums match.
Args:
reference_checksums: a dict of reference checksums, mapping from a project
name to a project checksum.
new_checksums: a dict of checksums to be checked, mapping from a project
name to a project checksum.
allow_missing_projects:
When True, reference_checksums may contain more projects than
new_checksums. Projects missing from new_checksums are ignored.
When False, new_checksums and reference_checksums must contain checksums
for the same set of projects. If there is a project in
reference_checksums, missing from new_checksums, ValidateChecksums
will return False.
Returns:
True, if checksums match with regards to allow_missing_projects flag value.
False, otherwise.
"""
if not allow_missing_projects:
if len(new_checksums) != len(reference_checksums):
return False
for proj, checksum in new_checksums.items():
# We never computed a checksum for this project.
if proj not in reference_checksums:
return False
# Checksum did not match.
if reference_checksums[proj] != checksum:
return False
return True
if __name__ == "__main__":
main()
```
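The read/write/validate helpers are plain functions over file objects, so they round-trip cleanly with in-memory buffers. The sketch below assumes the script is importable as a module named `llvm_checksum` (it also pulls in its `project_tree` sibling at import time); the checksum strings are dummies.
```python
# Sketch: round-trip the checksum file format and check validation behaviour.
import io

from llvm_checksum import (
    ReadLLVMChecksums,
    ValidateChecksums,
    WriteLLVMChecksums,
)

checksums = {"llvm": "a" * 64, "clang": "b" * 64}

buf = io.StringIO()
WriteLLVMChecksums(checksums, buf)
buf.seek(0)
assert ReadLLVMChecksums(buf) == checksums

# A partial set only validates when missing projects are explicitly allowed.
partial = {"llvm": "a" * 64}
assert not ValidateChecksums(checksums, partial)
assert ValidateChecksums(checksums, partial, allow_missing_projects=True)
```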
#### File: utils/git/pre-push.py
```python
import argparse
import collections
import os
import re
import shutil
import subprocess
import sys
import time
import getpass
from shlex import quote
VERBOSE = False
QUIET = False
dev_null_fd = None
z40 = '0000000000000000000000000000000000000000'
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def log(*args, **kwargs):
if QUIET:
return
print(*args, **kwargs)
def log_verbose(*args, **kwargs):
if not VERBOSE:
return
print(*args, **kwargs)
def die(msg):
eprint(msg)
sys.exit(1)
def ask_confirm(prompt):
while True:
query = input('%s (y/N): ' % (prompt))
if query.lower() not in ['y', 'n', '']:
print('Expect y or n!')
continue
return query.lower() == 'y'
def get_dev_null():
"""Lazily create a /dev/null fd for use in shell()"""
global dev_null_fd
if dev_null_fd is None:
dev_null_fd = open(os.devnull, 'w')
return dev_null_fd
def shell(cmd, strip=True, cwd=None, stdin=None, die_on_failure=True,
ignore_errors=False, text=True, print_raw_stderr=False):
# Escape args when logging for easy repro.
quoted_cmd = [quote(arg) for arg in cmd]
cwd_msg = ''
if cwd:
cwd_msg = ' in %s' % cwd
log_verbose('Running%s: %s' % (cwd_msg, ' '.join(quoted_cmd)))
err_pipe = subprocess.PIPE
if ignore_errors:
# Silence errors if requested.
err_pipe = get_dev_null()
start = time.time()
p = subprocess.Popen(cmd, cwd=cwd, stdout=subprocess.PIPE, stderr=err_pipe,
stdin=subprocess.PIPE,
universal_newlines=text)
stdout, stderr = p.communicate(input=stdin)
elapsed = time.time() - start
log_verbose('Command took %0.1fs' % elapsed)
if p.returncode == 0 or ignore_errors:
if stderr and not ignore_errors:
if not print_raw_stderr:
eprint('`%s` printed to stderr:' % ' '.join(quoted_cmd))
eprint(stderr.rstrip())
if strip:
if text:
stdout = stdout.rstrip('\r\n')
else:
stdout = stdout.rstrip(b'\r\n')
if VERBOSE:
for l in stdout.splitlines():
log_verbose('STDOUT: %s' % l)
return stdout
err_msg = '`%s` returned %s' % (' '.join(quoted_cmd), p.returncode)
eprint(err_msg)
if stderr:
eprint(stderr.rstrip())
if die_on_failure:
sys.exit(2)
raise RuntimeError(err_msg)
def git(*cmd, **kwargs):
return shell(['git'] + list(cmd), **kwargs)
def get_revs_to_push(range):
commits = git('rev-list', range).splitlines()
# Reverse the order so we print the oldest commit first
commits.reverse()
return commits
def handle_push(args, local_ref, local_sha, remote_ref, remote_sha):
'''Check a single push request (which can include multiple revisions)'''
log_verbose('Handle push, reproduce with '
'`echo %s %s %s %s | pre-push.py %s %s'
% (local_ref, local_sha, remote_ref, remote_sha, args.remote,
args.url))
# Handle request to delete
if local_sha == z40:
if not ask_confirm('Are you sure you want to delete "%s" on remote "%s"?' % (remote_ref, args.url)):
die("Aborting")
return
# Push a new branch
if remote_sha == z40:
if not ask_confirm('Are you sure you want to push a new branch/tag "%s" on remote "%s"?' % (remote_ref, args.url)):
die("Aborting")
range=local_sha
return
else:
# Update to existing branch, examine new commits
range='%s..%s' % (remote_sha, local_sha)
# Check that the remote commit exists, otherwise let git proceed
if "commit" not in git('cat-file','-t', remote_sha, ignore_errors=True):
return
revs = get_revs_to_push(range)
if not revs:
# This can happen if someone is force pushing an older revision to a branch
return
    # Print the commits about to be pushed
print('Pushing to "%s" on remote "%s"' % (remote_ref, args.url))
for sha in revs:
print(' - ' + git('show', '--oneline', '--quiet', sha))
if len(revs) > 1:
if not ask_confirm('Are you sure you want to push %d commits?' % len(revs)):
die('Aborting')
for sha in revs:
msg = git('log', '--format=%B', '-n1', sha)
if 'Differential Revision' not in msg:
continue
for line in msg.splitlines():
for tag in ['Summary', 'Reviewers', 'Subscribers', 'Tags']:
if line.startswith(tag + ':'):
eprint('Please remove arcanist tags from the commit message (found "%s" tag in %s)' % (tag, sha[:12]))
if len(revs) == 1:
eprint('Try running: llvm/utils/git/arcfilter.sh')
die('Aborting (force push by adding "--no-verify")')
return
if __name__ == '__main__':
if not shutil.which('git'):
die('error: cannot find git command')
argv = sys.argv[1:]
p = argparse.ArgumentParser(
prog='pre-push', formatter_class=argparse.RawDescriptionHelpFormatter,
description=__doc__)
verbosity_group = p.add_mutually_exclusive_group()
verbosity_group.add_argument('-q', '--quiet', action='store_true',
help='print less information')
verbosity_group.add_argument('-v', '--verbose', action='store_true',
help='print more information')
p.add_argument('remote', type=str, help='Name of the remote')
p.add_argument('url', type=str, help='URL for the remote')
args = p.parse_args(argv)
VERBOSE = args.verbose
QUIET = args.quiet
lines = sys.stdin.readlines()
sys.stdin = open('/dev/tty', 'r')
for line in lines:
local_ref, local_sha, remote_ref, remote_sha = line.split()
handle_push(args, local_ref, local_sha, remote_ref, remote_sha)
```
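For reference, git hands this hook one line per ref on stdin in the `<local ref> <local sha1> <remote ref> <remote sha1>` format parsed at the bottom of the script; a small self-contained illustration (with a made-up SHA) of the sentinel checks in `handle_push` looks like this:

```python
# Illustrative only: the all-zero SHA is the sentinel the hook tests against.
z40 = '0000000000000000000000000000000000000000'
sample = 'refs/heads/main %s refs/heads/main %s' % ('cafe' + '0' * 36, z40)

local_ref, local_sha, remote_ref, remote_sha = sample.split()
if local_sha == z40:
    print('deleting remote ref', remote_ref)
elif remote_sha == z40:
    print('pushing a new branch/tag to', remote_ref)        # this branch is taken here
else:
    print('examining range %s..%s' % (remote_sha, local_sha))
```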
#### File: lit/lit/LitTestCase.py
```python
import unittest
import lit.discovery
import lit.LitConfig
import lit.worker
"""
TestCase adaptor for providing a Python 'unittest' compatible interface to 'lit'
tests.
"""
class UnresolvedError(RuntimeError):
pass
class LitTestCase(unittest.TestCase):
def __init__(self, test, lit_config):
unittest.TestCase.__init__(self)
self._test = test
self._lit_config = lit_config
def id(self):
return self._test.getFullName()
def shortDescription(self):
return self._test.getFullName()
def runTest(self):
# Run the test.
result = lit.worker._execute(self._test, self._lit_config)
# Adapt the result to unittest.
if result.code is lit.Test.UNRESOLVED:
raise UnresolvedError(result.output)
elif result.code.isFailure:
self.fail(result.output)
def load_test_suite(inputs):
import platform
windows = platform.system() == 'Windows'
# Create the global config object.
lit_config = lit.LitConfig.LitConfig(
progname='lit',
path=[],
quiet=False,
useValgrind=False,
valgrindLeakCheck=False,
valgrindArgs=[],
noExecute=False,
debug=False,
isWindows=windows,
params={})
# Perform test discovery.
tests = lit.discovery.find_tests_for_inputs(lit_config, inputs, False)
test_adaptors = [LitTestCase(t, lit_config) for t in tests]
# Return a unittest test suite which just runs the tests in order.
return unittest.TestSuite(test_adaptors)
```
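A minimal sketch of driving these adaptors from the standard `unittest` runner; `some/test/dir` is a placeholder for a directory containing a lit test suite (i.e. one with a `lit.cfg`):

```python
import unittest
from lit.LitTestCase import load_test_suite

# Placeholder path: replace with a real lit test directory.
suite = load_test_suite(['some/test/dir'])

# Each discovered lit test runs as an ordinary unittest case.
unittest.TextTestRunner(verbosity=2).run(suite)
```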
#### File: re2-src/benchlog/benchplot.py
```python
import argparse # for ArgumentParser
import subprocess # for Popen
import tempfile # for NamedTemporaryFile
import os # for remove
class gnuplot(object):
output = "result.png"
script = """
set terminal png size 1024, 768
set output "{}.png"
set title "re2 benchlog"
set datafile separator ";"
set grid x y
set ylabel "MB/s"
set autoscale
plot """
template = """'{}' using 1:5:xticlabels(2) with linespoints linewidth 3 title "{}",\\\n"""
benchdata = dict()
tempfiles = []
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""
remove all temporary files
"""
for filename in self.tempfiles:
os.remove(filename)
def parse_re2_benchlog(self, filename):
"""
        parse the input benchlog and return a dictionary containing bench data
"""
benchdata = self.benchdata
with open(filename) as f:
for raw in f.readlines():
data = raw.split('\t')
if len(data) == 4:
data = data[0].split('/') + data[1:]
data = list(map(str.strip, data))
if not benchdata.get(data[0]):
benchdata[data[0]] = [ data[1:] ]
else:
benchdata[data[0]].append(data[1:])
def gen_csv(self):
"""
generate temporary csv files
"""
for name, data in self.benchdata.items():
with tempfile.NamedTemporaryFile(delete=False) as f:
for index, line in enumerate(data):
f.write('{};{}\n'.format(index, ';'.join(line)).encode())
self.tempfiles.append(f.name)
self.script = self.script + self.template.format(f.name, name)
def run(self):
self.gen_csv()
script = self.script[:-3].format(self.output)
command = subprocess.Popen(['gnuplot'], stdin=subprocess.PIPE)
command.communicate(script.encode())
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='generate plots for benchlog')
parser.add_argument('benchlog', type=str, help='benchlog generated by re2')
args = parser.parse_args()
try:
subprocess.Popen(['gnuplot'], stdin=subprocess.PIPE)
except FileNotFoundError:
print('you can install "gnuplot" to generate plots automatically')
exit(1)
with gnuplot() as plot:
plot.output = args.benchlog
plot.parse_re2_benchlog(args.benchlog)
plot.run()
```
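The parser above assumes tab-separated benchlog lines whose first field is `name/size`; a tiny self-contained check of that grouping logic, with fabricated numbers, is:

```python
# Fabricated benchlog-style lines: name/size, iterations, ns/op, MB/s (tab-separated).
lines = [
    'Search_Easy0_CachedRE2/8\t5000000\t120 ns/op\t66.7 MB/s',
    'Search_Easy0_CachedRE2/16\t2000000\t180 ns/op\t88.9 MB/s',
]

benchdata = {}
for raw in lines:
    data = raw.split('\t')
    if len(data) == 4:
        data = data[0].split('/') + data[1:]          # split "name/size" into two fields
        data = list(map(str.strip, data))
        benchdata.setdefault(data[0], []).append(data[1:])

print(benchdata)
# {'Search_Easy0_CachedRE2': [['8', '5000000', '120 ns/op', '66.7 MB/s'],
#                             ['16', '2000000', '180 ns/op', '88.9 MB/s']]}
```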
#### File: util/chplenv/chpl_comm.py
```python
import sys
import chpl_platform, overrides
from utils import memoize
@memoize
def get():
comm_val = overrides.get('CHPL_COMM')
if not comm_val:
platform_val = chpl_platform.get('target')
# Use ugni on cray-xc series
if platform_val == 'cray-xc':
comm_val = 'ugni'
# Use ofi on hpe-cray-ex
elif platform_val == 'hpe-cray-ex':
comm_val = 'ofi'
# Use gasnet on cray-cs and hpe-apollo
elif platform_val in ('cray-cs', 'hpe-apollo'):
comm_val = 'gasnet'
else:
comm_val = 'none'
return comm_val
def _main():
comm_val = get()
sys.stdout.write("{0}\n".format(comm_val))
if __name__ == '__main__':
_main()
```
#### File: util/chplenv/chpl_compiler.py
```python
import optparse
import os
import sys
from distutils.spawn import find_executable
import chpl_platform, chpl_locale_model, overrides
from utils import error, memoize, warning
#
# If we can't find a file $CHPL_HOME/make/Makefile.<compiler_val>,
# that suggests that this is a compiler that we're not familiar with.
# In practice, this will cause our Makefiles to use defaults like CC
# and CXX to compile things, for better or worse.
#
@memoize
def validate_compiler(compiler_val):
if compiler_val != 'llvm':
import chpl_home_utils
chpl_home = chpl_home_utils.get_chpl_home()
comp_makefile = os.path.join(chpl_home, 'make', 'compiler', 'Makefile.{0}'.format(compiler_val))
if not os.path.isfile(comp_makefile):
warning('Unknown compiler: "{0}"'.format(compiler_val))
@memoize
def get_prgenv_compiler():
platform_val = chpl_platform.get('target')
if platform_val.startswith('cray-x') or platform_val == 'hpe-cray-ex':
subcompiler = os.environ.get('PE_ENV', 'none')
if subcompiler != 'none':
return "cray-prgenv-{0}".format(subcompiler.lower())
else:
warning("Compiling on {0} without a PrgEnv loaded".format(platform_val))
return 'none'
# Don't use CC / CXX to set other variables if any of
# CHPL_HOST_COMPILER
# CHPL_HOST_CC
# CHPL_HOST_CXX
# CHPL_TARGET_COMPILER
# CHPL_TARGET_CC
# CHPL_TARGET_CXX
# are overridden by the user (in config file or env vars).
#
# Additionally, for the target compiler, don't use CC / CXX
# if we would like to default to LLVM.
@memoize
def should_consider_cc_cxx(flag):
default_llvm = default_to_llvm(flag)
if default_llvm:
return False
if (overrides.get('CHPL_HOST_COMPILER') != None or
overrides.get('CHPL_HOST_CC') != None or
overrides.get('CHPL_HOST_CXX') != None or
overrides.get('CHPL_TARGET_COMPILER') != None or
overrides.get('CHPL_TARGET_CC') != None or
overrides.get('CHPL_TARGET_CXX') != None):
# A compilation configuration setting was adjusted,
# so require CHPL_HOST_CC etc rather than using CC
return False
if flag == 'target' and get_prgenv_compiler() != 'none':
# On an XC etc with a PrgEnv compiler,
# setting CC/CXX should only impact the host compiler.
return False
return True
# Figures out the compiler family (e.g. gnu) from the CC/CXX environment variables
# Returns '' if CC / CXX are not set and 'unknown' if they are set
# to something too complex.
@memoize
def get_compiler_from_cc_cxx():
cc_compiler = 'unknown'
cxx_compiler = 'unknown'
warn = False
compiler_val = 'unknown'
cc_val = overrides.get('CC', '')
cxx_val = overrides.get('CXX', '')
if cc_val == '' and cxx_val == '':
return ''
if cc_val:
cc_compiler = get_compiler_from_command(cc_val)
if cxx_val:
cxx_compiler = get_compiler_from_command(cxx_val)
if cc_val and cxx_val:
if cc_compiler == cxx_compiler:
compiler_val = cc_compiler
else:
error("Conflicting compiler families for CC and CXX settings\n"
" {0} -> {1}\n"
" {2} -> {3}\n"
"Set CHPL_HOST_COMPILER and CHPL_TARGET_COMPILER to the "
"desired compiler family".format(cc_val, cc_compiler,
cxx_val, cxx_compiler))
compiler_val = 'unknown'
else:
# if we get here, CC or CXX is provided, but not both.
# Usually we warn in that case.
# Check to see if the command name matches the default
# for the compiler family.
# In that event, omit the warning.
if cc_val:
compiler_val = cc_compiler
warn = (get_compiler_name_c(compiler_val) != cc_val)
if cxx_val:
compiler_val = cxx_compiler
warn = (get_compiler_name_cxx(compiler_val) != cxx_val)
if compiler_val == 'unknown':
error("Could not infer CHPL_TARGET_COMPILER from "
"CC={0} CXX={1}".format(cc_val, cxx_val));
else:
if warn and cc_val:
error('CC is set but not CXX -- please set both\n')
if warn and cxx_val:
error('CXX is set but not CC -- please set both\n')
return compiler_val
# Returns True if the compiler defaults to LLVM
def default_to_llvm(flag):
ret = False
if flag == 'target':
import chpl_llvm
has_llvm = chpl_llvm.get()
if has_llvm == 'bundled' or has_llvm == 'system':
# Default to CHPL_TARGET_COMPILER=llvm when CHPL_LLVM!=none
ret = True
return ret
@memoize
def get(flag='host'):
if flag == 'host':
compiler_val = overrides.get('CHPL_HOST_COMPILER', '')
elif flag == 'target':
compiler_val = overrides.get('CHPL_TARGET_COMPILER', '')
else:
error("Invalid flag: '{0}'".format(flag), ValueError)
default_llvm = False
if not compiler_val:
default_llvm = default_to_llvm(flag)
# If allowable, look at CC/CXX
if should_consider_cc_cxx(flag):
compiler_val = get_compiler_from_cc_cxx()
if compiler_val:
validate_compiler(compiler_val)
return compiler_val
prgenv_compiler = get_prgenv_compiler()
if default_llvm:
compiler_val = 'llvm'
elif prgenv_compiler != 'none':
# The cray platforms are a special case in that we want to
# "cross-compile" by default. (the compiler is different between host
# and target, but the platform is the same).
if flag == 'host':
compiler_val = 'gnu'
else:
compiler_val = prgenv_compiler
else:
platform_val = chpl_platform.get(flag)
locale_model_val = chpl_locale_model.get()
# Normal compilation (not "cross-compiling")
# inherit the host compiler if the target compiler is not set and
# the host and target platforms are the same
if flag == 'target':
if chpl_platform.get('host') == platform_val:
compiler_val = get('host')
elif platform_val.startswith('pwr'):
compiler_val = 'ibm'
elif platform_val == 'darwin' or platform_val == 'freebsd':
if find_executable('clang'):
compiler_val = 'clang'
else:
compiler_val = 'gnu'
elif locale_model_val == 'gpu':
if find_executable('clang'):
compiler_val = 'clang'
else:
error("clang not found. The 'gpu' locale model is supported "
"with clang only.")
else:
compiler_val = 'gnu'
validate_compiler(compiler_val)
return compiler_val
@memoize
def get_path_component(flag='host'):
val = get(flag=flag)
if val == 'clang' and flag == 'target':
import chpl_llvm
has_llvm = chpl_llvm.get()
if has_llvm == 'bundled':
# selecting the included clang - distinguish that
# with 'llvm' in the path component.
val = 'llvm'
    return val
# This array consists of tuples of
# ( family-name, c-compilation-command-name, c++compilation-command-name)
# where family-name corresponds to CHPL_TARGET_COMPILER etc settings e.g. gnu.
# This table only includes the cases where it is reasonable to
# infer the family from the command name.
COMPILERS = [ ('gnu', 'gcc', 'g++'),
('clang', 'clang', 'clang++'),
('ibm', 'xlc', 'xlC'),
('intel', 'icc', 'icpc'),
('pgi', 'pgicc', 'pgc++') ]
# given a compiler command string, (e.g. "gcc" or "/path/to/clang++"),
# figure out the compiler family (e.g. gnu or clang),
# and the C and C++ variants of that command
def get_compiler_from_command(command):
# the following adjustments are to handle a command like
# /path/to/gcc-10 --some-option
# where we are looking for just the 'gcc' part.
first = command.split()[0]
basename = os.path.basename(first)
name = basename.split('-')[0].strip()
for tup in COMPILERS:
if name == tup[1] or name == tup[2]:
return tup[0]
# if it was not one of the above cases we don't know how to
# go from the command name to the compiler family.
# E.g. cc/CC/mpicc could be many compilers.
#
# We could consider trying to run it to figure it out.
return 'unknown'
def get_compiler_name_c(compiler):
for tup in COMPILERS:
if compiler == tup[0]:
return tup[1]
# handle special cases not in the COMPILERS table
if compiler_is_prgenv(compiler):
return 'cc'
elif compiler in ['llvm', 'allinea']:
return 'clang'
elif compiler == 'mpi-gnu':
return 'mpicc'
elif 'gnu' in compiler:
return 'gcc'
return 'unknown-c-compiler'
def get_compiler_name_cxx(compiler):
for tup in COMPILERS:
if compiler == tup[0]:
return tup[2]
# handle special cases not in the COMPILERS table
if compiler_is_prgenv(compiler):
return 'CC'
elif compiler in ['llvm', 'allinea']:
return 'clang++'
elif compiler == 'mpi-gnu':
return 'mpicxx'
elif 'gnu' in compiler:
return 'g++'
return 'unknown-c++-compiler'
def compiler_is_prgenv(compiler_val):
return compiler_val.startswith('cray-prgenv')
# flag should be host or target
# lang should be c or cxx (aka c++)
# this function returns an array of arguments
# e.g. ['clang', '--gcc-toolchain=/usr']
@memoize
def get_compiler_command(flag, lang):
flag_upper = flag.upper()
lang_upper = lang.upper()
if lang_upper == 'C++':
lang_upper = 'CXX'
elif lang_upper == 'C':
lang_upper = 'CC'
if flag_upper == 'HOST' or flag_upper == 'TARGET':
pass
else:
error('unknown flag {0}'.format(flag))
if lang_upper == 'CC' or lang_upper == 'CXX':
pass
else:
error('unknown lang {0}'.format(lang))
# construct CHPL_HOST_CC / CHPL_TARGET_CXX etc
varname = 'CHPL_' + flag_upper + '_' + lang_upper
command = overrides.get(varname, '');
if command:
return command.split()
compiler_val = get(flag=flag)
# If other settings allow it, look also at CC/CXX.
if should_consider_cc_cxx(flag):
cc_cxx_val = overrides.get(lang_upper, '')
if cc_cxx_val:
return cc_cxx_val.split()
if lang_upper == 'CC':
command = [get_compiler_name_c(compiler_val)]
elif lang_upper == 'CXX':
command = [get_compiler_name_cxx(compiler_val)]
# Adjust the path in two situations:
# CHPL_TARGET_COMPILER=llvm -- means use the selected llvm/clang
# CHPL_TARGET_COMPILER=clang with CHPL_LLVM=bundled -- use bundled clang
if compiler_val == 'clang' or compiler_val == 'llvm':
import chpl_llvm
llvm_val = chpl_llvm.get()
if llvm_val == 'none' and compiler_val == 'llvm':
error("Cannot use CHPL_TARGET_COMPILER=llvm when CHPL_LLVM=none")
if llvm_val == 'bundled' or compiler_val == 'llvm':
if (flag == 'host' and
llvm_val == 'bundled' and
compiler_val == 'clang'):
# don't change the prefix in this setting
# (bundled LLVM might not be built yet)
pass
else:
command = chpl_llvm.get_llvm_clang(lang_upper)
return command
# Returns any -I options needed to find bundled headers
#
# Can include other compiler args but *needs to work both
# for C and C++ compilation*.
#
# flag should be host or target.
# returns a Python list of -I flags
@memoize
def get_bundled_compile_args(flag):
paths = [ ]
# TODO - port over third-party arg gathering
return paths
# Returns any -I options needed for this compiler / system
# to find headers
#
# Can include other compiler args but *needs to work both
# for C and C++ compilation*.
#
# flag should be host or target.
# returns a Python list of -I flags
@memoize
def get_system_compile_args(flag):
platform_val = chpl_platform.get(flag)
compiler_val = get(flag)
paths = [ ]
# For PrgEnv compilation with LLVM, gather arguments from PrgEnv driver
if compiler_val == 'llvm' and flag == 'target':
import chpl_llvm
(comp_args, link_args) = chpl_llvm.get_clang_prgenv_args()
paths.extend(comp_args)
# FreeBSD uses /usr/local but compilers don't search there by default
if platform_val == 'freebsd':
paths.append('-I/usr/local/include')
# Add Homebrew include directory if Homebrew is installed
homebrew_prefix = chpl_platform.get_homebrew_prefix()
if homebrew_prefix:
paths.append('-I' + homebrew_prefix + '/include')
return paths
# Returns any -L options needed to find bundled libraries
#
# Can include other link args but *needs to work both
# for C and C++ compilation*.
#
# flag should be host or target.
# returns a Python list of -L flags
@memoize
def get_bundled_link_args(flag):
paths = [ ]
# TODO - port over third-party arg gathering
return paths
# Returns any -L options needed for this compiler / system
# to find libraries
#
# Can include other link args but *needs to work both
# for C and C++ compilation*.
#
# flag should be host or target.
# returns a Python list of -L flags
@memoize
def get_system_link_args(flag):
platform_val = chpl_platform.get(flag)
compiler_val = get(flag)
paths = [ ]
# For PrgEnv compilation with LLVM, gather arguments from PrgEnv driver
if compiler_val == 'llvm' and flag == 'target':
import chpl_llvm
(comp_args, link_args) = chpl_llvm.get_clang_prgenv_args()
paths.extend(link_args)
# FreeBSD uses /usr/local but compilers don't search there by default
if platform_val == 'freebsd':
paths.append('-L/usr/local/lib')
# Add Homebrew lib directory if Homebrew is installed
homebrew_prefix = chpl_platform.get_homebrew_prefix()
if homebrew_prefix:
paths.append('-L' + homebrew_prefix + '/lib')
return paths
def validate_inference_matches(flag, lang):
flag_upper = flag.upper()
lang_upper = lang.upper()
if lang_upper == 'C++':
lang_upper = 'CXX'
elif lang_upper == 'C':
lang_upper = 'CC'
compiler = get(flag)
cmd = get_compiler_command(flag, lang)
inferred = get_compiler_from_command(cmd[0])
if (inferred != 'unknown' and
inferred != compiler and
not (compiler == 'llvm' and inferred == 'clang')):
error("Conflicting compiler families: "
"CHPL_{0}_COMPILER={1} but CHPL_{0}_{2}={3} but has family {4}"
.format(flag_upper, compiler, lang_upper, cmd, inferred))
return False
return True
# Issue an error if, after all the various inferences are done,
# CHPL_HOST_CC / CXX is inconsistent with CHPL_HOST_COMPILER
# and similarly for TARGET variants.
@memoize
def validate_compiler_settings():
validate_inference_matches('host', 'c')
validate_inference_matches('host', 'c++')
validate_inference_matches('target', 'c')
validate_inference_matches('target', 'c++')
def _main():
parser = optparse.OptionParser(usage='usage: %prog [--host|target])')
parser.add_option('--host', dest='flag', action='store_const',
const='host', default='host')
parser.add_option('--target', dest='flag', action='store_const',
const='target')
(options, args) = parser.parse_args()
compiler_val = get(options.flag)
sys.stdout.write("{0}\n".format(compiler_val))
if __name__ == '__main__':
_main()
```
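A small sketch of the command-name inference above, assuming `util/chplenv` is on `PYTHONPATH` so `chpl_compiler` imports cleanly; only the family lookup is exercised, since it does not consult the Chapel environment:

```python
from chpl_compiler import get_compiler_from_command

# The path and any version suffix are stripped before the family lookup.
print(get_compiler_from_command('/usr/bin/gcc-10 -O2'))  # gnu
print(get_compiler_from_command('clang++'))              # clang
# Generic wrappers cannot be classified and fall through to 'unknown'.
print(get_compiler_from_command('mpicc'))                # unknown
```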
#### File: util/chplenv/chpl_home_utils.py
```python
import optparse
import os
import re
import sys
import chpl_bin_subdir, chpl_python_version, overrides
from utils import memoize
@memoize
def get_chpl_home():
chpl_home = overrides.get('CHPL_HOME', '')
if not chpl_home:
dirname = os.path.dirname
chpl_home = dirname(dirname(dirname(os.path.realpath(__file__))))
return chpl_home
@memoize
def get_chpl_runtime_incl():
default = os.path.join(get_chpl_home(), 'runtime', 'include')
chpl_runtime_incl = overrides.get('CHPL_RUNTIME_INCL', default)
return chpl_runtime_incl
@memoize
def get_chpl_runtime_lib():
default = os.path.join(get_chpl_home(), 'lib')
chpl_runtime_lib = overrides.get('CHPL_RUNTIME_LIB', default)
return chpl_runtime_lib
@memoize
def get_chpl_third_party():
default = os.path.join(get_chpl_home(), 'third-party')
chpl_third_party = overrides.get('CHPL_THIRD_PARTY', default)
return chpl_third_party
# Fix paths in the passed string according to the provided list of pairs
# in tofix
def add_vars_to_paths_helper(s, tofix):
lines = s.splitlines()
output = [ ]
for line in lines:
# Find things that look like absolute paths
# Note that this needs to handle e.g. -I/some/directory
# (and not think it's a relative path starting with I)
pattern = r'/[^ ]+'
fixed = line
for m in re.findall(pattern, line):
origpath = m
path = os.path.realpath(origpath)
if os.path.isfile(path):
# Leave out the filename, since we're only trying to replace
# directories. This prevents clang++ from being replaced
# with e.g. clang-6.0 when clang++ is a symbolic link.
origpath = os.path.dirname(origpath)
path = os.path.dirname(path)
for kv in tofix:
key = kv[0]
val = kv[1];
if path.startswith(val):
rel = os.path.relpath(path, val)
fixed = fixed.replace(origpath, key + "/" + rel)
break
output.append(fixed)
return "\n".join(output)
# Fix paths in the passed string to use $CHPL_HOME, $CHPL_THIRD_PARTY, etc
def add_vars_to_paths(s):
tofix = [ ]
tofix.append( ['$CHPL_RUNTIME_LIB', get_chpl_runtime_lib()] )
tofix.append( ['$CHPL_RUNTIME_INCL', get_chpl_runtime_incl()] )
tofix.append( ['$CHPL_THIRD_PARTY', get_chpl_third_party()] )
tofix.append( ['$CHPL_HOME', get_chpl_home()] )
return add_vars_to_paths_helper(s, tofix)
# Get the chpl-venv install directory:
# $CHPL_HOME/third-party/chpl-venv/install/chpldeps
@memoize
def get_chpldeps():
chpl_venv = os.path.join(get_chpl_third_party(), 'chpl-venv',
'install', 'chpldeps')
return chpl_venv
@memoize
def using_chapel_module():
chpl_home = overrides.get('CHPL_HOME', None)
chpl_module_home = os.environ.get('CHPL_MODULE_HOME', None)
if chpl_home and chpl_module_home:
return os.path.normpath(chpl_home) == os.path.normpath(chpl_module_home)
return False
def _main():
parser = optparse.OptionParser()
parser.add_option('--home', action='store_const',
dest='func', const=get_chpl_home)
parser.add_option('--third-party', action='store_const',
dest='func', const=get_chpl_third_party)
parser.add_option('--chpldeps', action='store_const',
dest='func', const=get_chpldeps)
parser.add_option('--using-module', action='store_const',
dest='func', const=using_chapel_module)
(options, args) = parser.parse_args()
if options.func:
sys.stdout.write("{0}\n".format(options.func()))
if __name__ == '__main__':
_main()
```
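An illustrative run of the path-rewriting helper, again assuming `util/chplenv` is importable; the install prefix is made up, and only `add_vars_to_paths_helper` is called so no Chapel environment is consulted:

```python
from chpl_home_utils import add_vars_to_paths_helper

# Hypothetical install prefix; absolute paths under it are rewritten to use the variable.
tofix = [['$CHPL_HOME', '/opt/chapel-1.30']]
flags = '-I/opt/chapel-1.30/runtime/include -L/opt/chapel-1.30/lib'

print(add_vars_to_paths_helper(flags, tofix))
# -I$CHPL_HOME/runtime/include -L$CHPL_HOME/lib
```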
#### File: util/chplenv/chpl_launcher.py
```python
from distutils.spawn import find_executable
import sys
import chpl_comm, chpl_comm_substrate, chpl_platform, overrides
from utils import error, memoize, warning
def slurm_prefix(base_launcher, platform_val):
""" If salloc is available and we're on a cray-cs/hpe-apollo, prefix with slurm-"""
if platform_val in ('cray-cs', 'hpe-apollo') and find_executable('salloc'):
return 'slurm-{}'.format(base_launcher)
return base_launcher
@memoize
def get():
launcher_val = overrides.get('CHPL_LAUNCHER')
comm_val = chpl_comm.get()
substrate_val = chpl_comm_substrate.get()
if comm_val == 'gasnet' and substrate_val == 'udp':
if not launcher_val:
launcher_val = 'amudprun'
elif launcher_val not in ('none', 'amudprun'):
error('CHPL_LAUNCHER={} is not supported for CHPL_COMM=gasnet '
'CHPL_COMM_SUBSTRATE=udp, CHPL_LAUNCHER=amudprun is '
'required'.format(launcher_val))
if not launcher_val:
platform_val = chpl_platform.get('target')
if platform_val.startswith('cray-x') or platform_val.startswith('hpe-cray-'):
has_aprun = find_executable('aprun')
has_slurm = find_executable('srun')
if has_aprun and has_slurm:
launcher_val = 'none'
elif has_aprun:
launcher_val = 'aprun'
elif has_slurm:
launcher_val = 'slurm-srun'
else:
# FIXME: Need to detect aprun/srun differently. On a cray
# system with an eslogin node, it is possible that aprun
# will not be available on the eslogin node (only on the
# login node).
#
# has_aprun and has_slurm should look other places
# (maybe the modules?) to decide.
# (thomasvandoren, 2014-08-12)
warning('Cannot detect launcher on this system. Please '
'set CHPL_LAUNCHER in the environment.')
elif comm_val == 'gasnet':
if substrate_val == 'smp':
launcher_val = 'smp'
elif substrate_val == 'mpi':
launcher_val = slurm_prefix('gasnetrun_mpi', platform_val)
elif substrate_val == 'ibv':
launcher_val = slurm_prefix('gasnetrun_ibv', platform_val)
elif substrate_val == 'ucx':
launcher_val = slurm_prefix('gasnetrun_ucx', platform_val)
elif substrate_val == 'ofi':
launcher_val = slurm_prefix('gasnetrun_ofi', platform_val)
else:
if platform_val in ('cray-cs', 'hpe-apollo') and find_executable('srun'):
launcher_val = 'slurm-srun'
else:
launcher_val = 'none'
if launcher_val is None:
launcher_val = 'none'
return launcher_val
def _main():
launcher_val = get()
sys.stdout.write("{0}\n".format(launcher_val))
if __name__ == '__main__':
_main()
```
#### File: util/chplenv/chpl_libfabric.py
```python
import sys
import glob
import os
import chpl_comm, chpl_comm_debug, chpl_launcher, chpl_platform
import overrides, third_party_utils
from utils import error, memoize, try_run_command, warning
@memoize
def get():
comm_val = chpl_comm.get()
if comm_val == 'ofi':
libfabric_val = overrides.get('CHPL_LIBFABRIC')
platform_val = chpl_platform.get('target')
if not libfabric_val:
cmd_exists, returncode = try_run_command(['pkg-config',
'--exists',
'libfabric'])[0:2]
if cmd_exists and returncode == 0:
libfabric_val = 'system'
else:
libfabric_val = 'bundled'
if libfabric_val == 'none':
error("CHPL_LIBFABRIC must not be 'none' when CHPL_COMM is ofi")
if platform_val == 'hpe-cray-ex' and libfabric_val != 'system':
warning('CHPL_LIBFABRIC!=system is discouraged on HPE Cray EX')
else:
libfabric_val = 'none'
return libfabric_val
@memoize
def get_uniq_cfg_path():
base_uniq_cfg = third_party_utils.default_uniq_cfg_path()
if chpl_comm_debug.get() == 'debug':
suffix = '-debug'
else:
suffix = ''
return base_uniq_cfg + suffix
# returns 2-tuple of lists
# (compiler_bundled_args, compiler_system_args)
@memoize
def get_compile_args():
args = ([ ], [ ])
libfabric_val = get()
if libfabric_val == 'bundled':
ucp_val = get_uniq_cfg_path()
args = third_party_utils.get_bundled_compile_args('libfabric',
ucp=ucp_val)
elif libfabric_val == 'system':
flags = [ ]
# Allow overriding pkg-config via LIBFABRIC_DIR, for platforms
# without pkg-config.
libfab_dir_val = overrides.get('LIBFABRIC_DIR')
if libfab_dir_val:
args[1].append('-I' + libfab_dir_val + '/include')
else:
# Try using pkg-config to get the compile-time flags.
x = third_party_utils.pkgconfig_get_system_compile_args('libfabric')
args = x
if libfabric_val == 'system' or libfabric_val == 'bundled':
flags = [ ]
launcher_val = chpl_launcher.get()
ofi_oob_val = overrides.get_environ('CHPL_COMM_OFI_OOB')
if 'mpi' in launcher_val or ( ofi_oob_val and 'mpi' in ofi_oob_val ):
mpi_dir_val = overrides.get_environ('MPI_DIR')
if mpi_dir_val:
flags.append('-I' + mpi_dir_val + '/include')
args[1].extend(flags)
return args
# returns 2-tuple of lists
# (linker_bundled_args, linker_system_args)
@memoize
def get_link_args():
args = ([ ], [ ])
libfabric_val = get()
if libfabric_val == 'bundled':
args = third_party_utils.get_bundled_link_args('libfabric',
ucp=get_uniq_cfg_path(),
libs=['libfabric.la'])
elif libfabric_val == 'system':
libs = [ ]
# Allow overriding pkg-config via LIBFABRIC_DIR, for platforms
# without pkg-config.
libfab_dir_val = overrides.get('LIBFABRIC_DIR')
if libfab_dir_val:
libs.extend(['-L' + libfab_dir_val + '/lib',
'-Wl,-rpath,' + libfab_dir_val + '/lib',
'-lfabric'])
else:
# Try using pkg-config to get the libraries to link
# libfabric with.
tup = third_party_utils.pkgconfig_get_system_link_args('libfabric')
# put the two lists together (but expect tup[0] to be empty)
pclibs = tup[0] + tup[1]
# add -Wl,-rpath for the -L options
# this was a workaround and is probably not needed anymore
for pcl in pclibs:
libs.append(pcl)
if pcl.startswith('-L'):
libs.append(pcl.replace('-L', '-Wl,-rpath,', 1))
        args[1].extend(libs)
if libfabric_val == 'system' or libfabric_val == 'bundled':
libs = [ ]
launcher_val = chpl_launcher.get()
ofi_oob_val = overrides.get_environ('CHPL_COMM_OFI_OOB')
if 'mpi' in launcher_val or ( ofi_oob_val and 'mpi' in ofi_oob_val ):
mpi_dir_val = overrides.get_environ('MPI_DIR')
if mpi_dir_val:
mpi_lib_dir = os.path.join(mpi_dir_val, 'lib64')
if not os.path.exists(mpi_lib_dir):
mpi_lib_dir = os.path.join(mpi_dir_val, 'lib')
if not os.path.exists(mpi_lib_dir):
mpi_lib_dir = None
if mpi_lib_dir:
libs.append('-L' + mpi_lib_dir)
libs.append('-Wl,-rpath,' + mpi_lib_dir)
mpi_lib_name = 'mpi'
if glob.glob(mpi_lib_dir + '/libmpich.*'):
mpi_lib_name = 'mpich'
libs.append('-l' + mpi_lib_name)
# If we're using the PMI2 out-of-band support we have to reference
# libpmi2 explicitly, except on Cray XC systems.
platform_val = chpl_platform.get('target')
if platform_val == 'hpe-cray-ex' or ofi_oob_val == 'pmi2':
libs.append('-lpmi2')
args[1].extend(libs)
return args
def _main():
libfabric_val = get()
sys.stdout.write("{0}\n".format(libfabric_val))
if __name__ == '__main__':
_main()
```
#### File: util/devel/look_for_calls.py
```python
import contextlib
import fnmatch
import optparse
import os
import shutil
import subprocess
import sys
import tempfile
def log_error(msg, fatal=False):
"""Log an error, exiting if fatal"""
sys.stdout.write('Error: {0}\n'.format(msg))
if fatal:
sys.exit(2)
def run_command(cmd):
"""Simple subprocess wrapper. Similar to subprocess.check_output, but that
is only available after Python 2.7, and we still support 2.6 :("""
try:
process = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
log_error('command not found: "{0}"'.format(cmd[0]), fatal=True)
output = process.communicate()
output = (output[0].decode(), output[1].decode())
if process.returncode != 0:
msg = 'command failed: "{0}"\nstderr was:\n{1}'.format(cmd, output[1])
log_error(msg, fatal=True)
else:
return output[0]
def find_files(search_dir, extensions):
"""Return a list of absolute paths to files with any of the provided
extensions in the search_dir."""
source_files = []
for root, _, filenames in os.walk(search_dir):
for ext in extensions:
for filename in fnmatch.filter(filenames, '*.{0}'.format(ext)):
source_files.append(os.path.join(root, filename))
return source_files
def find_source_files(search_dir):
"""Return a list of absolute paths to any C/C++ source files in
the search_dir"""
cpp_sources = ['cc', 'cp', 'cxx', 'cpp', 'CPP', 'c++', 'C']
cpp_headers = ['hh', 'H', 'hp', 'hxx', 'hpp', 'HPP', 'h++', 'tcc']
c_sources = ['c']
c_headers = ['h']
code_file_exts = set(cpp_sources + cpp_headers + c_sources + c_headers)
return find_files(search_dir, code_file_exts)
def build_cscope_ref(src_files, cscope_database):
"""Build a cscope cross-reference for crazy fast searching of src_files"""
# -bu -- build cross ref only, wipe any old cross refs
# -q -- enable faster symbol lookup
# -k -- turns off default include dir (don't include/parse system files
# since we only care if our files call the system allocator)
db_name = '-f{0}'.format(cscope_database)
cscope_cmd = ['cscope', '-bu', '-q', '-k', db_name] + src_files
run_command(cscope_cmd)
def cscope_find_calls(func_call, cscope_database):
"""Run cscope searching for calls to func_call"""
# -d -- don't rebuild the cross-reference, use the one we already built
# -L3 -- line based "Find functions calling this function"
db_name = '-f{0}'.format(cscope_database)
cscope_cmd = ['cscope', '-d', '-L3{0}'.format(func_call), db_name]
return run_command(cscope_cmd)
@contextlib.contextmanager
def get_tmp_dir():
"""Contextlib for a temp dir. Would use tempfile.TemporaryDirectory(), but
that's python >= 3.2"""
try:
tmp_dir = tempfile.mkdtemp()
yield tmp_dir
finally:
shutil.rmtree(tmp_dir)
def check_for_calls(functions, search_dir, exclude_paths=None, rel_paths=True):
"""Check source files in search_dir for calls to functions. Don't check
files that contain any of the exclude_paths. Report files relative to
search_dir/../ if rel_paths is True, otherwise use absolute paths.
    Returns True if calls were found, False otherwise"""
rel_dir = ''
if rel_paths:
rel_dir = os.path.abspath(os.path.join(search_dir, '..')) + os.path.sep
src_files = find_source_files(search_dir)
if exclude_paths:
for exclude_path in exclude_paths:
src_files = [s for s in src_files if exclude_path not in s]
with get_tmp_dir() as tmp_dir:
cscope_database_name = os.path.join(tmp_dir, 'cscope')
build_cscope_ref(src_files, cscope_database_name)
found_calls = False
for func in functions:
# If func is a tuple consider the first element to be the function
# we're searching for and the second an alternative to suggest
# to the user.
alternative = None
if isinstance(func, tuple):
func, alternative = func
out = cscope_find_calls(func, cscope_database_name)
if out:
found_calls = True
msg = 'found call to "{0}"'.format(func)
if alternative:
msg += ' consider using "{0}" instead'.format(alternative)
log_error(msg)
sys.stdout.write(out.replace(rel_dir, '') + '\n')
return found_calls
def get_alloc_funcs():
"""Return a list of the possible C alloc/dealloc routines"""
# If list element is a tuple then consider the second element to
# be an alternative for the first. If the list element is a
# not a tuple then their is no known alternative.
std = [('malloc', 'chpl_mem_alloc'),
('calloc', 'chpl_mem_calloc'),
('realloc', 'chpl_mem_realloc'),
('free', 'chpl_mem_free')]
align = ['aligned_alloc', 'posix_memalign', 'memalign']
page_align = ['valloc', 'pvalloc']
string = ['strdup', 'strndup', 'asprintf', 'vasprintf']
obscure = ['getline', 'getdelim']
return std + align + page_align + string + obscure
def get_exit_funcs():
"""Return a list of the possible C exit routines"""
# TODO look for abort too (need to remove calls from the runtime first)
std = ['exit']
return std
def main():
"""Parse options and check for calls"""
class MyParser(optparse.OptionParser):
"""Optparse wrapper that doesn't strip newlines from the epilog"""
def format_epilog(self, formatter):
return self.epilog
parser = MyParser(epilog='\n{0}'.format(__doc__))
parser.add_option('--search-dir', dest='search_dir', default=os.getcwd(),
help='directory to check for alloc calls [default: CWD]')
parser.add_option('--exclude-paths', dest='exclude_paths', default='',
help='comma separated list of (sub)paths/files to skip')
parser.add_option('--abs-paths', dest='abs_paths', action="store_true",
help='report abs paths vs. rel to --search-dir/../')
parser.add_option('--check-alloc', dest='check_alloc', action="store_true",
help='check for calls to the system allocator')
parser.add_option('--check-exit', dest='check_exit', action="store_true",
help='check for calls to exit routines')
parser.add_option('--check-calls', dest='check_calls', default='',
help='comma separated list of calls to check for')
options = parser.parse_args()[0]
check_calls = [x.strip() for x in options.check_calls.split(',') if x]
if options.check_exit:
check_calls += get_exit_funcs()
if options.check_alloc or not check_calls:
check_calls += get_alloc_funcs()
search_dir = os.path.abspath(options.search_dir)
exclude_paths = [x.strip() for x in options.exclude_paths.split(',') if x]
rel_paths = not options.abs_paths
return check_for_calls(check_calls, search_dir, exclude_paths, rel_paths)
if __name__ == "__main__":
sys.exit(main())
```
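A hedged sketch of calling the checker directly rather than through the command line; the directory names are placeholders and `cscope` must be on `PATH`:

```python
from look_for_calls import check_for_calls, get_alloc_funcs, get_exit_funcs

# Placeholder directories: scan 'runtime' but skip anything under 'third-party'.
found = check_for_calls(get_alloc_funcs() + get_exit_funcs(),
                        search_dir='runtime',
                        exclude_paths=['third-party'])
print('problem calls found' if found else 'clean')
```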
#### File: gen-LAPACK/extern-tool/main.py
```python
import Passes
from Passes import *
from xmltool import *
import sys
root = None
def main():
if len( sys.argv ) < 2:
print "Path to LAPACK required"
exit(-1)
elif len( sys.argv ) > 2:
print "Too many arguments. Only path to LAPACK required"
exit(-1)
Passes.lapack_root = sys.argv[1] # root directory of LAPACK
Passes.lapack_src = Passes.lapack_root + "/SRC"
Passes.lapack_matgen_src = Passes.lapack_root + "/TESTING/MATGEN"
Passes.lapack_install_src = Passes.lapack_root + "/INSTALL"
Passes.lapacke_include = Passes.lapack_root + "/lapacke/include"
Passes.blas_src = Passes.lapack_root + "/BLAS/SRC"
root = ET.Element( "root" );
DumpCodePass.apply( root )
if __name__ == "__main__": main()
```
#### File: gen-LAPACK/extern-tool/Passes.py
```python
import xmltool
from xmltool import *
import codetools
from codetools import *
import copy
'''
About this script:
This script was developed to be a magic bullet for taking LAPACK fortran
and LAPACKE C code and documentation and turning it into the
ChaLAPACK and LAPACK interface modules.
It is not intended to be 'general purpose' and may break with other (maybe newer)
versions of LAPACK.
The idea here was to adopt a static-pass pattern that would be applied to an XML tree.
This is the pattern to be adopted by all Pass classes
class GenericPass ( Pass ):
dependencies = [] # list of Pass inheriting classes that must
# be completed before this pass is run
complete = False # static variable signifying that the pass
# had been successfully completed
@staticmethod
def apply( xml_tree ):
selfname = GenericPass
Pass.resolve( selfname, xml_tree ) # Resolve all of this passes dependencies
# potentially resolving their dependencies
print "[",selfname,"]"
# Work to be done in this pass
selfname.complete = True # Signify that this pass was completed successfully.
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
Pass is the parent class of all pass classes, and contains the parsed
input.xml file (input_xml), where pass specific inputs are found
'''
# Relative paths to LAPACK and its subdirectories.
lapack_root = "../LAPACK"
lapack_src = lapack_root + "/SRC"
lapack_matgen_src = lapack_root + "/TESTING/MATGEN"
lapack_install_src = lapack_root + "/INSTALL"
lapacke_include = lapack_root + "/lapacke/include"
blas_src = lapack_root + "/BLAS/SRC"
# Parses the documentation out from the text, stripping the comment tokens
# Captures: [1,text] raw documentation text, stripped of comment tokens
f_comment_regex = re.compile( r"(?:^|\n)\*>?(?P<text>.*)" )
# Parses the source out from the text
# Captures: [1,text] raw source code text
f_source_regex = re.compile( r"(?:^|\n)(?!\*>?)(?P<text>.*)" )
# Parses function declaration from the c header file
# Captures: [1,returns] return type, [2,name] function name, [3,arguments] full unparsed param list
c_func_decl_regex = re.compile( r"(?P<returns>\w+)\s+(?P<name>\w+)\s*\(\s*(?P<arguments>(?:\w+\s+)?\w+(?:\s*\*\s*|\s+)\w+(?:\s*,\s*(?:\w+\s+)?\w+(?:\s*\*\s*|\s+)\w+)*)?\s*\)\s*;" )
# Parses arguments to a function
# Captures: [1,modifier]? const modifier, [2,type] type, [3,refdepth] string containing whitespace and/or astierisk(s), [3,name] param name
c_args_regex = re.compile( r"(?:(?P<modifier>const)\s+)?(?P<type>\w+)(?P<refdepth>(?:\s*\*+\s*)|\s+)(?P<name>\w+)" )
# Parsers function declaration and argument documentation from the fortran code (Does not yet work with source. requires $ delimiting as well)
# Captures: [1,type]? return type, [2,name] function name, [3,arguments] full unparsed argument names,
doc_func_regex = re.compile( r"(?:(?P<type>(?:[\w\*]+)(?: +\w+)*)\s+)?(?:(?:SUBROUTINE)|(?:FUNCTION))\s+(?P<name>\w+)\(\s*(?P<arguments>(?:\w+\s*(?:,\s*\w+\s*)*)?)\s*\)" )
# Parses the scalar arguments from the documentation (TODO determine if source works too.)
# Captures: [1,body] full unparsed text of scalar arguments documentation
doc_scalarargs_regex = re.compile( r"Scalar Arguments\s+\.\.\s+(?P<body>(?:.|\n)*?)\s+\.\." )
doc_scalarargs_decls_regex = re.compile( r"(?P<type>(?:[\w\*]+)(?: +\w+)*) +(?P<names>\w+(?:\s*,(?:\s*\$\s*)?\s*\w+)*)" )
# Parses the array arguments from the documentation (TODO determine if source works too.)
# Captures: [1,body] full unparsed text of array arguments documentation
doc_arrayargs_regex = re.compile( r"Array Arguments\s+\.\.\s+(?P<body>(?:.|\n)*?)\s+\.\." )
doc_arrayargs_decls_regex = re.compile( r"(?P<type>(?:[\w\*]+)(?: +\w+)*) +(?P<names>\w+(?:\([\s\S]*?\))?(?:\s*,(?:\s*\$\s*)?\s*\w+(?:\([\s\S]*?\))?)*)" )
doc_arrayargs_decls_names_dims_regex = re.compile( r"(?P<name>\w+)(?:\((?P<dimensions>.*?)\))?," )
doc_functionargs_regex = re.compile( r"Function Arguments\s+\.\.\s+(?P<body>(?:.|\n)*?)\s+\.\." )
doc_functionargs_decls_regex = doc_scalarargs_decls_regex
# Parses the argument information from the documentation
# Captures: [1,name] name, [2,intent] intent, [3,body] full unparsed body of argument document
doc_args_regex = re.compile( r"\s+\\param\[(?P<intent>\w+(?:,\w+)*)\]\s+(?P<name>\w+)\s+\\verbatim\s+(?P<body>(?:[\s\S])+?)\s*\\endverbatim" )
# Parses the typeinfo group of doc_args_regex
# Captures: [1,name] argument name, [2,type] type, [3,array]? captures array keyword if exists, [4,dimension] captures text describing dimensions
doc_args_typeinfo_regex = re.compile( r"(?P<name>\w+)\s+is\s+(?P<type>\w+)(?: (?P<array>array), dimension\s+(?P<dimensions>.*))?" )
# Parses the argument documentation and provides the matrix size of an array (if there is one)
# Captures: [1] 1st dimension, [2] 2nd dimension
doc_args_dimensions_regex = re.compile( r"(\w+)[- ]by[- ](\w+)(?: coefficient)? matrix" )
# Parses the human documentation of the fortran base of scalar ints to determine how (if at all) they relate to matrix arrays
# Captures: [1,what] the semantic information of relation (order, rows, columns, rank) [2,who] an unbroken sentence of names referring to matrices/arrays
scalar_matrix_relation_regex = re.compile( r"(?:number\s+of\s+)?(?P<what>\w+)\s+(?:(?:of)|(?:in))\s+(?:the\s+)?(?:input\s+)?(?:(?:matrix)|(?:matrices)|(?:submatrix))?(?:\s+)?(?P<who>(?:(?:(?:\w+\( \w+ \))|(?:\w+))\s*)+)" );
# Parses the function purpose documentation from the documentation
# Captures: [1,body] the human readable text documenting the purpose of the function
doc_purpose_regex = re.compile( r"\\par\s+Purpose:\s+=+\s+\\verbatim\s+(?P<body>(?:[\s\S]+?))\s+\\endverbatim" )
# Parses function names
# Captures: [1,type] literal type of matrix, [2,config] configuration type of matrix, [3,function] function group
#func_name_group_regex = re.compile( r"^(?P<type>[dszc]|(?:ds)|(?:zc))(?P<config>(?:bd)|(?:di)|(?:gb)|(?:ge)|(?:gg)|(?:gt)|(?:hb)|(?:he)|(?:hg)|(?:hp)|(?:hs)|(?:op)|(?:or)|(?:pb)|(?:po)|(?:pp)|(?:pt)|(?:sb)|(?:sp)|(?:st)|(?:sy)|(?:tb)|(?:tg)|(?:tp)|(?:tr)|(?:tz)|(?:un)|(?:up))(?P<function>.+)" )
func_name_group_regex = re.compile( r"^(?P<type>(?:(?:ds)|(?:zc)|[dszc]))(?P<config>\w\w)(?P<function>\w\w\w*)" )
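# Illustrative example (not from the original source): a routine name such as
# "dgetrf" is split by the regex above into type="d", config="ge", function="trf".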
'''
class ResolutionFailure ( Exception )
Purpose:
Exception for errors encountered during Pass.resolve( ) calls.
Member Functions:
__init__( self, value ):
constructor. Value is the Pass class who errored during resolution
__str__( self ):
returns string stating which Pass class had an error.
Member Variables:
value:
the Pass class that errored during resolution
'''
class ResolutionFailure ( Exception ):
def __init__(self, value):
self.value = value
def __str__(self):
return "Error applying " + repr(self.value) + " to tree"
'''
class GeneralPassFailure ( Exception )
Purpose:
Generic exception class that is thrown when passes encounter critical errors
Member Functions:
__init__( self, message ):
constructor. Message is the message from the Pass to the user
__str__( self ):
        returns the message from the Pass to the user
Member Variables:
message:
        the message from the Pass to the user
'''
class GeneralPassFailure ( Exception ):
def __init__(self, message):
self.message = message
def __str__(self):
return repr(self.message)
'''
class Pass
Purpose:
Parent of all other pass classes.
Container of input xml file.
Dependency resolver.
Member Functions:
resolve( staticclass, xml_tree ):
recursively resolves dependencies of pass staticclass onto xml_tree
apply( xml_tree ):
abstract static method(?).
raises NotImplementedError exception
Member Variables:
    complete:
boolean has-complete-pass-somewhere-once.
if false, pass has never been performed or failed to perform
if true, pass has been performed once.
dependencies:
list of dependencies that must be completed before pass can be applied
input_xml:
        the user input xml that gives input from the user to the passes
especially for passes that could not do automated finding in the source text
'''
class Pass:
complete = False
dependencies = []
input_xml = loadxml( "./input.xml" )
@staticmethod
def resolve( staticclass, xml_tree ):
print "Resolving", staticclass, "Dependencies"
for dep in staticclass.dependencies:
if not dep.complete:
dep.apply( xml_tree )
if not dep.complete:
raise ResolutionFailure( dep )
print "Resolved", staticclass, "Dependencies"
@staticmethod
def apply( xml_tree ):
raise NotImplementedError
'''
class CreateTreePass ( Pass )
Purpose:
takes in xmlelement with a root node, creates <LAPACK> and <LAPACKE> root nodes
'''
class CreateTreePass ( Pass ):
dependencies = []
complete = False
@staticmethod
def apply( xml_tree ):
selfname = CreateTreePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
SubElement( xml_tree, "LAPACK" )
SubElement( xml_tree, "LAPACKE" )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class DocumentSplitPass ( Pass )
Purpose:
    Strip documentation (fortran comments) from the code into separate nodes
    under a common file node under the LAPACK node.
'''
class DocumentSplitPass ( Pass ):
dependencies = [CreateTreePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = DocumentSplitPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
text_node = SubElement( xml_tree.find( "./LAPACK" ), "text" )
src_files = []
source_dirs = [lapack_src, lapack_matgen_src, blas_src]
for dir in source_dirs:
for file in os.listdir( dir ):
if fnmatch.fnmatch( file, '*.f' ):
src_files.append( dir + "/" + file )
file_count = 1
for file in src_files:
sys.stdout.write("%s ( %d : %d ) \r" % (file, file_count, len(src_files) ) )
sys.stdout.flush()
file_node = SubElement( text_node, "file" )
file_node.set( "name", file )
src_node = SubElement( file_node, "source" )
doc_node = SubElement( file_node, "documentation" )
src_node.text = str()
doc_node.text = str()
file_read = open( file ).read()
for doc_match in f_comment_regex.finditer( file_read ):
doc_node.text += doc_match.group( "text" ) + "\n"
            # Source capture below was previously disabled (it works, but adds load on the tree); it is enabled here.
for src_match in f_source_regex.finditer( file_read ):
src_node.text += src_match.group( "text" ) + "\n"
file_count += 1
sys.stdout.write(" \r")
sys.stdout.flush()
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class LAPACKFunctionDefinePass ( Pass )
Purpose:
find fortran functions in the documentation and put them in the
<procedures> node under <LAPACK>
'''
class LAPACKFunctionDefinePass ( Pass ):
dependencies = [DocumentSplitPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = LAPACKFunctionDefinePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_node = xml_tree.find( "./LAPACK" )
text_node = lapack_node.find( "./text" )
procs_node = SubElement( lapack_node, "procedures" )
file_nodes = text_node.findall( "./file" )
file_count = 1
for file_node in file_nodes:
sys.stdout.write("%s ( %d : %d ) \r" % (file_node.get("name"), file_count, len(file_nodes) ) )
sys.stdout.flush()
file_doc = file_node.find( "./documentation" )
for proc_decl in doc_func_regex.finditer( file_doc.text ):
proc_node = SubElement( procs_node, "procedure" )
proc_node.set( "name", proc_decl.group( "name" ) )
proc_node.set( "file-name", file_node.get( "name" ) )
if proc_decl.group( "type" ) != None:
proc_node.set( "return-type", proc_decl.group( "type" ) )
#print "\t", proc_decl.group("name")
arguments = proc_decl.group( "arguments" ).split( "," );
if len( arguments ) >= 1 and arguments[0] != "":
args_node = SubElement( proc_node, "arguments-list" )
arg_counter = 0
for arg in arguments:
#print "\t\t",arg.strip()
arg_node = SubElement( args_node, "argument" )
arg_node.set( "name", arg.strip() )
arg_node.set( "position", str(arg_counter) )
arg_counter += 1
#SubElement( proc_node, "documentation" )
file_count += 1
sys.stdout.write(" \r")
sys.stdout.flush()
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FuncPurposeDocPass ( Pass )
Purpose:
collect function purpose documentation from fortran text
'''
class FuncPurposeDocPass ( Pass ):
dependencies = [LAPACKFunctionDefinePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FuncPurposeDocPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_node = xml_tree.find( "./LAPACK" )
text_node = lapack_node.find( "./text" )
procs_node = lapack_node.find( "./procedures" )
for proc_node in procs_node.findall( "./procedure" ):
proc_file_name = proc_node.get( "file-name" )
doc_node = text_node.find( "./file/[@name='" + proc_file_name + "']/documentation" )
purpose_match = doc_purpose_regex.search( doc_node.text )
purpose = purpose_match.group( "body" ) if purpose_match != None else "Unspecified"
SubElement( proc_node, "purpose" ).text = purpose
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FuncArgsDocPass ( Pass ):
Purpose:
collect argument documentation from fortran text
'''
class FuncArgsDocPass ( Pass ):
dependencies = [LAPACKFunctionDefinePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FuncArgsDocPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_node = xml_tree.find( "./LAPACK" )
text_node = lapack_node.find( "./text" )
procs_node = lapack_node.find( "./procedures" )
for proc_node in procs_node.findall( "./procedure" ):
proc_file_name = proc_node.get( "file-name" )
doc_node = text_node.find( "./file/[@name='" + proc_file_name + "']/documentation" )
for arg_match in doc_args_regex.finditer( doc_node.text ):
#print "\"",proc_file_name,"\"", arg_match.group()
arg_name = arg_match.group( "name" ).strip()
arg_node = proc_node.find( "./arguments-list/argument/[@name='" + arg_name + "']" )
arg_node.set( "intent", arg_match.group( "intent" ) )
dim_match = doc_args_dimensions_regex.search( arg_match.group( "body" ) )
if dim_match != None:
arg_node.set( "matrix-size", dim_match.group(1) +"," + dim_match.group(2) )
SubElement( arg_node, "documentation" ).text = arg_match.group( "body" )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FuncArgsTypePass ( Pass )
NON FUNCTIONAL
Purpose:
collect argument names and types under the Scalar Arguments
and Array Arguments header and include in tree for semantic understanding
'''
class FuncArgsTypePass ( Pass ):
dependencies = [LAPACKFunctionDefinePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FuncArgsTypePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_node = xml_tree.find( "./LAPACK" )
text_node = lapack_node.find( "./text" )
procs_node = lapack_node.find( "./procedures" )
for proc_node in procs_node.findall( "./procedure" ):
proc_file_name = proc_node.get( "file-name" )
doc_node = text_node.find( "./file/[@name='" + proc_file_name + "']/documentation" )
# attribute scalar arguments
scalars = doc_scalarargs_regex.search( doc_node.text )
if scalars != None:
for line in doc_scalarargs_decls_regex.finditer( scalars.group( "body" ) ):
names_list = re.sub( r"[\s$]", "", line.group("names") ).split( "," )
#print line.group( "type" ), ":", names_list
type = line.group( "type" )
#skip any "IMPLICIT" 'typed' arguments
if type.lower() == "implicit":
continue
for name in names_list:
arg_node = proc_node.find( "./arguments-list/argument/[@name='" + name + "']" )
if arg_node == None:
#print "Non-match: argument", name, "of", proc_node.get( "name" ), "in", proc_file_name
#prettyprintxml( proc_node.find("./arguments-list") )
continue
arg_node.set( "type", type )
arg_node.set( "semantic", "scalar" )
# attribute array arguments
arrays = doc_arrayargs_regex.search( doc_node.text )
if arrays != None:
for line in doc_arrayargs_decls_regex.finditer( arrays.group( "body" ) ):
name_list = re.sub( r"[\s$]", "", line.group("names") ) + ","
type = line.group( "type" )
for name_match in doc_arrayargs_decls_names_dims_regex.finditer( name_list ):
name = name_match.group( "name" )
arg_node = proc_node.find( "./arguments-list/argument/[@name='" + name + "']" )
if arg_node == None:
#print "Non-match: argument", name, "of", proc_node.get( "name" ), "in", proc_file_name
continue
dimensions = name_match.group( "dimensions") if name_match.group( "dimensions") != None else ""
arg_node.set( "type", type )
arg_node.set( "semantic", "array" )
arg_node.set( "dimensions", dimensions )
# attribute function arguments
functions = doc_functionargs_regex.search( doc_node.text )
if functions != None:
for line in doc_functionargs_decls_regex.finditer( functions.group( "body" ) ):
names_list = re.sub( r"[\s$]", "", line.group("names") ).split( "," )
#print line.group( "type" ), ":", names_list
type = line.group( "type" )
                    #skip any "EXTERNAL" 'typed' arguments
if type.lower() == "external":
continue
for name in names_list:
arg_node = proc_node.find( "./arguments-list/argument/[@name='" + name + "']" )
if arg_node == None:
#print "Non-match: argument", name, "of", proc_node.get( "name" ), "in", proc_file_name
#prettyprintxml( proc_node.find("./arguments-list") )
continue
arg_node.set( "type", type )
arg_node.set( "semantic", "function" )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class AssociateArgsToArrayPass ( Pass ):
Purpose:
    Works out which scalar integer arguments describe the dimensions (rows, columns, order, rank) of array arguments, and records the association.
'''
class AssociateArgsToArrayPass ( Pass ):
dependencies = [FuncArgsTypePass, FuncArgsDocPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = AssociateArgsToArrayPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_node = xml_tree.find( "./LAPACK" )
text_node = lapack_node.find( "./text" )
procs_node = lapack_node.find( "./procedures" )
#proc_info = {} # {func_name} => { arg name } => [ what, what, who ]
for proc_node in procs_node.findall( "./procedure" ):
proc_name = proc_node.get( "name" )
'''
if not proc_name in proc_info:
proc_info[ proc_name ] = {}
'''
base_name = proc_name.lower()
match = func_name_group_regex.search( base_name );
if match == None:
#print proc_name, "(", base_name, ") does not match regex"
continue
func = match.group( "function" )
config = match.group( "config" )
type = match.group( "type" )
if not config.startswith( "ge" ):
pass
arg_names = [ arg.get("name") for arg in proc_node.findall( "./arguments-list/argument" ) ]
for arg_node in proc_node.findall( "./arguments-list/argument" ):
doc_node = arg_node.find( "documentation" )
if doc_node == None or arg_node.get("semantic") != "scalar" or arg_node.get("type").lower() != "integer":
continue
what = []
who = []
string = []
for m in scalar_matrix_relation_regex.finditer( doc_node.text ):
if not m.group( "what" ) in ["rows", "columns", "order", "rank"] :
continue
names = m.group( "who" ).strip()
names_list = []
if " and " in names:
names_list = [ name.strip() for name in names.split( "and" ) ]
else:
names_list = [ names ]
nameHasSpace = False
for name in names_list:
if " " in name:
nameHasSpace = True
break
if nameHasSpace:
#print names, " contains non names. Skipping."
continue
removes = []
for name in names_list:
if not name in arg_names:
removes.append( name )
for rm in removes:
names_list.remove( rm )
if len( names_list ) == 0:
#print "Names list had no argument names. Skipping"
continue
what.append( m.group( "what" ) )
who.append( names_list )
string.append( re.sub( "\s+", " ", m.group(0) ) )
if len( what ) == 0 and len( who ) == 0:
continue
#proc_info[ proc_name ][ arg_node.get( "name" ) ] = [ what, who, string]
associate_array = str()
associate_field = str()
first = True
for i in range( len( who ) ):
for array in who[i]:
associate_array += ( "," if not first else "" ) + array
associate_field += ( "," if not first else "" ) + what[i]
first = False
arg_node.set( "associate-array", associate_array )
arg_node.set( "associate-field", associate_field )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class BaseLAPACKPass ( Pass )
Purpose:
Tie together all passes over the LAPACK fortran source code and
resulting semantic analysis
'''
class BaseLAPACKPass ( Pass ):
dependencies = [FuncArgsTypePass, FuncArgsDocPass, AssociateArgsToArrayPass, FuncPurposeDocPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = BaseLAPACKPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class LAPACKEFunctionDefinePass ( Pass ):
Purpose:
from lapacke.h text define all C function decls
in under the <LAPACKE> tree.
'''
class LAPACKEFunctionDefinePass ( Pass ):
dependencies = [CreateTreePass] # TODO include BaseLAPACKPass when the two need to meet
complete = False
@staticmethod
def apply( xml_tree ):
selfname = LAPACKEFunctionDefinePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapacke_root = xml_tree.find( "./LAPACKE" )
procs_node = SubElement( lapacke_root, "procedures" )
lapacke_header = open( lapacke_include + "/lapacke.h" ).read()
for func_decl in c_func_decl_regex.finditer( lapacke_header ):
#if func_decl.group( "name" ).lower().startswith( "lapacke_" ): continue
if procs_node.find( "./procedure/[@name='" + func_decl.group( "name" ) + "']" ) != None:
#print "proc", func_decl.group( "name" ), "redefined. Skipping"
continue
proc_node = SubElement( procs_node, "procedure" )
proc_node.set( "name", func_decl.group( "name" ) )
proc_node.set( "return-type", func_decl.group( "returns" ) )
args_node = SubElement( proc_node, "arguments-list" )
arg_count = 0
for arg in c_args_regex.finditer( func_decl.group( "arguments" ) ):
arg_node = SubElement( args_node, "argument" )
arg_node.set( "name", arg.group( "name" ) )
arg_node.set( "type", arg.group( "type" ) )
arg_node.set( "refdepth", str( arg.group( "refdepth" ).count("*") ) )
if arg.group( "modifier" ) != None:
arg_node.set( "modifier", arg.group( "modifier" ) )
arg_node.set( "position", str(arg_count) )
arg_count += 1
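# Illustrative sketch (prototype shortened, names assumed): a lapacke.h declaration such as
#   lapack_int LAPACKE_dgesv( int matrix_order, lapack_int n, ..., lapack_int* ipiv, ... );
# becomes <procedure name="LAPACKE_dgesv" return-type="lapack_int"> whose <argument>
# children carry name, type, refdepth (count of '*' in the declaration), and position.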
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class BaseLAPACKEPass ( Pass )
Purpose:
Ties together all passes over the lapacke.h text
and any basic analysis
'''
class BaseLAPACKEPass ( Pass ):
dependencies = [LAPACKEFunctionDefinePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = BaseLAPACKEPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class CharacterArraySemanticsCorrectionPass ( Pass )
Purpose:
LAPACK Fortran documentation declares character*1 (single character) arguments with
array semantics. This pass corrects them to scalar semantics.
'''
class CharacterArraySemanticsCorrectionPass ( Pass ):
dependencies = [BaseLAPACKPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = CharacterArraySemanticsCorrectionPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_f_procs = xml_tree.find( "./LAPACK/procedures" )
for f_proc in lapack_f_procs.findall( "./procedure" ):
for f_arg in f_proc.findall( "./arguments-list/argument" ):
if f_arg.get( "type" ) == None:
continue
#if f_arg.get( "name" ) == "JOBA":
# print f_proc.get( "name" ), f_arg.get( "name" ), f_arg.get( "type" ).lower()
if f_arg.get( "type" ).lower() == "character*1":
# print f_proc.get( "name" ), f_arg.get( "name" ), f_arg.get( "type" ), f_arg.get( "semantic" ), f_arg.get( "intent" ), f_arg.get( "dimensions" )
if f_arg.get( "semantic" ) == "array":
f_arg.set( "semantic", "scalar" )
if f_arg.get( "dimensions" ) != None:
del f_arg.attrib[ "dimensions" ] # ElementTree Elements have no unset(); remove the attribute directly
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ArgumentSemanticsBucketingPass ( Pass )
Purpose:
Heuristically assign argument semantics by bucketing all arguments by name.
Any argument name that has only a None bucket and one other bucket can
'safely' have the entries in the None bucket assigned the semantics of the other bucket.
'''
class ArgumentSemanticsBucketingPass ( Pass ):
dependencies = [CharacterArraySemanticsCorrectionPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ArgumentSemanticsBucketingPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_f_procs = xml_tree.find( "./LAPACK/procedures" )
variables = {}
for proc in lapack_f_procs.findall( "./procedure" ):
proc_name = proc.get( "name" )
for arg in proc.findall( "./arguments-list/argument" ):
arg_name = arg.get( "name" )
semantic = arg.get( "semantic" ) if arg.get( "semantic" ) != None else "none"
if not arg_name in variables:
variables[ arg_name ] = {}
if not semantic in variables[ arg_name ]:
variables[ arg_name ][ semantic ] = []
variables[ arg_name ][ semantic ].append( proc_name )
for arg in variables:
if len( variables[ arg ] ) > 2:
print arg
for semantic in variables[ arg ]:
print " \"" + semantic + "\"", ":",variables[ arg ][ semantic ]
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class AssociateFunctionsLAPACKtoLAPACKEPass ( Pass )
Purpose:
link functions and args from both the C and Fortran world
together with paths from root.
'''
class AssociateFunctionsLAPACKtoLAPACKEPass ( Pass ):
dependencies = [BaseLAPACKEPass, BaseLAPACKPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = AssociateFunctionsLAPACKtoLAPACKEPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_c_root = xml_tree.find( "./LAPACKE" )
lapack_c_procs = lapack_c_root.find( "./procedures" )
lapack_f_root = xml_tree.find( "./LAPACK" )
lapack_f_procs = lapack_f_root.find( "./procedures" )
for lapack_c_proc in lapack_c_procs.findall( "./procedure" ):
proc_name = lapack_c_proc.get( "name" ).lower()
base_name = str()
if proc_name.startswith( "lapack_" ):
base_name = proc_name.replace( "lapack_", "" )
elif proc_name.startswith( "lapacke_" ):
base_name = proc_name.replace( "lapacke_", "" )
else:
print "Unknown root of name:", lapack_c_proc.get( "name" )
continue
base_name = base_name.replace( "_work", "" )
base_name = base_name.upper()
#print lapack_c_proc.get("name"), proc_name, base_name
lapack_f_proc = lapack_f_procs.find( "./procedure/[@name='" + base_name + "']" )
if lapack_f_proc == None:
#print "Could not find the fortran analogue of C function", lapack_c_proc.get( "name" ), "from base-name", base_name
continue
SubElement( SubElementUnique( lapack_c_proc, "analogues" ), "analogue" ).text = "./LAPACK/procedures/procedure/[@name='" + lapack_f_proc.get( "name" ) + "']"
SubElement( SubElementUnique( lapack_f_proc, "analogues" ), "analogue" ).text = "./LAPACKE/procedures/procedure/[@name='" + lapack_c_proc.get( "name" ) + "']"
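# Illustrative analogue paths (function names assumed): for C function LAPACKE_dgesv
# matched to Fortran DGESV, the C-side record would hold the path
#   ./LAPACK/procedures/procedure/[@name='DGESV']
# and the Fortran-side record the path
#   ./LAPACKE/procedures/procedure/[@name='LAPACKE_dgesv']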
'''
misses = []
for f_arg in lapack_f_proc.findall( "./arguments-list/argument" ):
f_arg_name = f_arg.get( "name" );
c_arg = lapack_c_proc.find( "./arguments-list/argument/[@name='" + f_arg_name.lower() + "']" )
# skip non-analogous args.
# TODO solve/mention matching failure somewhere? Maybe...
if c_arg == None:
#misses.append( f_arg_name )
continue
# ?_ana_node is the analogue record under ? language (ie c_ana_node notes the argument in the fortran tree, but lives in the C tree)
# Note that it is totally possible to create the path string from the two atributes of the tag.
# easier to create once here, instead of manytimes everywhere else.
c_ana_node = SubElement( SubElementUnique( c_arg, "analogues" ), "analogue" )
c_ana_node.text = "./LAPACK/procedures/procedure/[@name='" + lapack_f_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + f_arg.get( "name" ) + "']"
c_ana_node.set( "function", lapack_f_proc.get( "name" ) )
c_ana_node.set( "name", f_arg.get( "name" ) )
f_ana_node = SubElement( SubElementUnique( f_arg, "analogues" ), "analogue" )
f_ana_node.text = "./LAPACKE/procedures/procedure/[@name='" + lapack_c_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + c_arg.get( "name" ) + "']"
f_ana_node.set( "function", lapack_c_proc.get( "name" ) )
f_ana_node.set( "name", c_arg.get( "name" ) )
'''
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class DestroyUnassociatedCFunctionsTreePass ( Pass )
Purpose:
Remove procedures from LAPACKE subtree that do not have Fortran analogues
UNUSED
'''
class DestroyUnassociatedCFunctionsTreePass ( Pass ):
dependencies = [AssociateFunctionsLAPACKtoLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = DestroyUnassociatedCFunctionsTreePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_f_root = xml_tree.find( "./LAPACKE" )
lapack_f_procs = lapack_f_root.find( "./procedures" )
for f_proc in lapack_f_procs.findall( "./procedure" ):
if f_proc.find( "./analogues" ) == None:
lapack_f_procs.remove( f_proc )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class DestroyUnassociatedFortranFunctionsTreePass ( Pass )
Purpose:
Remove procedures from LAPACK subtree that do not have C analogues
'''
class DestroyUnassociatedFortranFunctionsTreePass ( Pass ):
dependencies = [AssociateFunctionsLAPACKtoLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = DestroyUnassociatedFortranFunctionsTreePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_f_root = xml_tree.find( "./LAPACK" )
lapack_f_procs = lapack_f_root.find( "./procedures" )
for f_proc in lapack_f_procs.findall( "./procedure" ):
if f_proc.find( "./analogues" ) == None:
lapack_f_procs.remove( f_proc )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class EasyAssociateArgsPass ( Pass )
Purpose:
Create association between C and Fortran analogue function arguments
when that association is easy (ie they have the same name)
'''
class EasyAssociateArgsPass ( Pass ):
dependencies = [DestroyUnassociatedFortranFunctionsTreePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = EasyAssociateArgsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
c_procs = xml_tree.find( "./LAPACKE/procedures" )
for c_proc in c_procs.findall( "./procedure" ):
proc_name = c_proc.get( "name" ).lower()
supposed_f_ana_node = c_proc.find( "./analogues/analogue" )
if supposed_f_ana_node == None:
#print "Proc", c_proc.get( "name" ), "has no Fortran analogues. Skipping"
continue
f_proc = xml_tree.find( supposed_f_ana_node.text )
if f_proc == None:
print "BAD! No analogue where analogue should exist"
return
#continue
for f_arg in f_proc.findall( "./arguments-list/argument" ):
f_arg_name = f_arg.get( "name" );
c_arg = c_proc.find( "./arguments-list/argument/[@name='" + f_arg_name.lower() + "']" )
# skip non-analogous args.
if c_arg == None:
continue
# ?_ana_node is the analogue record under the ? language (i.e. c_ana_node notes the argument in the Fortran tree, but lives in the C tree)
# Note that it is entirely possible to create the path string from the two attributes of the tag;
# it is easier to create it once here instead of many times everywhere else.
c_ana_node = SubElement( SubElementUnique( c_arg, "analogues" ), "analogue" )
c_ana_node.text = "./LAPACK/procedures/procedure/[@name='" + f_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + f_arg.get( "name" ) + "']"
c_ana_node.set( "function", f_proc.get( "name" ) )
c_ana_node.set( "name", f_arg.get( "name" ) )
f_ana_node = SubElement( SubElementUnique( f_arg, "analogues" ), "analogue" )
f_ana_node.text = "./LAPACKE/procedures/procedure/[@name='" + c_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + c_arg.get( "name" ) + "']"
f_ana_node.set( "function", c_proc.get( "name" ) )
f_ana_node.set( "name", c_arg.get( "name" ) )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ImportArgumentAssociationsPass ( Pass )
Purpose:
Create argument associations by name as specified in the input.xml
(read under the legacy pass name 'ImportArgumentAnaloguesPass'),
using the function associations created automatically at runtime
'''
class ImportArgumentAssociationsPass ( Pass ):
dependencies = [EasyAssociateArgsPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ImportArgumentAssociationsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
c_procs = xml_tree.find( "./LAPACKE/procedures" )
pass_input = Pass.input_xml.find( "./pass/[@name='ImportArgumentAnaloguesPass']" )
for in_proc in pass_input.findall( "./procedure" ):
c_proc = c_procs.find( "./procedure/[@name='" + in_proc.get( "name" ) + "']" )
f_proc = xml_tree.find( c_proc.find( "./analogues/analogue" ).text )
for in_arg in in_proc.findall( "./argument" ):
c_arg = c_proc.find( "./arguments-list/argument/[@name='" + in_arg.get( "name" ) + "']" )
f_arg = f_proc.find( "./arguments-list/argument/[@name='" + in_arg.get( "substitution" ) + "']" )
#prettyprintxml( c_arg )
if c_arg == None or f_arg == None:
raise GeneralPassFailure( "Argument speficied in input not found in tree." + c_proc.get("name") +":"+ c_arg.get("name") )
c_ana_node = SubElement( SubElementUnique( c_arg, "analogues" ), "analogue" )
c_ana_node.text = "./LAPACK/procedures/procedure/[@name='" + f_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + f_arg.get( "name" ) + "']"
c_ana_node.set( "function", f_proc.get( "name" ) )
c_ana_node.set( "name", f_arg.get( "name" ) )
f_ana_node = SubElement( SubElementUnique( f_arg, "analogues" ), "analogue" )
f_ana_node.text = "./LAPACKE/procedures/procedure/[@name='" + c_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + c_arg.get( "name" ) + "']"
f_ana_node.set( "function", c_proc.get( "name" ) )
f_ana_node.set( "name", c_arg.get( "name" ) )
#prettyprintxml( c_proc )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class BaseAssociatePass ( Pass )
Purpose:
Ties together all association of analogues pass
'''
class BaseAssociatePass ( Pass ):
dependencies = [EasyAssociateArgsPass, ImportArgumentAssociationsPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = BaseAssociatePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FoldLAPACKtoLAPACKEPass ( Pass ):
Purpose:
take the semantics derived from FuncArgsTypePass and
FuncArgumentDocToSemanticsPass over the LAPACK information
and apply them to functions found in the lapacke.h code.
Especially important for the LAPACK_* C functions.
Also important for any LAPACKE_* C functions that take pointers
to scalars.
'''
class FoldLAPACKSemanticsIntentsToLAPACKEPass ( Pass ):
dependencies = [CharacterArraySemanticsCorrectionPass, BaseAssociatePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FoldLAPACKSemanticsIntentsToLAPACKEPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_c_root = xml_tree.find( "./LAPACKE" )
lapack_c_procs = lapack_c_root.find( "./procedures" )
for c_proc in lapack_c_procs.findall( "./procedure" ):
analogues = c_proc.findall( "./analogues/analogue" )
if len( analogues ) > 1:
#print "proc", c_proc.get( "name" ), "has", len( analogues ), "analogues. skipping"
continue
elif len( analogues ) == 0:
#print "skipping", c_proc.get( "name" )
continue
f_proc = xml_tree.find( analogues[0].text )
for c_arg in c_proc.findall( "./arguments-list/argument" ):
analogues = c_arg.findall( "./analogues/analogue" )
if len( analogues ) > 1:
#print "arg", c_arg.get( "name" ), "has", len( analogues ), "analogues. skipping"
#prettyprintxml( c_proc )
continue
elif len( analogues ) == 0:
continue
f_arg = xml_tree.find( analogues[0].text )
semantic = f_arg.get( "semantic" )
if semantic != None:
c_arg.set( "semantic", semantic )
if semantic == "array":
c_arg.set( "dimensions", f_arg.get( "dimensions" ) )
intent = f_arg.get( "intent" )
if intent != None:
c_arg.set( "intent", intent )
dimensions = f_arg.get( "dimensions" )
if dimensions != None:
c_arg.set( "dimensions", dimensions )
matrix_size = f_arg.get( "matrix-size" )
if matrix_size != None:
c_arg.set( "matrix-size", matrix_size )
associate_array = f_arg.get( "associate-array" )
if associate_array != None:
c_arg.set( "associate-array", associate_array )
associate_field = f_arg.get( "associate-field" )
if associate_field != None:
c_arg.set( "associate-field", associate_field )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ImportLAPACKESemanticsIntentsPass ( Pass )
Purpose:
Import semantics and intents for LAPACKE arguments
that may still be unspecified after folding through
associations.
Will overwrite semantics and intents issued by
FoldLAPACKSemanticsIntentsToLAPACKEPass
'''
class ImportLAPACKESemanticsIntentsPass ( Pass ):
dependencies = [FoldLAPACKSemanticsIntentsToLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ImportLAPACKESemanticsIntentsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
pass_input = Pass.input_xml.find( "./pass/[@name='ImportLAPACKESemanticsIntentsPass']" )
for assign in pass_input.findall( "./assign" ):
for arg in xml_tree.findall( assign.get( "path" ) ):
semantic = assign.get( "semantic" )
intent = assign.get( "intent" )
if semantic == None and intent == None:
raise GeneralPassFailure( "assignment contains no semantic or intent attributes" + assign.get( "path" ) )
if semantic != None:
arg.set( "semantic", semantic )
if intent != None:
arg.set( "intent", intent )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class TypeSubstitutionPass ( Pass )
Purpose:
Token replacement pass of type tokens.
find-replace pairs are directly taken from input_xml file,
not inferred, detected, found, what-have-you.
No defining of types, purely text replacement.
applied to argument types and return-types of functions.
if replacement occurs, creates original-type and original-return-type
attributes that take the original value of the type and return-type attributes
Developer Note:
May move placement of TypeSubstitutionPass
since it is more related to Chapelizing than to semantic
transformations like folding.
'''
class TypeSubstitutionPass ( Pass ):
dependencies = [BaseLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = TypeSubstitutionPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
pass_input = Pass.input_xml.find( "./pass/[@name='TypeSubstitutionPass']" )
procs = xml_tree.find( "./LAPACKE/procedures" )
subs = {}
for sub in pass_input.findall( "./substitution" ):
subs[ sub.get( "find" ) ] = sub.get( "replace" )
for proc in procs.findall( "./procedure" ):
proc_type = proc.get( "return-type" )
if proc_type in subs:
proc.set( "original-return-type", proc_type )
proc.set( "return-type", subs[ proc_type ] )
for arg in proc.findall( "./arguments-list/argument" ):
arg_type = arg.get( "type" )
if arg_type in subs:
arg.set( "original-type", arg_type )
arg.set( "type", subs[ arg_type ] )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ImportArgumentTypePass ( Pass )
Purpose:
take arguments from the input file and retype
all arguments of the same name within the LAPACKE
tree to be of the type specified.
'''
class ImportArgumentTypePass ( Pass ):
dependencies = [TypeSubstitutionPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ImportArgumentTypePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
pass_input = Pass.input_xml.find( "./pass/[@name='ImportArgumentTypePass']" )
for proc in xml_tree.findall( "./LAPACKE/procedures/procedure" ):
for arg in proc.findall( "./arguments-list/argument" ):
find = pass_input.find( "./argument/[@name='" + arg.get("name") + "']" )
if find == None:
continue
arg.set("type", find.get("type" ) )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class BaseTransformLAPACKEPass ( Pass )
Purpose:
Ties together any transformation passes on the LAPACKE tree
that are unrelated to Chapelizing
Developer Note:
May move placement of TypeSubstitutionPass
since it is more related to Chapelizing than to semantic
transformations like folding.
'''
class BaseTransformLAPACKEPass( Pass ):
dependencies = [BaseLAPACKEPass, TypeSubstitutionPass, FoldLAPACKSemanticsIntentsToLAPACKEPass, ImportLAPACKESemanticsIntentsPass, TypeSubstitutionPass, ImportArgumentTypePass ]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = BaseTransformLAPACKEPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class CreateChapelModuleTreePass ( Pass )
Purpose:
Create chapel-module root, procedures, type-defines, const-defines subtrees
general setup for Chapelization and code generation
'''
class CreateChapelModuleTreePass ( Pass ):
dependencies = [ CreateTreePass ]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = CreateChapelModuleTreePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
chpl_module = SubElement( xml_tree, "chapel-module" )
procedures = SubElement( chpl_module, "procedures" )
types = SubElement( chpl_module, "type-defines" )
defines = SubElement( chpl_module, "const-defines" )
enums = SubElement( chpl_module, "enum-defines")
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ChapelizeLAPACKEFunctionsPass ( Pass )
Purpose:
take all LAPACKE_* functions defined in the <LAPACKE> tree and
break them apart to provide the information needed by later passes
that Chapelize the LAPACKE functions
'''
class ChapelizeLAPACKEFunctionsPass ( Pass ):
dependencies = [BaseTransformLAPACKEPass, CreateChapelModuleTreePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ChapelizeLAPACKEFunctionsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapacke_root = xml_tree.find( "./LAPACKE" )
lapacke_procs = lapacke_root.find( "./procedures" )
module_root = xml_tree.find( "./chapel-module" )
module_procs = module_root.find( "./procedures" )
proc_count = 0
for proc in lapacke_procs.findall( "./procedure" ):
proc_name = proc.get( "name" )
module_proc = SubElement( module_procs, "procedure" )
module_proc.set("name", proc_name)
module_proc.set("return-type", proc.get( "return-type" ) )
module_proc_args = SubElement( module_proc, "arguments-list" )
for arg in proc.findall( "./arguments-list/argument" ):
#prettyprintxml( arg )
module_arg = SubElement( module_proc_args, "argument" )
module_arg.set( "name", arg.get("name") )
module_arg.set( "position", arg.get( "position" ) )
module_arg.set( "type", arg.get( "type" ) )
arg_semantic = arg.get( "semantic" ) if arg.get( "semantic" ) != None else ""
arg_intent = arg.get( "intent" ) if arg.get( "intent" ) != None else ""
arg_refs = int( arg.get( "refdepth" ) )
dimensions = arg.get( "dimensions" )
if dimensions != None:
module_arg.set( "dimensions", dimensions )
matrix_size = arg.get( "matrix-size" )
if matrix_size != None:
module_arg.set( "matrix-size", matrix_size )
associate_array = arg.get( "associate-array" )
if associate_array != None:
module_arg.set( "associate-array", associate_array )
associate_field = arg.get( "associate-field" )
if associate_field != None:
module_arg.set( "associate-field", associate_field )
intent = None #"BADSTATE " + arg_semantic + " " + arg_intent + " " + arg_refs
semantic = None #"BADSTATE " + arg_semantic + " " + arg_intent + " " + arg_refs
if arg_refs == 0:
if arg_semantic == "array":
raise GeneralPassFailure( "Attempted array semantic with 0 refdepth " + proc_name + " " +arg.get("name") )
semantic = "scalar"
intent = "none"
if arg_refs == 1:
if arg_semantic == "array":
semantic = "array"
intent = "none"
else:
semantic = "scalar"
intent = "ref"
module_arg.set( "intent", intent )
module_arg.set( "semantic", semantic )
#module_proc.set( "category", "direct" )
proc_count += 1
print "Chapelized", proc_count, "LAPACKE functions"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class TranslateChapelKeywordsPass ( Pass ):
Purpose:
using the list of Chapel keywords from the input xml file,
renames arguments whose names collide with those keywords
'''
class TranslateChapelKeywordsPass ( Pass ):
dependencies = [ChapelizeLAPACKEFunctionsPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = TranslateChapelKeywordsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
pass_input = Pass.input_xml.find( "./pass/[@name='TranslateChapelKeywordsPass']" )
global_info = pass_input.find( "./global" )
chpl_module_procs = xml_tree.find( "./chapel-module/procedures" )
global_pre = "" if global_info == None \
or global_info.get( "prefix" ) == None \
else global_info.get( "prefix" )
global_suf = "" if global_info == None \
or global_info.get( "suffix" ) == None \
else global_info.get( "suffix" )
keywords = {}
for keyword in pass_input.findall( "./keyword" ):
symbol = keyword.get( "symbol" )
replacement = "" if keyword.get( "replacement" ) == None \
else keyword.get( "replacement" )
if replacement == "" and global_pre == "" and global_suf == "":
raise GeneralPassFailure( "If no global prefix or suffix is defined, a replacement for a symbol must be defined. (" + symbol + ")" )
keywords[ symbol ] = replacement
for proc in chpl_module_procs.findall( "./procedure" ):
proc_name = proc.get( "name" )
#print proc_name
# Note: This will break includes if we go
# about replacing their names.
# arguments are fine because nobody cares about
# their names at the late stage of linking
'''
if proc_name in keywords:
if keywords[ proc_name ] == "":
proc_name = global_pre + proc_name + global_suf
else:
proc_name = keywords[ proc_name ]
proc.set( "name", proc_name )
'''
for arg in proc.findall( "./arguments-list/argument" ):
arg_name = arg.get( "name" )
#print "\t",arg_name
if arg_name in keywords:
if keywords[ arg_name ] == "":
arg_name = global_pre + arg_name + global_suf
else:
arg_name = keywords[ arg_name ]
#print "\t\t=>",arg_name
arg.set( "name", arg_name )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ChapelerrificLAPACKEFunctionsPass ( Pass ):
Purpose:
Create Chapel-errific LAPACKE functions that take Chapel arrays and abstract away the
dimensions of the arrays and matrices stored within.
'''
class ChapelerrificLAPACKEFunctionsPass ( Pass ):
dependencies = [ChapelizeLAPACKEFunctionsPass, TranslateChapelKeywordsPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ChapelerrificLAPACKEFunctionsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
chapel_module = xml_tree.find( "./chapel-module" );
chapel_procedures = chapel_module.find( "./procedures" )
for proc in chapel_procedures.findall( "./procedure" ):
proc_name = proc.get( "name" )
if proc_name.startswith( "LAPACK_" ) or proc_name.endswith( "_work" ):
continue
base_name = proc_name.replace( "LAPACKE_", "" )
match = func_name_group_regex.search( base_name );
if match == None:
#print proc_name, "(", base_name, ") does not match regex"
continue
func = match.group( "function" )
config = match.group( "config" )
type = match.group( "type" )
if not config.startswith( "ge" ):
pass
#proc = copy.deepcopy( chpl_proc )
args_node = proc.find( "./arguments-list" )
args_list = [ ]
args_names = []
remove_list = set()
pass_through = {}
for arg in args_node.findall( "./argument" ):
args_list.append( arg )
args_names.append( arg.get("name") )
pass_through[ arg.get("name") ] = arg.get( "name" )
for arg in args_list:
if arg.get( "semantic" ) != "array" :
continue
if arg.get( "dimensions" ) != None:
dimensions = arg.get( "dimensions" ).lower().split(",")
for i in range( len(dimensions) ):
dimension = dimensions[i]
if dimension == "*":
continue
removeVar = None
for find in args_list:
if find.get( "name" ) == dimension:
removeVar = find
break
if removeVar != None:
remove_list.add( removeVar.get("name") )
pass_through[ dimension ] = "(" + arg.get("name") + ".domain.dim("+str(2-i)+").size) : c_int"
'''
else:
print ( dimension + " is not described in the arguments of "+proc.get( "name" ) + " for argument " + arg.get("name") )
'''
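# Illustrative pass-through (names assumed): for an array argument a with
# dimensions "LDA,N", the scalar N (index 1) is dropped from the wrapper's argument
# list and passed through as "(a.domain.dim(1).size) : c_int".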
if arg.get( "matrix-size" ) != None:
matrix_size = arg.get( "matrix-size" ).lower()
rows = matrix_size.split(",")[0].strip()
cols = matrix_size.split(",")[1].strip()
removeRows = None
removeCols = None
for find in args_list:
if find.get( "name" ) == rows:
removeRows = find
if find.get( "name" ) == cols:
removeCols = find
if removeRows != None and removeCols != None:
pass_through[ rows ] = "(if matrix_order == lapack_memory_order.row_major then " + arg.get("name") + ".domain.dim(1).size else " + arg.get("name") + ".domain.dim(2).size) : c_int"
pass_through[ cols ] = "(if matrix_order == lapack_memory_order.row_major then " + arg.get("name") + ".domain.dim(2).size else " + arg.get("name") + ".domain.dim(1).size) : c_int"
remove_list.add( removeRows.get("name") )
remove_list.add( removeCols.get("name") )
'''
else:
print ( rows + " and " + cols + " are not described in the arguments of "+proc.get( "name" ) )
'''
for arg in args_list:
if arg.get( "semantic" ) != "scalar" :
continue
if arg.get( "type" ) == "c_char":
pass_through[ arg.get("name") ] = arg.get( "name" ) + ".byte(1) : c_char"
associate_array_str = arg.get( "associate-array" )
associate_field_str = arg.get( "associate-field" )
if associate_array_str != None:
array_field_map = {}
arrays = associate_array_str.split(",")
fields = associate_field_str.split(",")
array = ""
field = ""
for i in range( len( arrays ) ) :
arrays[i] = arrays[i].lower()
fields[i] = fields[i].lower()
array_field_map[ arrays[i] ] = fields[i]
for associate_array in arrays:
if associate_array in args_names:
array = associate_array
field = fields[ arrays.index( array ) ]
break;
if field == "rows":
pass_through[ arg.get("name") ] = "(if matrix_order == lapack_memory_order.row_major then " + array + ".domain.dim(1).size else " + array + ".domain.dim(2).size) : c_int"
elif field == "columns":
pass_through[ arg.get("name") ] = "(if matrix_order == lapack_memory_order.row_major then " + array + ".domain.dim(2).size else " + array + ".domain.dim(1).size) : c_int"
elif field == "order" or field == "rank":
pass_through[ arg.get("name") ] = "(" + array + ".domain.dim(1).size) : c_int"
else:
raise GeneralPassFailure( field + " is not a recognized array association field" )
remove_list.add( arg.get("name") )
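# Illustrative pass-through (names assumed): a scalar n with associate-array="a" and
# associate-field="order" is dropped from the wrapper's argument list and passed
# through as "(a.domain.dim(1).size) : c_int".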
pass_through_node = SubElement( proc, "pass-through-arguments-list" )
for arg in args_node.findall( "./argument" ):
passing = copy.deepcopy( arg )
passing.text = pass_through[ arg.get( "name" ) ]
pass_through_node.append( passing )
for arg in args_node:
if arg.get("name") in remove_list:
arg.set( "pass-up", "false" )
else:
arg.set( "pass-up", "true" )
'''
for arg in args_node:
if arg.get( "name" ) == "matrix_order":
arg.text = "LAPACK_ROW_MAJOR"
'''
#proc.set( "category", "chapelerrific" )
#proc.set( "call", proc.get("name") )
#proc.set( "name", proc.get("name").replace( "LAPACKE_", "" ) )
#chapel_procedures.append( proc )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class BaseChapelizePass ( Pass )
Purpose:
Tie together all chapelization passes.
After this point, no more transformations on the
code should occur.
'''
class BaseChapelizePass ( Pass ):
dependencies = [ CreateChapelModuleTreePass, ChapelizeLAPACKEFunctionsPass, TranslateChapelKeywordsPass, ChapelerrificLAPACKEFunctionsPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = BaseChapelizePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ImportTypeDefinesPass ( Pass )
Purpose:
from input xml sets up tags that will be used
to generate typedefs in the module
'''
class ImportTypeDefinesPass ( Pass ):
dependencies = [CreateChapelModuleTreePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ImportTypeDefinesPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
pass_input = Pass.input_xml.find( "./pass/[@name='ImportTypeDefinesPass']" )
module_types = xml_tree.find( "./chapel-module/type-defines" )
for define in pass_input.findall( "./define" ):
module_types.append( define )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ImportExternConstDefinesPass ( Pass )
Purpose:
from input xml set up tags that will be used
to generate extern const definitions
'''
class ImportExternConstDefinesPass ( Pass ):
dependencies = [CreateChapelModuleTreePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ImportExternConstDefinesPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
pass_input = Pass.input_xml.find( "./pass/[@name='ImportExternConstDefinesPass']" )
module_defs = xml_tree.find( "./chapel-module/const-defines" )
for define in pass_input.findall( "./define" ):
module_defs.append( define )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ImportEnumeratedTypeDefinesPass ( Pass )
Purpose:
from input xml set up tags that will be used
to generate local enum definitions
'''
class ImportEnumeratedTypeDefinesPass ( Pass ):
dependencies = [CreateChapelModuleTreePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ImportEnumeratedTypeDefinesPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
pass_input = Pass.input_xml.find( "./pass/[@name='ImportEnumeratedTypeDefinesPass']" )
module_defs = xml_tree.find( "./chapel-module/enum-defines" )
for enumeration in pass_input.findall( "./enumeration" ):
module_defs.append( enumeration )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class BaseImportPass ( Pass )
Purpose:
Ties together all passes that import from input xml
'''
class BaseImportPass ( Pass ):
dependencies = [ImportTypeDefinesPass, ImportExternConstDefinesPass, ImportEnumeratedTypeDefinesPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = BaseImportPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class BaseCodegenReadyPass ( Pass )
Purpose:
Ties together all passes that must be completed before any code generation can be done
'''
class BaseCodegenReadyPass ( Pass ):
dependencies = [BaseImportPass, BaseChapelizePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = BaseCodegenReadyPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ChapelModuleExternProcPass ( Pass )
Purpose:
generate Chapel code at each procedure in the <chapel-module> tree
from the details of that procedure.
These are the raw, basic extern proc declarations of those functions.
'''
class ChapelModuleExternProcPass ( Pass ):
dependencies = [BaseChapelizePass, BaseImportPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ChapelModuleExternProcPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapacke_root = xml_tree.find( "./LAPACKE" )
lapacke_procs = lapacke_root.find( "./procedures" )
module_root = xml_tree.find( "./chapel-module" )
module_procs = module_root.find( "./procedures" )
proc_count = 0;
for proc in module_procs.findall( "./procedure" ):
proc_name = proc.get( "name" )
basename = proc_name.replace( "LAPACK_", "" ).replace( "LAPACKE_", "" ).upper()
lapack_node = xml_tree.find( "./LAPACK/procedures/procedure/[@name='" + basename + "']" )
purpose = "" #"For more information, see the documentation for :proc:`" + proc_name + "`, or consult the Netlibs or Intel documentation.\n"
''' #TODO get legal approval for Documentation inclusion.
if lapack_node == None or lapack_node.find( "./purpose" ) == None or lapack_node.find( "./purpose" ).text == None:
purpose = ""
else:
purpose = re.sub( r"[ \t]+", " ", lapack_node.find( "./purpose" ).text )
'''
proc_args = proc.findall( "./arguments-list/argument" )
ordered_args = [None] * len( proc_args )
for arg in proc_args:
ordered_args[ int( arg.get( "position" ) ) ] = arg;
def_code = SegmentProducer( "extern proc " + proc_name )
args_code = ListProducer( ", ", "(", ")" )
for arg in ordered_args:
args_code.append( SegmentProducer(
("" if arg.get("intent") == "none" else arg.get("intent") + " ") + \
arg.get("name") + " : " + \
("[] " if arg.get("semantic") == "array" else "") + \
arg.get("type")
)
)
return_code = LineProducer( " : " + proc.get( "return-type" ) + ";" )
#doc_comment = CommentProducer( "\nExternal Procedure to " + proc_name + "\n" + ("\nOriginal Fortran LAPACK documentation for " + basename + "::\n\n " + purpose + "\n\n" if purpose != "" else "") )
#doc_comment = CommentProducer( "\nExternal Procedure to " + proc_name + "\n" + purpose + "\n" )
code = SequenceOfProducers()
#code.append( doc_comment )
code.append( def_code )
code.append( args_code )
code.append( return_code )
code_node = SubElement( proc, "code" )
code_node.set( "category", "extern proc" )
code_node.text = code.generate()
proc_count += 1
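# Illustrative output (procedure and argument names assumed; actual types depend on
# the earlier substitution passes): the generated text is roughly of the form
#   extern proc LAPACKE_dgesv(matrix_order : c_int, n : c_int, nrhs : c_int,
#                             a : [] c_double, lda : c_int, ipiv : [] c_int,
#                             b : [] c_double, ldb : c_int) : c_int;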
print "Generated code for", proc_count, "functions"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ChapelModuleStringToCharWraperProcPass ( Pass )
Purpose:
Create string wrappers for all of the generated external procs from
ChapelModuleExternProcPass
'''
class ChapelModuleStringToCharWraperProcPass ( Pass ):
dependencies = [ChapelModuleExternProcPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ChapelModuleStringToCharWraperProcPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
module_root = xml_tree.find( "./chapel-module" )
module_procs = module_root.find( "./procedures" )
proc_count = 0
for proc in module_procs.findall( "./procedure" ):
proc_name = proc.get( "name" )
if proc_name.startswith( "LAPACK_" ):
continue
proc_args = proc.findall( "./arguments-list/argument" )
ordered_args = [None] * len( proc_args )
char_flag = False
for arg in proc_args:
ordered_args[ int( arg.get( "position" ) ) ] = arg;
char_flag = arg.get( "type" ) == "c_char" or char_flag
# skip procedures that dont have char arguments
if not char_flag:
continue
code = SequenceOfProducers()
code.append( SegmentProducer( "inline proc " + proc_name ) )
args_code = ListProducer( ", ", "(", ")" )
for arg in ordered_args:
args_code.append( SegmentProducer(
("" if arg.get("intent") == "none" else arg.get("intent") + " ") + \
arg.get("name") + " : " + \
("[] " if arg.get("semantic") == "array" else "") + \
( arg.get("type") if arg.get("type") != "c_char" else "string" )
)
)
code.append( args_code )
code.append( SegmentProducer( " : " + proc.get( "return-type" ) ) )
func_body = ScopeProducer()
call_args_producer = ListProducer( ", ", "(", ")" )
for pass_arg in ordered_args:
call_args_producer.append( SegmentProducer( ( pass_arg.get("name" ) if pass_arg.get("type") != "c_char" else pass_arg.get( "name" ) + ".byte(1) : c_char" ) ) )
func_body.append( SegmentProducer( ( "return " if proc.get("return-type") != "void" else "" ) + proc.get("name") ) + call_args_producer + LineProducer( ";" ) )
code.append( func_body )
#code.prepend( CommentProducer( "\nString wrapped procedure of " + proc_name + "\n" ) )
code_node = SubElement( proc, "code" )
code_node.set( "category", "string wrapped" )
code_node.text = code.generate()
proc_count += 1
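# Illustrative output (names assumed): for a proc with a c_char argument uplo, the
# wrapper is roughly
#   inline proc LAPACKE_dpotrf(matrix_order : c_int, uplo : string, n : c_int,
#                              a : [] c_double, lda : c_int) : c_int
#   { return LAPACKE_dpotrf(matrix_order, uplo.byte(1) : c_char, n, a, lda); }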
print "Generated code for", proc_count, "functions"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ChapelModuleChapelerrificProcPass ( Pass )
Purpose:
Generate code for Chapel-errific upward facing procedures
'''
class ChapelModuleChapelerrificProcPass ( Pass ):
dependencies = [ChapelModuleExternProcPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ChapelModuleChapelerrificProcPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
module_root = xml_tree.find( "./chapel-module" )
module_procs = module_root.find( "./procedures" )
pass_info = Pass.input_xml.find( "./pass/[@name='ChapelModuleChapelerrificProcPass']" )
helper_use = pass_info.find("./use").text
proc_count = 0
no_repeat = set()
iterative_functions = set()
for case in pass_info.findall("./cases/case" ):
iterative_functions.add( case.get("name") )
for proc in module_procs.findall( "./procedure" ):
if proc.find( "./pass-through-arguments-list" ) == None:
continue
base_name = proc.get("name").replace( "LAPACKE_", "" )
match = func_name_group_regex.search( base_name );
if match == None:
#print proc.get("name"), "(", base_name, ") does not match regex"
continue
func = match.group( "function" )
config = match.group( "config" )
type = match.group( "type" )
typeToTypeString = { "s" : "real(32)",
"d" : "real(64)",
"c" : "complex(64)",
"z" : "complex(128)",
"ds" : "real(64)",
"zc" : "complex(128)"
}
typeMap = {
"c_float" : "real(32)",
"c_double" : "real(64)",
"c_char" : "string"
}
if (type == "ds" or type == "zc") and not config+func in iterative_functions:
temp_type = type[0]
temp_config = type[1]+config[0]
temp_func = config[1] + func
type = temp_type
config = temp_config
func = temp_func
for name_category in [ (config+func, "untyped chapelerrific") ]: # (type+config+func, "chapelerrific")
[proc_name, category_name] = name_category
code = SequenceOfProducers()
purpose = ""
lapack_node = xml_tree.find( "./LAPACK/procedures/procedure/[@name='" + base_name.upper() + "']" )
purpose = "" #"For more information, see the documentation for :proc:`" + proc_name + "`, or consult the Netlibs or Intel documentation.\n"
''' #TODO get legal approval for Documentation inclusion.
if proc_name in no_repeat:
purpose = "For more information, see the documentation for :proc:`" + proc_name + "`, or consult the Netlibs or Intel documentation.\n"
elif lapack_node == None or lapack_node.find( "./purpose" ) == None or lapack_node.find( "./purpose" ).text == None:
purpose = ""
else:
purpose = ("Original Fortran LAPACK purpose documentation for " + base_name.upper() + "::\n\n " + re.sub( r"[ \t]+", " ", lapack_node.find( "./purpose" ).text ) + "\n\n" )
'''
proc_args = proc.findall( "./arguments-list/argument" )
ordered_args = [None] * len( proc_args )
for arg in proc_args:
ordered_args[ int( arg.get( "position" ) ) ] = arg;
code.append( SegmentProducer( "inline proc " + proc_name ) )
args_doc = str()
args_producer = ListProducer(", ", "(", ")")
for arg in ordered_args:
if arg.get("pass-up") == "true":
args_producer.append( SegmentProducer(
("" if arg.get("intent") == "none" else arg.get("intent") + " ") + \
arg.get("name") + " : " + \
("[] " if arg.get("semantic") == "array" else "") + \
( arg.get("type") if not arg.get("type") in typeMap else typeMap[ arg.get("type") ] ) + \
( " = " + arg.text if arg.text != None and arg.text.strip() != "" else "" )
)
)
if lapack_node == None or arg.get("name") == "matrix_order":
continue
#print "./arguments-list/argument/[@name='" + arg.get("name").upper() + "']"
lapack_arg_node = lapack_node.find( "./arguments-list/argument/[@name='" + arg.get("name").upper() + "']" )
if lapack_arg_node == None:
continue
#prettyprintxml( lapack_arg_node )
''' #TODO get legal approval for Documentation inclusion.
if (not proc_name in no_repeat) and lapack_arg_node.find( "./documentation" ) != None:
#arg_doc = " " + arg.get(arg.get("name").upper() + " : " + arg.get("type") + ( "" if arg.get("intent") == "none" else arg.get("intent").strip() ) + "\n"
text = re.sub( r"\n", "\n ", re.sub( r"[ \t]+", " ", lapack_node.find( "./arguments-list/argument/[@name='" + arg.get("name").upper() + "']/documentation" ).text ) )
arg_doc = " " + text + "\n\n"
if args_doc == "":
args_doc = "Original Fortran LAPACK argument documentation for " + base_name.upper() + "::\n\n"
args_doc += arg_doc
'''
#args_doc += "\n\n"
#code.prepend( CommentProducer( "\n" + ("Polymorphic " if category_name == "untyped chapelerrific" else "" ) + "Chapel idiomatic procedure of " + proc.get("name") + " for the type " + typeToTypeString[type] + ".\n\n" + purpose + args_doc ) )
code.prepend( CommentProducer( "\n" + "Wrapped procedure of " + proc.get("name") + " for the type " + typeToTypeString[type] + ".\n") )
code.append( args_producer )
code.append( SegmentProducer( ": " + proc.get( "return-type" ) ) )
func_body = ScopeProducer()
call_args_producer = ListProducer( ", ", "(", ")" )
for pass_arg in proc.findall( "./pass-through-arguments-list/argument" ):
call_args_producer.append( SegmentProducer( pass_arg.text ) )
func_body.append( SegmentProducer( ( "return " if proc.get("return-type") != "void" else "" ) + helper_use + "." + proc.get("name") ) + call_args_producer + LineProducer( ";" ) )
code.append( func_body )
code_node = SubElement( proc, "code" )
code_node.set( "category", category_name )
code_node.text = code.generate()
no_repeat.add( proc_name )
proc_count += 1
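# Illustrative output (procedure and argument names, and the matrix_order type, are
# assumptions here): for the "gesv" group the generated wrapper is roughly
#   inline proc gesv(matrix_order : lapack_memory_order, a : [] real(64),
#                    ipiv : [] c_int, b : [] real(64)) : c_int
# with n, nrhs, lda, and ldb computed from the array domains and forwarded to
# LAPACKE_dgesv via the pass-through argument list.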
print "Generated code for", proc_count, "functions"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ChapelModuleExternTypeDefinesPass ( Pass )
Purpose:
from the imported external type defines generate
external type code at each define tag
'''
class ChapelModuleExternTypeDefinesPass ( Pass ):
dependencies = [BaseChapelizePass, BaseImportPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ChapelModuleExternTypeDefinesPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
module_defs = xml_tree.find( "./chapel-module/type-defines" )
for define in module_defs.findall( "./define" ):
def_str = ("/*"+ define.find("./description").text + "*/\n" if define.find("./description") != None else "")
if define.get( "external" ) != None and define.get("external").lower() == "yes":
def_str += "extern "
def_str += "type " + define.get( "alias" ) + " "
if define.get( "base-type" ) != None:
def_str += "= " + define.get( "base-type" )
def_str += ";"
SubElement( define, "code" ).text = def_str
#prettyprintxml( module_defs )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ChapelModuleExternConstDefinesPass ( Pass ):
Purpose:
from the imported external const defines generate
external const code at each define tag
'''
class ChapelModuleExternConstDefinesPass ( Pass ):
dependencies = [BaseChapelizePass, BaseImportPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ChapelModuleExternConstDefinesPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
module_defs = xml_tree.find( "./chapel-module/const-defines" )
for define in module_defs.findall( "./define" ):
def_str = ("/*"+ define.find("./description").text + "*/\n" if define.find("./description") != None else "")
if define.get( "external" ) != None and define.get( "external" ).lower() == "yes":
def_str += "extern "
def_str += "const " + define.get( "symbol" ) + " : " + define.get( "type" ) + " "
if define.get( "value" ) != None:
def_str += " = " + define.get( "value" )
def_str += ";"
SubElement( define, "code" ).text = def_str
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ChapelModuleEnumDefinesPass ( Pass ):
Purpose:
from the imported enumeration defines generate
enum code at each enumeration tag
'''
class ChapelModuleEnumDefinesPass ( Pass ):
dependencies = [BaseChapelizePass, BaseImportPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ChapelModuleEnumDefinesPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
module_defs = xml_tree.find( "./chapel-module/enum-defines" )
for define in module_defs.findall( "./enumeration" ):
values = ListProducer(", ", "{", "}")
for value in define.findall( "./value" ):
values.append( SegmentProducer( value.get("name") + ( " = " + value.text if value.text != None and value.text.strip() != "" else "" ) ) )
description_node = define.find("./description")
if description_node != None:
SubElement( define, "code" ).text = CommentProducer( description_node.text ).generate() + ( SegmentProducer( "enum " + define.get("name") ) + values + LineProducer(";") ).generate()
else:
SubElement( define, "code" ).text = ( SegmentProducer( "enum " + define.get("name") ) + values + LineProducer(";") ).generate()
#prettyprintxml( module_defs )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class BaseCodeGenerationPass ( Pass )
Purpose:
Ties together all code generation passes before
any text dumping into a file.
'''
class BaseCodeGenerationPass ( Pass ):
dependencies = [ChapelModuleExternProcPass, ChapelModuleStringToCharWraperProcPass, ChapelModuleChapelerrificProcPass, ChapelModuleExternTypeDefinesPass, ChapelModuleExternConstDefinesPass, ChapelModuleEnumDefinesPass ]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = BaseCodeGenerationPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class DumpCodePass ( Pass )
Purpose:
traverses the <chapel-module> tree, collecting generated code text,
and gently places it into the file defined in the input xml
pass information
'''
class DumpCodePass ( Pass ):
dependencies = [BaseCodeGenerationPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = DumpCodePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
pass_input = Pass.input_xml.find( "./pass/[@name='DumpCodePass']" )
module_root = xml_tree.find( "./chapel-module" )
ChaLAPACK_info = pass_input.find( "./main-module" )
helper_info = pass_input.find( "./helper-module" )
module_name = ChaLAPACK_info.get( "name" )
module_file = open( ChaLAPACK_info.get( "file-name" ), "w" )
module_file.write( pass_input.find("copyright").text )
module_file.write( "/*\n" + ChaLAPACK_info.find("./description").text + "\n*/\n" )
module_file.write( "module " + module_name + " {\n" )
for use in ChaLAPACK_info.findall( "./use" ):
module_file.write( "use " + use.text + ";\n" )
module_file.write( "\n" )
# inject types, consts, enums
for defn in module_root.findall( "./type-defines/define" ):
module_file.write( defn.find("./code").text + "\n" )
module_file.write( "\n\n" )
for defn in module_root.findall( "./const-defines/define" ):
module_file.write( defn.find("./code").text + "\n" )
module_file.write( "\n\n" )
for defn in module_root.findall( "./enum-defines/enumeration" ):
module_file.write( defn.find("./code").text + "\n" )
module_file.write( "\n" )
# inject helper module
if helper_info.get("no-doc") == "all":
module_file.write( "pragma \"no doc\"\n" )
module_file.write( "/*\n" + helper_info.find( "./description" ).text + "\n*/\n" )
module_file.write( "module " + helper_info.get("name") + " {\n" )
for use in helper_info.findall( "./use" ):
module_file.write( "use " + use.text + ";\n" )
module_file.write( "\n" )
nodoc_helper_procs = helper_info.get("no-doc") == "internals" or helper_info.get("no-doc") == "procedures" or helper_info.get("no-doc") == "all"
for proc in module_root.findall( "./procedures/procedure" ):
code = proc.find( "./code/[@category='extern proc']")
if code != None:
if nodoc_helper_procs:
module_file.write( "pragma \"no doc\"\n" )
module_file.write( code.text + "\n" )
code = proc.find( "./code/[@category='string wrapped']")
if code != None:
if nodoc_helper_procs:
module_file.write( "pragma \"no doc\"\n" )
module_file.write( code.text + "\n" )
module_file.write( "} // " + helper_info.get("name") + "\n" )
for proc in module_root.findall( "./procedures/procedure" ):
code = proc.find( "./code/[@category='untyped chapelerrific']" )
if code != None:
module_file.write( code.text + "\n" )
module_file.write("} // " + module_name + "\n")
module_file.close()
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
Below are passes that were used to explore the LAPACK source, or passes that were removed from the main set.
EXTREME caution is advised if using them: they may not (and probably do not) work with the current set of main passes.
'''
class CountFunctions( Pass ):
dependencies = [BaseLAPACKPass, BaseLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = CountFunctions
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack = 0
for proc in xml_tree.findall( "./LAPACK/procedures/procedure" ):
lapack += 1
lapacke = 0
for proc in xml_tree.findall( "./LAPACKE/procedures/procedure" ):
lapacke += 1
print "LAPACK", lapack, "LAPACKE", lapacke
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class CreateAbstractLAPACKTreePass ( Pass )
Purpose:
Create Abstract-LAPACK tree
'''
class CreateAbstractLAPACKTreePass ( Pass ):
dependencies = [BaseChapelizePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = CreateAbstractLAPACKTreePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
abstract_lapack = SubElement( xml_tree, "Abstract-LAPACK" )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class BucketLAPACKFunctionGroups ( Pass )
Purpose:
bucket LAPACK functions by their base function, type, and matrix type
'''
class BucketLAPACKFunctionGroupsPass ( Pass ):
dependencies = [CreateAbstractLAPACKTreePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = BucketLAPACKFunctionGroupsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
abstract_lapack = xml_tree.find( "./Abstract-LAPACK" )
module_root = xml_tree.find( "./chapel-module" )
module_procs = module_root.find( "./procedures" )
funcs = set()
for proc in module_procs.findall( "./procedure" ):
proc_name = proc.get( "name" )
#print proc_name
if proc_name in funcs:
raise GeneralPassFailure( "DOUBLE HIT " + proc_name )
else:
funcs.add( proc_name )
# we only care about LAPACKE_ functions
if proc_name.startswith( "LAPACK_" ) or proc_name.endswith( "_work" ):
continue
base_name = proc_name.replace( "LAPACKE_", "" ) #.replace( "_work", "" )
match = func_name_group_regex.search( base_name );
if match == None:
print proc_name, "(", base_name, ") does not match regex"
continue
func = match.group( "function" )
config = match.group( "config" )
type = match.group( "type" )
if config != "ge":
continue
group_node = abstract_lapack.find( "./group/[@name='" + func + "']" )
if group_node == None:
group_node = SubElement( abstract_lapack, "group" )
group_node.set("name", func )
config_node = group_node.find( "./matrix-configuration/[@name='" + config + "']" )
if config_node == None:
config_node = SubElement( group_node, "matrix-configuration" )
config_node.set( "name", config )
if config_node.find( "./types/type/[@name='" + type + "']" ) != None:
print "Double declaration of abstract LAPACK function", type, config, func, base_name, proc_name
continue
#prettyprintxml( config_node.find( "./type/[@name='" + type + "']" ) )
types_node = SubElementUnique( config_node, "types" )
type_node = SubElement( types_node, "type" )
type_node.set( "name", type )
type_node.set( "analogue", "./chapel-module/procedures/procedure/[@name='" + proc_name + "']" )
#prettyprintxml( abstract_lapack )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class ImportAbstractLAPACKFunctionsPass ( Pass ):
dependencies = [BaseCodegenReadyPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ImportAbstractLAPACKFunctionsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
module_defs = xml_tree.find( "./chapel-module/procedures" )
group_input = Pass.input_xml.find( "./pass/[@name='ImportAbstractLAPACKFunctionsPass']" )
proc_count = 0
for group in group_input.findall( "./group" ):
for config in group.findall( "./matrix-configuration" ):
code = SequenceOfProducers()
proc_name = config.get( "name" ) + group.get( "name" )
code.append( SegmentProducer( "proc " + proc_name ) )
args_producer = ListProducer(", ", "(", ")")
for arg in config.findall( "./method-arguments/argument" ):
args_producer.append( SegmentProducer(
arg.get("intent") + " " + \
arg.get("name") + " : " + \
("[] " if arg.get("semantic") == "array" else "") + \
arg.get("type") + \
( " = " + arg.text if arg.text != None and arg.text.strip() != "" else "" )
)
)
code.append( args_producer )
code.append( LineProducer( ": lapack_int" ) )
code.append( SegmentProducer( "where " ) )
where_producer = ListProducer( " || ", "", "" )
for type in config.findall("./types/type"):
where_producer.append( SegmentProducer( "T == " + type.get( "type" ) ) )
code.append( where_producer )
info_var = config.get( "name" ) + group.get( "name" ) + "_return_info"
func_body = ScopeProducer()
func_body.append( LineProducer( "var " + info_var + " : lapack_int;" ) )
#if_bodies = SequenceOfProducers()
arg_relates = {}
ana_args = []
for arg in config.findall( "./analogue-arguments-list/argument" ):
arg_name = arg.get("name")
arg_relates[ arg_name ] = config.find( "./arguments-relationships/argument/[@name='" + arg_name + "']" )
ana_args.append( arg );
for type in config.findall("./types/type"):
chpl_ana = xml_tree.find( type.get( "analogue" ) )
if_condition = LineProducer( "if ( T == " + type.get("type") + " )" )
func_body.append( if_condition )
if_body = ScopeProducer()
call_equals = SegmentProducer( info_var + " = " + chpl_ana.get( "name" ) )
call_seq = ListProducer( ", ", "(", ")" )
for ana_arg in ana_args:
call_seq.append( SegmentProducer(
"(" + arg_relates[ana_arg.get("name")].text.strip() + ")" + \
(" : " + ana_arg.get("type") if ana_arg.get("semantic") != "array" else "")
)
)
if_body.append( call_equals + call_seq + LineProducer( ";" ) )
func_body.append( if_body )
func_body.append( LineProducer( "return " + info_var + ";" ) )
code.append( func_body )
module_proc = SubElement( module_defs, "procedure" )
module_proc.set( "name", proc_name )
code_node = SubElement( module_proc, "code" )
code_node.set( "category", "upward facing" )
code_node.text = code.generate()
proc_count += 1
print "Generated", proc_count, "procedures"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class CommonArgumentCollectionPass ( Pass )
Purpose:
collect common arguments into the function groups
'''
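# Sketch of the intended result, inferred from the code below: arguments shared by every
# typed variant of a group/configuration are hoisted into an <analogue-arguments-list>
# on the matrix-configuration node (with types that differ across variants generalized to
# "?T"), while arguments that only some variants take are pushed down into per-type
# <arguments-list> nodes.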
class CommonArgumentCollectionPass ( Pass ):
dependencies = [BucketLAPACKFunctionGroupsPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = CommonArgumentCollectionPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
abstract_lapack = xml_tree.find( "./Abstract-LAPACK" )
for group_node in abstract_lapack.findall( "./group" ):
for config_node in group_node.findall( "./matrix-configuration" ):
args_tree = ET.Element( "analogue-arguments-list" )
'''
prefix_type_map = {
"s" : "float",
"d" : "double",
"c" : "complex_float",
"z" : "complex_double",
"float" : "s",
"double" : "d",
"complex_float" : "c",
"complex_double" : "z"
}
'''
all_args = {} # dictionary to set [arg_name]=>set( function_names )
all_funcs = set() # set of all functions encountered
func_args_type = {} # {func_name} => {arg_name} => type_name
for type_func in config_node.findall( "./types/type" ):
type_name = type_func.get( "name" );
all_funcs.add( type_name );
func_args_type[ type_name ] = {};
chapel_func = xml_tree.find( type_func.get( "analogue" ) )
if chapel_func == None:
raise GeneralPassFailure( type_name + config_node.get( "name" ) + group_node.get( "name" ) + " does not have chapel analogue" )
for arg in chapel_func.findall( "./arguments-list/argument" ):
func_args_type[ type_name ][ arg.get("name") ] = arg.get("type")
find = args_tree.find( "./argument/[@name='" + arg.get( "name" ) + "']" )
if find == None:
args_tree.append( arg )
elif arg.get( "type" ) != find.get( "type" ):
find.set( "type", "?T" )
abstract_arg = ET.Element( "argument" )
arg_name = arg.get( "name" )
if not arg_name in all_args:
all_args[arg_name] = set()
all_args[arg_name].add(type_name)
for arg_name in all_args:
if all_args[ arg_name ] != all_funcs:
arg = args_tree.find( "./argument/[@name='" + arg_name + "']" )
args_tree.remove( arg )
for type_func_name in all_args[ arg_name ]:
#print "find", type_func_name
#prettyprintxml( config_node )
type_func = config_node.find( "./types/type/[@name='" + type_func_name + "']" )
args_list = SubElementUnique( type_func, "arguments-list" )
args_list.append( arg )
config_node.append( args_tree )
#prettyprintxml( abstract_lapack.find( "./group/[@name='sv']" ) )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class BaseAbstractLAPACKPass ( Pass )
Purpose:
Ties together all passes that populate the Abstract-LAPACK tree
for upward-facing LAPACK chapel procedures
'''
class BaseAbstractLAPACKPass ( Pass ):
dependencies = [BucketLAPACKFunctionGroupsPass, CommonArgumentCollectionPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = BaseAbstractLAPACKPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class DropAttemptedAssociations ( Pass ):
dependencies = [BaseChapelizePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = DropAttemptedAssociations
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
output_xml = ET.Element( "pass" );
output_xml.set( "name", "DropAttemptedAssociations" )
output_procs = SubElement( output_xml, "procedures" );
for chpl_proc in xml_tree.findall( "./chapel-module/procedures/procedure" ):
proc_name = chpl_proc.get( "name" )
if proc_name.startswith( "LAPACK_" ) or proc_name.endswith( "_work" ):
continue
base_name = proc_name.replace( "LAPACKE_", "" )
match = func_name_group_regex.search( base_name );
if match == None:
print proc_name, "(", base_name, ") does not match regex"
continue
func = match.group( "function" )
config = match.group( "config" )
type = match.group( "type" )
if not config.startswith( "ge" ):
continue
proc = copy.deepcopy( chpl_proc )
print proc.get( "name" )
#prettyprintxml( proc )
args_node = proc.find( "./arguments-list" )
args_list = [ ]
args_names = []
remove_list = set()
pass_through = {}
for arg in args_node.findall( "./argument" ):
args_list.append( arg )
args_names.append( arg.get("name") )
pass_through[ arg.get("name") ] = arg.get( "name" )
for arg in args_list:
if arg.get( "semantic" ) != "array" :
continue
if arg.get( "dimensions" ) != None:
dimensions = arg.get( "dimensions" ).lower().split(",")
for i in range( len(dimensions) ):
dimension = dimensions[i]
if dimension == "*":
continue
removeVar = None
for find in args_list:
if find.get( "name" ) == dimension:
removeVar = find
break
if removeVar != None:
remove_list.add( removeVar )
pass_through[ dimension ] = arg.get("name") + ".domain.dim("+str(i+1)+").size"
else:
print ( dimension + " is not described in the arguments of "+proc.get( "name" ) )
if arg.get( "matrix-size" ) != None:
matrix_size = arg.get( "matrix-size" ).lower()
rows = matrix_size.split(",")[0].strip()
cols = matrix_size.split(",")[1].strip()
removeRows = None
removeCols = None
for find in args_list:
if find.get( "name" ) == rows:
removeRows = find
if find.get( "name" ) == cols:
removeCols = find
if removeRows != None and removeCols != None:
pass_through[ rows ] = "if matrix_order == LAPACK_ROW_MAJOR then " + arg.get("name") + ".domain.dim(1).size else " + arg.get("name") + ".domain.dim(2).size "
pass_through[ cols ] = "if matrix_order == LAPACK_ROW_MAJOR then " + arg.get("name") + ".domain.dim(2).size else " + arg.get("name") + ".domain.dim(1).size "
remove_list.add( removeRows )
remove_list.add( removeCols )
else:
print ( rows + " and " + cols + " are not described in the arguments of "+proc.get( "name" ) )
for arg in args_list:
if arg.get( "semantic" ) != "scalar" :
continue
if arg.get( "type" ) == "c_char":
arg.set( "type", "string" )
pass_through[ arg.get("name") ] = arg.get( "name" ) + ".byte(1) : c_char"
associate_array_str = arg.get( "associate-array" )
associate_field_str = arg.get( "associate-field" )
if associate_array_str != None:
array_field_map = {}
arrays = associate_array_str.split(",")
fields = associate_field_str.split(",")
array = ""
field = ""
for i in range( len( arrays ) ) :
arrays[i] = arrays[i].lower()
fields[i] = fields[i].lower()
array_field_map[ arrays[i] ] = fields[i]
for associate_array in arrays:
if associate_array in args_names:
array = associate_array
field = fields[ arrays.index( array ) ]
break;
if field == "rows":
pass_through[ arg.get("name") ] = "if matrix_order == LAPACK_ROW_MAJOR then " + array + ".domain.dim(1).size else " + array + ".domain.dim(2).size "
elif field == "columns":
pass_through[ arg.get("name") ] = "if matrix_order == LAPACK_ROW_MAJOR then " + array + ".domain.dim(2).size else " + array + ".domain.dim(1).size "
elif field == "order" or field == "rank":
pass_through[ arg.get("name") ] = array + ".domain.dim(1).size"
else:
raise GeneralPassFailure( field + " is not a recognized array association field" )
remove_list.add( arg )
pass_through_node = SubElement( proc, "pass-through" )
for arg in args_node.findall( "./argument" ):
passing = copy.deepcopy( arg )
passing.text = pass_through[ arg.get( "name" ) ]
pass_through_node.append( passing )
for rm_arg in remove_list:
args_node.remove( args_node.find( "./argument/[@name='" + rm_arg.get( "name" ) + "']" ) )
count = 0
for arg in args_node:
arg.set("position", str( count ) )
count += 1
if arg.get( "name" ) == "matrix_order":
arg.text = "LAPACK_ROW_MAJOR"
#print pass_through
#prettyprintxml( proc )
#print pass_through, "\n", "==="*20, "\n"
output_procs.append( proc )
prettywritexml( output_xml, "DropAttemptedAssociations_output.xml" )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class AbstractDropAttemptedAssociations ( Pass ):
dependencies = [BaseAbstractLAPACKPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = AbstractDropAttemptedAssociations
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
abstract_lapack = xml_tree.find( "./Abstract-LAPACK" )
for group_node in abstract_lapack.findall( "./group" ):
for config_node in group_node.findall( "./matrix-configuration" ):
if config_node.findall( "./types/type/arguments-list" ) != [] :
print config_node.get("name") + group_node.get("name"), " has typed functions with non common arguments. Skipping."
continue
full_func_name = config_node.get("name") + group_node.get("name")
all_args = []
array_args = set()
method_args = []
pass_through = {} # str => str
removed = {} # str => bool
for arg in config_node.findall( "./analogue-arguments-list/argument" ):
all_args.append( arg.get("name") )
if arg.get( "semantic" ) == "array" :
array_args.add( arg.get("name" ) )
removed[ arg.get("name") ] = False
method_args.append( arg )
for arg in config_node.findall( "./analogue-arguments-list/argument" ):
if removed[ arg.get("name") ] or arg.get( "semantic" ) != "array":
continue
pass_through[ arg.get("name") ] = arg.get( "name" )
if arg.get( "dimensions" ) != None:
dimensions = arg.get( "dimensions" ).split(",")
for i in range( len(dimensions) ):
dimension = dimensions[i].lower()
if dimension == "*":
continue
pass_through[ dimension ] = arg.get("name") + ".domain.dim("+str(i+1)+").size"
removed[ dimension ] = True
matrix_size = arg.get( "matrix-size" )
if matrix_size != None:
rows = matrix_size.split(",")[0].strip().lower()
cols = matrix_size.split(",")[1].strip().lower()
pass_through[ rows ] = "if matrix_order == LAPACK_ROW_MAJOR then " + arg.get("name") + ".domain.dim(1).size else " + arg.get("name") + ".domain.dim(2).size "
pass_through[ cols ] = "if matrix_order == LAPACK_ROW_MAJOR then " + arg.get("name") + ".domain.dim(2).size else " + arg.get("name") + ".domain.dim(1).size "
removed[ rows ] = True
removed[ cols ] = True
for arg in config_node.findall( "./analogue-arguments-list/argument" ):
if removed[ arg.get("name") ] or arg.get( "semantic" ) != "scalar":
continue
pass_through[ arg.get("name") ] = arg.get("name")
for rm in removed:
if not removed[rm]:
continue
for i in range( len( method_args ) ):
if method_args[i].get("name") == rm:
method_args.remove( method_args[i] )
break;
interface_node = SubElement( config_node, "method-arguments" )
for arg in method_args :
argument = SubElement( interface_node, "argument" )
argument.set( "name", arg.get("name") )
argument.set( "intent" , arg.get("intent") )
argument.set( "semantic", arg.get("semantic") )
argument.set( "type", arg.get("type") )
argument.text = " " if arg.get("name") != "matrix_order" else "LAPACK_ROW_MAJOR"
pass_through_node = SubElement( config_node, "arguments-relationships" )
for arg in config_node.findall( "./analogue-arguments-list/argument" ):
arg_name = arg.get( "name" )
arg_relate = SubElement( pass_through_node, "argument" )
arg_relate.set( "name", arg_name )
arg_relate.text = pass_through[arg_name]
prettywritexml( abstract_lapack, "AbstractDropAttemptedAssociations_output.xml" )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FindNeedsArgsDocPatchPass ( Pass )
Purpose:
Was used to find fortran files with incorrect argument documentation
( \param[intent] blah blah )
'''
class FindNeedsArgsDocPatchPass ( Pass ):
dependencies = [BaseLAPACKPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindNeedsArgsDocPatchPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_root_procs = xml_tree.find( "./LAPACK/procedures" )
patch = []
for proc in lapack_root_procs.findall( "./procedure" ):
printed = False
#prettyprintxml( proc )
for arg in proc.findall( "./arguments-list/argument" ):
if arg.find( "./documentation" ) == None:
if not printed:
print proc.get( "name" ), proc.get( "file-name" )
printed = True
print arg.get( "name" ), "MISSING"
patch.append( (proc.get("name"), proc.get("file-name"), arg.get("name") ) )
print patch
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FindNeedsFuncArgsTypePatchPass ( Pass )
Purpose:
Was used to find fortran files with incorrect argument type documentation
( ie ..Scalar Arguments.. blah blah )
'''
class FindNeedsFuncArgsTypePatchPass ( Pass ):
dependencies = [LAPACKFunctionDefinePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindNeedsFuncArgsTypePatchPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_node = xml_tree.find( "./LAPACK" )
text_node = lapack_node.find( "./text" )
procs_node = lapack_node.find( "./procedures" )
none_scalar = []
none_array = []
for proc_node in procs_node.findall( "./procedure" ):
proc_file_name = proc_node.get( "file-name" )
doc_node = text_node.find( "./file/[@name='" + proc_file_name + "']/documentation" )
scalars = doc_scalarargs_regex.search( doc_node.text )
arrays = doc_arrayargs_regex.search( doc_node.text )
if scalars == None:
none_scalar.append( (proc_node.get( "name"), proc_file_name) )
if arrays == None:
none_array.append( (proc_node.get( "name"), proc_file_name) )
print "none_scalars", none_scalar,"\n\nnone_arrays", none_array
print "="*100
for i in none_scalar:
sys.stdout.write( i[1] + "," )
print "\n"*2
for i in none_array:
sys.stdout.write( i[1] + "," )
print "\n"*2
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FindAllLAPACKETypesPass ( Pass )
Purpose:
was used to collect all the types named in LAPACKE.h
to put into input xml type defines etc.
'''
class FindAllLAPACKETypesPass ( Pass ):
dependencies = [BaseLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindAllLAPACKETypesPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapacke_procs_root = xml_tree.find( "./LAPACKE/procedures" )
types = set()
for proc in lapacke_procs_root.findall( "./procedure" ):
types.add( proc.get( "return-type" ) )
for arg in proc.findall( "./arguments-list/argument" ):
types.add( arg.get( "type" ) )
print types
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FindScalarOutIntentsPass ( Pass )
Purpose:
Find scalars in the fortran code with 'out' intents that are
also not INFO.
Explore whether there are LAPACKE scalars that are out intents
'''
class FindScalarOutIntentsPass ( Pass ):
dependencies = [BaseLAPACKPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindScalarOutIntentsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_procs_root = xml_tree.find( "./LAPACK/procedures" )
print lapack_procs_root
outs = []
for proc in lapack_procs_root.findall( "./procedure" ):
for arg in proc.findall( "./arguments-list/argument" ):
if arg.get( "semantic" ) == "scalar" \
and "out" in arg.get( "intent" ) \
and arg.get("name").lower() != "info":
outs.append( (proc.get( "name" ), arg.get( "name" ), proc.get( "file-name" ) ) )
print outs
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FindIntentSetPass ( Pass )
Purpose:
find the set of all intents that exist in the LAPACKE fold
'''
class FindIntentSetPass ( Pass ):
dependencies = [FoldLAPACKSemanticsIntentsToLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindIntentSetPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_procs_root = xml_tree.find( "./LAPACKE/procedures" )
intents = set()
for proc in lapack_procs_root.findall( "./procedure" ):
#print proc.get( "name" )
#prettyprintxml( proc )
for arg in proc.findall( "./arguments-list/argument" ):
#print arg.get( "name" )
if arg.get( "intent" ) != None:
intents.add( arg.get( "intent" ) )
print intents
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FindIntentSemanticsRefsSetPass ( Pass )
Purpose:
find the set of all (intent, semantic, refdepth) combinations that exist in the LAPACKE fold
'''
class FindIntentSemanticsRefsSetPass ( Pass ):
dependencies = [FoldLAPACKSemanticsIntentsToLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindIntentSemanticsRefsSetPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_procs_root = xml_tree.find( "./LAPACKE/procedures" )
combos = {}
for proc in lapack_procs_root.findall( "./procedure" ):
for arg in proc.findall( "./arguments-list/argument" ):
intent = arg.get( "intent" )
semantic = arg.get( "semantic" )
refdepth = arg.get( "refdepth" )
combos[ (intent, semantic, refdepth ) ] = (proc, arg)
for key in combos:
print key, "(", combos[ key ][0].get( "name" ), combos[key][1].get( "name" ), ")"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FindBadIntentSemanticsCodePass ( Pass ):
Purpose:
find fortran code where intent or semantic are None
'''
class FindBadIntentSemanticsCodePass ( Pass ):
dependencies = [FoldLAPACKSemanticsIntentsToLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindBadIntentSemanticsCodePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_procs_root = xml_tree.find( "./LAPACKE/procedures" )
list = {}
for proc in lapack_procs_root.findall( "./procedure" ):
proc_name = proc.get("name")
for arg in proc.findall( "./arguments-list/argument" ):
arg_name = arg.get( "name" )
intent = arg.get( "intent" )
semantic = arg.get( "semantic" )
if arg_name != "matrix_order" and ( (intent == None) ^ (semantic == None) ):
if not proc_name in list:
list[proc_name] = []
list[proc_name].append( (arg_name, intent, semantic, proc) )
files_str = str()
for key in list:
proc = list[key][1][3]
analogue_txt = proc.find( "./analogues/analogue" ).text
analogue = xml_tree.find( analogue_txt )
files_str += analogue.get( "file-name" ) + ","
print key, analogue_txt, analogue.get( "file-name" )
#prettyprintxml( proc )
#prettyprintxml( analogue )
for elem in list[key]:
print "\t",elem
print ""
print files_str
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FindPassByRefNonAnalouges ( Pass )
Purpose:
find all C lapack procedures, and pass-by-reference arguments, with no fortran analogues
'''
class FindPassByRefNonAnalouges ( Pass ):
dependencies = [BaseAssociatePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindPassByRefNonAnalouges
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_procs_root = xml_tree.find( "./LAPACKE/procedures" )
list = []
for proc in lapack_procs_root.findall( "./procedure" ):
proc_name = proc.get( "name" )
if proc.find( "./analogues" ) == None:
#list.add( proc.get( "name" ) )
print "Function", proc_name, "has no fortran analogue"
continue
printed = False
for arg in proc.findall( "./arguments-list/argument" ):
if arg.find( "./analogues" ) == None and \
int(arg.get( "refdepth" )) > 0 :
if not printed:
printed = True
print "In function", proc_name, ":"
print "\tArgument", arg.get( "name" ), "of refdepth", int(arg.get( "refdepth" )), "has no fortran analogue"
if printed:
print ""
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FindFortranNoTypes ( Pass )
Purpose:
find any fortran arguments with no associated type
'''
class FindFortranNoTypes ( Pass ):
dependencies = [BaseLAPACKPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindFortranNoTypes
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_f_procs = xml_tree.find( "./LAPACK/procedures" )
print lapack_f_procs
for f_proc in lapack_f_procs.findall( "./procedure" ):
for f_arg in f_proc.findall( "./arguments-list/argument" ):
if f_arg.get( "type" ) == None:
print f_proc.get( "name" ), f_proc.get( "file-name" ), f_arg.get( "name" )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class BucketArgumentsSemanticsPass ( Pass )
Purpose:
Find and bucket arguments by semantic
'''
class BucketArgumentsSemanticsPass ( Pass ):
dependencies = [FoldLAPACKSemanticsIntentsToLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = BucketArgumentsSemanticsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_f_procs = xml_tree.find( "./LAPACKE/procedures" )
variables = {}
for proc in lapack_f_procs.findall( "./procedure" ):
proc_name = proc.get( "name" )
for arg in proc.findall( "./arguments-list/argument" ):
arg_name = arg.get( "name" )
semantic = arg.get( "semantic" ) if arg.get( "semantic" ) != None else "none"
if not arg_name in variables:
variables[ arg_name ] = {}
if not semantic in variables[ arg_name ]:
variables[ arg_name ][ semantic ] = []
variables[ arg_name ][ semantic ].append( proc_name )
for arg in variables:
if len( variables[ arg ] ) > 2:
print arg
for semantic in variables[ arg ]:
print " \"" + semantic + "\"", ":", len( variables[ arg ][ semantic ] )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class BucketFortranTypes ( Pass )
Purpose:
find all fortran types
'''
class BucketFortranTypes ( Pass ):
dependencies = [DestroyUnassociatedFortranFunctionsTreePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = BucketFortranTypes
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_f_procs = xml_tree.find( "./LAPACK/procedures" )
lapack_c_procs = xml_tree.find( "./LAPACKE/procedures" )
f_types = set()
c_types = set()
for f_proc in lapack_f_procs.findall( "./procedure" ):
#if f_proc.get( "return-type" ) != None:
# f_types.add( f_proc.get( "return-type" ) )
for f_arg in f_proc.findall( "./arguments-list/argument" ):
if f_arg.get( "type" ) != None:
f_types.add( f_arg.get( "type" ) )
else:
f_types.add( "~BAD. None for type~" )
for c_proc in lapack_c_procs.findall( "./procedure" ):
#if c_proc.get( "return-type" ) != None:
# c_types.add( c_proc.get( "return-type" ) )
for c_arg in c_proc.findall( "./arguments-list/argument" ):
if c_arg.get( "type" ) != None:
c_types.add( c_arg.get( "type" ) )
else:
c_types.add( "~BAD. None for type~" )
print "C types", c_types
print "Fortran types", f_types,"\n"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FindAFortranType ( Pass )
Purpose:
find a fortran type
'''
class FindAFortranType ( Pass ):
dependencies = [ BaseAssociatePass ]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindAFortranType
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_f_procs = xml_tree.find( "./LAPACK/procedures" )
#lapack_c_procs = xml_tree.find( "./LAPACKE/procedures" )
f_types = set()
#c_types = set()
find = "RECURSIVE"
for f_proc in lapack_f_procs.findall( "./procedure" ):
if f_proc.get( "return-type" ) == find:
print f_proc.get( "name" )
#return
for f_arg in f_proc.findall( "./arguments-list/argument" ):
if f_arg.get( "type" ) == find:
print f_proc.get( "name" ), f_arg.get( "name" )
#return
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FindUnmatchedArgsPass ( Pass )
Purpose:
Find unresolved matches arising from misnames
'''
class FindUnmatchedArgsPass ( Pass ):
dependencies = [BaseAssociatePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindUnmatchedArgsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_c_root = xml_tree.find( "./LAPACKE" )
lapack_c_procs = lapack_c_root.find( "./procedures" )
lapack_f_root = xml_tree.find( "./LAPACK" )
lapack_f_procs = lapack_f_root.find( "./procedures" )
for c_proc in lapack_c_procs.findall( "./procedure" ):
# Find the fortran analogue.
# Note: there should only be one path from C -> Fortran.
# though there may be many paths from Fortran -> C
f_proc_ana = c_proc.find( "./analogues/analogue" )
if f_proc_ana == None:
continue
f_proc = xml_tree.find( f_proc_ana.text )
c_no_match = []
f_no_match = []
for c_arg in c_proc.findall( "./arguments-list/argument" ):
#print c_arg.get( "name" )
if c_arg.find( "./analogues/" ) == None \
and c_arg.get( "name" ) != "matrix_order" :
#print "has none"
c_no_match.append( c_arg )
#prettyprintxml( c_arg )
for f_arg in f_proc.findall( "./arguments-list/argument" ):
f_ana_node = f_arg.find( "./analogues" )
# if zero analogues add no_match
if f_ana_node == None:
f_no_match.append( f_arg )
continue
# if no analogues to this function add no_match
if f_ana_node.find( "./analogue/[@function='" + c_proc.get( "name" ) + "']" ) == None \
and f_arg.get( "name" ) != "INFO":
f_no_match.append( f_arg )
if c_no_match == []: continue
print c_proc.get( "name" ), ":", f_proc.get( "name" )
print "+",c_proc.get( "name" )
for m in c_no_match:
#prettyprintxml( m )
print "\t-", m.get( "name" )
print "+",f_proc.get( "name" )
for m in f_no_match:
#prettyprintxml( m )
print "\t-", m.get( "name" )
print "\n"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FindNoneLAPACKESementicsPass ( Pass )
Purpose:
Find LAPACKE arguments of procedures with no semantics;
this arises when the function or the arguments do not
have analogues, or they have not been imported
'''
class FindNoneLAPACKESementicsPass ( Pass ):
dependencies = [BaseTransformLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindNoneLAPACKESementicsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_c_root = xml_tree.find( "./LAPACKE" )
lapack_c_procs = lapack_c_root.find( "./procedures" )
for c_proc in lapack_c_procs.findall( "./procedure" ):
#if c_proc.find( "./analogues/analogue" ) == None: continue
printed = False
for c_arg in c_proc.findall( "./arguments-list/argument" ):
if c_arg.get( "semantic" ) == None:
if not printed:
print c_proc.get( "name" )
printed = True
print "Missing sementic on", c_arg.get( "name" )
if printed:
print ""
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class FindHasNoFortranAnaloguePass ( Pass ):
dependencies = [BaseAssociatePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindHasNoFortranAnaloguePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_c_root = xml_tree.find( "./LAPACKE" )
lapack_c_procs = lapack_c_root.find( "./procedures" )
list = set()
for c_proc in lapack_c_procs.findall( "./procedure" ):
if c_proc.find( "./analogues/analogue" ) == None:
#list.add( c_proc.get("name").replace( "LAPACKE_", "" ).replace( "LAPACK_", "" ).replace( "_work", "" ) )
print c_proc.get( "name" )
for i in list:
print i
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class DropFileOfCTreeUnmatchedArgsPass ( Pass )
Purpose:
export an xml file containing most of the skeleton of an xml tree that could
be installed into the input xml for the ImportArgumentAnaloguesPass
'''
class DropFileOfCTreeUnmatchedArgsPass ( Pass ):
dependencies = [BaseAssociatePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = DropFileOfCTreeUnmatchedArgsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_c_root = xml_tree.find( "./LAPACKE" )
lapack_c_procs = lapack_c_root.find( "./procedures" )
output_tree = ET.Element( "pass-output" )
pass_output = SubElement( output_tree, "pass" )
pass_output.set( "name", "DropFileOfCTreeUnmatchedArgsPass" )
for c_proc in lapack_c_procs.findall( "./procedure" ):
# Find the fortran analogue.
# Note: there should only be one path from C -> Fortran.
# though there may be many paths from Fortran -> C
f_proc_ana = c_proc.find( "./analogues/analogue" )
if f_proc_ana == None:
continue
f_proc = xml_tree.find( f_proc_ana.text )
c_no_match = []
f_no_match = []
for c_arg in c_proc.findall( "./arguments-list/argument" ):
if c_arg.find( "./analogues/" ) == None \
and c_arg.get( "name" ) != "matrix_order" :
c_no_match.append( c_arg )
for f_arg in f_proc.findall( "./arguments-list/argument" ):
f_ana_node = f_arg.find( "./analogues" )
# if zero analogues add no_match
if f_ana_node == None:
f_no_match.append( f_arg )
continue
# if no analogues to this function add no_match
if f_ana_node.find( "./analogue/[@function='" + c_proc.get( "name" ) + "']" ) == None \
and f_arg.get( "name" ) != "INFO":
f_no_match.append( f_arg )
if c_no_match == []: #and f_no_match == []:
continue
proc_node = SubElement( pass_output, "procedure" )
proc_node.set("name", c_proc.get( "name" ) )
proc_node.set( "analogue-path", f_proc_ana.text )
for c_arg in c_no_match:
arg_node = SubElement( proc_node, "argument" )
arg_node.set( "name", c_arg.get("name") )
possible = SubElement( arg_node, "possible_substitutions" )
for f_arg in f_no_match :
f_arg_node = SubElement( possible, "option" )
f_arg_node.set( "name", f_arg.get( "name" ) )
f_arg_node.set( "semantic", f_arg.get( "semantic" ) )
f_arg_node.set( "intent", f_arg.get( "intent" ) )
f_arg_node.set( "type", f_arg.get( "type" ) )
f_arg_node.set( "substitution", f_arg.get( "name" ) )
prettywritexml( output_tree, "DropFileOfCTreeUnmatchedArgsPass_output.xml" )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class DropFileOfCTreeUnmatchedArgsWithSuggestionsPass ( Pass )
Purpose:
export an xml file containing most of the skeleton of an xml tree that could
be installed into the input xml for the ImportArgumentAnaloguesPass,
and also include suggestions based on name-score and type-union heuristics
that were used as an attempt to solve the issue automatically but were
found to match too aggressively
'''
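# The two heuristics referred to above, as implemented below:
# 1. unique type union: if exactly one unmatched C argument and one unmatched fortran
#    argument share a (mapped) type, pair them.
# 2. name score: otherwise repeatedly pair the remaining arguments whose names have the
#    smallest string distance (score_string, defined elsewhere), rejecting ties and any
#    score worse than 2.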
class DropFileOfCTreeUnmatchedArgsWithSuggestionsPass ( Pass ):
dependencies = [BaseAssociatePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = DropFileOfCTreeUnmatchedArgsWithSuggestionsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
output_tree = ET.Element( "pass-output" )
pass_output = SubElement( output_tree, "pass" )
pass_output.set( "name", "DropFileOfCTreeUnmatchedArgsPass" )
lapack_c_root = xml_tree.find( "./LAPACKE" )
lapack_c_procs = lapack_c_root.find( "./procedures" )
lapack_f_root = xml_tree.find( "./LAPACK" )
lapack_f_procs = lapack_f_root.find( "./procedures" )
for c_proc in lapack_c_procs.findall( "./procedure" ):
# Find the fortran analogue.
# Note: there should only be one path from C -> Fortran.
# though there may be many paths from Fortran -> C
f_proc_ana = c_proc.find( "./analogues/analogue" )
if f_proc_ana == None:
continue
f_proc = xml_tree.find( f_proc_ana.text )
c_no_match = []
f_no_match = []
for c_arg in c_proc.findall( "./arguments-list/argument" ):
if c_arg.find( "./analogues/" ) == None \
and c_arg.get( "name" ) != "matrix_order" :
c_no_match.append( c_arg )
for f_arg in f_proc.findall( "./arguments-list/argument" ):
f_ana_node = f_arg.find( "./analogues" )
# if zero analogues add no_match
if f_ana_node == None:
f_no_match.append( f_arg )
continue
# if no analogues to this function add no_match
if f_ana_node.find( "./analogue/[@function='" + c_proc.get( "name" ) + "']" ) == None \
and f_arg.get( "name" ) != "INFO" :
f_no_match.append( f_arg )
if c_no_match == [] :
continue
proc_node = SubElement( output_tree, "procedure" )
proc_node.set( "name", c_proc.get( "name" ) )
proc_node.set( "path", "./LAPACKE/procedures/procedure/[@name='" + c_proc.get( "name" ) + "']")
for c_arg in c_no_match:
arg_node = SubElement( proc_node, "argument" )
arg_info = SubElement( arg_node, "argument-info" )
arg_node.set( "name", c_arg.get( "name" ) )
arg_node.set( "substitution", "????" )
arg_node.set( "substitution-path", "????")
arg_info.set( "path", proc_node.get( "path" ) + "/arguments-list/argument/[@name='" + c_arg.get( "name" ) + "']" )
arg_info.set( "type", c_arg.get( "type" ) )
arg_info.set( "refdepth", c_arg.get("refdepth") )
if f_no_match != None:
possibles = SubElement( arg_node, "possible-analogues" )
for f_arg in f_no_match:
possible = SubElement( possibles, "possible" )
possible.set( "name", f_arg.get( "name" ) )
possible.set( "path", "./LAPACK/procedures/procedure/[@name='" + f_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + f_arg.get( "name" ) + "']" )
possible.set( "semantic", f_arg.get( "semantic" ) )
possible.set( "intent", f_arg.get( "intent" ) )
possible.set( "type", f_arg.get( "type" ) )
print c_proc.get( "name" ), f_proc.get( "name" )
type_map = {
"LOGICAL" : "booelan",
"lapack_int" : "int32",
"INTEGER" : "int32",
"lapack_complex_double" : "complex128",
"lapack_complex_float" : "complex64",
"COMPLEX*16" : "complex128",
"COMPLEX" : "complex64",
"DOUBLE" : "real32",
"DOUBLE PRECISION" : "real64",
"REAL" : "real32",
"SINGLE PRECISION" : "real32",
"double" : "real64",
"float" : "real32",
"char" : "char",
"CHARACTER" : "char",
"CHARACTER*1" : "char"
}
t_sets = {}
for arg in c_no_match:
type = type_map[ arg.get( "type" ) ]
if not type in t_sets:
t_sets[ type ] = set()
t_sets[ type ].add( arg )
for arg in f_no_match:
#print f_proc.get("name"), arg.get("name")
type = type_map[ arg.get( "type" ) ]
if not type in t_sets:
t_sets[ type ] = set()
t_sets[ type ].add( arg )
for type in t_sets:
# when there only exists a pair of arguments in a type,
# and those arguments are each in opposite code trees (fortran/C)
# it can heuristically be assumed that those arguments can be associated
if len( t_sets[ type ] ) == 2:
arg_1 = t_sets[ type ].pop()
arg_2 = t_sets[ type ].pop()
if (arg_1 in c_no_match and arg_2 in f_no_match ) ^ \
(arg_2 in c_no_match and arg_1 in f_no_match ):
c_arg = arg_1 if arg_1 in c_no_match else arg_2
f_arg = arg_2 if arg_2 in f_no_match else arg_1
print "match", c_arg.get("name"), "to", f_arg.get("name"),"unique type union"
# ?_ana_node is the analogue record under ? language (i.e. c_ana_node notes the argument in the fortran tree, but lives in the C tree)
# Note that it is totally possible to create the path string from the two attributes of the tag.
# It is easier to create it once here, instead of many times everywhere else.
arg_node = proc_node.find( "./argument/[@name='" + c_arg.get( "name" ) + "']" )
arg_node.set( "substitution", f_arg.get( "name" ) )
arg_node.set( "substitution-path", "./LAPACK/procedures/procedure/[@name='" + f_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + f_arg.get( "name" ) + "']" )
'''
c_ana_node = SubElement( SubElementUnique( c_arg, "analogues" ), "analogue" )
c_ana_node.text = "./LAPACK/procedures/procedure/[@name='" + f_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + f_arg.get( "name" ) + "']"
c_ana_node.set( "function", f_proc.get( "name" ) )
c_ana_node.set( "name", f_arg.get( "name" ) )
f_ana_node = SubElement( SubElementUnique( f_arg, "analogues" ), "analogue" )
f_ana_node.text = "./LAPACKE/procedures/procedure/[@name='" + c_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + c_arg.get( "name" ) + "']"
f_ana_node.set( "function", c_proc.get( "name" ) )
f_ana_node.set( "name", c_arg.get( "name" ) )
'''
c_no_match.remove( c_arg )
f_no_match.remove( f_arg )
# if there are more than two arguments in a type
# we can try to match the strings from the
elif len( t_sets[ type ] ) > 2 :
change = True # True to emulate do-while
iter = 1
while change:
print "Iteration:", iter
change = False
c_removes = []
f_removes = []
for c_arg in c_no_match:
min_list = []
min_score = 10**1000
for f_arg in f_no_match:
score = score_string( c_arg.get("name").lower(), f_arg.get("name" ).lower() )
if score < min_score:
min_score = score
min_list = [ f_arg ]
elif score == min_score:
min_list.append( f_arg )
if len( min_list ) >1 :
print "BOTCHED matching for", c_arg.get("name"),": args",
for arg in min_list:
print arg.get("name"),",",
print "have same score", min_score
continue
min = min_list[0]
if min_score > 2:
print "FAILED to match", c_arg.get("name"), "to", min.get("name"), "score", min_score, "was too bad"
continue
change = True
print "match", c_arg.get("name"), "to", min.get("name"), "score", min_score
f_arg = min
# ?_ana_node is the analogue record under ? language (i.e. c_ana_node notes the argument in the fortran tree, but lives in the C tree)
# Note that it is totally possible to create the path string from the two attributes of the tag.
# It is easier to create it once here, instead of many times everywhere else.
arg_node = proc_node.find( "./argument/[@name='" + c_arg.get( "name" ) + "']" )
arg_node.set( "substitution", f_arg.get( "name" ) )
arg_node.set( "substitution-path", "./LAPACK/procedures/procedure/[@name='" + f_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + f_arg.get( "name" ) + "']" )
'''
c_ana_node = SubElement( SubElementUnique( c_arg, "analogues" ), "analogue" )
c_ana_node.text = "./LAPACK/procedures/procedure/[@name='" + f_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + f_arg.get( "name" ) + "']"
c_ana_node.set( "function", f_proc.get( "name" ) )
c_ana_node.set( "name", f_arg.get( "name" ) )
f_ana_node = SubElement( SubElementUnique( f_arg, "analogues" ), "analogue" )
f_ana_node.text = "./LAPACKE/procedures/procedure/[@name='" + c_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + c_arg.get( "name" ) + "']"
f_ana_node.set( "function", c_proc.get( "name" ) )
f_ana_node.set( "name", c_arg.get( "name" ) )
'''
c_removes.append( c_arg )
f_removes.append( f_arg )
for r in c_removes:
c_no_match.remove( r )
for r in f_removes:
f_no_match.remove( r )
iter += 1
print "No changes"
for c_arg in c_no_match:
print "Could not match", c_arg.get( "name" )
for f_arg in f_no_match:
print "Could not match", f_arg.get( "name" )
print ""
prettywritexml( output_tree, "DropFileOfCTreeUnmatchedArgsPass_output.xml" )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class FindLAPACKFunctionGroups ( Pass ):
dependencies = [ChapelizeLAPACKEFunctionsPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindLAPACKFunctionGroups
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapacke_root = xml_tree.find( "./LAPACKE" )
lapacke_procs = lapacke_root.find( "./procedures" )
module_root = xml_tree.find( "./chapel-module" )
module_procs = module_root.find( "./procedures" )
names = set()
groups = {}
for proc in lapacke_procs.findall( "./procedure" ):
proc_name = proc.get( "name" )
base_name = proc_name.replace( "LAPACK_", "" ).replace( "LAPACKE_", "" ).replace( "_work", "" )
match = func_name_group_regex.search( base_name );
if match == None:
print proc_name, "ie", base_name, "does not match regex"
continue
#names.add( base_name );
func = match.group( "function" )
config = match.group( "mtype" )
if not func in groups:
groups[ func ] = {}
if not config in groups[func] :
groups[func][config] = []
groups[func][config].append( proc_name )
group_counts = 0
config_counts = 0
type_counts = 0
for func in groups:
print func
group_counts += 1
for config in groups[func]:
print "\t", config
config_counts += 1
for name in groups[func][config]:
print "\t\t", name
type_counts += 1
print group_counts, config_counts, type_counts
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class FindDuplicateLAPACKEFunctions ( Pass ):
dependencies = [BaseLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindDuplicateLAPACKEFunctions
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
root_procs = xml_tree.find( "./LAPACKE/procedures" )
proc_names = set()
for proc in root_procs.findall( "./procedure" ):
proc_name = proc.get( "name" )
#print proc_name
if proc_name in proc_names:
raise GeneralPassFailure( proc_name )
else:
proc_names.add( proc_name )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class PrintArgDoc ( Pass ):
dependencies = [FuncArgsDocPass, FuncArgsTypePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = PrintArgDoc
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_node = xml_tree.find( "./LAPACK" )
text_node = lapack_node.find( "./text" )
procs_node = lapack_node.find( "./procedures" )
doc_set = set();
total = 0
docked = 0
arg="A"
for proc_node in procs_node.findall( "./procedure" ):
for arg_node in proc_node.findall( "./arguments-list/argument/[@name='"+ arg +"']" ):
doc_node = arg_node.find( "documentation" )
total += 1
if doc_node == None:
print proc_node.get( "name" ), "/", arg, "has no documentation"
continue
doc = doc_node.text.lower()
doc = doc.replace( "-", "" )
doc = doc.replace( "the", "" )
doc = re.sub( "lda\s+is\s+integer", "", doc )
doc = re.sub( "\s+", "", doc )
doc_set.add( doc )
docked += 1
doc_list = sorted( list( doc_set ), key=len )
for i in doc_list:
print i
print len( doc_list ), "/", docked, "/", total
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class FindDifferenArgumentsPass ( Pass ):
dependencies = [BucketLAPACKFunctionGroupsPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindDifferenArgumentsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
abstract_lapack = xml_tree.find( "./Abstract-LAPACK" )
for group_node in abstract_lapack.findall( "./group/[@name='sv']" ):
for config_node in group_node.findall( "./matrix-configuration" ):
name_to_args = {} # function_name => { order => args_node }
name_to_counts = {} # function_name => number
for type_node in config_node.findall( "./type" ):
chap_func = xml_tree.find( type_node.get( "analogue" ) )
name_to_args[ type_node.get( "name" ) ] = {}
for arg in chap_func.findall( "./arguments-list/argument" ):
name_to_args[ type_node.get( "name" ) ][ arg.get("position") ] = arg
name_to_counts[ type_node.get( "name" ) ] = len( name_to_args[ type_node.get( "name" ) ] )
all_same = True
all_count = 0
names = name_to_counts.keys()
for i in range( len( names ) - 1 ):
all_same = all_same and ( name_to_counts[ names[i] ] == name_to_counts[ names[i+1] ] )
print all_same
all_count = name_to_counts[ names[1] ] # grab arbitrary count if all the same
for pos in range( all_count ):
is_same = True
for i in range( len(names)-1):
is_same = is_same and ( name_to_args[names[i]].get("name") == name_to_args[names[i+1]].get("name") ) \
and ( name_to_args[names[i]].get("semantic") == name_to_args[names[i+1]].get("semantic") ) \
and ( name_to_args[names[i]].get("intent") == name_to_args[names[i+1]].get("intent") )
print pos, is_same
if not is_same:
return
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class CountGroups ( Pass ):
dependencies = [BucketLAPACKFunctionGroupsPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = CountGroups
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
abstract_lapack = xml_tree.find( "./Abstract-LAPACK" )
groups = 0
configs = 0
types = 0
for group_node in abstract_lapack.findall( "./group" ):
groups += 1
for config_node in group_node.findall( "./matrix-configuration" ):
configs += 1
for type_node in config_node.findall( "./type" ):
types += 1
print groups, configs, types
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class DropFileOfGroups ( Pass ):
dependencies = [BaseAbstractLAPACKPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = DropFileOfGroups
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
abstract_lapack = xml_tree.find( "./Abstract-LAPACK" )
for group_node in abstract_lapack.findall( "./group/[@name='sv']" ):
for config_node in group_node.findall( "./matrix-configuration" ):
interface_tree = SubElement( config_node, "method-arguments" )
argument = SubElement( interface_tree, "argument" )
argument.set( "name", "?" )
argument.set( "intent" , "?" )
argument.set( "semantic", "?" )
argument.set( "type", "?" )
relation_tree = SubElement( config_node, "arguments-relationships" )
for arg in config_node.findall( "./analogue-arguments-list/argument" ):
arg_relate = SubElement( relation_tree, "argument" )
arg_relate.set( "name", arg.get("name") )
arg_relate.text = "RELATIONSHIP"
prettyprintxml( abstract_lapack)
prettywritexml( abstract_lapack, "DropFilesOfGroups.xml" )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class TryMatrixArgsUnion ( Pass ):
dependencies = [BaseAbstractLAPACKPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = TryMatrixArgsUnion
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
abstract_lapack = xml_tree.find( "./Abstract-LAPACK" )
for group_node in abstract_lapack.findall( "./group" ):
for config_node in group_node.findall( "./matrix-configuration" ):
for arg in config_node.findall( "./analogue-arguments-list/argument" ):
pass
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class SolveArgsUnionFor ( Pass ):
dependencies = [BaseAbstractLAPACKPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = SolveArgsUnionFor
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
abstract_lapack = xml_tree.find( "./Abstract-LAPACK" )
unique = set()
co = set()
non = set()
unset = True;
for group_node in abstract_lapack.findall( "./group" ):
for config_node in group_node.findall( "./matrix-configuration" ):
print config_node.get( "name" ) + group_node.get( "name" )
config_args = set()
array_args = set()
array_dims = {}
for arg in config_node.findall( "./analogue-arguments-list/argument" ):
config_args.add( arg.get( "name" ).lower() )
if arg.get( "semantic" ) == "array":
array_args.add( arg.get("name") )
#prettyprintxml( arg )
if "m" in config_args:
print array_args
for elem in array_args:
print elem
co |= array_args
if unset:
unique |= array_args
unset = False;
else:
unique &= array_args
print unique, "\n"
print "="*10
print unique, "\n"
print co, "\n"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class FindDifferentLengthCalls ( Pass ):
dependencies = [BaseLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindDifferentLengthCalls
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
procs_dict = {}
fams = set()
for proc in xml_tree.findall( "./LAPACKE/procedures/procedure" ):
if proc.get("name").startswith("LAPACK_"):
continue
base_name = proc.get("name").replace( "LAPACKE_", "" )
match = func_name_group_regex.search( base_name );
if match == None:
#print proc.get("name"), "(", base_name, ") does not match regex"
continue
func = match.group( "function" )
config = match.group( "config" )
type = match.group( "type" )
name = config + func
if not name in procs_dict:
procs_dict[ name ] = set()
procs_dict[name].add( len( proc.findall( "./arguments-list/argument" ) ) )
if len( procs_dict[name] ) > 1 :
fams.add( name )
#return
#print procs_dict
for fam in fams:
print fam
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class IsNOrMEverTheSame ( Pass ):
dependencies = [BaseLAPACKPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = IsNOrMEverTheSame
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_node = xml_tree.find( "./LAPACK" )
text_node = lapack_node.find( "./text" )
procs_node = lapack_node.find( "./procedures" )
for proc_node in procs_node.findall( "./procedure" ):
proc_name = proc_node.get( "name" )
'''
base_name = proc_name.lower()
match = func_name_group_regex.search( base_name );
if match == None:
#print proc_name, "(", base_name, ") does not match regex"
continue
func = match.group( "function" )
config = match.group( "config" )
type = match.group( "type" )
if not config.startswith( "ge" ):
continue
'''
arg_names = [ arg.get("name") for arg in proc_node.findall( "./arguments-list/argument" ) ]
for arg_node in proc_node.findall( "./arguments-list/argument" ):
doc_node = arg_node.find( "documentation" )
if doc_node == None or arg_node.get("semantic") != "scalar" or arg_node.get("type").lower() != "integer":
continue
what = []
who = []
string = []
for m in scalar_matrix_relation_regex.finditer( doc_node.text ):
if not m.group( "what" ) in ["rows", "columns", "order", "rank"] :
continue
names = m.group( "who" ).strip()
names_list = []
if " and " in names:
names_list = [ name.strip() for name in names.split( "and" ) ]
else:
names_list = [ names ]
nameHasSpace = False
for name in names_list:
if " " in name:
nameHasSpace = True
break
if nameHasSpace:
print names, " contains non names. Skipping."
continue
removes = []
for name in names_list:
if not name in arg_names:
removes.append( name )
for rm in removes:
names_list.remove( rm )
if len( names_list ) == 0:
print "Names list had no argument names. Skipping"
continue
what.append( m.group( "what" ) )
who.append( names_list )
string.append( re.sub( "\s+", " ", m.group(0) ) )
if len( what ) == 0 and len( who ) == 0:
continue
#proc_info[ proc_name ][ arg_node.get( "name" ) ] = [ what, who, string]
associate_array = str()
associate_field = str()
first = True
for i in range( len( who ) ):
for array in who[i]:
associate_array += ( "," if not first else "" ) + array
associate_field += ( "," if not first else "" ) + what[i]
first = False
arg_node.set( "associate-array", associate_array )
arg_node.set( "associate-field", associate_field )
prettyprintxml( proc_node )
'''
for func in proc_info:
if proc_info[func] == {}:
continue
print func
for arg in proc_info[func]:
print "\t", arg
for elem in proc_info[func][arg]:
print "\t\t", elem
'''
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class FindGroupsWithUncommon ( Pass ):
dependencies = [BaseAbstractLAPACKPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindGroupsWithUncommon
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
abstract_lapack = xml_tree.find( "./Abstract-LAPACK" )
for group_node in abstract_lapack.findall( "./group" ):
for config_node in group_node.findall( "./matrix-configuration" ):
printed = False
for type in config_node.findall( "./types/type" ):
if type.find( "./arguments-list" ) != None:
if not printed:
print config_node.get("name") + group_node.get("name")
printed = True
print "\t", type.get("name")
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class TestInputGroupsGen ( Pass ):
dependencies = [BaseCodegenReadyPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = TestInputGroupsGen
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
#abstract_lapack = xml_tree.find( "./Abstract-LAPACK" )
group_input = loadxml( "DropAttemptedAssociations_input.xml" )
for group in group_input.findall( "./group" ):
for config in group.findall( "./matrix-configuration" ):
code = SequenceOfProducers()
print config.get( "name" ) + group.get( "name" )
code.append( SegmentProducer( "proc " + config.get( "name" ) + group.get( "name" ) ) )
args_producer = ListProducer(", ", "(", ")")
for arg in config.findall( "./method-arguments/argument" ):
args_producer.append( SegmentProducer(
arg.get("intent") + " " + \
arg.get("name") + " : " + \
("[] " if arg.get("semantic") == "array" else "") + \
arg.get("type") + \
( " = " + arg.text if arg.text != None and arg.text.strip() != "" else "" )
)
)
code.append( args_producer )
code.append( LineProducer( ": lapack_int" ) )
code.append( SegmentProducer( "where " ) )
where_producer = ListProducer( " || ", "", "" )
for type in config.findall("./types/type"):
where_producer.append( SegmentProducer( "T == " + type.get( "type" ) ) )
code.append( where_producer )
info_var = config.get( "name" ) + group.get( "name" ) + "_return_info"
func_body = ScopeProducer()
func_body.append( LineProducer( "var " + info_var + " : lapack_int;" ) )
#if_bodies = SequenceOfProducers()
arg_relates = {}
ana_args = []
for arg in config.findall( "./analogue-arguments-list/argument" ):
arg_name = arg.get("name")
arg_relates[ arg_name ] = config.find( "./arguments-relationships/argument/[@name='" + arg_name + "']" )
ana_args.append( arg );
for type in config.findall("./types/type"):
chpl_ana = xml_tree.find( type.get( "analogue" ) )
if_condition = LineProducer( "if ( T == " + type.get("type") + " )" )
func_body.append( if_condition )
if_body = ScopeProducer()
call_equals = SegmentProducer( info_var + " = " + chpl_ana.get( "name" ) )
call_seq = ListProducer( ", ", "(", ")" )
for ana_arg in ana_args:
call_seq.append( SegmentProducer(
"(" + arg_relates[ana_arg.get("name")].text.strip() + ")" + \
(" : " + ana_arg.get("type") if ana_arg.get("semantic") != "array" else "")
)
)
if_body.append( call_equals + call_seq + LineProducer( ";" ) )
func_body.append( if_body )
func_body.append( LineProducer( "return " + info_var + ";" ) )
code.append( func_body )
print code.generate()
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
```
#### File: gen-LAPACK/extern-tool/xmltool.py
```python
import fnmatch
import os
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
import re
import string
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import Element, SubElement, dump, ElementTree
import xml.dom.minidom
def stringtoxml( string ):
return xml.dom.minidom.parseString( string )
def prettystringxml( element ):
return xml.dom.minidom.parseString( ET.tostring(element) ).toprettyxml()
def prettyprintxml( element ):
print prettystringxml( element );
def prettywritexml( element, filename ):
file = open( filename, "w" );
file.write( prettystringxml(element) )
def writexml( element, filename ):
ET.ElementTree( element ).write( filename )
def loadxml( filename ):
return ET.parse( filename ).getroot()
def SubElementUnique( parent, child ):
if parent.find( child ) == None:
return SubElement( parent, child )
else:
return parent.find( child )
def unset( self, name ):
del self.attrib[name]
Element.unset = unset
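# Example usage of the helpers above (file names are hypothetical):
#   root = loadxml( "lapack_tree.xml" )
#   procs = SubElementUnique( root, "procedures" ) # created only if not already present
#   prettywritexml( root, "lapack_tree_pretty.xml" )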
```
#### File: util/test/genGraphs.py
```python
import sys, os, shutil, time, math, re, stat
from optparse import OptionParser
import datetime
import fileReadHelp
import json
try:
import annotate
except ImportError:
sys.stdout.write('[Warning: "annotate" import failed, no annotations will be generated]\n')
annotate = None
usage = '%prog [options] <graphfiles>'
parser = OptionParser(usage=usage)
parser.add_option('-g', '--graphlist', dest='graphlist',
help='file with list of graphfiles', metavar='FILE')
parser.add_option('-t', '--testdir', dest='testdir',
help='test directory', metavar='DIR',
default='.')
parser.add_option('-o', '--outdir', dest='outdir',
help='output directory', metavar='DIR',
default=None)
parser.add_option('-p', '--perfdir', dest='perfdir',
help='performance data directory', metavar='DIR',
default=None)
parser.add_option('-n', '--name', dest='name',
help='Test platform name', metavar='NAME',
default=None)
parser.add_option('-s', '--startdate', dest='startdate',
help='graph start date', metavar='DATE')
parser.add_option('-e', '--enddate', dest='enddate',
help='graph end date', metavar='DATE')
parser.add_option('-a', '--alttitle', dest='alttitle',
help='alternate/custom site title',
metavar='NAME', default=None)
parser.add_option('-v', '--verbose', dest='verbose',
action='store_true', default=False)
parser.add_option('-d', '--debug', dest='debug',
action='store_true', default=False)
parser.add_option('-r', '--reduce', dest='g_reduce', type='choice',
metavar='STRATEGY', default='avg',
choices=['avg', 'med', 'min', 'max'])
parser.add_option('-x', '--no-bounds', dest='g_display_bounds',
action='store_false', default=True)
parser.add_option('-u', '--numericX', dest='numericX',
help='expect numbers (e.g. revisions), not dates,' +
' for the X axis',
action='store_true', default=False)
parser.add_option('-m', '--configs', dest='multiConf',
help='comma separated list of configurations. ":v" after a '
'configuration makes it visible by default. e.g '
'"local:v,--no-local:v" will create graphs with series '
'duplicated for local and --no-local both of which will '
'be visible by default on the web page.',
default='')
if annotate:
parser.add_option('-j', '--annotate', dest='annotation_file',
default=None)
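# Hedged example invocation (paths, host name, and the .graph file below are
# illustrative assumptions, not taken from this script):
#   python genGraphs.py -p ./perfdat/myhost -o ./perfdat/myhost/html -t . examples.graph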
debug = False
verbose = False
numericX = False
multiConf = []
defaultMultiConf = []
def try_parse_float(value):
try:
# removes any trailing characters (units)
return float(re.sub(r'[^\d,.]+$', '', value))
except ValueError:
return value
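# Hedged examples of the helper above (inputs are illustrative):
#   try_parse_float('12.3 ms') -> 12.3    (trailing unit text is stripped)
#   try_parse_float('n/a')     -> 'n/a'   (non-numeric values pass through unchanged)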
def parse_date(value, dateformat='%m/%d/%y'):
if numericX:
return value
else:
return time.strptime(value.strip(), dateformat)
def show_date(value=time.localtime()):
if numericX:
return value
else:
return time.strftime('%Y-%m-%d', value)
# converts a csv file to json. Converts the csv file to json where the json
# object has two members: the labels and the actual data formatted as required
# by dygraphs
def csv_to_json(csvFile, ginfo):
data = parse_csv(csvFile)
os.unlink(csvFile)
# rename the csv file to indicate that it's a json file
jsonFile = os.path.splitext(csvFile)[0]+'.json'
ginfo.datfname = os.path.splitext(ginfo.datfname)[0]+'.json'
# each label is stored in a single element array because of how it is
# parsed, get rid of that array so labels is now a simple array of strings
labels = [a[0] for a in data[0]]
data = data[1:]
lines = []
for line in data:
# like for the labels, strip the array surrounding the date
curDate = line[0][0]
dataForCurDate = line[1:]
curLine = [curDate]
# the data for a series on the current date is stored as ['val'],
# ['low', 'med', 'high'], or ['']. Need to parse as floats, and turn
# [''] (no data for this series on this date) into None so json.dumps
# turns it into null. If we're not using custom bars (displayrange is
# false) then our val will come in as ['val'] but we just want to store
# the single value, not the single value in an array
for seriesArr in dataForCurDate:
if len(seriesArr) == 1 and seriesArr[0] == '':
curLine.append(None)
else:
if (ginfo.displayrange):
curLine.append([try_parse_float(x) for x in seriesArr])
else:
curLine.append(try_parse_float(seriesArr[0]))
lines.append(curLine)
# if there was no data in the csvFile, create an empty entry for todays
# date. Dygraphs does not accept a zero length array for the data so we add
# a single entry for today's date with null data for each series
if len(lines) == 0:
line = [None for label in labels]
line[0] = show_date()
lines.append(line)
jsonObj = {'labels': labels, 'data': lines}
with open(jsonFile, 'w') as f:
f.write(json.dumps(jsonObj))
# Helper functions to parse/write/sort a dygraphs compatible csv file.
#
# Expected form of csv file is:
#
# Date,<perfKey1>,<perfKey2>
# YYYY-mm-dd,<key1Value>,<key2Value>
# YYYY-mm-dd,<key1Value>,<key2Value>
#
# where <keyXValue> is of the form 'val' for numTrials=1 (customBars are not
# being used), 'lowVal;medVal;highVal' for numTrials>1 (customBars are being
# used), or '' if there was no value for that key for that date
#
# Parses a csv file of the above form into a list of the form:
#
# [[['Date'], ['perfKey1'], ['perfKey2']],
# [['YYYY-mm-dd'],[<key1Value>],[<key2Value>]],
# [['YYYY-mm-dd'],[<key1Value>],[<key2Value>]]]
#
# where <keyXValue> is either a single value as a string, 3 values (low, med,
# high) as strings, or the empty string if there was no value for that key for
# that date
def parse_csv(csvFile):
lines = fileReadHelp.ReadFileWithComments(csvFile)
data = []
for line in lines:
line = line.rstrip()
if len(line) > 0:
valuesString = line.split(',')
values = []
for valueString in valuesString:
values.append(valueString.split(';'))
data.append(values)
return data
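# Hedged sketch of what parse_csv produces for a small made-up file containing:
#   Date,perfKey1,perfKey2
#   2020-01-01,1;2;3,
# the parsed result is:
#   [[['Date'], ['perfKey1'], ['perfKey2']],
#    [['2020-01-01'], ['1', '2', '3'], ['']]]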
def data_to_csv(data, csvFile):
lines = []
for values in data:
line = []
for value in values:
line.append(';'.join(value))
lines.append(','.join(line)+'\n')
with open(csvFile, 'w') as f:
f.writelines(lines)
# sorts a csv of the aforementioned form. Sorts a series' keys and it's
# corresponding values (column) from greatest to least in terms of a series
# most recent data.
# Takes:
# Date,<perfKey1>,<perfKey2>
# YYYY-mm-dd,1;2;3,0;1;2
# YYYY-mm-dd,1;2;3,3;4;5
# and transforms it into:
# Date,<perfKey2>,<perfKey1>
# YYYY-mm-dd,0;1;2,1;2;3
# YYYY-mm-dd,3;4;5,1;2;3
#
# also works for 'val', instead of 'low;med;high' and empty values: ''
def sort_csv(csvFile):
data = parse_csv(csvFile)
if len(data) == 1:
return
# transpose the data so that we can sort by row
data = list(zip(*data))
# remove the Date perfkey and the actual dates as they screw up sorting
dates = data.pop(0)
# sort function called on values of the form:
# [['perfKey1'],[<key1Value>],[<key1Value>]]
# where keyXValue are of the form described above. sorts by the most recent
# date, and grabs the middle value. returns -1 for empty values, so that
# series with no recent data filter down to the bottom
def parse_sortable_float(values):
mostRecentValues = values[len(values)-1]
value = mostRecentValues[len(mostRecentValues)//2]
if value == '':
return -1.0
return try_parse_float(value)
data.sort(key=lambda values: parse_sortable_float(values), reverse=True)
# add the dates back in
data.insert(0, dates)
# untranspose the data
data = list(zip(*data))
data_to_csv(data, csvFile)
# Yield dateformat-ed dates in the range [start_date, end_date]
def date_range(start_date, end_date, dateformat='%Y-%m-%d'):
cur_date = datetime.datetime.strptime(start_date, dateformat)
end_date = datetime.datetime.strptime(end_date, dateformat)
while cur_date <= end_date:
yield cur_date.strftime(dateformat)
cur_date += datetime.timedelta(days=1)
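# Hedged example: list(date_range('2020-01-01', '2020-01-03')) yields
# ['2020-01-01', '2020-01-02', '2020-01-03']; the range is inclusive on both ends.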
# Fill in missing dates in the csv file. Grabs the start and end date from the
# file, and ensures that we have an entry for every date in the range.
# We do this because annotations require an actual data point to attach to, so
# we make sure there will always be a day available
def fill_sparse_csv(csvFile):
data = parse_csv(csvFile)
keys = data.pop(0)
if len(data) > 1:
dates = list(zip(*data)).pop(0)
dates = [date[0] for date in dates]
if len(dates) > 1:
start_date = dates[0]
end_date = dates[-1]
# for all the missing days append an empty entry to the end of the
# data (we'll sort later to get things in the right order)
missing_dates = set(date_range(start_date, end_date)) - set(dates)
for date in missing_dates:
# emptydate = [[date], [''], [''], ...]
emptydate = [[date]] + [['']]*(len(keys)-1)
data.append(emptydate)
# sort our data, we don't need to convert our date strings to datetimes
# because for ISO 8601 dates lexical order is also chronological order
data.sort()
data.insert(0, keys)
data_to_csv(data, csvFile)
# Strips all but the first 'numseries' series from a csv file. Useful for
# things like compiler performance testing where you want to display the top 10
# passes. If multiple configurations are being used it grabs the series from
# the default configuration and then finds the other configurations for those
# series.
def strip_series(csvFile, numseries):
data = parse_csv(csvFile)
labels = [a[0] for a in data[0]]
newData = []
data = list(zip(*data))
numFound = 0
newData.append(data[0])
if multiConf:
defaultConf = multiConf[0]
for i in range(1, len(labels)):
if labels[i].endswith('(' + defaultConf + ')') and numFound < numseries:
numFound+=1
newData.append(data[i])
for conf in multiConf[1:]:
confLabel = labels[i].replace('('+defaultConf+')','('+conf+')')
newData.append(data[labels.index(confLabel)])
else:
for i in range(1, numseries+1):
newData.append(data[i])
# untranspose the data
data = list(zip(*newData))
data_to_csv(data, csvFile)
# Find the series to attach annotations to. If there were multiple
# configurations, attach to a series in the default (first listed)
# configuration. Else attach to first series
def get_annotation_series(csvFile):
data = parse_csv(csvFile)
labels = [a[0] for a in data[0]]
labels = labels[1:]
annotation_series = labels[0]
if multiConf:
defaultConf = multiConf[0]
for label in labels:
if label.endswith('(' + defaultConf + ')'):
annotation_series = label
break
return annotation_series
############
class CouldNotReadGraphFile(Exception):
pass
# Global info about generating graphs
class GraphStuff:
def __init__(self, _name, _testdir, _perfdir, _outdir, _startdate, _enddate,
_reduce, _display_bounds, _alttitle, _annotation_file):
self.numGraphs = 0
self.config_name = _name
self.testdir = _testdir
self.perfdir = _perfdir
self.outdir = _outdir
self.datdir = self.outdir+'/'+'CSVfiles'
self.title = 'Chapel Performance Graphs'
if _alttitle:
self.title = _alttitle
if _name != None:
self.title += ' for '+_name
self.gfname = self.outdir+'/'+'graphdata.js'
self.gfile = None
self.suites = list()
self.startdate = _startdate
self.enddate = _enddate
self._reduce = _reduce
self.display_bounds = _display_bounds
self.annotation_file = _annotation_file
def init(self):
if os.path.exists(self.outdir):
if verbose:
sys.stdout.write('Removing old directory %s...\n'%(self.outdir))
try:
shutil.rmtree(self.outdir)
except OSError:
sys.stderr.write('Error: Could not clean up directory: %s\n'%(self.outdir))
raise
if verbose:
sys.stdout.write('Creating directory %s...\n'%(self.outdir))
try:
os.makedirs(self.outdir)
except OSError:
sys.stderr.write('ERROR: Could not (re)create directory: %s\n'%(self.outdir))
raise
if verbose:
sys.stdout.write('Creating directory %s...\n'%(self.datdir))
try:
os.makedirs(self.datdir)
except OSError:
sys.stderr.write('ERROR: Could not create directory: %s\n'%(self.datdir))
raise
try:
self.gfile = open(self.gfname, 'w')
except IOError:
sys.stderr.write('ERROR: Could not open file: %s\n'%(self.gfname))
if annotate and self.annotation_file:
try:
self.all_annotations = annotate.load(self.annotation_file)
except IOError:
sys.stderr.write('ERROR: Could not open file: %s\n'%(self.annotation_file))
raise
else:
self.all_annotations = None
self.gfile.write('// AUTOMATICALLY GENERATED GRAPH DESCRIPTION\n')
self.gfile.write('document.title = "%s";\n'%(self.title))
self.gfile.write('var pageTitle = "%s";\n'%(self.title))
runDate = time.strftime('%Y-%m-%d', time.localtime())
self.gfile.write('var runDate= "%s";\n'%(runDate))
self.gfile.write('var numericX = %s;\n'%(str(numericX).lower()))
self.gfile.write('var configurations = [%s];\n' %(', '.join([ '" ('+conf+')"' for conf in multiConf])))
self.gfile.write('var configurationsVis = [%s];\n' %(', '.join([ '"('+conf+')"' for conf in defaultMultiConf])))
self.gfile.write('var allGraphs = [\n')
self.firstGraph = True
return 0
def __str__(self):
s = 'Number of unique graphs = '+str(self.numGraphs)
s += 'test dir = '+self.testdir
s += 'performance data dir = '+self.perfdir
s += 'output dir = '+self.outdir
s += 'start date = '+self.startdate
s += 'end date = '+self.enddate
s += ': '+str(len(self.dygarphs))+' dygraphs'
return s
def finish(self):
if self.gfile:
self.gfile.write('\n];\n')
first = True
self.gfile.write('var perfSuites = [\n')
for s in self.suites:
if not first:
self.gfile.write(',\n')
else:
first = False
self.gfile.write('{ "suite" : "%s" }'%(s))
self.gfile.write('\n];\n')
self.gfile.close()
def genGraphInfo(self, ginfo):
if not self.firstGraph:
self.gfile.write(',\n')
else:
self.firstGraph = False
series = ginfo.annotationSeries
# generate the annotations for this graph
if series and annotate and self.all_annotations and not numericX:
ginfo.annotations = annotate.get(
self.all_annotations, ginfo.name, series,
parse_date(ginfo.startdate, '%Y-%m-%d'),
parse_date(ginfo.enddate,'%Y-%m-%d'), self.config_name)
self.gfile.write('{\n')
if ginfo.title != '':
self.gfile.write(' "title" : "%s",\n'%(ginfo.title))
elif ginfo.graphname != '':
sys.stdout.write('WARNING: \'graphname\' is deprecated. Use \'graphtitle\' instead.\n')
self.gfile.write(' "title" : "%s",\n'%(ginfo.graphname))
else:
sys.stdout.write('WARNING: No graph title found.\n')
self.gfile.write(' "title" : "%s",\n'%(ginfo.name))
suites = (', '.join('"' + s + '"' for s in ginfo.suites if s))
self.gfile.write(' "suites" : [%s],\n'%(suites))
self.gfile.write(' "datfname" : "%s",\n'%(ginfo.datfname))
self.gfile.write(' "ylabel" : "%s",\n'%(ginfo.ylabel))
self.gfile.write(' "startdate" : "%s",\n'%(ginfo.startdate))
self.gfile.write(' "enddate" : "%s",\n'%(ginfo.enddate))
self.gfile.write(' "displayrange" : %s,\n'%(str(ginfo.displayrange).lower()))
self.gfile.write(' "defaultexpand" : %s,\n'%(str(ginfo.expand).lower()))
self.gfile.write(' "annotations" : [')
if ginfo.annotations:
self.gfile.write('\n ')
self.gfile.write(',\n '.join(ginfo.annotations))
self.gfile.write('\n ')
self.gfile.write(']\n')
self.gfile.write('}')
def genGraphStuff(self, fname, suites):
if os.path.isabs(fname):
fullFname = fname
else:
fullFname = self.testdir+'/'+fname
if not os.path.exists(fullFname):
fullFname = './'+fname
if verbose:
sys.stdout.write('Reading %s...\n'%(fullFname))
lines=fileReadHelp.ReadFileWithComments(fullFname)
if lines==None:
sys.stdout.write('WARNING: Could not read graph description from %s. Ignoring.\n'%(fullFname))
raise CouldNotReadGraphFile
basename = os.path.splitext(os.path.basename(fname))[0]
graphs=list()
currgraph=-1
firstGraphNum = self.numGraphs
for l in lines:
key = l.split(':')[0].strip()
rest = l[l.index(':')+1:].strip()
if key == 'perfkeys' :
if currgraph != -1:
try:
graphs[currgraph].generateGraphData(self, currgraph)
except (ValueError, IOError, OSError):
raise
# new graph
currgraph += 1
graphs.append(GraphClass(basename, firstGraphNum+currgraph,
suites, self.startdate, self.enddate,
self._reduce, self.display_bounds))
graphs[currgraph].perfkeys += [ s.strip() for s in rest.split(', ') ]
elif key == 'graphkeys' :
graphs[currgraph].graphkeys += [ s.strip() for s in rest.split(',') ]
# files takes a list of the files to use per perfkey, while
# repeat-files uses each of n files perkey/n times. So if you have
# 10 perfkeys and 2 files listed, each file will be used 5 times
elif key == 'files' :
graphs[currgraph].datfilenames += [ s.strip() for s in rest.split(',') ]
elif key == 'repeat-files' :
dFiles = [ s.strip() for s in rest.split(',') ]
if len(graphs[currgraph].perfkeys) % len(dFiles) != 0:
sys.stdout.write('[Warning: num .dat files did not evenly divide into num perfkeys for %s. Ignoring.]\n'%(fullFname))
return
keysRange = range(0, (len(graphs[currgraph].perfkeys) // len(dFiles)))
for dFile in dFiles:
graphs[currgraph].datfilenames += [ dFile for i in keysRange ]
elif key == 'graphtitle':
graphs[currgraph].title = rest.strip()
elif key == 'graphname':
# deprecated
graphs[currgraph].graphname = rest.strip()
elif key == 'ylabel':
graphs[currgraph].ylabel = rest.strip()
elif key == 'startdate':
graphs[currgraph].startdate = parse_date(rest)
elif key == 'enddate':
graphs[currgraph].enddate = parse_date(rest)
elif key == 'generate':
graphs[currgraph].generate = [s if s != '' else self._reduce for s in re.sub(r'\s+', '', rest).split(',')]
for strat in graphs[currgraph].generate:
if strat not in parser.get_option('-r').choices:
sys.stdout.write('WARNING: Invalid generate option {0} in {1}\n'.format(strat, fullFname))
elif key == 'displayrange':
graphs[currgraph].displayrange = rest.lower() in ('true', 't', '1', 'on', 'y', 'yes')
# expansion is used to turn a single graph with multiple series into
# multiple graphs where the first one is all the series combined and
# the subsequent ones contain each series individually. -1 means
# expand all. Other numbers indicate the number of series to expand
elif key == 'defaultexpand':
graphs[currgraph].expand = rest.lower() in ('true', 't', '1', 'on', 'y', 'yes')
elif key == 'numseries':
graphs[currgraph].numseries = int(rest.strip())
elif key == 'sort':
graphs[currgraph].sort = rest.lower() in ('true', 't', '1', 'on', 'y', 'yes')
try:
graphs[currgraph].generateGraphData(self, currgraph)
except (ValueError, IOError, OSError):
raise
return (currgraph+1)
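    # Hedged sketch of a .graph description consumed by genGraphStuff above
    # (file names, keys, and values are made-up assumptions; only keys handled
    # in the elif chain are meaningful):
    #   graphtitle: Stream bandwidth
    #   ylabel: GB/s
    #   perfkeys: Copy:, Scale:
    #   files: stream.dat, stream.dat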
############
# graph class
class GraphClass:
gid = 1
def __init__(self, _name, _graphNum, _suites=[], _startdate=None,
_enddate=None, _reduce='avg', _displayrange=True,
_expand=False, _sort=True):
self.id = GraphClass.gid
GraphClass.gid += 1
self.name = _name.strip()
self.srcdatfname = self.name+'.dat'
self.title = ''
self.suites = _suites
self.graphname = ''
self.ylabel = ''
self.startdate = _startdate
self.enddate = _enddate
self.graphNum = _graphNum
self.perfkeys = list()
self.graphkeys = list()
self.datfilenames = list()
self.generate = list()
self.displayrange = _displayrange
self._reduce = _reduce
self.expand = _expand
self.numseries = -1
self.sort = _sort
self.annotations = []
self.annotationSeries = ""
def __str__(self):
l = 'Graph: '+str(self.name)+' (id='+str(self.id)+')\n'
l += '\ttitle: '+self.title+'\n'
if self.suites == None or self.suites == [] or self.suites == [None]:
l += '\tsuites: none\n'
else:
l += '\tsuites: ['+','.join(self.suites)+']\n'
l += '\tgraphname: '+self.graphname+'\n'
l += '\tylabel: '+self.ylabel+'\n'
if self.startdate != None:
l += '\tstartdate: '+show_date(self.startdate)+'\n'
else:
l += '\tstartdate: Not specified\n'
if self.enddate != None:
l += '\tenddate: '+show_date(self.enddate)+'\n'
else:
l += '\tenddate: Not specified\n'
l += '\tperfkeys: '+list.__str__(self.perfkeys)+'\n'
l += '\tgraphkeys: '+list.__str__(self.graphkeys)+'\n'
l += '\tdatfilenames: '+list.__str__(self.datfilenames)+'\n'
l += '\tdisplayrange:'+str(self.displayrange).lower()+'\n'
return l
# For each unique data file
class DatFileClass:
def __init__(self, _filename):
# lines will end up looking like:
# lines[lineNum][trailNum][field]
# where lineNum is the number after they are "merged"
self.lines = []
self.mykeys = {}
try:
self.dfile = open(_filename, 'r')
# First line must be a comment and must be a tab separated list
# of the performance keys
self.perfkeys = [ l.strip() for l in self.dfile.readline()[1:].split('\t') ]
except IOError:
pass
# Allows some performance tests to be graphed when you aren't
# running over every single performance test
except:
raise
def add(self, _i, _k):
# the _ith data stream comes from the column of the _kth perfkey
self.mykeys[_i] = self.perfkeys.index(_k)
def __str__(self):
l = '\tperfkeys: '+list.__str__(self.perfkeys)+'\n'
l += '\tmykeys: '+dict.__str__(self.mykeys)+'\n'
return l
def __del__(self):
if hasattr(self, 'dfile'):
self.dfile.close()
# Generate the new data file inline in CSV format
def generateData(self, graphInfo, datfiles):
# An alternative is to have an off-line process incrementally
# update a CSV data file. Since we would still have to open
# potentially multiple data files and read thru them and look
# at every line for the appropriate date, I opted for
# regenerating.
fname = graphInfo.datdir+'/'+self.datfname
f = open(fname, 'w')
f.write('Date,')
for i in range(len(self.graphkeys)):
f.write(self.graphkeys[i])
if i < len(self.graphkeys)-1:
f.write(',')
else:
f.write('\n')
numKeys = len(self.perfkeys)
# currLines stores the current merged line number of each dat file
currLines = [0]*numKeys
startdate = self.startdate
enddate = self.enddate
minDate = None
first = True
done = False
for i in range(numKeys):
# The file may be missing (in the case where only a subdirectory
# has been tested for performance). If so, we don't want to
# try to access a non-existent datfile
if self.datfilenames[i] in datfiles:
df = datfiles[self.datfilenames[i]]
for line in sorted(df.dfile):
line = line.strip()
if line == '' or line[0] == '#':
continue
fields = line.split()
myDate=parse_date(fields[0])
fields[0] = myDate
fields[1:] = [try_parse_float(x) for x in fields[1:]]
if df.lines and myDate == df.lines[-1][0][0]:
# append to the batch for that date
df.lines[-1].append(fields)
else:
# start a new batch
df.lines.append([fields])
df.lines.sort()
found_data = False
while not done:
done = True
for i in range(numKeys):
# The file may be missing (in the case where only a subdirectory
# has been tested for performance). If so, we don't want to
# try to access a non-existent datfile
if not self.datfilenames[i] in datfiles:
continue
df = datfiles[self.datfilenames[i]]
# find the min date
if currLines[i] < len(df.lines):
done = False
myDate = df.lines[currLines[i]][0][0]
if minDate==None or myDate<minDate:
minDate = myDate
if startdate==None:
startdate = minDate
if done:
break
# We didn't print anything last iteration if there was no data
# found, so skip the new line in that case
if not first and found_data:
f.write('\n')
else:
first = False
# write out the data for this date
found_data = False
line_to_write = show_date(minDate)
for i in range(numKeys):
if not self.datfilenames[i] in datfiles:
continue
try:
df = datfiles[self.datfilenames[i]]
if currLines[i] < len(df.lines):
fields = list(zip(*df.lines[currLines[i]]))
myDate = fields[0][0]
if myDate == minDate:
# consume this line
line_to_write += ','
if len(fields)>df.mykeys[i] and '-' not in fields[df.mykeys[i]]:
fieldId = df.mykeys[i]
value = fields[fieldId][0]
if self.generate:
method = self.generate[i]
else:
method = self._reduce
if method == 'avg':
value = math.fsum(fields[fieldId])/len(fields[fieldId])
elif method == 'med':
slist = sorted(fields[fieldId])
if len(fields[fieldId]) % 2 == 0:
                                        value = (slist[len(slist)//2]+slist[len(slist)//2-1])/2
else:
                                        value = slist[len(slist)//2]
elif method == 'min':
value = min(fields[fieldId])
elif method == 'max':
value = max(fields[fieldId])
if self.displayrange:
minval = min(fields[fieldId])
maxval = max(fields[fieldId])
line_to_write += "{0};{1};{2}".format(minval, value, maxval)
else:
line_to_write += "{0}".format(value)
found_data = True
currLines[i] += 1
else:
# no data for this date
line_to_write += ','
else:
# no data for this date
line_to_write += ','
except:
                    print('[Error parsing .dat file: {0}]'.format(self.datfilenames[i]))
raise
if found_data:
f.write(line_to_write)
if self.enddate==None:
enddate = minDate
else:
enddate = self.enddate
minDate = None
f.write('\n')
f.close()
# sort the csv if sorting is enabled for this graph. This happens after
# the file is created instead of above because the above process
# appends to the file line by line and it wasn't clear to me how to get
# it into a better form. If instead of writing out line by line, you
# put the data into a list of the form described in the csv helper
# functions section above, you can eliminate the intermediate step.
if self.sort:
sort_csv(fname)
fill_sparse_csv(fname)
if self.numseries > 0:
strip_series(fname, self.numseries)
self.annotationSeries = get_annotation_series(fname)
csv_to_json(fname, self)
if startdate == None:
startdate = time.localtime()
if enddate == None:
self.enddate = startdate
if graphInfo.startdate != None:
self.startdate = show_date(graphInfo.startdate)
else:
self.startdate = show_date(startdate)
if graphInfo.enddate != None:
self.enddate = show_date(graphInfo.enddate)
elif enddate != None:
self.enddate = show_date(enddate)
def generateGraphData(self, graphInfo, gnum):
if debug:
print('===')
print(self)
if verbose:
sys.stdout.write('Generating graph data for %s (graph #%d)\n'%(self.name, gnum))
self.datfname = self.name+str(gnum)+'.txt'
nperfkeys = len(self.perfkeys)
if nperfkeys != len(self.graphkeys):
start = len(self.graphkeys)
for i in range(start,nperfkeys):
self.graphkeys.append(self.perfkeys[i])
# strip trailing colons, equals, spaces, and dashes so graphkeys are
# cleaner. We have a lot of them when graph keys come from perfkeys
self.graphkeys = [key.rstrip(':= -') for key in self.graphkeys]
for i in range(nperfkeys):
for j in range(nperfkeys):
if (i != j) and (self.graphkeys[i]==self.graphkeys[j]):
sys.stdout.write('WARNING: Duplicate graph keys (%s)\n'%(self.graphkeys[i]))
defaultFilename = self.srcdatfname
if nperfkeys != len(self.datfilenames):
start = len(self.datfilenames)
for i in range(start,nperfkeys):
self.datfilenames.append(defaultFilename)
# fix path to dat files
for i in range(nperfkeys):
fn = self.datfilenames[i]
idx = fn.rfind('/')
if idx != -1:
if graphInfo.perfdir[0] == '/':
# absolute path (prepend path and strip relative path)
self.datfilenames[i] = graphInfo.perfdir+fn[idx:]
else:
# relative path (find .dat file in the subdir)
self.datfilenames[i] = './'+fn[0:idx]+'/'+graphInfo.perfdir+fn[idx:]
else:
self.datfilenames[i] = graphInfo.perfdir+'/'+fn
# this is the 'magic' for multiple configurations. It takes the info
# for a graph and creates copies for each configuration.
if multiConf:
# copy datfiles, and keys to temp copies, and clear originals
tempdatfilenames = self.datfilenames[0:]
tempperfkeys = self.perfkeys[0:]
tempgraphkeys = self.graphkeys[0:]
self.datfilenames = []
self.perfkeys = []
self.graphkeys = []
graphConfs = [ ' ('+conf+')' for conf in multiConf if conf ]
for dat, perf, graph in zip(tempdatfilenames, tempperfkeys, tempgraphkeys):
for conf, graphconf in zip(multiConf, graphConfs):
if conf == 'default':
conf = ''
datpath = os.path.dirname(dat)
datfile = os.path.basename(dat)
fullDatFile = os.path.join(datpath, conf, datfile)
# if we found the files for the current conf, add the
# datfile, perf key, and graph key (with conf appended)
if os.path.exists(fullDatFile):
self.datfilenames.append(fullDatFile)
self.perfkeys.append(perf)
self.graphkeys.append(graph + graphconf)
# if the .dat file wasn't found, then it's possible the
# test didn't run for this configuration. Try to find the
# .dat file for one of the configurations and copy the
# series (first line.) Dygraphs expects there to be a
# series for each configuration even if there's no data.
else:
for otherConf in multiConf:
if otherConf == 'default':
otherConf = ''
otherFullDatFile = os.path.join(datpath, otherConf, datfile)
if os.path.exists(otherFullDatFile):
firstline = ''
with open(otherFullDatFile, 'r') as f:
firstline = f.readline()
if firstline:
with open(fullDatFile, 'w') as f:
f.write(firstline)
# if we still didn't find the file, it might be a test that
# doesn't really have multi-configurations (misc stats for
# compiler perf.) If there's a file not in a conf directory
# use it and don't append the conf to the graph key
if not os.path.exists(fullDatFile) and not 'default' in multiConf:
myDatFile = os.path.join(datpath, datfile)
if os.path.exists(myDatFile):
self.datfilenames.append(myDatFile)
self.perfkeys.append(perf)
self.graphkeys.append(graph)
break;
                # it's possible we still didn't find the file, but if
# that's the case the test wasn't run for any configuration
# and we won't generate data for any series which won't
# bother dygraphs
nperfkeys = len(self.perfkeys)
# create a hashmap of open datfiles
datfiles = {}
for i in range(nperfkeys):
d = self.datfilenames[i]
if d not in datfiles:
datfiles[d] = self.DatFileClass(d)
try:
if hasattr(datfiles[d], 'dfile'):
# May not have a dfile if the specific performance test
# was not previously run and dumped into this folder
# Should distinguish this case from cases where there are
# legitimately no perfkeys
datfiles[d].add(i, self.perfkeys[i])
else:
# We don't have a dfile for that file name. Delete the
# created DatFileClass so it doesn't mess us up
del datfiles[d]
except ValueError:
sys.stderr.write('ERROR: Could not find perfkey \'%s\' in %s\n'%(self.perfkeys[i], self.datfilenames[i]))
raise
# generate the new data files
self.generateData(graphInfo, datfiles)
graphInfo.genGraphInfo(self)
for n, d in datfiles.items():
del d
####################
def main():
(options, args) = parser.parse_args()
sys.stdout.write('Running genGraphs with %s\n'%(' '.join(sys.argv)))
global debug
debug = options.debug
global verbose
verbose = options.verbose
global numericX
numericX = options.numericX
if options.perfdir != None:
perfdir = options.perfdir
else:
perfdir = './perfdat/'+os.uname()[1]
if options.outdir != None:
outdir = options.outdir
else:
outdir = perfdir+'/html'
if options.startdate != None:
startdate = parse_date(options.startdate)
else:
startdate = None
if options.enddate != None:
enddate = parse_date(options.enddate)
else:
if numericX:
enddate = None
else:
enddate = time.localtime()
global multiConf
global defaultMultiConf
# process multiConf options. Format is 'conf1[:v][,conf2[:v]]*'
# e.g. 'default:v,no-local:v', which will generate two configurations that
# are visible by default. If 'v' isn't specified the series will not be
# visible by default. If 'v' isn't specified for any series, the first conf
# will be visible by default
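    # Worked example (illustrative): '--local:v,--no-local' yields
    # multiConf == ['--local', '--no-local'] and defaultMultiConf == ['--local'].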
if options.multiConf:
tempConf = options.multiConf.split(',')
for confAndDefaultVis in tempConf:
temp = confAndDefaultVis.split(':')
conf = temp[0]
multiConf.append(conf)
if len(temp) > 1 and temp[1] == 'v':
defaultMultiConf.append(conf);
if len(defaultMultiConf) == 0:
defaultMultiConf.append(multiConf[0])
else:
multiConf = []
annotation_file = None
if annotate:
if options.annotation_file is not None:
annotation_file = options.annotation_file
else:
af = os.path.join(options.testdir, 'ANNOTATIONS.yaml')
if os.path.exists(af):
annotation_file = af
# This allows the graph webpage to have an alternate title. The default is
# Chapel Performance Graphs, which is not always appropriate
if options.alttitle != None:
alttitle = options.alttitle
else:
alttitle = None
graphInfo = GraphStuff(options.name, options.testdir, perfdir, outdir,
startdate, enddate, options.g_reduce, options.g_display_bounds,
alttitle, annotation_file)
try:
graphInfo.init()
except (IOError, OSError):
return -1
# get the list of .graph files
lines = list()
# command line .graph files
for a in args:
lines.append(a)
# .graph files from the specified file
graphlist = options.graphlist
if graphlist != None:
try:
f = open(graphlist, 'r')
try:
lines += f.readlines()
finally:
f.close()
except IOError:
sys.stderr.write('ERROR: Could not open graph file: %s\n'%(graphlist))
return -1
currSuite = None
numSuites = 0
numGraphfiles = 0
graphList = list()
# determine the suite(s) for each graph
for line in lines:
l = line.strip()
if l != '':
if l[0] != '#':
# if a graph has already been seen, just append to the list of
                    # suites it belongs to, else add it to the graphList
graphnames = [x[0] for x in graphList]
if graphnames.count(l):
if currSuite in graphList[graphnames.index(l)][1]:
sys.stdout.write('[Warning: duplicate graph "{0}" '
'found in suite "{1}"]\n'.format(l, currSuite))
graphList[graphnames.index(l)][1].append(currSuite)
                        # the suites at the top of our list are more often 'meta'
                        # suites that contain graphs from various suites. To keep
# the graphs that are logically related next to each other
# we push this graph to the end of the list
tempGraph = graphList.pop(graphnames.index(l))
graphList.append(tempGraph)
else:
graphList.append((l, [currSuite]))
else:
# parse for suite name
ls = l[1:].split(': ')
if ls[0].strip() == 'suite':
currSuite = ls[1].rstrip()
graphInfo.suites.append(currSuite)
numSuites += 1
if verbose:
sys.stdout.write('suite: %s\n'%(currSuite))
# generate the graphs
for graph in graphList:
try:
graphInfo.genGraphStuff(graph[0], graph[1])
numGraphfiles += 1
except (CouldNotReadGraphFile):
pass # do not increment numGraphfiles
except (ValueError, IOError, OSError):
return -1
# Copy the index.html and support css and js files
auxdir = os.path.dirname(os.path.realpath(__file__))+'/perf'
copiedAllFiles = True
try:
shutil.copy(auxdir+'/index.html', outdir)
except IOError:
sys.stderr.write('WARNING: Could not copy index.html\n')
copiedAllFiles = False
try:
shutil.copy(auxdir+'/perfgraph.css', outdir)
except IOError:
sys.stderr.write('WARNING: Could not copy perfgraph.css\n')
copiedAllFiles = False
try:
shutil.copy(auxdir+'/perfgraph.js', outdir)
except IOError:
sys.stderr.write('WARNING: Could not copy perfgraph.js\n')
copiedAllFiles = False
graphInfo.finish()
sys.stdout.write('Read %d graph files in %d suites\n'%(numGraphfiles, numSuites))
if copiedAllFiles:
with open(outdir + "/SUCCESS", 'w') as f:
# do nothing, we just want to create the file so we know that no
            # errors that we currently detect occurred and all the required
# files were copied over. This is used to see if we should sync the
# files over to the website.
sys.stdout.write('Created SUCCESS file\n')
#
# recursively chmod the html/ directory for access via web servers
# - for directories, chmod u+rwx and go+rx
    # - for files, chmod u+rw and go+r
#
for root, dirs, files in os.walk(outdir):
os.chmod(root, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
for momo in dirs:
os.chmod(os.path.join(root, momo), stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
for momo in files:
os.chmod(os.path.join(root, momo), stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
return 0
if __name__ == '__main__':
sys.exit(main())
```
#### File: util/test/py3_compat.py
```python
import subprocess
import sys
import os
import errno
def instance_or_none(val, val_type):
return val is None or isinstance(val, val_type)
def bytes_to_str(byte_string):
if sys.version_info[0] >= 3 and not instance_or_none(byte_string, str):
try:
return str(byte_string, 'utf-8')
except UnicodeDecodeError as e:
pass
return byte_string
else:
return byte_string
def str_to_bytes(str_string):
if sys.version_info[0] >= 3 and not instance_or_none(str_string, bytes):
return bytes(str_string, 'utf-8')
else:
return str_string
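# Hedged examples (Python 3, inputs illustrative): bytes_to_str(b'ok') -> 'ok'
# and str_to_bytes('ok') -> b'ok'; values already of the target type (or None)
# are returned unchanged.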
def concat_streams(stdout, stderr):
""" Concats stderr to stdout. Returns str if both are utf-8 strings, bytes otherwise """
if sys.version_info[0] >= 3:
str_stdout = bytes_to_str(stdout)
str_stderr = bytes_to_str(stderr)
if isinstance(str_stdout, str) and isinstance(str_stderr, str):
return str_stdout + str_stderr
else:
return str_to_bytes(stdout) + str_to_bytes(stderr)
return stdout + stderr
class Py3CompatPopen(object):
""" Popen wrapper where communicate will convert input to bytes, and try to
        convert output to str. Note that communicate() will try to convert stdout
        and stderr to str independently (so you could end up with one being str and
the other being bytes)"""
def __init__(self, popen):
self._popen = popen
def communicate(self, input=None):
bytes_input = str_to_bytes(input)
stdout, stderr = self._popen.communicate(bytes_input)
str_stdout = bytes_to_str(stdout)
str_stderr = bytes_to_str(stderr)
return (str_stdout, str_stderr)
def __getattr__(self, name):
return getattr(self._popen, name)
def Popen(*args, **kwargs):
p = subprocess.Popen(*args, **kwargs)
return Py3CompatPopen(p)
def makedirs(path, mode=0o777, exist_ok=False):
if sys.version_info[0] >= 3:
os.makedirs(path, mode, exist_ok)
else:
try:
os.makedirs(path, mode)
except OSError as e:
if exist_ok and e.errno == errno.EEXIST:
pass # Ignore directory already existing error
else:
raise
``` |
{
"source": "jhha85/system-pharos-web-client",
"score": 2
} |
#### File: src/flask/group_api.py
```python
import os
import requests
import json
import logging
from flask import request, abort
from src.common.objects import SDAManager, Port
class GroupAPI:
def __init__(self):
pass
@classmethod
def register_api(cls, app):
# Get groups Info.
@app.route("/sdamanager/groups", methods=["GET"])
def sda_manager_groups():
logging.info("[" + request.method + "] sda manager groups - IN")
l = list()
ret = dict()
# No Input Target Address
if SDAManager().get_sda_manager_ip() == "":
d = dict()
d.update({"address": ""})
return json.dumps(d), 200
response = requests.get(
url="http://" + SDAManager().get_sda_manager_endpoint() + "/api/v1/management/groups",
timeout=1500)
if response.status_code is not 200:
logging.error("SDAM Server Return Error, Error Code(" + str(response.status_code) + ") - OUT")
abort(500)
for obj in response.json()["groups"]:
d = dict()
if "id" in obj:
d.update({"id": str(obj["id"])})
if "name" in obj:
d.update({"name": (obj["name"]).encode('utf-8')})
if "members" in obj:
d.update({"members": str(len(obj["members"]))})
l.append(d)
ret.update({"groups": l, "address": SDAManager().get_sda_manager_ip()})
return json.dumps(json.dumps(ret)), 200
# Get devices(SDAs) in selected group
@app.route("/sdamanager/group", methods=["POST"])
def sda_manager_group():
logging.info("[" + request.method + "] sda manager group - IN")
l = list()
ret = dict()
data = json.loads(request.data)
SDAManager.set_group_id(data["id"])
SDAManager.set_current_group_name(data["name"])
response = requests.get(
url="http://" + SDAManager().get_sda_manager_endpoint() + "/api/v1/management/groups/"
+ SDAManager.get_group_id(),
timeout=1500)
response2 = requests.get(
url="http://" + SDAManager().get_sda_manager_endpoint() + "/api/v1/management/nodes",
timeout=1500)
if response.status_code is not 200 or response2.status_code is not 200:
logging.error("SDAM Server Return Error, Error Code(" + str(response.status_code) + ") - OUT")
abort(500)
for obj in response2.json()["nodes"]:
for member in response.json()["members"]:
d = dict()
if obj["id"] == member:
if "id" in obj:
d.update({"id": str(obj["id"])})
res = requests.get(
url="http://" + SDAManager().get_sda_manager_endpoint() + "/api/v1/management/nodes/" + str(obj["id"]) + "/configuration",
timeout=1500)
if res.status_code is 200:
for prop in res.json()["properties"]:
if "devicename" in prop:
d.update({"name": str(prop["devicename"])})
if "ip" in obj:
d.update({"ip": str(obj["ip"])})
if "status" in obj:
d.update({"status": str(obj["status"])})
l.append(d)
ret.update({"devices": l})
return json.dumps(json.dumps(ret)), 200
# Create a group
@app.route("/sdamanager/group/create", methods=["POST"])
def sda_manager_create_group():
logging.info("[" + request.method + "] sda manager create group - IN")
l = list()
data = json.loads(request.data)
response = requests.post(
url="http://" + SDAManager().get_sda_manager_endpoint() + "/api/v1/management/groups/create",
data=json.dumps(data),
timeout=1500)
response2 = requests.post(
url="http://" + SDAManager().get_sda_manager_endpoint() + "/api/v1/management/groups/"
+ response.json()["id"] + "/join",
data=json.dumps(data["members"]),
timeout=1500)
if response.status_code is not 200 or response2.status_code is not 200:
logging.error("SDAM Server Return Error, Error Code(" + str(response.status_code) + ") - OUT")
abort(500)
return "", 200
# Delete a group
@app.route("/sdamanager/group/delete", methods=["DELETE"])
def sda_manager_group_delete():
logging.info("[" + request.method + "] sda manager group delete - IN")
response = requests.delete(
url="http://" + SDAManager().get_sda_manager_endpoint() + "/api/v1/management/groups/"
+ SDAManager.get_group_id(),
timeout=1500)
if response.status_code is not 200:
logging.error("SDAM Server Return Error, Error Code(" + str(response.status_code) + ") - OUT")
abort(500)
return "", 200
@app.route("/sdamanager/group/devices", methods=["GET"])
def sda_manager_group_devices():
logging.info("[" + request.method + "] sda manager group devices - IN")
l = list()
l2 = list()
ret = dict()
response = requests.get(
url="http://" + SDAManager().get_sda_manager_endpoint() + "/api/v1/management/groups/"
+ SDAManager.get_group_id(),
timeout=1500)
response2 = requests.get(
url="http://" + SDAManager().get_sda_manager_endpoint() + "/api/v1/management/nodes")
if response.status_code is not 200 or response2.status_code is not 200:
logging.error("SDAM Server Return Error, Error Code(" + str(response.status_code) + ") - OUT")
abort(500)
for obj in response.json()["members"]:
l2.append(str(obj))
for obj in response2.json()["nodes"]:
d = dict()
if "id" in obj and str(obj["id"]) in l2:
d.update({"id": str(obj["id"])})
res = requests.get(
url="http://" + SDAManager().get_sda_manager_endpoint() + "/api/v1/management/nodes/" + str(obj["id"]) + "/configuration",
timeout=1500)
if res.status_code is 200:
for prop in res.json()["properties"]:
if "devicename" in prop:
d.update({"name": str(prop["devicename"])})
if "ip" in obj:
d.update({"ip": str(obj["ip"])})
if "status" in obj:
d.update({"status": str(obj["status"])})
l.append(d)
ret.update({"devices": l})
return json.dumps(json.dumps(ret)), 200
# Deploy an app to the group
@app.route("/sdamanager/group/deploy", methods=["POST"])
def sda_manager_group_app_install():
logging.info("[" + request.method + "] sda manager group app install - IN")
l = list()
d = dict()
ret = dict()
data = json.loads(request.data)
response = requests.post(
url="http://" + SDAManager().get_sda_manager_endpoint() + "/api/v1/management/groups/"
+ SDAManager.get_group_id() + "/apps/deploy",
data=data["data"],
timeout=1500)
if response.status_code is not 200:
logging.error("SDAM Server Return Error, Error Code(" + str(response.status_code) + ") - OUT")
abort(500)
d.update({"id": response.json()["id"], "name": data["name"]})
root_path = os.getcwd()
if not os.path.exists(root_path + "/static/user/apps"):
with open(root_path + "/static/user/apps", 'w'): pass
with open(root_path + "/static/user/apps", 'r') as content_file:
content = content_file.read()
if content == "":
apps = {"apps": l}
else:
apps = json.loads(content)
apps["apps"].append(d)
with open(root_path + "/static/user/apps", 'w+') as content_file:
content_file.write(json.dumps(apps))
ret.update({"id": response.json()["id"]})
return json.dumps(json.dumps(ret)), 200
        # Change a group's members
@app.route("/sdamanager/group/members", methods=["POST"])
def sda_manager_group_members():
logging.info("[" + request.method + "] sda manager group members - IN")
join_dict = dict()
leave_dict = dict()
response = requests.get(
url="http://" + SDAManager().get_sda_manager_endpoint() + "/api/v1/management/groups/" + SDAManager.get_group_id(),
timeout=1500)
if response.status_code is not 200:
logging.error("SDAM Server Return Error, Error Code(" + str(response.status_code) + ") - OUT")
abort(500)
join_list = request.json["nodes"]
leave_list = response.json()["members"]
for i in join_list[:]:
if i in leave_list:
join_list.remove(i)
leave_list.remove(i)
join_dict.update({"nodes": join_list})
leave_dict.update({"nodes": leave_list})
response2 = requests.post(
url="http://" + SDAManager().get_sda_manager_endpoint() + "/api/v1/management/groups/"
+ SDAManager.get_group_id() + "/join",
data=json.dumps(join_dict),
timeout=1500)
response3 = requests.post(
url="http://" + SDAManager().get_sda_manager_endpoint() + "/api/v1/management/groups/"
+ SDAManager.get_group_id() + "/leave",
data=json.dumps(leave_dict),
timeout=1500)
if response2.status_code is not 200 and response3.status_code is not 200:
logging.error("SDAM Server Return Error, Error Code(" + str(response.status_code) + ") - OUT")
abort(500)
return "", 200
        # Get a group's apps Info.
@app.route("/sdamanager/group/apps", methods=["GET"])
def sda_manager_group_apps():
logging.info("[" + request.method + "] sda manager group apps - IN")
l = list()
apps = list()
ret = dict()
response = requests.get(
url="http://" + SDAManager().get_sda_manager_endpoint() + "/api/v1/management/groups/"
+ SDAManager.get_group_id() + "/apps",
timeout=1500)
if response.status_code is not 200:
logging.error("SDAM Server Return Error, Error Code(" + str(response.status_code) + ") - OUT")
abort(500)
root_path = os.getcwd()
if not os.path.exists(root_path + "/static/user/apps"):
with open(root_path + "/static/user/apps", 'w'): pass
with open(root_path + "/static/user/apps", 'r') as content_file:
content = content_file.read()
if content != "":
apps = json.loads(content)["apps"]
for obj in response.json()["apps"]:
d = dict()
d.update({"id": str(obj["id"])})
response2 = requests.get(
url="http://" + SDAManager().get_sda_manager_endpoint() + "/api/v1/management/groups/"
+ SDAManager.get_group_id() + "/apps/" + str(obj["id"]),
timeout=1500)
if response2.status_code is not 200:
logging.error("SDAM Server Return Error, Error Code(" + str(response.status_code) + ") - OUT")
abort(500)
d.update({"services": len(response2.json()["responses"][0]["services"])})
for app in apps:
if "id" in app and app["id"] == str(obj["id"]):
d.update({"name": app["name"]})
l.append(d)
ret.update({"group": "Group Name: " + SDAManager.get_current_group_name(), "apps": l})
return json.dumps(json.dumps(ret)), 200
# Get a group's app Info & Delete a group's app
@app.route("/sdamanager/group/app", methods=["POST", "DELETE"])
def sda_manager_group_app():
if request.method == "POST":
logging.info("[" + request.method + "] sda manager group app - IN")
l = list()
l2 = list()
ret = dict()
SDAManager.set_app_id(json.loads(request.data)["id"])
response = requests.get(
url="http://" + SDAManager().get_sda_manager_endpoint() + "/api/v1/management/nodes",
timeout=1500)
response2 = requests.get(
url="http://" + SDAManager().get_sda_manager_endpoint() + "/api/v1/management/groups/"
+ SDAManager.get_group_id() + "/apps",
timeout=1500)
if response.status_code is not 200:
logging.error("SDAM Server Return Error, Error Code(" + str(response.status_code) + ") - OUT")
abort(500)
for obj in response.json()["nodes"]:
d = dict()
if "id" in obj:
d.update({"id": str(obj["id"])})
res = requests.get(
url="http://" + SDAManager().get_sda_manager_endpoint() + "/api/v1/management/nodes/" + str(obj["id"]) + "/configuration",
timeout=1500)
if res.status_code is 200:
for prop in res.json()["properties"]:
if "devicename" in prop:
d.update({"name": str(prop["devicename"])})
if "ip" in obj:
d.update({"ip": str(obj["ip"])})
if "status" in obj:
d.update({"status": str(obj["status"])})
l.append(d)
for obj2 in response2.json()["apps"]:
if obj2["id"] == SDAManager.get_app_id():
for device in l:
for member in obj2["members"]:
if device["id"] == member:
l2.append(device)
break
ret.update({"devices": l2})
return json.dumps(json.dumps(ret)), 200
elif request.method == "DELETE":
response = requests.delete(
url="http://" + SDAManager().get_sda_manager_endpoint() + "/api/v1/management/groups/"
+ SDAManager.get_group_id() + "/apps/" + SDAManager.get_app_id(),
timeout=1500)
if response.status_code is not 200:
logging.error("SDAM Server Return Error, Error Code(" + str(response.status_code) + ") - OUT")
abort(500)
return "", 200
# Start a group's app
@app.route("/sdamanager/group/app/start", methods=["GET"])
def sda_manager_group_app_start():
logging.info("[" + request.method + "] sda manager group app update - IN")
response = requests.post(
url="http://" + SDAManager().get_sda_manager_endpoint() + "/api/v1/management/groups/"
+ SDAManager.get_group_id() + "/apps/" + SDAManager.get_app_id()
+ "/start",
timeout=1500)
if response.status_code is not 200:
logging.error("SDAM Server Return Error, Error Code(" + str(response.status_code) + ") - OUT")
abort(500)
return "", 200
# Stop a group's app
@app.route("/sdamanager/group/app/stop", methods=["GET"])
def sda_manager_group_app_stop():
logging.info("[" + request.method + "] sda manager group app update - IN")
response = requests.post(
url="http://" + SDAManager().get_sda_manager_endpoint() + "/api/v1/management/groups/"
+ SDAManager.get_group_id() + "/apps/" + SDAManager.get_app_id()
+ "/stop",
timeout=1500)
if response.status_code is not 200:
logging.error("SDAM Server Return Error, Error Code(" + str(response.status_code) + ") - OUT")
abort(500)
return "", 200
# Update a group's app
@app.route("/sdamanager/group/app/update", methods=["GET"])
def sda_manager_group_app_update():
logging.info("[" + request.method + "] sda manager group app update - IN")
response = requests.post(
url="http://" + SDAManager().get_sda_manager_endpoint() + "/api/v1/management/groups/"
+ SDAManager.get_group_id() + "/apps/" + SDAManager.get_app_id()
+ "/update",
timeout=1500)
if response.status_code is not 200:
logging.error("SDAM Server Return Error, Error Code(" + str(response.status_code) + ") - OUT")
abort(500)
return "", 200
# Update a group's app Yaml file to SDA DB
@app.route("/sdamanager/group/app/yaml", methods=["POST"])
def sda_manager_group_app_yaml():
logging.info("[" + request.method + "] sda manager group app YAML - IN")
data = json.loads(request.data)
response = requests.post(
url="http://" + SDAManager().get_sda_manager_endpoint() + "/api/v1/management/groups/"
+ SDAManager.get_group_id() + "/apps/" + SDAManager.get_app_id(),
data=data["data"],
timeout=1500)
if response.status_code is not 200:
logging.error("SDAM Server Return Error, Error Code(" + str(response.status_code) + ") - OUT")
abort(500)
return "", 200
``` |
{
"source": "jhhaanstra/Game-of-Life",
"score": 3
} |
#### File: Game-of-Life/gol/game.py
```python
from gol.grid import Vector
class Game(object):
NEIGHBOURING_POSITIONS = [
Vector(1, 0),
Vector(0, 1),
Vector(1, 1),
Vector(-1, 0),
Vector(0, -1),
Vector(-1, -1),
Vector(-1, 1),
Vector(1, -1)
]
interval = 1
running = True
game_state = []
width = 20
height = 15
def __init__(self, state: list):
self.game_state = state
def update(self) -> list:
new_state = []
neighbours = {}
for position in self.game_state:
living_neighbours_count = 0
for neighbour in self.NEIGHBOURING_POSITIONS:
neighbouring_position = position + neighbour
if neighbouring_position in self.game_state:
living_neighbours_count += 1
                else:
if neighbouring_position not in neighbours:
neighbours[neighbouring_position] = 1
else:
neighbours[neighbouring_position] += 1
if living_neighbours_count == 3 or living_neighbours_count == 2:
new_state.append(position)
for position, count in neighbours.items():
if count == 3:
new_state.append(position)
self.game_state = new_state
return self.game_state
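# Hedged, minimal self-check (an addition, not part of the original module;
# run it as `python -m gol.game` so the package import above resolves):
if __name__ == "__main__":
    blinker = [Vector(0, 1), Vector(1, 1), Vector(2, 1)]
    game = Game(blinker)
    # one update flips the horizontal blinker into a vertical one
    print(sorted((v.x, v.y) for v in game.update()))  # [(1, 0), (1, 1), (1, 2)]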
```
#### File: Game-of-Life/gol/grid.py
```python
from enum import Enum
from math import floor
from tkinter import Canvas, Event
class States(Enum):
DEAD = 1
ALIVE = 2
class Vector(object):
def __init__(self, x: int, y: int):
self.x = x
self.y = y
def as_coord(self) -> tuple:
return self.x, self.y
def __add__(self, other):
total_x = self.x + other.x
total_y = self.y + other.y
return Vector(total_x, total_y)
def __eq__(self, other) -> bool:
return self.x == other.x and self.y == other.y
def __hash__(self):
return hash((self.x, self.y))
def __str__(self) -> str:
return "[{}, {}]".format(self.x, self.y)
class Cell(object):
def __init__(self, vector: object, state: States):
self.vector = vector
self.state = state
class Grid(object):
canvas = None
rect_size = 25
width = 20
height = 15
occupied_fill = "#000000"
dead_fill = "#ffffff"
outline_color = "#c1c1c1"
def __init__(self, canvas: Canvas, rect_size: int = 25):
self.canvas = canvas
self.rect_size = rect_size
self.state = [[States.DEAD for y in range(self.height)] for x in range(self.width)]
self.game_state = []
self.canvas.bind("<Button-1>", self.update)
def draw(self):
for x in range(self.width):
for y in range(self.height):
if Vector(x, y) in self.game_state:
fill = self.occupied_fill
else:
fill = self.dead_fill
self.canvas.create_rectangle(
x * self.rect_size,
y * self.rect_size,
(x * self.rect_size) + self.rect_size,
(y * self.rect_size) + self.rect_size,
outline=self.outline_color,
fill=fill
)
def update(self, event: Event) -> None:
x = floor(event.x / self.rect_size)
y = floor(event.y / self.rect_size)
clicked_vector = Vector(x, y)
if clicked_vector in self.game_state:
self.game_state.remove(clicked_vector)
else:
self.game_state.append(clicked_vector)
self.redraw()
def redraw(self) -> None:
self.canvas.delete("all")
self.draw()
```
#### File: Game-of-Life/tests/pattern_test.py
```python
import unittest
from gol.game import Game
from gol.grid import Vector
# https://en.wikipedia.org/wiki/Conway%27s_Game_of_Life
class StillLifeTest(unittest.TestCase):
def _still_life_comparison(self, starting_board: list):
game = Game(starting_board.copy())
new_board = game.update()
self.assertEqual(
sorted(starting_board, key=lambda position: (position.x, position.y)),
sorted(new_board, key=lambda position: (position.x, position.y))
)
def test_block(self):
self._still_life_comparison([
Vector(0, 0),
Vector(0, 1),
Vector(1, 0),
Vector(1, 1)
])
def test_beehive(self):
self._still_life_comparison([
Vector(0, 1),
Vector(1, 0),
Vector(2, 0),
Vector(3, 1),
Vector(1, 2),
Vector(2, 2)
])
def test_loaf(self):
self._still_life_comparison([
Vector(0, 1),
Vector(1, 0),
Vector(2, 0),
Vector(3, 1),
Vector(1, 2),
Vector(2, 3),
Vector(3, 2)
])
def test_boat(self):
self._still_life_comparison([
Vector(0, 0),
Vector(0, 1),
Vector(1, 0),
Vector(1, 2),
Vector(2, 1),
])
def test_tub(self):
self._still_life_comparison([
Vector(0, 1),
Vector(1, 0),
Vector(1, 2),
Vector(2, 1),
])
class OscillatorsTest(unittest.TestCase):
def _oscillator_comparison(self, starting_board: list, period: int):
game = Game(starting_board.copy())
new_board = starting_board.copy()
for step in range(period):
new_board = game.update()
self.assertEqual(
sorted(starting_board, key=lambda position: (position.x, position.y)),
sorted(new_board, key=lambda position: (position.x, position.y))
)
def test_blinker(self):
self._oscillator_comparison([
Vector(0, 1),
Vector(1, 1),
Vector(2, 1)
], 2)
def test_toad(self):
self._oscillator_comparison([
Vector(1, 0),
Vector(2, 0),
Vector(3, 0),
Vector(0, 1),
Vector(1, 1),
Vector(2, 1),
], 2)
def test_beacon(self):
self._oscillator_comparison([
Vector(0, 0),
Vector(0, 1),
Vector(1, 0),
Vector(2, 3),
Vector(3, 2),
Vector(3, 3),
], 2)
# TODO: Pulsar & Penta-decathlon
class SpaceshipTest(unittest.TestCase):
def _spaceship_comparison(self, starting_board: list, period: int, offset: Vector):
game = Game(starting_board.copy())
new_board = starting_board.copy()
for step in range(period):
new_board = game.update()
expected_board = [position + offset for position in starting_board]
self.assertEqual(
sorted(expected_board, key=lambda position: (position.x, position.y)),
sorted(new_board, key=lambda position: (position.x, position.y))
)
def test_glider(self):
self._spaceship_comparison([
Vector(1, 0),
Vector(0, 2),
Vector(1, 2),
Vector(2, 2),
Vector(2, 1),
], 4, Vector(1, 1))
def test_lwss(self):
self._spaceship_comparison([
Vector(0, 0),
Vector(3, 0),
Vector(4, 1),
Vector(4, 2),
Vector(4, 3),
Vector(3, 3),
Vector(2, 3),
Vector(1, 3),
Vector(0, 2)
], 4, Vector(2, 0))
def test_mwss(self):
self._spaceship_comparison([
Vector(2, 0),
Vector(0, 1),
Vector(4, 1),
Vector(5, 2),
Vector(5, 3),
Vector(5, 4),
Vector(4, 4),
Vector(3, 4),
Vector(2, 4),
Vector(1, 4),
Vector(0, 3)
], 4, Vector(2, 0))
def test_hwss(self):
self._spaceship_comparison([
Vector(0, 1),
Vector(0, 3),
Vector(2, 0),
Vector(3, 0),
Vector(5, 1),
Vector(6, 2),
Vector(6, 3),
Vector(5, 4),
Vector(4, 4),
Vector(3, 4),
Vector(1, 4),
Vector(2, 4),
Vector(6, 4)
], 4, Vector(2, 0))
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jhhalls/helper_functions",
"score": 3
} |
#### File: plots/cross_validation/plot_stratified_cross_validation.py
```python
def plot_stratified_cross_validation():
fig, both_axes = plt.subplots(2, 1, figsize=(12, 5))
# plt.title("cross_validation_not_stratified")
axes = both_axes[0]
axes.set_title("Standard cross-validation with sorted class labels")
axes.set_frame_on(False)
n_folds = 3
n_samples = 150
n_samples_per_fold = n_samples / float(n_folds)
for i in range(n_folds):
colors = ["w"] * n_folds
colors[i] = "grey"
axes.barh(y=range(n_folds), width=[n_samples_per_fold - 1] *
n_folds, left=i * n_samples_per_fold, height=.6,
color=colors, hatch="//", edgecolor='k', align='edge')
axes.barh(y=[n_folds] * n_folds, width=[n_samples_per_fold - 1] *
n_folds, left=np.arange(3) * n_samples_per_fold, height=.6,
color="w", edgecolor='k', align='edge')
axes.invert_yaxis()
axes.set_xlim(0, n_samples + 1)
axes.set_ylabel("CV iterations")
axes.set_xlabel("Data points")
axes.set_xticks(np.arange(n_samples_per_fold / 2.,
n_samples, n_samples_per_fold))
axes.set_xticklabels(["Fold %d" % x for x in range(1, n_folds + 1)])
axes.set_yticks(np.arange(n_folds + 1) + .3)
axes.set_yticklabels(
["Split %d" % x for x in range(1, n_folds + 1)] + ["Class label"])
for i in range(3):
axes.text((i + .5) * n_samples_per_fold, 3.5, "Class %d" %
i, horizontalalignment="center")
ax = both_axes[1]
ax.set_title("Stratified Cross-validation")
ax.set_frame_on(False)
ax.invert_yaxis()
ax.set_xlim(0, n_samples + 1)
ax.set_ylabel("CV iterations")
ax.set_xlabel("Data points")
ax.set_yticks(np.arange(n_folds + 1) + .3)
ax.set_yticklabels(
["Split %d" % x for x in range(1, n_folds + 1)] + ["Class label"])
n_subsplit = n_samples_per_fold / 3.
for i in range(n_folds):
test_bars = ax.barh(
y=[i] * n_folds, width=[n_subsplit - 1] * n_folds,
left=np.arange(n_folds) * n_samples_per_fold + i * n_subsplit,
height=.6, color="grey", hatch="//", edgecolor='k', align='edge')
w = 2 * n_subsplit - 1
ax.barh(y=[0] * n_folds, width=[w] * n_folds, left=np.arange(n_folds)
* n_samples_per_fold + (0 + 1) * n_subsplit, height=.6, color="w",
hatch="//", edgecolor='k', align='edge')
ax.barh(y=[1] * (n_folds + 1), width=[w / 2., w, w, w / 2.],
left=np.maximum(0, np.arange(n_folds + 1) * n_samples_per_fold -
n_subsplit), height=.6, color="w", hatch="//",
edgecolor='k', align='edge')
training_bars = ax.barh(y=[2] * n_folds, width=[w] * n_folds,
left=np.arange(n_folds) * n_samples_per_fold,
height=.6, color="w", hatch="//", edgecolor='k',
align='edge')
ax.barh(y=[n_folds] * n_folds, width=[n_samples_per_fold - 1] *
n_folds, left=np.arange(n_folds) * n_samples_per_fold, height=.6,
color="w", edgecolor='k', align='edge')
for i in range(3):
ax.text((i + .5) * n_samples_per_fold, 3.5, "Class %d" %
i, horizontalalignment="center")
ax.set_ylim(4, -0.1)
plt.legend([training_bars[0], test_bars[0]], [
'Training data', 'Test data'], loc=(1.05, 1), frameon=False)
fig.tight_layout()
```
#### File: helper_functions/plots/plot_improper_preprocessing.py
```python
import matplotlib.pyplot as plt
def make_bracket(s, xy, textxy, width, ax):
annotation = ax.annotate(
s, xy, textxy, ha="center", va="center", size=20,
arrowprops=dict(arrowstyle="-[", fc="w", ec="k",
lw=2,), bbox=dict(boxstyle="square", fc="w"))
annotation.arrow_patch.get_arrowstyle().widthB = width
def plot_improper_processing():
fig, axes = plt.subplots(2, 1, figsize=(15, 10))
for axis in axes:
bars = axis.barh([0, 0, 0], [11.9, 2.9, 4.9], left=[0, 12, 15],
color=['white', 'grey', 'grey'], hatch="//",
align='edge', edgecolor='k')
bars[2].set_hatch(r"")
axis.set_yticks(())
axis.set_frame_on(False)
axis.set_ylim(-.1, 6)
axis.set_xlim(-0.1, 20.1)
axis.set_xticks(())
axis.tick_params(length=0, labeltop=True, labelbottom=False)
axis.text(6, -.3, "training folds",
fontdict={'fontsize': 14}, horizontalalignment="center")
axis.text(13.5, -.3, "validation fold",
fontdict={'fontsize': 14}, horizontalalignment="center")
axis.text(17.5, -.3, "test set",
fontdict={'fontsize': 14}, horizontalalignment="center")
make_bracket("scaler fit", (7.5, 1.3), (7.5, 2.), 15, axes[0])
make_bracket("SVC fit", (6, 3), (6, 4), 12, axes[0])
make_bracket("SVC predict", (13.4, 3), (13.4, 4), 2.5, axes[0])
axes[0].set_title("Cross validation")
axes[1].set_title("Test set prediction")
make_bracket("scaler fit", (7.5, 1.3), (7.5, 2.), 15, axes[1])
make_bracket("SVC fit", (7.5, 3), (7.5, 4), 15, axes[1])
make_bracket("SVC predict", (17.5, 3), (17.5, 4), 4.8, axes[1])
def plot_proper_processing():
fig, axes = plt.subplots(2, 1, figsize=(15, 8))
for axis in axes:
bars = axis.barh([0, 0, 0], [11.9, 2.9, 4.9],
left=[0, 12, 15], color=['white', 'grey', 'grey'],
hatch="//", align='edge', edgecolor='k')
bars[2].set_hatch(r"")
axis.set_yticks(())
axis.set_frame_on(False)
axis.set_ylim(-.1, 4.5)
axis.set_xlim(-0.1, 20.1)
axis.set_xticks(())
axis.tick_params(length=0, labeltop=True, labelbottom=False)
axis.text(6, -.3, "training folds", fontdict={'fontsize': 14},
horizontalalignment="center")
axis.text(13.5, -.3, "validation fold", fontdict={'fontsize': 14},
horizontalalignment="center")
axis.text(17.5, -.3, "test set", fontdict={'fontsize': 14},
horizontalalignment="center")
make_bracket("scaler fit", (6, 1.3), (6, 2.), 12, axes[0])
make_bracket("SVC fit", (6, 3), (6, 4), 12, axes[0])
make_bracket("SVC predict", (13.4, 3), (13.4, 4), 2.5, axes[0])
axes[0].set_title("Cross validation")
axes[1].set_title("Test set prediction")
make_bracket("scaler fit", (7.5, 1.3), (7.5, 2.), 15, axes[1])
make_bracket("SVC fit", (7.5, 3), (7.5, 4), 15, axes[1])
make_bracket("SVC predict", (17.5, 3), (17.5, 4), 4.8, axes[1])
fig.subplots_adjust(hspace=.3)
```
#### File: helper_functions/plots/plot_linear_regression.py
```python
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from .datasets import make_wave
from .plot_helpers import cm2
def plot_linear_regression_wave():
X, y = make_wave(n_samples=60)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
line = np.linspace(-3, 3, 100).reshape(-1, 1)
lr = LinearRegression().fit(X_train, y_train)
print("w[0]: %f b: %f" % (lr.coef_[0], lr.intercept_))
plt.figure(figsize=(8, 8))
plt.plot(line, lr.predict(line))
plt.plot(X, y, 'o', c=cm2(0))
ax = plt.gca()
ax.spines['left'].set_position('center')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_position('center')
ax.spines['top'].set_color('none')
ax.set_ylim(-3, 3)
#ax.set_xlabel("Feature")
#ax.set_ylabel("Target")
ax.legend(["model", "training data"], loc="best")
ax.grid(True)
ax.set_aspect('equal')
```
#### File: helper_functions/plots/plot_tree_nonmonotonous.py
```python
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from .tools import discrete_scatter
from .plot_2d_separator import plot_2d_separator
def plot_tree_not_monotone():
import graphviz
# make a simple 2d dataset
X, y = make_blobs(centers=4, random_state=8)
y = y % 2
plt.figure()
discrete_scatter(X[:, 0], X[:, 1], y)
plt.legend(["Class 0", "Class 1"], loc="best")
# learn a decision tree model
tree = DecisionTreeClassifier(random_state=0).fit(X, y)
plot_2d_separator(tree, X, linestyle="dashed")
# visualize the tree
export_graphviz(tree, out_file="mytree.dot", impurity=False, filled=True)
with open("mytree.dot") as f:
dot_graph = f.read()
print("Feature importances: %s" % tree.feature_importances_)
return graphviz.Source(dot_graph)
``` |
{
"source": "JHHAX/Passivehunter",
"score": 3
} |
#### File: JHHAX/Passivehunter/passivehunter.py
```python
from status_check import make_requests
from error_handling import error_handler
from collections import Counter
import os
import requests
import time
import re
import sys
import pathlib
import asyncio
def main(url , domain):
# a small try and except block for error handling
try:
r = requests.get(url)
r.raise_for_status()
except requests.exceptions.HTTPError as err:
print(err)
exit(0)
# filtering out the domain/subdomains using my ugly regex skills
pattern = r',.*.'+domain+'",' # an example of ugly regex
raw_domains = re.findall(pattern,r.text)
temp_domains = []
for i in raw_domains:
temp_domains.append(i[1:-2])
# using Counter for removing the duplicate entries of domains if any.
cnt = Counter(temp_domains)
print("\u001b[32m[!] Total No of unique domains/subdomains found : " + str(len(cnt)) + "\n\u001b[0m")
urls = []
# for storing https and http urls
print("\u001b[34;1m")
for i,j in cnt.items():
print(i)
urls.append("https://"+i) #appending https
urls.append("http://"+i) #appending http
print("\n\n")
print("\u001b[0m")
'''
    if the file already exists, empty it; this happens when you run the script
    against the same domain multiple times
'''
with open(domain+'-200.txt', 'w') as empty:
empty.write('')
with open(domain+'-other.txt', 'w') as empty:
empty.write('')
with open(domain+'.txt', 'w') as empty:
empty.write('')
for i in urls:
with open(domain+'.txt', 'a') as f:
f.write(i+"\n")
    # if no subdomains were found, exit the program and delete the empty files
if len(cnt)==0:
os.remove(domain+'.txt')
os.remove(domain+'-other.txt')
os.remove(domain+'-200.txt')
sys.exit()
if __name__ == "__main__":
if os.name =='nt':
os.system("cls") #clear screen
num_lines = 0
# banner below
banner = """
\u001b[35;1m
_ _ _
___ ___ ___ _ __|_|_ _ ___| |_ _ _ ___| |_ ___ ___
| . | .'|_ -|_ -| | | | -_| | | | | _| -_| _|
| _|__,|___|___|_|\_/|___|_|_|___|_|_|_| |___|_|
|_|\u001b[0m
\u001b[42;1m-coded with <3 by <NAME>\u001b[0m
"""
print(banner)
# checks if the supplied command argument is valid
if len(sys.argv)!=2:
print("\u001b[31;1m[!] Usage : python3 passivehunter.py domainname.tld\u001b[0m")
sys.exit(1)
domain = sys.argv[1]
assert sys.version_info >= (3, 7), "Script requires Python 3.7+."
url = "https://dns.bufferover.run/dns?q=." + domain
# a request is made to the host , that will check for errors (error handling)
error_handler(url) #function imported from error_handling.py
main(url , domain)
``` |
{
"source": "jhhb/pydefipulsedata",
"score": 3
} |
#### File: pydefipulsedata/defipulsedata/eth_gas_station.py
```python
from urllib import parse
from .utils import get_request
class EthGasStation:
__API_URL_BASE = 'https://data-api.defipulse.com/api/v1/egs/api'
def __init__(self, *, api_key):
self.api_base_url = self.__API_URL_BASE
self.base_params = {'api-key': api_key}
def get_gas_price(self):
encoded_params = parse.urlencode(self.base_params)
api_url = '{0}/ethgasAPI.json?{1}'.format(self.api_base_url, encoded_params)
return get_request(api_url)
def get_prediction_table(self):
encoded_params = parse.urlencode(self.base_params)
api_url = '{0}/predictTable.json?{1}'.format(self.api_base_url, encoded_params)
return get_request(api_url)
```
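A minimal usage sketch for the client above, assuming the module path from the file header; the API key is a placeholder and both calls simply return the JSON decoded by `get_request`:
```python
# Hypothetical usage of EthGasStation; the key below is a placeholder.
from defipulsedata.eth_gas_station import EthGasStation
client = EthGasStation(api_key='YOUR-API-KEY')
prices = client.get_gas_price()        # hits .../ethgasAPI.json
table = client.get_prediction_table()  # hits .../predictTable.json
print(prices, table)
```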
#### File: pydefipulsedata/defipulsedata/pools_fyi.py
```python
from urllib import parse
from .utils import get_request, validate_allowed_params
class PoolsFyi:
__API_URL_BASE = 'https://data-api.defipulse.com/api/v1/blocklytics/pools'
def __init__(self, *, api_key):
self.api_base_url = self.__API_URL_BASE
self.base_params = {'api-key': api_key}
def get_exchanges(self, *, params=None):
# Example URL:
# https://data-api.defipulse.com/api/v1/blocklytics/pools/v1/exchanges
allowed_params = {
'tags',
'platform',
'direction',
'orderBy',
'offset',
'limit',
'api-key',
}
function_params = params or {}
merged_params = {**function_params, **self.base_params}
validate_allowed_params(merged_params, allowed_params)
encoded_params = parse.urlencode(merged_params)
api_url = '{0}/v1/exchanges?{1}'.format(self.api_base_url, encoded_params)
return get_request(api_url)
def get_returns(self, *, address):
# Example URL for UNI-V2 ETH/GRT:
# https://data-api.defipulse.com/api/v1/blocklytics/pools/v1/returns/0x2e81ec0b8b4022fac83a21b2f2b4b8f5ed744d70
encoded_params = parse.urlencode(self.base_params)
api_url = '{0}/v1/returns/{1}?{2}'.format(
self.api_base_url, address, encoded_params
)
return get_request(api_url)
def get_liquidity(self, *, address):
# Returns the owners of liquidity on the AMM
# Example URL: https://data-api.defipulse.com/api/v1/blocklytics/pools/v0/liquidity/0x2e81ec0b8b4022fac83a21b2f2b4b8f5ed744d70/owners
encoded_params = parse.urlencode(self.base_params)
api_url = '{0}/v0/liquidity/{1}?{2}'.format(
self.api_base_url, address, encoded_params
)
return get_request(api_url)
def get_exchange(self, *, address):
# Example URL:
# https://data-api.defipulse.com/api/v1/blocklytics/pools/v1/exchange/0x2e81ec0b8b4022fac83a21b2f2b4b8f5ed744d70
encoded_params = parse.urlencode(self.base_params)
api_url = '{0}/v1/exchange/{1}?{2}'.format(
self.api_base_url, address, encoded_params
)
return get_request(api_url)
def get_trades(self, *, address, params=None):
allowed_params = {
'platform',
'direction',
'orderBy',
'offset',
'limit',
'to',
'from',
'api-key',
}
function_params = params or {}
merged_params = {**function_params, **self.base_params}
validate_allowed_params(merged_params, allowed_params)
encoded_params = parse.urlencode(merged_params)
api_url = '{0}/v1/trades/{1}?{2}'.format(
self.api_base_url, address, encoded_params
)
return get_request(api_url)
```
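A hypothetical sketch of the client above; the API key is a placeholder, the parameter names come from the `allowed_params` sets checked in the methods, and the pool address is the one quoted in the comments:
```python
# Hypothetical usage of PoolsFyi; key and parameter values are illustrative only.
from defipulsedata.pools_fyi import PoolsFyi
client = PoolsFyi(api_key='YOUR-API-KEY')
# 'limit' and 'offset' are listed in allowed_params of get_exchanges
exchanges = client.get_exchanges(params={'limit': 10, 'offset': 0})
# address taken from the example URL in the comments above
returns = client.get_returns(address='0x2e81ec0b8b4022fac83a21b2f2b4b8f5ed744d70')
```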
#### File: pydefipulsedata/defipulsedata/rek_to.py
```python
from urllib import parse
from .utils import get_request
class RekTo:
__API_URL_BASE = 'https://data-api.defipulse.com/api/v1/rekto/api'
def __init__(self, *, api_key):
self.api_base_url = self.__API_URL_BASE
self.base_params = {'api-key': api_key}
def get_events(self):
# https://data-api.defipulse.com/api/v1/rekto/api/events
encoded_params = parse.urlencode(self.base_params)
api_url = '{0}/events?{1}'.format(self.api_base_url, encoded_params)
return get_request(api_url)
def get_top_10(self):
# https://data-api.defipulse.com/api/v1/rekto/api/top10
encoded_params = parse.urlencode(self.base_params)
api_url = '{0}/top10?{1}'.format(self.api_base_url, encoded_params)
return get_request(api_url)
def get_total_damage(self):
# https://data-api.defipulse.com/api/v1/rekto/api/total-damage
encoded_params = parse.urlencode(self.base_params)
api_url = '{0}/total-damage?{1}'.format(self.api_base_url, encoded_params)
return get_request(api_url)
```
#### File: pydefipulsedata/tests/test_defi_pulse.py
```python
import unittest
import responses
from defipulsedata import DefiPulse
EMPTY_BLOB = {}
class TestWrapper(unittest.TestCase):
@responses.activate
def test_simple_endpoints(self):
client = DefiPulse(api_key='mock-key')
simple_endpoint_urls = [
(
client.get_market_data,
'https://data-api.defipulse.com/api/v1/defipulse/api/MarketData?api-key=mock-key',
),
(
client.get_projects,
'https://data-api.defipulse.com/api/v1/defipulse/api/GetProjects?api-key=mock-key',
),
(
client.get_lending_tokens,
'https://data-api.defipulse.com/api/v1/defipulse/api/GetLendingTokens?api-key=mock-key',
),
(
client.get_lending_market_data,
'https://data-api.defipulse.com/api/v1/defipulse/api/LendingMarketData?api-key=mock-key',
),
(
client.get_lending_projects,
'https://data-api.defipulse.com/api/v1/defipulse/api/GetLendingProjects?api-key=mock-key',
),
]
for fn, url in simple_endpoint_urls:
responses.reset()
responses.add(responses.GET, url, json=EMPTY_BLOB, status=200)
fn()
self.assertEqual(responses.calls[0].request.url, url)
@responses.activate
def test_get_history(self):
client = DefiPulse(api_key='mock-key')
url = 'https://data-api.defipulse.com/api/v1/defipulse/api/GetHistory?api-key=mock-key'
responses.add(responses.GET, url, json=EMPTY_BLOB, status=200)
client.get_history()
self.assertEqual(responses.calls[0].request.url, url)
responses.reset()
url_with_invalid_param_combination = 'https://data-api.defipulse.com/api/v1/defipulse/api/GetHistory?period=period&length=length&api-key=mock-key'
responses.add(
responses.GET,
url_with_invalid_param_combination,
json=EMPTY_BLOB,
status=200,
)
client.get_history(params={'period': 'period', 'length': 'length'})
self.assertEqual(
responses.calls[0].request.url,
url_with_invalid_param_combination,
)
self.assertWarnsRegex(
UserWarning, 'API only supports "period" or "length" params exclusively.'
)
@responses.activate
def test_get_lending_history(self):
client = DefiPulse(api_key='mock-key')
url = 'https://data-api.defipulse.com/api/v1/defipulse/api/getLendingHistory?api-key=mock-key'
responses.add(responses.GET, url, json=EMPTY_BLOB, status=200)
client.get_lending_history()
self.assertEqual(
responses.calls[0].request.url,
url,
)
responses.reset()
url_with_invalid_param_combination = 'https://data-api.defipulse.com/api/v1/defipulse/api/getLendingHistory?period=period&length=length&api-key=mock-key'
responses.add(
responses.GET,
url_with_invalid_param_combination,
json=EMPTY_BLOB,
status=200,
)
client.get_lending_history(params={'period': 'period', 'length': 'length'})
self.assertEqual(
responses.calls[0].request.url,
url_with_invalid_param_combination,
)
self.assertWarnsRegex(
UserWarning, 'API only supports "period" or "length" params exclusively.'
)
@responses.activate
def test_get_rates(self):
client = DefiPulse(api_key='mock-key')
url_without_amount = 'https://data-api.defipulse.com/api/v1/defipulse/api/GetRates?token=DAI&api-key=mock-key'
responses.add(responses.GET, url_without_amount, json=EMPTY_BLOB, status=200)
client.get_rates(token='DAI')
self.assertEqual(
responses.calls[0].request.url,
url_without_amount,
'it does not include amount as a query param',
)
responses.reset()
url_with_amount = 'https://data-api.defipulse.com/api/v1/defipulse/api/GetRates?token=DAI&amount=100&api-key=mock-key'
responses.add(responses.GET, url_with_amount, json=EMPTY_BLOB, status=200)
client.get_rates(token='DAI', amount=100)
self.assertEqual(
responses.calls[0].request.url,
url_with_amount,
'it includes the amount as a query param',
)
``` |
{
"source": "jhhcs/refinery",
"score": 2
} |
#### File: refinery/lib/mscrypto.py
```python
import enum
from Crypto.PublicKey import RSA
from Crypto.Math.Numbers import Integer
from refinery.lib.structures import Struct, StructReader
class _ENUM(enum.IntEnum):
def __str__(self): return self.name
def __repr__(self): return self.name
class TYPES(_ENUM):
KEYSTATEBLOB = 0xC # noqa
OPAQUEKEYBLOB = 0x9 # noqa
PLAINTEXTKEYBLOB = 0x8 # noqa
PRIVATEKEYBLOB = 0x7 # noqa
PUBLICKEYBLOB = 0x6 # noqa
PUBLICKEYBLOBEX = 0xA # noqa
SIMPLEBLOB = 0x1 # noqa
SYMMETRICWRAPKEYBLOB = 0xB # noqa
class ALGORITHMS(_ENUM):
CALG_3DES = 0x00006603 # noqa
CALG_3DES_112 = 0x00006609 # noqa
CALG_AES = 0x00006611 # noqa
CALG_AES_128 = 0x0000660e # noqa
CALG_AES_192 = 0x0000660f # noqa
CALG_AES_256 = 0x00006610 # noqa
CALG_AGREEDKEY_ANY = 0x0000aa03 # noqa
CALG_CYLINK_MEK = 0x0000660c # noqa
CALG_DES = 0x00006601 # noqa
CALG_DESX = 0x00006604 # noqa
CALG_DH_EPHEM = 0x0000aa02 # noqa
CALG_DH_SF = 0x0000aa01 # noqa
CALG_DSS_SIGN = 0x00002200 # noqa
CALG_ECDH = 0x0000aa05 # noqa
CALG_ECDH_EPHEM = 0x0000ae06 # noqa
CALG_ECDSA = 0x00002203 # noqa
CALG_ECMQV = 0x0000a001 # noqa
CALG_HASH_REPLACE_OWF = 0x0000800b # noqa
CALG_HUGHES_MD5 = 0x0000a003 # noqa
CALG_HMAC = 0x00008009 # noqa
CALG_KEA_KEYX = 0x0000aa04 # noqa
CALG_MAC = 0x00008005 # noqa
CALG_MD2 = 0x00008001 # noqa
CALG_MD4 = 0x00008002 # noqa
CALG_MD5 = 0x00008003 # noqa
CALG_NO_SIGN = 0x00002000 # noqa
CALG_OID_INFO_CNG_ONLY = 0xffffffff # noqa
CALG_OID_INFO_PARAMETERS = 0xfffffffe # noqa
CALG_PCT1_MASTER = 0x00004c04 # noqa
CALG_RC2 = 0x00006602 # noqa
CALG_RC4 = 0x00006801 # noqa
CALG_RC5 = 0x0000660d # noqa
CALG_RSA_KEYX = 0x0000a400 # noqa
CALG_RSA_SIGN = 0x00002400 # noqa
CALG_SCHANNEL_ENC_KEY = 0x00004c07 # noqa
CALG_SCHANNEL_MAC_KEY = 0x00004c03 # noqa
CALG_SCHANNEL_MASTER_HASH = 0x00004c02 # noqa
CALG_SEAL = 0x00006802 # noqa
CALG_SHA1 = 0x00008004 # noqa
CALG_SHA_256 = 0x0000800c # noqa
CALG_SHA_384 = 0x0000800d # noqa
CALG_SHA_512 = 0x0000800e # noqa
CALG_SKIPJACK = 0x0000660a # noqa
CALG_SSL2_MASTER = 0x00004c05 # noqa
CALG_SSL3_MASTER = 0x00004c01 # noqa
CALG_SSL3_SHAMD5 = 0x00008008 # noqa
CALG_TEK = 0x0000660b # noqa
CALG_TLS1_MASTER = 0x00004c06 # noqa
CALG_TLS1PRF = 0x0000800a # noqa
class BCRYPT_MAGIC(_ENUM):
BCRYPT_RSAPUBLIC_MAGIC = 0x31415352 # noqa
BCRYPT_RSAPRIVATE_MAGIC = 0x32415352 # noqa
BCRYPT_RSAFULLPRIVATE_MAGIC = 0x33415352 # noqa
class BLOBHEADER(Struct):
def __init__(self, reader: StructReader):
t, self.version, self.reserved, a = reader.read_struct('BBHI')
self.type = TYPES(t)
self.algorithm = ALGORITHMS(a)
class PLAINTEXTKEYBLOB(Struct):
def __bytes__(self): return bytes(self.data)
def __init__(self, reader: StructReader):
self.size = reader.u32()
self.data = reader.read(self.size)
class SIMPLEBLOB(Struct):
def __bytes__(self): return bytes(self.data)
def __init__(self, reader: StructReader):
self.magic = reader.read(4)
if self.magic != B'\0\0\xA4\0':
raise ValueError(F'Invalid magic bytes: {self.magic.hex(":").upper()}')
self.data = reader.read(0x100)
class BCRYPT_RSAKEY_BLOB(Struct):
def __init__(self, reader: StructReader):
magic, bits, e_size, n_size, p_size, q_size = reader.read_struct('<6L')
e_size *= 8
n_size *= 8
self.magic = BCRYPT_MAGIC(magic)
reader.bigendian = True
self.exponent = reader.read_integer(e_size)
self.modulus = reader.read_integer(n_size)
self.bit_size = bits
if self.has_private_key:
p_size *= 8
q_size *= 8
self.prime1 = reader.read_integer(p_size)
self.prime2 = reader.read_integer(q_size)
if self.magic is BCRYPT_MAGIC.BCRYPT_RSAFULLPRIVATE_MAGIC:
self.exp1 = reader.read_integer(p_size)
self.exp2 = reader.read_integer(q_size)
self.coefficient = reader.read_integer(p_size)
self.exp_private = reader.read_integer(n_size)
else:
self.exp1 = None
self.exp2 = None
self.coefficient = None
self.exp_private = None
else:
self.prime1 = None
self.prime2 = None
@property
def has_private_key(self):
return self.magic in (
BCRYPT_MAGIC.BCRYPT_RSAPRIVATE_MAGIC,
BCRYPT_MAGIC.BCRYPT_RSAFULLPRIVATE_MAGIC
)
def convert(self, force_public=False):
components = self.modulus, self.exponent
if not force_public and self.has_private_key:
components += self.exp_private, self.prime1, self.prime2
return RSA.construct(components)
class RSAPUBKEY(Struct):
def __init__(self, reader: StructReader):
self.magic = reader.read(4)
if self.magic not in (B'RSA2', B'RSA1'):
raise ValueError(F'Invalid signature: {self.magic.hex()}')
self.size, self.exponent = reader.read_struct('II')
if self.size % 8 != 0:
raise ValueError(F'The bitlength {self.size} is not a multiple of 8.')
self.modulus = reader.read_integer(self.size)
def convert(self):
return RSA.construct((self.modulus, self.exponent))
def __str__(self):
        return self.convert().export_key(format='PEM').decode('ascii')
def __bytes__(self):
return str(self).encode('ascii')
class PRIVATEKEYBLOB(Struct):
def __init__(self, reader: StructReader):
self.pub = RSAPUBKEY(reader)
halfsize = self.pub.size // 2
self.prime1 = reader.read_integer(halfsize)
self.prime2 = reader.read_integer(halfsize)
self.exp1 = reader.read_integer(halfsize)
self.exp2 = reader.read_integer(halfsize)
self.coefficient = reader.read_integer(halfsize)
self.exponent = reader.read_integer(self.pub.size)
self._check()
def _check(self):
if self.pub.modulus // self.prime1 != self.prime2:
raise ValueError('Product of primes does not equal the modulus.')
from math import gcd
a = self.prime1 - 1
b = self.prime2 - 1
totient = (a * b) // gcd(a, b)
if self.pub.exponent * self.exponent % totient != 1:
raise ValueError('Public exponent is not a modular inverse of private exponent.')
def convert(self):
parameters = (
self.pub.modulus,
self.pub.exponent,
self.exponent,
self.prime1,
self.prime2,
self.coefficient
)
try:
return RSA.construct(parameters, consistency_check=True)
except ValueError as V:
try:
return RSA.RsaKey(
n=Integer(self.pub.modulus),
e=Integer(self.pub.exponent),
d=Integer(self.exponent),
p=Integer(self.prime1),
q=Integer(self.prime2),
u=Integer(self.coefficient),
)
except Exception as E:
raise E from V
def __str__(self):
        return self.convert().export_key(format='PEM').decode('ascii')
def __bytes__(self):
return str(self).encode('ascii')
class DHPUBKEY(Struct):
def __init__(self, reader: StructReader):
self.magic, self.size = reader.read_struct('4sI')
if self.magic not in (B'\0DH1', B'\0DH2'):
raise ValueError(F'Invalid magic bytes: {self.magic.hex(":").upper()}')
if self.size % 8 != 0:
raise ValueError('Bit length is not a multiple of 8.')
self.public = reader.read_integer(self.size)
self.prime = reader.read_integer(self.size)
self.generator = reader.read_integer(self.size)
def __bytes__(self):
raise NotImplementedError
class CRYPTOKEY(Struct):
def __init__(self, reader: StructReader):
self.header = BLOBHEADER(reader)
if self.header.type in {
TYPES.KEYSTATEBLOB,
TYPES.OPAQUEKEYBLOB,
TYPES.SYMMETRICWRAPKEYBLOB
}:
raise ValueError(F'Unsupported type: {self.header.type}')
elif self.header.type == TYPES.PLAINTEXTKEYBLOB:
self.key = PLAINTEXTKEYBLOB(reader)
elif self.header.type == TYPES.SIMPLEBLOB:
self.key = SIMPLEBLOB(reader)
else:
if self.header.algorithm not in {
ALGORITHMS.CALG_RSA_KEYX,
ALGORITHMS.CALG_RSA_SIGN
}:
raise ValueError(F'Unknown algorithm for {self.header.type}: {self.header.algorithm}')
elif self.header.type == TYPES.PRIVATEKEYBLOB:
self.key = PRIVATEKEYBLOB(reader)
elif self.header.type == TYPES.PUBLICKEYBLOB:
self.key = RSAPUBKEY(reader)
elif self.header.type == TYPES.PUBLICKEYBLOBEX:
self.key = DHPUBKEY(reader)
```
#### File: crypto/cipher/__init__.py
```python
import abc
from typing import (
Any,
ByteString,
ClassVar,
Iterable,
Optional,
Tuple,
Type,
)
from refinery.lib.crypto import (
CipherObjectFactory,
CipherInterface,
)
from refinery.lib.argformats import (
Option,
extract_options,
OptionFactory,
)
from refinery.units import (
arg,
Executable,
RefineryCriticalException,
RefineryPartialResult,
Unit,
)
class CipherExecutable(Executable):
"""
A metaclass for the abstract class `refinery.units.crypto.cipher.CipherUnit` which
normalizes the class variable `key_sizes` containing an iterable of all possible
key sizes that are acceptable for the represented cipher.
"""
def __new__(mcs, name, bases: tuple, nmspc: dict, abstract=False, blocksize=1, key_sizes=None):
nmspc.setdefault('blocksize', blocksize)
nmspc.setdefault('key_sizes', key_sizes)
return super(CipherExecutable, mcs).__new__(mcs, name, bases, nmspc, abstract=abstract)
def __init__(cls, name, bases, nmspc, abstract=False, **_):
cls.key_sizes = (cls.key_sizes,) if isinstance(cls.key_sizes, int) else tuple(cls.key_sizes or ())
super(CipherExecutable, cls).__init__(name, bases, nmspc, abstract=abstract)
class CipherUnit(Unit, metaclass=CipherExecutable, abstract=True):
def __init__(self, key: arg(help='The encryption key.'), **keywords):
super().__init__(key=key, **keywords)
@abc.abstractmethod
def decrypt(self, data: ByteString) -> ByteString:
raise NotImplementedError
@abc.abstractmethod
def encrypt(self, data: ByteString) -> ByteString:
raise NotImplementedError
def process(self, data: ByteString) -> ByteString:
if self.key_sizes and len(self.args.key) not in self.key_sizes:
raise ValueError(F'the given key has an invalid length of {len(self.args.key)} bytes.')
return self.decrypt(data)
def reverse(self, data: ByteString) -> ByteString:
return self.encrypt(data)
class StreamCipherUnit(CipherUnit, abstract=True):
def __init__(
self, key,
stateful: arg.switch('-s', help='Do not reset the key stream while processing the chunks of one frame.') = False,
**keywords
):
super().__init__(key=key, stateful=stateful, **keywords)
self._keystream = None
@abc.abstractmethod
def keystream(self) -> Iterable[int]:
raise NotImplementedError
@Unit.Requires('numpy')
def _numpy():
import numpy
return numpy
def encrypt(self, data: bytearray) -> bytearray:
it = self._keystream or self.keystream()
try:
np = self._numpy
except ImportError:
self.log_info('this unit could perform faster if numpy was installed.')
out = bytearray(a ^ b for a, b in zip(it, data))
else:
key = np.fromiter(it, dtype=np.uint8, count=len(data))
out = np.frombuffer(
memoryview(data), dtype=np.uint8, count=len(data))
out ^= key
return out
def filter(self, chunks: Iterable):
if self.args.stateful:
self._keystream = self.keystream()
yield from chunks
self._keystream = None
decrypt = encrypt
class BlockCipherUnitBase(CipherUnit, abstract=True):
def __init__(
self, key, iv: arg('-i', '--iv', help=(
'Specifies the initialization vector. If none is specified, then a block of zero bytes is used.')) = B'',
padding: arg.choice('-p', type=str.lower, choices=['pkcs7', 'iso7816', 'x923', 'raw'],
nargs=1, metavar='P', help=(
'Choose a padding algorithm ({choices}). The raw algorithm does nothing. By default, all other algorithms '
'are attempted. In most cases, the data was not correctly decrypted if none of these work.')
) = None,
raw: arg.switch('-r', '--raw', help='Set the padding to raw; ignored when a padding is specified.') = False,
**keywords
):
if not padding:
padding = ['raw'] if raw else ['pkcs7', 'iso7816', 'x923']
elif not isinstance(padding, list):
padding = [padding]
iv = iv or bytes(self.blocksize)
super().__init__(key=key, iv=iv, padding=padding, **keywords)
@property
def iv(self) -> ByteString:
return self.args.iv
def reverse(self, data: ByteString) -> ByteString:
from Crypto.Util.Padding import pad
padding = self.args.padding[0]
self.log_info('padding method:', padding)
if padding != 'raw':
data = pad(data, self.blocksize, padding)
return super().reverse(data)
def process(self, data: ByteString) -> ByteString:
from Crypto.Util.Padding import unpad
result = super().process(data)
for p in self.args.padding:
if p == 'raw':
return result
try:
unpadded = unpad(result, self.blocksize, p.lower())
except Exception:
pass
else:
self.log_info(F'unpadding worked using {p}')
return unpadded
raise RefineryPartialResult(
'None of these paddings worked: {}'.format(', '.join(self.args.padding)),
partial=result)
class StandardCipherExecutable(CipherExecutable):
_available_block_cipher_modes: ClassVar[Type[Option]]
_cipher_object_factory: ClassVar[CipherObjectFactory]
def __new__(mcs, name, bases, nmspc, cipher: Optional[CipherObjectFactory] = None):
if cipher is None:
keywords = dict(abstract=True)
else:
keywords = dict(
abstract=False,
blocksize=cipher.block_size,
key_sizes=cipher.key_size,
)
return super(StandardCipherExecutable, mcs).__new__(mcs, name, bases, nmspc, **keywords)
def __init__(cls, name, bases, nmspc, cipher: Optional[CipherObjectFactory] = None):
cls: Executable
abstract = cipher is None
super(StandardCipherExecutable, cls).__init__(
name, bases, nmspc, abstract=abstract)
cls._cipher_object_factory = cipher
if abstract or cipher.block_size <= 1 or 'mode' not in cls._argument_specification:
return
modes = extract_options(cipher)
if not modes:
raise RefineryCriticalException(
F'The cipher {cipher.name} is a block cipher module, '
F'but no cipher block mode constants were found.'
)
cls._available_block_cipher_modes = OptionFactory(modes, ignorecase=True)
cls._argument_specification['mode'].merge_all(arg(
'-m', '--mode', type=str.upper, metavar='M', nargs=arg.delete, choices=list(modes),
help=(
'Choose cipher mode to be used. Possible values are: {}. By default, the CBC mode'
                ' is used when an IV is provided, and ECB otherwise.'.format(', '.join(modes))
)
))
class StandardCipherUnit(CipherUnit, metaclass=StandardCipherExecutable):
_available_block_cipher_modes: ClassVar[Type[Option]]
_cipher_object_factory: ClassVar[CipherObjectFactory]
def _get_cipher_instance(self, **optionals) -> CipherInterface:
self.log_info(lambda: F'encryption key: {self.args.key.hex()}')
return self._cipher_object_factory.new(key=self.args.key, **optionals)
def encrypt(self, data: bytes) -> bytes:
return self._get_cipher_instance().encrypt(data)
def decrypt(self, data: bytes) -> bytes:
cipher = self._get_cipher_instance()
try:
return cipher.decrypt(data)
except ValueError:
overlap = len(data) % self.blocksize
if not overlap:
raise
data[-overlap:] = []
self.log_warn(F'removing {overlap} bytes from the input to make it a multiple of the {self.blocksize}-byte block size')
return cipher.decrypt(data)
class StandardBlockCipherUnit(BlockCipherUnitBase, StandardCipherUnit):
blocksize: int
key_sizes: Tuple[int, ...]
def __init__(self, key, iv=B'', padding=None, mode=None, raw=False):
mode = self._available_block_cipher_modes(mode or iv and 'CBC' or 'ECB')
if iv and mode.name == 'ECB':
raise ValueError('No initialization vector can be specified for ECB mode.')
super().__init__(key=key, iv=iv, padding=padding, mode=mode, raw=raw)
def _get_cipher_instance(self, **optionals) -> CipherInterface:
mode = self.args.mode.name
if mode != 'ECB':
iv = bytes(self.iv)
if mode == 'CTR' and len(iv) == self.blocksize:
from Crypto.Util import Counter
counter = Counter.new(self.blocksize * 8,
initial_value=int.from_bytes(iv, 'big'))
optionals['counter'] = counter
elif mode in ('CCM', 'EAX', 'GCM', 'SIV', 'OCB', 'CTR'):
bounds = {
'CCM': (7, self.blocksize - 2),
'OCB': (1, self.blocksize),
'CTR': (1, self.blocksize),
}.get(mode, None)
if bounds and len(iv) not in range(*bounds):
raise ValueError(F'Invalid nonce length, must be in {bounds} for {mode}.')
optionals['nonce'] = iv
elif mode in ('PCBC', 'CBC', 'CFB', 'OFB', 'OPENPGP'):
if len(iv) > self.blocksize:
self.log_warn(F'The IV has length {len(self.args.iv)} and will be truncated to the blocksize {self.blocksize}.')
iv = iv[:self.blocksize]
elif len(iv) < self.blocksize:
raise ValueError(F'The IV has length {len(self.args.iv)} but the block size is {self.blocksize}.')
optionals['iv'] = iv
self.log_info('initial vector:', iv.hex())
if self.args.mode:
optionals['mode'] = self.args.mode.value
try:
return super()._get_cipher_instance(**optionals)
except TypeError:
if 'iv' not in optionals:
raise
del optionals['iv']
if self.iv:
self.log_info('ignoring iv for mode', self.args.mode)
return self._cipher_object_factory.new(key=self.args.key, **optionals)
class LatinCipherUnit(StreamCipherUnit, abstract=True):
key_sizes = 16, 32
def __init__(
self, key,
nonce: arg(help='The nonce. Default is the string {default}.') = B'REFINERY',
magic: arg('-m', help='The magic constant; depends on the key size by default.') = B'',
offset: arg.number('-x', help='Optionally specify the stream index, default is {default}.') = 0,
rounds: arg.number('-r', help='The number of rounds. Has to be an even number.') = 20,
):
super().__init__(key=key, nonce=nonce, magic=magic, offset=offset, rounds=rounds)
class LatinCipherStandardUnit(StandardCipherUnit):
def __init__(self, key, nonce: arg(help='The nonce. Default is the string {default}.') = B'REFINERY'):
super().__init__(key, nonce=nonce)
def _get_cipher_instance(self, **optionals) -> Any:
self.log_info('one-time nonce:', self.args.nonce.hex())
return super()._get_cipher_instance(nonce=self.args.nonce)
```
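To make the keystream logic above concrete, here is a standalone sketch of what `StreamCipherUnit.encrypt` does in its pure-Python branch: XOR the input against bytes drawn from a keystream generator. This is an illustration outside the unit framework, not refinery code:
```python
# Standalone sketch of the keystream XOR that StreamCipherUnit.encrypt performs
# in its pure-Python branch; this is an illustration, not refinery code.
from itertools import cycle
def keystream(key: bytes):
    # stand-in for a unit's keystream(): an endless stream of key bytes
    yield from cycle(key)
def xor_cipher(data: bytes, key: bytes) -> bytearray:
    # mirrors: bytearray(a ^ b for a, b in zip(it, data))
    return bytearray(a ^ b for a, b in zip(keystream(key), data))
ct = xor_cipher(b'attack at dawn', b'key')
assert xor_cipher(ct, b'key') == b'attack at dawn'  # XOR is its own inverse
```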
#### File: crypto/cipher/rot.py
```python
from refinery.units import arg, Unit
_UCASE = range(ord('A'), ord('Z') + 1)
_LCASE = range(ord('a'), ord('z') + 1)
class rot(Unit):
"""
Rotate the characters of the alphabet by the given amount. The default
amount is 13, providing the common (and weak) string obfuscation method.
"""
def __init__(self, amount: arg.number(help='Number of letters to rotate by; Default is 13.') = 13):
super().__init__(amount=amount)
def process(self, data: bytearray):
rot = self.args.amount % 26
for index, byte in enumerate(data):
for alphabet in _LCASE, _UCASE:
if byte in alphabet:
zero = alphabet[0]
data[index] = zero + (byte - zero + rot) % 26
break
return data
```
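As a quick check of the unit above, the default amount of 13 gives the classic ROT13 transform; direct instantiation and the `unit(data)` call follow the pattern used elsewhere in the project's unit tests:
```python
# Minimal ROT13 sketch using the rot unit; the input is a bytearray because
# process() rewrites the letter bytes in place.
from refinery import rot
unit = rot()  # default amount is 13 (ROT13)
assert bytes(unit(bytearray(b'Uryyb, Jbeyq'))) == b'Hello, World'  # non-letters unchanged
```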
#### File: crypto/keyderive/__init__.py
```python
import importlib
from refinery.units import arg, Unit
from refinery.lib.argformats import number
from refinery.lib.types import ByteStr
from enum import Enum
from typing import Callable
__all__ = ['arg', 'HASH', 'KeyDerivation']
class HASH(str, Enum):
MD2 = 'MD2'
MD4 = 'MD4'
MD5 = 'MD5'
SHA1 = 'SHA'
SHA256 = 'SHA256'
SHA512 = 'SHA512'
SHA224 = 'SHA224'
SHA384 = 'SHA384'
def multidecode(data: ByteStr, function: Callable[[str], ByteStr]) -> ByteStr:
for codec in ['utf8', 'latin1', 'cp1252']:
try:
return function(data.decode(codec))
except UnicodeError:
continue
else:
return function(''.join(chr(t) for t in data))
class KeyDerivation(Unit, abstract=True):
def __init__(
self,
size: arg(help='The number of bytes to generate.', type=number),
salt: arg(help='Salt for the derivation.'),
hash: arg.option(choices=HASH, metavar='hash',
help='Specify one of these algorithms (default is {default}): {choices}') = None,
iter: arg.number(metavar='iter', help='Number of iterations; default is {default}.') = None,
**kw
):
if hash is not None:
name = arg.as_option(hash, HASH)
hash = importlib.import_module(F'Crypto.Hash.{name}')
return super().__init__(salt=salt, size=size, iter=iter, hash=hash, **kw)
@property
def hash(self): return self.args.hash
```
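A short illustration of `multidecode` above: it decodes the raw bytes with a few codecs before handing the resulting string to the callback. The MD5 callback here is only an arbitrary example of such a function, not something the module prescribes:
```python
# Sketch of multidecode with an arbitrary hashing callback (an assumption,
# not part of the module's API); the lambda receives an already-decoded str.
from Crypto.Hash import MD5
from refinery.units.crypto.keyderive import multidecode
digest = multidecode(b'swordfish', lambda text: MD5.new(text.encode('latin1')).digest())
assert len(digest) == 16
```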
#### File: units/encoding/b64.py
```python
import base64
from refinery.units import arg, Unit
class b64(Unit):
"""
Base64 encoding and decoding.
"""
def __init__(self, urlsafe: arg.switch('-u', help='use URL-safe alphabet') = False):
super().__init__(urlsafe=urlsafe)
@property
def altchars(self):
if self.args.urlsafe:
return B'-_'
def reverse(self, data):
return base64.b64encode(data, altchars=self.altchars)
def process(self, data: bytearray):
if not data:
return data
if len(data) == 1:
raise ValueError('single byte can not be base64-decoded.')
data.extend(B'===')
return base64.b64decode(data, altchars=self.altchars)
```
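For reference, a small sketch of the unit above in both directions; `reverse` encodes, and calling the unit decodes, with the padding fixup from `process` repairing truncated input. Direct instantiation follows the pattern in the unit tests:
```python
# Minimal b64 sketch; the decode input is a bytearray because process()
# appends padding in place before calling base64.b64decode.
from refinery import b64
unit = b64()
encoded = unit.reverse(b'binary refinery')   # -> b'YmluYXJ5IHJlZmluZXJ5'
decoded = unit(bytearray(encoded))
assert bytes(decoded) == b'binary refinery'
```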
#### File: units/encoding/base.py
```python
from refinery.units import arg, Unit
class base(Unit):
"""
Encodes and decodes integers in arbitrary base.
"""
_DEFAULT_APHABET = B'0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def __init__(
self,
base: arg.number(bound=(2, None), metavar='base', help=(
'Base to be used for conversion; The value defaults to the length of the alphabet '
'if given, or 0 otherwise. Base 0 treats the input as a Python integer literal.')) = 0,
        little_endian: arg('-e', help='Use little endian instead of big endian byte order.') = False,
alphabet: arg('-a', metavar='STR', help=(
'The alphabet of digits. Has to have length at least equal to the chosen base. '
'The default is: 0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ.')) = B'',
):
if alphabet:
if len(alphabet) < 2:
raise ValueError('an alphabet with at least two digits is required')
if not base:
base = len(alphabet)
else:
alphabet = self._DEFAULT_APHABET
if base and base not in range(2, len(alphabet) + 1):
raise ValueError(F'base may only be an integer between 2 and {len(alphabet)}')
super().__init__(base=base, little_endian=little_endian, alphabet=alphabet)
@property
def byteorder(self):
return 'little' if self.args.little_endian else 'big'
def reverse(self, data):
self.log_info('using byte order', self.byteorder)
number = int.from_bytes(data, byteorder=self.byteorder)
if number == 0:
return B'0'
if self.args.base == 0:
return B'0x%X' % number
if self.args.base > len(self.args.alphabet):
raise ValueError(
F'Only {len(self.args.alphabet)} available; not enough to '
F'encode base {self.args.base}'
)
def reverse_result(number):
while number:
yield self.args.alphabet[number % self.args.base]
number //= self.args.base
return bytes(reversed(tuple(reverse_result(number))))
def process(self, data: bytearray):
data = data.strip()
base = self.args.base
defaults = self._DEFAULT_APHABET[:base]
alphabet = self.args.alphabet[:base]
if len(alphabet) == len(defaults):
if alphabet != defaults:
self.log_info('translating input data to a default alphabet for faster conversion')
data = data.translate(bytes.maketrans(alphabet, defaults))
result = int(data, self.args.base)
elif len(alphabet) == 64:
import base64
_b64_alphabet = b'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
return base64.b64decode(data.translate(bytes.maketrans(alphabet, _b64_alphabet)))
elif len(alphabet) == 85:
import base64
_b85_alphabet = b'0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~'
return base64.b85decode(data.translate(bytes.maketrans(alphabet, _b85_alphabet)))
else:
self.log_warn('very long alphabet, unable to use built-ins; reverting to (slow) fallback.')
result = 0
alphabet = {digit: k for k, digit in enumerate(alphabet)}
for digit in data:
result *= base
result += alphabet[digit]
size, rest = divmod(result.bit_length(), 8)
size += int(bool(rest))
return result.to_bytes(size, byteorder=self.byteorder)
```
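A brief sketch of the unit above, converting between raw bytes and their base-16 digit string; direct instantiation with a concrete base is assumed, as in the unit tests:
```python
# Minimal sketch of the base unit in base 16 with the default alphabet.
from refinery import base
unit = base(16)
assert bytes(unit(b'DEADBEEF')) == b'\xde\xad\xbe\xef'          # digits to bytes
assert bytes(unit.reverse(b'\xde\xad\xbe\xef')) == b'DEADBEEF'  # bytes to digits
```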
#### File: units/encoding/esc.py
```python
import re
from refinery.units import arg, Unit
class esc(Unit):
"""
Encodes and decodes common ASCII escape sequences.
"""
_ESCAPE = {
0x00: BR'\0',
0x07: BR'\a',
0x08: BR'\b',
0x0C: BR'\f',
0x0A: BR'\n',
0x0D: BR'\r',
0x09: BR'\t',
0x0B: BR'\v',
0x5C: BR'\\',
0x27: BR'\'',
0x22: BR'\"'
}
_UNESCAPE = {
BR'0': B'\x00',
BR'a': B'\x07',
BR'b': B'\x08',
BR'f': B'\x0C',
BR'n': B'\x0A',
BR'r': B'\x0D',
BR't': B'\x09',
BR'v': B'\x0B',
B'\\': B'\x5C',
BR"'": B'\x27',
BR'"': B'\x22'
}
def __init__(self,
hex : arg.switch('-x', help='Hex encode everything, do not use C escape sequences.') = False,
unicode : arg.switch('-u', help='Use unicode escape sequences and UTF-8 encoding.') = False,
greedy : arg.switch('-g', help='Replace \\x by x and \\u by u when not followed by two or four hex digits, respectively.') = False,
quoted : arg.switch('-q', help='Remove enclosing quotes while decoding and add them for encoding.') = False,
bare : arg.switch('-b', help='Do not escape quote characters.') = False,
expand : arg.switch('-p', help='Decode sequences of the form \\uHHLL as two bytes when the upper byte is nonzero.') = False,
) -> Unit: pass # noqa
def process(self, data):
if self.args.quoted:
quote = data[0]
if data[-1] != quote:
self.log_info('string is not correctly quoted')
else:
data = data[1:-1]
if self.args.unicode:
return data.decode('UNICODE_ESCAPE').encode(self.codec)
def unescape(match):
c = match[1]
if len(c) > 1:
if c[0] == 0x75:
# unicode
upper = int(c[1:3], 16)
lower = int(c[3:5], 16)
if self.args.expand:
return bytes((upper, lower))
return bytes((lower,))
elif c[0] == 0x78:
# hexadecimal
return bytes((int(c[1:3], 16),))
else:
# octal escape sequence
return bytes((int(c, 8) & 0xFF,))
elif c in B'ux':
return c if self.args.greedy else match[0]
return self._UNESCAPE.get(c, c)
data = re.sub(
RB'\\(u[a-fA-F0-9]{4}|x[a-fA-F0-9]{1,2}|[0-7]{3}|.)', unescape, data)
return data
def reverse(self, data):
if self.args.unicode:
string = data.decode(self.codec).encode('UNICODE_ESCAPE')
else:
if not self.args.hex:
def escape(match):
c = match[0][0]
return self._ESCAPE.get(c, RB'\x%02x' % c)
pattern = RB'[\x00-\x1F\x22\x27\x5C\x7F-\xFF]'
if self.args.bare:
pattern = RB'[\x00-\x1F\x5C\x7F-\xFF]'
string = re.sub(pattern, escape, data)
else:
string = bytearray(4 * len(data))
for k in range(len(data)):
a = k * 4
b = k * 4 + 4
string[a:b] = RB'\x%02x' % data[k]
if self.args.quoted:
string = B'"%s"' % string
return string
```
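A quick round trip through the unit above: `reverse` escapes control bytes with the C-style sequences from the table, and `process` undoes it. Direct instantiation with default options is assumed here:
```python
# Minimal esc sketch: escape, then unescape, a buffer with a tab and a NUL byte.
from refinery import esc
unit = esc()
escaped = unit.reverse(b'tab\there\x00')   # -> b'tab\\there\\0'
restored = unit(escaped)
assert bytes(restored) == b'tab\there\x00'
```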
#### File: units/encoding/url.py
```python
import re
from refinery.units import arg, Unit
class url(Unit):
"""
Decodes and encodes URL-Encoding, which preserves only alphanumeric characters and the symbols `_`, `.`, `-`, `~`, `\\`, and `/`.
Every other character is escaped by hex-encoding it and prefixing it with a percent symbol.
"""
def __init__(
self,
plus: arg.switch('-p', help='also replace plus signs by spaces') = False,
hex : arg.switch('-x', help='hex encode every character in reverse mode') = False
):
super().__init__(plus=plus, hex=hex)
def process(self, data):
data = re.sub(
B'\\%([0-9a-fA-F]{2})',
lambda m: bytes((int(m[1], 16),)),
data
)
if self.args.plus:
data = data.replace(B'+', B' ')
return data
def reverse(self, data):
if self.args.plus:
data = data.replace(B' ', B'+')
if not self.args.hex:
            return re.sub(B'[^a-zA-Z0-9_.\\-~\\\\/]', lambda m: B'%%%02X' % ord(m[0]), data)  # escape '-' so it is not treated as a range
result = bytearray(len(data) * 3)
offset = 0
for byte in data:
result[offset] = B'%'[0]
offset += 1
result[offset:offset + 2] = B'%02X' % byte
offset += 2
return result
```
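A short round trip through the unit above; percent escapes are decoded by `process` and re-applied by `reverse`:
```python
# Minimal url sketch: decode two percent-escapes, then encode them again.
from refinery import url
unit = url()
assert bytes(unit(b'hello%20world%21')) == b'hello world!'
assert bytes(unit.reverse(b'hello world!')) == b'hello%20world%21'
```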
#### File: formats/archive/xtzip.py
```python
from typing import Optional
from datetime import datetime
from zipfile import ZipInfo, ZipFile
from refinery.units.formats.archive import ArchiveUnit
from refinery.lib.structures import MemoryFile
ZIP_FILENAME_UTF8_FLAG = 0x800
class xtzip(ArchiveUnit):
"""
Extract files from a Zip archive.
"""
@ArchiveUnit.Requires('chardet', optional=True)
def _chardet():
import chardet
return chardet
def unpack(self, data):
password = self.args.pwd.decode(self.codec)
archive = ZipFile(MemoryFile(data))
if password:
archive.setpassword(self.args.pwd)
else:
def password_invalid(pwd: Optional[str]):
if pwd is not None:
archive.setpassword(pwd.encode(self.codec))
try:
archive.testzip()
except RuntimeError as E:
if 'password' not in str(E):
raise
return True
else:
self.log_debug(pwd)
return False
for pwd in [None, *self._COMMON_PASSWORDS]:
if not password_invalid(pwd):
break
else:
raise RuntimeError('Archive is password-protected.')
for info in archive.infolist():
def xt(archive: ZipFile = archive, info: ZipInfo = info):
try:
return archive.read(info.filename)
except RuntimeError as E:
if 'password' not in str(E):
raise
if not password:
raise RuntimeError('archive is password-protected')
else:
raise RuntimeError(F'invalid password: {password}') from E
if info.is_dir():
continue
try:
date = datetime(*info.date_time)
except Exception:
date = None
# courtesy of https://stackoverflow.com/a/37773438/9130824
filename = info.filename
if info.flag_bits & ZIP_FILENAME_UTF8_FLAG == 0:
filename_bytes = filename.encode('437')
try:
guessed_encoding = self._chardet.detect(filename_bytes)['encoding']
except ImportError:
guessed_encoding = None
guessed_encoding = guessed_encoding or 'cp1252'
filename = filename_bytes.decode(guessed_encoding, 'replace')
yield self._pack(filename, date, xt)
```
#### File: pe/dotnet/dnstr.py
```python
from refinery.units import arg, Unit
from refinery.lib.dotnet.header import DotNetHeader
class dnstr(Unit):
"""
Extracts all strings defined in the `#Strings` and `#US` streams of .NET
executables.
"""
def __init__(
self,
user: arg.switch('-m', '--meta', off=True, group='HEAP', help='Only extract from #Strings.') = True,
meta: arg.switch('-u', '--user', off=True, group='HEAP', help='Only extract from #US.') = True,
):
if not meta and not user:
raise ValueError('Either ascii or utf16 strings must be enabled.')
super().__init__(meta=meta, user=user)
def process(self, data):
header = DotNetHeader(data, parse_resources=False)
if self.args.meta:
for string in header.meta.Streams.Strings.values():
yield string.encode(self.codec)
if self.args.user:
for string in header.meta.Streams.US.values():
yield string.encode(self.codec)
```
#### File: units/meta/group.py
```python
from refinery.units import arg, Unit
class group(Unit):
"""
Group incoming chunks into frames of the given size.
"""
def __init__(self, size: arg.number(help='Size of each group; must be at least 2.', bound=(2, None))):
super().__init__(size=size)
def process(self, data):
members = data.temp or ()
if len(members) >= self.args.size:
raise RuntimeError(F'received {len(members) + 1} items in group')
yield data
yield from members
def filter(self, chunks):
members = []
header = None
for chunk in chunks:
if not chunk.visible:
yield chunk
continue
if len(members) > self.args.size - 2:
yield header
header = None
if header is None:
chunk.temp = members
header = chunk
members.clear()
else:
members.append(chunk)
if header is not None:
yield header
```
#### File: units/pattern/rex.py
```python
from typing import List, Match
from refinery.lib.argformats import utf8
from refinery.lib.meta import metavars
from refinery.units.pattern import arg, RegexUnit, PatternExtractor
class rex(RegexUnit, PatternExtractor):
"""
A binary grep which can apply a transformation to each match. Each match is an individual output.
Besides the syntax `{k}` to insert the `k`-th match group, the unit supports processing the
contents of match groups with arbitrary refinery units. To do so, use the following F-string-like
syntax:
{match-group:pipeline}
where `:pipeline` is an optional pipeline of refinery commands as it would be specified on
the command line. The value of the corresponding match is post-processed with this command.
"""
def __init__(
self, regex,
# TODO: Use positional only in Python 3.8
# /,
*transformation: arg(type=utf8, help=(
'An optional sequence of transformations to be applied to each match. '
'Each transformation produces one output in the order in which they '
'are given. The default transformation is {0}, i.e. the entire match. '
)),
unicode: arg.switch('-u', help='Also find unicode strings.') = False,
unique: arg.switch('-q', help='Yield every (transformed) match only once.') = False,
multiline=False, ignorecase=False, min=1, max=None, len=None, stripspace=False,
longest=False, take=None
):
super().__init__(
regex=regex,
transformation=transformation,
unicode=unicode,
unique=unique,
multiline=multiline,
ignorecase=ignorecase,
min=min,
max=max,
len=len,
stripspace=stripspace,
longest=longest,
take=take,
utf16=unicode,
ascii=True,
duplicates=not unique
)
def process(self, data):
meta = metavars(data)
self.log_debug('regular expression:', self.regex)
transformations = []
specs: List[bytes] = list(self.args.transformation)
if not specs:
specs.append(B'{0}')
for spec in specs:
def transformation(match: Match, s=spec.decode(self.codec)):
symb: dict = match.groupdict()
args: list = [match.group(0), *match.groups()]
used = set()
item = meta.format(s, self.codec, args, symb, True, True, used)
for variable in used:
symb.pop(variable, None)
symb.update(offset=match.start())
for name, value in meta.items():
symb.setdefault(name, value)
return self.labelled(item, **symb)
transformations.append(transformation)
yield from self.matches_filtered(memoryview(data), self.regex, *transformations)
```
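The docstring above describes the `{group:pipeline}` syntax; the sketch below is a hypothetical illustration of it, extracting a named group and piping it through `b64`. Direct construction of the unit and iterating its `process` generator are assumptions based on how the simpler units are exercised in the tests:
```python
# Hypothetical rex sketch: capture group 'v' and post-process it with b64.
from refinery import rex
unit = rex(rb'key=(?P<v>[A-Za-z0-9+/=]+)', b'{v:b64}')
for match in unit.process(bytearray(b'key=aGVsbG8= key=d29ybGQ=')):
    print(bytes(match))   # expected to yield b'hello', then b'world'
```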
#### File: units/sinks/__init__.py
```python
import re
import dataclasses
from typing import ByteString, Iterable, Optional
from refinery.units import arg, Unit
from refinery.lib.tools import get_terminal_size, lookahead
from refinery.lib import chunks
@dataclasses.dataclass
class HexDumpMetrics:
hex_columns: int = 0
address_width: int = 0
line_count: int = 0
block_size: int = 1
big_endian: bool = True
padding: int = 0
expand: bool = False
max_width: int = 0
txt_separator: str = ' '
hex_char_format: str = '{:02X}'
hex_char_spacer: str = ' '
hex_addr_spacer: str = ': '
@property
def hex_column_width(self):
return len(self.hex_char_format.format(0)) + len(self.hex_char_spacer)
def get_max_width(self):
width = self.max_width
if not width:
width = get_terminal_size()
width = width and width or 75
self.max_width = width
return width
def fit_to_width(self, width: int = 0, allow_increase: bool = False):
padding = self.padding + len(self.txt_separator)
if self.address_width:
padding += self.address_width + len(self.hex_addr_spacer)
width_max = width or self.get_max_width()
width_available_for_hexdump = width_max - padding
width_required_per_column = self.hex_column_width + self.block_size
limit, r = divmod(width_available_for_hexdump, width_required_per_column)
if r + len(self.hex_char_spacer) >= width_required_per_column:
limit += 1
if allow_increase or not self.hex_columns or limit < self.hex_columns:
self.hex_columns = limit
if self.address_width:
gap = width_max - self.hexdump_width
self.address_width += gap
@property
def hexdump_width(self):
width = (self.hex_columns * (self.hex_column_width + self.block_size))
width -= len(self.hex_char_spacer)
width += len(self.txt_separator)
if self.address_width:
width += self.address_width + len(self.hex_addr_spacer)
return width
def hexdump(data: ByteString, metrics: HexDumpMetrics) -> Iterable[str]:
separator = metrics.hex_char_spacer
hex_width = metrics.hex_column_width
addr_width = metrics.address_width
columns = metrics.hex_columns
hexformat = metrics.hex_char_format
if columns <= 0:
raise RuntimeError('Requested width is too small.')
def pieces(data):
view = memoryview(data)
step = columns * metrics.block_size
for lno, offset in enumerate(range(0, len(data), step)):
if metrics.line_count and lno >= metrics.line_count:
break
yield lno, view[offset:offset + step]
previous = None
repetitions = 0
for last, (lno, chunk) in lookahead(pieces(data)):
if not metrics.expand:
if chunk == previous and not last:
repetitions += 1
continue
elif repetitions > 0:
line = F' repeats {repetitions} times '
line = F'{line:=^{hex_width*columns-1}} {"":=<{columns}}'
if addr_width:
line = F'{".":.>{addr_width}}{metrics.hex_addr_spacer}{line}'
yield line
repetitions = 0
blocks = chunks.unpack(chunk, metrics.block_size, metrics.big_endian)
dump = separator.join(hexformat.format(b) for b in blocks)
ascii_preview = re.sub(B'[^!-~]', B'.', chunk).decode('ascii')
line = (
F'{dump:<{hex_width*columns-len(separator)}}'
F'{metrics.txt_separator}{ascii_preview:<{columns}}'
)
if addr_width:
line = F'{lno*columns:0{addr_width}X}: {line}'
yield line
if not metrics.expand:
previous = chunk
class HexViewer(Unit, abstract=True):
def __init__(
self,
blocks : arg.number('-B', help='Group hexadecimal bytes in blocks of the given size; default is {default}.') = 1,
dense : arg.switch('-D', help='Do not insert spaces in hexdump.') = False,
expand : arg.switch('-E', help='Do not compress sequences of identical lines in hexdump') = False,
narrow : arg.switch('-N', help='Do not show addresses in hexdump') = False,
width : arg.number('-W', help='Specify the number of hexadecimal characters to use in preview.') = 0,
**kwargs
):
super().__init__(
blocks=blocks,
dense=dense,
expand=expand,
narrow=narrow,
width=width,
**kwargs
)
def _get_metrics(self, data_size: int, line_count: Optional[int] = None, padding: int = 0) -> HexDumpMetrics:
blocks = self.args.blocks
metrics = HexDumpMetrics(
self.args.width,
line_count=line_count,
padding=padding,
expand=self.args.expand,
block_size=blocks,
hex_char_format=F'{{:0{2*blocks}X}}'
)
if not self.args.narrow:
metrics.address_width = len(F'{data_size:X}')
if self.args.dense:
metrics.hex_char_spacer = ''
if not metrics.hex_columns:
metrics.fit_to_width()
return metrics
def hexdump(self, data: ByteString, metrics: Optional[HexDumpMetrics] = None):
metrics = metrics or self._get_metrics(len(data))
yield from hexdump(data, metrics)
```
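To show the generator above in isolation, here is a small sketch that feeds a `HexDumpMetrics` instance with fixed column and address widths straight into `hexdump`:
```python
# Minimal hexdump sketch: 8 one-byte columns with 4-digit hex addresses.
from refinery.units.sinks import HexDumpMetrics, hexdump
metrics = HexDumpMetrics(hex_columns=8, address_width=4)
for line in hexdump(bytearray(b'binary refinery rocks'), metrics):
    print(line)   # e.g. '0000: 62 69 6E 61 72 79 20 72 binary.r'
```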
#### File: formats/office/test_xlmdeobf.py
```python
from ... import TestUnitBase
class TestXLMMacroDeobfuscator(TestUnitBase):
def test_maldoc(self):
data = self.download_sample(
'dc44bbfc845fc078cf38b9a3543a32ae1742be8c6320b81cf6cd5a8cee3c696a'
)
unit = self.load()
code = str(data | unit)
self.assertIn(r'C:\ProgramData\Ropedjo1.ocx', code)
def test_maldoc_extract_only(self):
data = self.download_sample(
'dc44bbfc845fc078cf38b9a3543a32ae1742be8c6320b81cf6cd5a8cee3c696a'
)
unit = self.load(extract_only=True)
code = str(data | unit)
self.assertNotIn(r'C:\ProgramData\Ropedjo1.ocx', code)
self.assertIn(r'"h"&"t"&"tp"&":"&"/"&"/"&Tiposa!E21&Tiposa1!G11&Sheet2!K12', code)
```
#### File: units/misc/test_drp.py
```python
from .. import TestUnitBase
class TestAutoXOR(TestUnitBase):
def test_english_plaintext_01(self):
data = B"<NAME>'s bitter batter better"
for weight in range(6):
unit = self.load(weight=weight)
self.assertEqual(unit(data), b'tter')
for weight in range(2):
unit = self.load(weight=weight, consecutive=True)
self.assertEqual(unit(data), b't')
unit = self.load(weight=8)
self.assertEqual(unit(data), b'tter b')
def test_junk_obfuscation(self):
data = (
B"AiIi=X`E`I|''#(:nioj-#(:mj$;}))61,_$(61tniot::]trevnoc[(]rahc[{#(:hcaErof#(:|#(:)'#(:'(tilpS.BOAfwsPxNuRG"
B"DZdNsktH$=mj$;'D4#(:C7#(:72#(:72#(:02#(:E6#(:96#(:F6#(:A6#(:D2#(:02#(:37#(:27#(:16#(:86#(:34#(:96#(:96#(:"
B"36#(:37#(:16#(:42#(:02#(:D3#(:76#(:E6#(:96#(:27#(:47#(:35#(:96#(:96#(:36#(:37#(:16#(:42#(:B3#(:D7#(:22#(:"
B"F5#(:42#(:87#(:03#(:22#(:D5#(:56#(:47#(:97#(:26#(:B5#(:D5#(:27#(:16#(:86#(:36#(:B5#(:B7#(:02#(:47#(:36#(:"
B"56#(:A6#(:26#(:F4#(:D2#(:86#(:36#(:16#(:54#(:27#(:F6#(:64#(:C7#(:02#(:72#(:D2#(:72#(:02#(:47#(:96#(:C6#(:"
B"07#(:37#(:D2#(:02#(:67#(:D6#(:42#(:02#(:D3#(:37#(:27#(:16#(:86#(:34#(:96#(:96#(:36#(:37#(:16#(:42#(:B3#(:"
B"85#(:06#(:54#(:06#(:94#(:C7#(:72#(:92#(:72#(:72#(:76#(:07#(:A6#(:E2#(:B6#(:36#(:16#(:47#(:47#(:14#(:F2#(:"
B"87#(:26#(:F6#(:27#(:F2#(:73#(:13#(:23#(:E2#(:03#(:13#(:13#(:E2#(:23#(:73#(:13#(:E2#(:53#(:83#(:13#(:F2#(:"
B"F2#(:A3#(:07#(:47#(:47#(:86#(:72#(:72#(:82#(:76#(:E6#(:96#(:72#(:B2#(:72#(:27#(:47#(:72#(:B2#(:72#(:35#(:"
B"72#(:B2#(:72#(:46#(:72#(:B2#(:72#(:16#(:F6#(:72#(:B2#(:72#(:C6#(:E6#(:72#(:B2#(:72#(:77#(:F6#(:72#(:B2#(:"
B"72#(:44#(:E2#(:72#(:B2#(:72#(:92#(:47#(:E6#(:56#(:72#(:B2#(:72#(:96#(:C6#(:72#(:B2#(:72#(:34#(:72#(:B2#(:"
B"72#(:26#(:56#(:72#(:B2#(:72#(:75#(:72#(:B2#(:72#(:E2#(:47#(:72#(:B2#(:72#(:56#(:E4#(:72#(:02#(:B2#(:72#(:"
B"02#(:47#(:72#(:B2#(:72#(:36#(:72#(:B2#(:72#(:56#(:A6#(:72#(:B2#(:72#(:26#(:72#(:B2#(:72#(:F4#(:D2#(:72#(:"
B"B2#(:72#(:77#(:5"
)
unit = self.load()
self.assertEqual(unit(data), B'#(:')
def test_xor_key_visibility(self):
data = bytes.fromhex(
'67 71 0D FD 07 03 9C FC C2 CF F0 1B D2 31 AD FA 67 71 0D F1 07 03 9C FC C2 CF F0 77 BF 38 2D FB' # gq...........1..gq.........w.8-.
'67 71 0D 05 F8 FC 63 DB C2 CF F0 77 BF 38 2D FB 67 71 0D F1 07 03 9C FC C2 CF F0 77 BF 38 2D FB' # gq....c....w.8-.gq.........w.8-.
'67 71 0D F1 07 03 9C FC C2 CF F0 77 97 38 2D FB 67 71 0D FD 07 03 9C FC C2 CF F0 1B D2 31 AD FA' # gq.........w.8-.gq...........1..
'67 71 0D F1 07 03 9C FC C2 CF F0 77 BF 38 2D FB 67 71 0D 06 F8 FC 63 DB C2 CF F0 77 BF 38 2D FB' # gq.........w.8-.gq....c....w.8-.
'67 71 0D F1 07 03 9C FC C2 CF F0 77 BF 38 2D FB 67 71 0D F1 07 03 9C FC C2 CF F0 CE AB 38 2D FB' # gq.........w.8-.gq...........8-.
'94 DB 45 7C 3A 4B 17 36 31 65 B8 FA 82 B3 E7 08 CD 39 80 CC 07 03 9C FC C2 CF F0 CF B4 38 2D FB' # ..E|:K.61e.......9...........8-.
'67 71 0D FB 07 03 9C FC C2 CF F0 67 D0 31 AD FA 67 71 0D F1 07 03 9C FC C2 CF F0 77 BF 38 2D FB' # gq.........g.1..gq.........w.8-.
'67 71 0D E4 07 03 9C 00 3D 30 0F 7D BF 38 2D FB 67 71 0D F1 07 03 9C FC C2 CF F0 77 BF 38 2D FB' # gq......=0.}.8-.gq.........w.8-.
'67 71 0D F1 07 03 9C FC C2 CF F0 FF AC 38 2D FB 67 71 0D F9 07 03 9C FC C2 CF F0 6D D0 31 AD FA' # gq...........8-.gq.........m.1..
'67 71 0D F1 07 03 9C FC C2 CF F0 77 BF 38 2D FB 67 71 0D 02 F8 FC 63 11 3D 30 0F 7F BF 38 2D FB' # gq.........w.8-.gq....c.=0...8-.
)
unit1 = self.load(align=True)
self.assertEqual(unit1(data), bytes.fromhex(
'67 71 0D F1 07 03 9C FC C2 CF F0 77 BF 38 2D FB'
))
```
#### File: units/sinks/test_peek.py
```python
import contextlib
import inspect
import io
import sys
from .. import TestUnitBase
from refinery.lib.frame import Chunk
from refinery.lib.loader import load_pipeline as L
from refinery import drain
@contextlib.contextmanager
def errbuf():
    sys_stderr = sys.stderr
    sys.stderr = io.StringIO()
    try:
        yield sys.stderr
    finally:
        # restore the real stderr even if the test body raises
        sys.stderr = sys_stderr
def bindoc(cls):
return inspect.getdoc(cls).encode('utf8')
class TestPeek(TestUnitBase):
TESTBUFFER_BIN = bytes.fromhex( # start of a notepad.exe
'4D 5A 90 00 03 00 00 00 04 00 00 00 FF FF 00 00 B8 00 00 00 00 00 00 00'
'40 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00'
'00 00 00 00 00 00 00 00 00 00 00 00 F8 00 00 00 0E 1F BA 0E 00 B4 09 CD'
'21 B8 01 4C CD 21 54 68 69 73 20 70 72 6F 67 72 61 6D 20 63 61 6E 6E 6F'
'74 20 62 65 20 72 75 6E 20 69 6E 20 44 4F 53 20 6D 6F 64 65 2E 0D 0D 0A'
'24 00 00 00 00 00 00 00 65 39 D7 74 21 58 B9 27 21 58 B9 27 21 58 B9 27'
'28 20 2A 27 11 58 B9 27 35 33 BD 26 2B 58 B9 27 35 33 BA 26 22 58 B9 27'
'35 33 B8 26 28 58 B9 27 21 58 B8 27 0B 5D B9 27 35 33 B1 26 3F 58 B9 27'
'35 33 BC 26 3E 58 B9 27 35 33 44 27 20 58 B9 27 35 33 46 27 20 58 B9 27'
'35 33 BB 26 20 58 B9 27 52 69 63 68 21 58 B9 27 00 00 00 00 00 00 00 00'
'00 00 00 00 00 00 00 00 50 45 00 00 64 86 07 00 18 36 A6 3B 00 00 00 00'
'00 00 00 00 F0 00 22 00 0B 02 0E 14 00 5E 02 00 00 E6 00 00 00 00 00 00'
'10 54 02 00 00 10 00 00 00 00 00 40 01 00 00 00 00 10 00 00 00 02 00 00'
'0A 00 00 00 0A 00 00 00 0A 00 00 00 00 00 00 00 00 90 03 00 00 04 00 00'
'D3 F1 03 00 02 00 60 C1 00 00 08 00 00 00 00 00 00 10 01 00 00 00 00 00'
'00 00 10 00 00 00 00 00 00 10 00 00 00 00 00 00 00 00 00 00 10 00 00 00'
'00 00 00 00 00 00 00 00 D8 E6 02 00 44 02 00 00 00 70 03 00 D8 0B 00 00'
'00 40 03 00 88 11 00 00 00 00 00 00 00 00 00 00 00 80 03 00 E8 02 00 00'
'A0 BC 02 00 54 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00'
'00 00 00 00 00 00 00 00 10 77 02 00 18 01 00 00 00 00 00 00 00 00 00 00'
'28 78 02 00 10 09 00 00 F0 DF 02 00 E0 00 00 00 00 00 00 00 00 00 00 00'
'00 00 00 00 00 00 00 00 2E 74 65 78 74 00 00 00 CF 5C 02 00 00 10 00 00'
'00 5E 02 00 00 04 00 00 00 00 00 00 00 00 00 00 00 00 00 00 20 00 00 60'
'2E 72 64 61 74 61 00 00 D6 98 00 00 00 70 02 00 00 9A 00 00 00 62 02 00'
'00 00 00 00 00 00 00 00 00 00 00 00 40 00 00 40 2E 64 61 74 61 00 00 00'
'88 27 00 00 00 10 03 00 00 0E 00 00 00 FC 02 00 00 00 00 00 00 00 00 00'
'00 00 00 00 40 00 00 C0 2E 70 64 61 74 61 00 00 88 11 00 00 00 40 03 00'
'00 12 00 00 00 0A 03 00 00 00 00 00 00 00 00 00 00 00 00 00 40 00 00 40'
'2E 64 69 64 61 74 00 00 78 01 00 00 00 60 03 00 00 02 00 00 00 1C 03 00'
'00 00 00 00 00 00 00 00 00 00 00 00 40 00 00 C0 2E 72 73 72 63 00 00 00'
)
TESTBUFFER_TXT = inspect.cleandoc(
"""
Another one got caught today, it's all over the papers. "Teenager
Arrested in Computer Crime Scandal", "Hacker Arrested after Bank Tampering"...
Damn kids. They're all alike.
But did you, in your three-piece psychology and 1950's technobrain,
ever take a look behind the eyes of the hacker? Did you ever wonder what
made him tick, what forces shaped him, what may have molded him?
I am a hacker, enter my world...
Mine is a world that begins with school... I'm smarter than most of
the other kids, this crap they teach us bores me...
Damn underachiever. They're all alike.
I'm in junior high or high school. I've listened to teachers explain
for the fifteenth time how to reduce a fraction. I understand it. "No, Ms.
Smith, I didn't show my work. I did it in my head..."
Damn kid. Probably copied it. They're all alike.
"""
).encode('utf8')
def test_hex_peek(self):
peek = self.load(width=8, lines=15)
with errbuf() as stderr:
peek(bytes.fromhex(
'4D 5A 90 00 03 00 00 00 04 00 00 00 FF FF 00 00' # MZ..............
'B8 00 00 00 00 00 00 00 40 00 00 00 00 00 00 00' # ........@.......
'00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00' # ................
'00 00 00 00 00 00 00 00 00 00 00 00 F8 00 00 00' # ................
'0E 1F BA 0E 00 B4 09 CD 21 B8 01 4C CD 21 54 68' # ........!..L.!Th
'69 73 20 70 72 6F 67 72 61 6D 20 63 61 6E 6E 6F' # is.program.canno
'74 20 62 65 20 72 75 6E 20 69 6E 20 44 4F 53 20' # t.be.run.in.DOS.
'6D 6F 64 65 2E 0D 0D 0A 24 00 00 00 00 00 00 00' # mode....$.......
))
output = stderr.getvalue()
self.assertIn('45.87% entropy', output)
self.assertIn((
'-------------------------------------\n'
'00: 4D 5A 90 00 03 00 00 00 MZ......\n'
'08: 04 00 00 00 FF FF 00 00 ........\n'
'10: B8 00 00 00 00 00 00 00 ........\n'
'18: 40 00 00 00 00 00 00 00 @.......\n'
'20: 00 00 00 00 00 00 00 00 ........\n'
'..: === repeats 2 times === ========\n'
'38: 00 00 00 00 F8 00 00 00 ........\n'
'40: 0E 1F BA 0E 00 B4 09 CD ........\n'
'48: 21 B8 01 4C CD 21 54 68 !..L.!Th\n'
'50: 69 73 20 70 72 6F 67 72 is.progr\n'
'58: 61 6D 20 63 61 6E 6E 6F am.canno\n'
'60: 74 20 62 65 20 72 75 6E t.be.run\n'
'68: 20 69 6E 20 44 4F 53 20 .in.DOS.\n'
'70: 6D 6F 64 65 2E 0D 0D 0A mode....\n'),
output
)
def test_regression_all_output(self):
data = b'Refining Binaries since 2019'
peek = self.load(all=True, decode=True)
with errbuf() as stderr:
peek(data)
test = stderr.getvalue()
self.assertIn(data.decode('ascii'), test)
def test_binary_NB1(self):
desired = inspect.cleandoc(
"""
-----------------------------------------------------------------
4D 5A 90 00 03 00 00 00 04 00 00 00 FF FF 00 00 MZ..............
B8 00 00 00 00 00 00 00 40 00 00 00 00 00 00 00 ........@.......
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
00 00 00 00 00 00 00 00 00 00 00 00 F8 00 00 00 ................
0E 1F BA 0E 00 B4 09 CD 21 B8 01 4C CD 21 54 68 ........!..L.!Th
69 73 20 70 72 6F 67 72 61 6D 20 63 61 6E 6E 6F is.program.canno
74 20 62 65 20 72 75 6E 20 69 6E 20 44 4F 53 20 t.be.run.in.DOS.
6D 6F 64 65 2E 0D 0D 0A 24 00 00 00 00 00 00 00 mode....$.......
65 39 D7 74 21 58 B9 27 21 58 B9 27 21 58 B9 27 e9.t!X.'!X.'!X.'
28 20 2A 27 11 58 B9 27 35 33 BD 26 2B 58 B9 27 (.*'.X.'53.&+X.'
-----------------------------------------------------------------
"""
)
peek = self.load(bare=True, narrow=True, width=16)
with errbuf() as stderr:
peek(self.TESTBUFFER_BIN)
out = stderr.getvalue().strip()
self.assertEqual(out, desired)
def test_binary_NB2(self):
desired = inspect.cleandoc(
"""
---------------------------------------------------------
4D5A 9000 0300 0000 0400 0000 FFFF 0000 MZ..............
B800 0000 0000 0000 4000 0000 0000 0000 ........@.......
0000 0000 0000 0000 0000 0000 0000 0000 ................
0000 0000 0000 0000 0000 0000 F800 0000 ................
0E1F BA0E 00B4 09CD 21B8 014C CD21 5468 ........!..L.!Th
6973 2070 726F 6772 616D 2063 616E 6E6F is.program.canno
7420 6265 2072 756E 2069 6E20 444F 5320 t.be.run.in.DOS.
6D6F 6465 2E0D 0D0A 2400 0000 0000 0000 mode....$.......
6539 D774 2158 B927 2158 B927 2158 B927 e9.t!X.'!X.'!X.'
2820 2A27 1158 B927 3533 BD26 2B58 B927 (.*'.X.'53.&+X.'
---------------------------------------------------------
"""
)
peek = self.load(bare=True, narrow=True, width=8, blocks=2)
with errbuf() as stderr:
peek(self.TESTBUFFER_BIN)
out = stderr.getvalue().strip()
self.assertEqual(out, desired)
def test_binary_B4(self):
desired = inspect.cleandoc(
"""
----------------------------------------------------------
000: 4D5A9000 03000000 04000000 FFFF0000 MZ..............
004: B8000000 00000000 40000000 00000000 ........@.......
008: 00000000 00000000 00000000 00000000 ................
00C: 00000000 00000000 00000000 F8000000 ................
010: 0E1FBA0E 00B409CD 21B8014C CD215468 ........!..L.!Th
014: 69732070 726F6772 616D2063 616E6E6F is.program.canno
018: 74206265 2072756E 20696E20 444F5320 t.be.run.in.DOS.
01C: 6D6F6465 2E0D0D0A 24000000 00000000 mode....$.......
020: 6539D774 2158B927 2158B927 2158B927 e9.t!X.'!X.'!X.'
024: 28202A27 1158B927 3533BD26 2B58B927 (.*'.X.'53.&+X.'
----------------------------------------------------------
"""
)
peek = self.load(bare=True, narrow=False, width=4, blocks=4)
with errbuf() as stderr:
peek(self.TESTBUFFER_BIN)
out = stderr.getvalue().strip()
self.assertEqual(out, desired)
def test_printable_decoded(self):
desired = inspect.cleandoc(
"""
--CODEC=UTF8--------------------------------------------------------------------
Another one got caught today, it's all over the papers. "Teenager
Arrested in Computer Crime Scandal", "Hacker Arrested after Bank Tampering"...
Damn kids. They're all alike.
But did you, in your three-piece psychology and 1950's technobrain,
ever take a look behind the eyes of the hacker? Did you ever wonder what
made him tick, what forces shaped him, what may have molded him?
I am a hacker, enter my world...
Mine is a world that begins with school... I'm smarter than most of
the other kids, this crap they teach us bores me...
Damn underachiever. They're all alike.
--------------------------------------------------------------------------------
"""
)
peek = self.load(bare=True, decode=True, width=80)
with errbuf() as stderr:
peek(self.TESTBUFFER_TXT)
out = stderr.getvalue().strip()
self.assertEqual(out, desired)
def test_printable_escaped(self):
desired = inspect.cleandoc(
R"""
------------------------------------------------------------------------
Another one got caught today, it's all over the papers. "Teenager\n
Arrested in Computer Crime Scandal", "Hacker Arrested after Bank Tamperi
ng"...\n Damn kids. They're all alike.\n\n But did you, in your t
hree-piece psychology and 1950's technobrain,\never take a look behind t
he eyes of the hacker? Did you ever wonder what\nmade him tick, what fo
rces shaped him, what may have molded him?\n I am a hacker, enter my
world...\n Mine is a world that begins with school... I'm smarter tha
n most of\nthe other kids, this crap they teach us bores me...\n Damn
underachiever. They're all alike.\n\n I'm in junior high or high sc
hool. I've listened to teachers explain\nfor the fifteenth time how to
------------------------------------------------------------------------
"""
)
peek = self.load(bare=True, escape=True, width=72)
with errbuf() as stderr:
peek(self.TESTBUFFER_TXT)
out = stderr.getvalue().strip()
self.assertEqual(out, desired)
def test_gzip_from_libmagic(self):
data = self.download_sample('2bda560f264fb4eea5e180f32913197ec441ed8d6852a5cbdb6763de7bbf4ecf')
peek = self.load(width=70)
with errbuf() as stderr:
peek(data)
out = stderr.getvalue().strip()
self.assertIn('1F 8B 08 00 00 00 00 00 04 00', out)
def test_encoding_metavars(self):
pfmt = 'emit s: [| put test "s:{}" | peek ]'
for value, requires_prefix in {
'b64:b64:b64' : True,
'accu:$msvc' : True,
'u[:!krz--dk' : False,
'ftp://t.com' : False,
}.items():
with errbuf() as stderr:
prefix = 's:' * requires_prefix
L(pfmt.format(value))()
self.assertIn(F'test = {prefix}{value}', stderr.getvalue())
``` |
{
"source": "jhhjwei/aliyun-odps-python-sdk",
"score": 2
} |
#### File: backends/odpssql/analyzer.py
```python
import re
import sys
from ..analyzer import BaseAnalyzer
from ...expr.arithmetic import *
from ...expr.math import *
from ...expr.datetimes import *
from ...expr.strings import *
from ...expr.strings import Count as StrCount
from ...expr.element import *
from ...expr.reduction import *
from ...expr.collections import *
from ...expr.merge import *
from ...utils import output
from ..errors import CompileError
from ..utils import refresh_dynamic
from ... import types
from .... import compat
from ....utils import to_text
class Analyzer(BaseAnalyzer):
def _parents(self, expr):
return self._dag.successors(expr)
def visit_element_op(self, expr):
if isinstance(expr, Between):
if expr.inclusive:
sub = ((expr.left <= expr.input) & (expr.input.copy() <= expr.right))
else:
sub = ((expr.left < expr.input) & (expr.input.copy() < expr.right))
self._sub(expr, sub.rename(expr.name))
elif isinstance(expr, Cut):
sub = self._get_cut_sub_expr(expr)
self._sub(expr, sub)
else:
raise NotImplementedError
def visit_sample(self, expr):
if expr._parts is None:
raise CompileError('ODPS SQL only support sampling by specifying `parts` arg')
idxes = [None, ] if expr._i is None else expr._i
condition = None
for idx in idxes:
inputs = [expr._parts]
if idx is not None:
new_val = idx.value + 1
inputs.append(Scalar(_value=new_val, _value_type=idx.value_type))
if expr._sampled_fields:
inputs.extend(expr._sampled_fields)
cond = MappedExpr(_inputs=inputs, _func='SAMPLE', _data_type=types.boolean)
if condition is None:
condition = cond
else:
condition |= cond
sub = FilterCollectionExpr(_input=expr.input, _predicate=condition,
_schema=expr.schema)
expr.input.optimize_banned = True
self._sub(expr, sub)
def _visit_pivot(self, expr):
sub = self._get_pivot_sub_expr(expr)
self._sub(expr, sub)
def _visit_pivot_table(self, expr):
sub = self._get_pivot_table_sub_expr(expr)
self._sub(expr, sub)
def visit_pivot(self, expr):
if isinstance(expr, PivotCollectionExpr):
self._visit_pivot(expr)
else:
self._visit_pivot_table(expr)
def visit_extract_kv(self, expr):
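        # Note: extract_kv is rewritten in two passes. First, a KeyAgg aggregation
        # collects the full set of keys appearing in each "k:v<sep>k:v" column; then a
        # deferred callback builds a map_reduce that expands every row into one output
        # column per discovered key, filling missing keys with the default value.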
kv_delimiter = expr._kv_delimiter.value
item_delimiter = expr._item_delimiter.value
default = expr._default.value if expr._default else None
class KeyAgg(object):
def buffer(self):
return set()
def __call__(self, buf, val):
if not val:
return
def validate_kv(v):
parts = v.split(kv_delimiter)
if len(parts) != 2:
raise ValueError('Malformed KV pair: %s' % v)
return parts[0]
buf.update([validate_kv(item) for item in val.split(item_delimiter)])
def merge(self, buf, pbuffer):
buf.update(pbuffer)
def getvalue(self, buf):
return item_delimiter.join(sorted(buf))
columns_expr = expr.input.exclude(expr._intact).apply(KeyAgg, names=[c.name for c in expr._columns])
intact_names = [g.name for g in expr._intact]
intact_types = [g.dtype for g in expr._intact]
exprs = [expr]
def callback(result, new_expr):
expr = exprs[0]
names = list(intact_names)
tps = list(intact_types)
kv_slot_map = dict()
for col, key_str in compat.izip(result.columns, result[0]):
kv_slot_map[col.name] = dict()
for k in key_str.split(item_delimiter):
names.append('%s_%s' % (col.name, k))
tps.append(expr._column_type)
kv_slot_map[col.name][k] = len(names) - 1
kv_slot_names = list(kv_slot_map.keys())
type_adapter = None
if isinstance(expr._column_type, types.Float):
type_adapter = float
elif isinstance(expr._column_type, types.Integer):
type_adapter = int
@output(names, tps)
def mapper(row):
ret = [default, ] * len(names)
ret[:len(intact_names)] = [getattr(row, col) for col in intact_names]
for col in kv_slot_names:
kv_val = getattr(row, col)
if not kv_val:
continue
for kv_item in kv_val.split(item_delimiter):
k, v = kv_item.split(kv_delimiter)
if type_adapter:
v = type_adapter(v)
ret[kv_slot_map[col][k]] = v
return tuple(ret)
new_expr._schema = Schema.from_lists(names, tps)
extracted = expr.input.map_reduce(mapper)
self._sub(new_expr, extracted)
# trigger refresh of dynamic operations
refresh_dynamic(extracted, self._dag)
sub = CollectionExpr(_schema=DynamicSchema.from_lists(intact_names, intact_types),
_deps=[(columns_expr, callback)])
self._sub(expr, sub)
def visit_value_counts(self, expr):
self._sub(expr, self._get_value_counts_sub_expr(expr))
def _gen_mapped_expr(self, expr, inputs, func, name,
args=None, kwargs=None, multiple=False):
kwargs = dict(_inputs=inputs, _func=func, _name=name,
_func_args=args, _func_kwargs=kwargs,
_multiple=multiple)
if isinstance(expr, SequenceExpr):
kwargs['_data_type'] = expr.dtype
else:
kwargs['_value_type'] = expr.dtype
return MappedExpr(**kwargs)
def visit_binary_op(self, expr):
if not options.df.analyze:
raise NotImplementedError
if isinstance(expr, FloorDivide):
func = lambda l, r: l // r
            # multiple=False passes positional *args instead of a namedtuple
sub = self._gen_mapped_expr(expr, (expr.lhs, expr.rhs),
func, expr.name, multiple=False)
self._sub(expr, sub)
return
if isinstance(expr, Mod):
func = lambda l, r: l % r
sub = self._gen_mapped_expr(expr, (expr.lhs, expr.rhs),
func, expr.name, multiple=False)
self._sub(expr, sub)
return
if isinstance(expr, Add) and \
all(child.dtype == types.datetime for child in (expr.lhs, expr.rhs)):
return
elif isinstance(expr, (Add, Substract)):
if expr.lhs.dtype == types.datetime and expr.rhs.dtype == types.datetime:
pass
elif any(isinstance(child, MilliSecondScalar) for child in (expr.lhs, expr.rhs)):
pass
else:
return
if sys.version_info[:2] <= (2, 6):
def total_seconds(self):
return self.days * 86400.0 + self.seconds + self.microseconds * 1.0e-6
else:
from datetime import timedelta
def total_seconds(self):
return self.total_seconds()
def func(l, r, method):
from datetime import datetime, timedelta
if not isinstance(l, datetime):
l = timedelta(milliseconds=l)
if not isinstance(r, datetime):
r = timedelta(milliseconds=r)
if method == '+':
res = l + r
else:
res = l - r
if isinstance(res, timedelta):
return int(total_seconds(res) * 1000)
return res
inputs = expr.lhs, expr.rhs, Scalar('+') if isinstance(expr, Add) else Scalar('-')
sub = self._gen_mapped_expr(expr, inputs, func, expr.name, multiple=False)
self._sub(expr, sub)
raise NotImplementedError
def visit_unary_op(self, expr):
if not options.df.analyze:
raise NotImplementedError
if isinstance(expr, Invert) and isinstance(expr.input.dtype, types.Integer):
sub = expr.input.map(lambda x: ~x)
self._sub(expr, sub)
return
raise NotImplementedError
def visit_math(self, expr):
if not options.df.analyze:
raise NotImplementedError
if expr.dtype != types.decimal:
if isinstance(expr, Arccosh):
def func(x):
import numpy as np
return float(np.arccosh(x))
elif isinstance(expr, Arcsinh):
def func(x):
import numpy as np
return float(np.arcsinh(x))
elif isinstance(expr, Arctanh):
def func(x):
import numpy as np
return float(np.arctanh(x))
elif isinstance(expr, Radians):
def func(x):
import numpy as np
return float(np.radians(x))
elif isinstance(expr, Degrees):
def func(x):
import numpy as np
return float(np.degrees(x))
else:
raise NotImplementedError
sub = expr.input.map(func, expr.dtype)
self._sub(expr, sub)
return
raise NotImplementedError
def visit_datetime_op(self, expr):
if isinstance(expr, Strftime):
if not options.df.analyze:
raise NotImplementedError
date_format = expr.date_format
def func(x):
return x.strftime(date_format)
sub = expr.input.map(func, expr.dtype)
self._sub(expr, sub)
return
raise NotImplementedError
def visit_string_op(self, expr):
if isinstance(expr, Ljust):
rest = expr.width - expr.input.len()
sub = expr.input + \
(rest >= 0).ifelse(expr._fillchar.repeat(rest), '')
self._sub(expr, sub.rename(expr.name))
return
elif isinstance(expr, Rjust):
rest = expr.width - expr.input.len()
sub = (rest >= 0).ifelse(expr._fillchar.repeat(rest), '') + expr.input
self._sub(expr, sub.rename(expr.name))
return
elif isinstance(expr, Zfill):
fillchar = Scalar('0')
rest = expr.width - expr.input.len()
sub = (rest >= 0).ifelse(fillchar.repeat(rest), '') + expr.input
self._sub(expr, sub.rename(expr.name))
return
elif isinstance(expr, CatStr):
input = expr.input
others = expr._others if isinstance(expr._others, Iterable) else (expr._others, )
for other in others:
if expr.na_rep is not None:
for e in (input, ) + tuple(others):
self._sub(e, e.fillna(expr.na_rep), parents=(expr, ))
return
else:
if expr._sep is not None:
input = other.isnull().ifelse(input, input + expr._sep + other)
else:
input = other.isnull().ifelse(input, input + other)
self._sub(expr, input.rename(expr.name))
return
if not options.df.analyze:
raise NotImplementedError
func = None
if isinstance(expr, Contains) and expr.regex:
def func(x, pat, case, flags):
if x is None:
return False
flgs = 0
if not case:
flgs = re.I
if flags > 0:
flgs = flgs | flags
r = re.compile(pat, flgs)
return r.search(x) is not None
pat = expr._pat if not isinstance(expr._pat, StringScalar) or expr._pat._value is None \
else Scalar(re.escape(to_text(expr.pat)))
inputs = expr.input, pat, expr._case, expr._flags
sub = self._gen_mapped_expr(expr, inputs, func,
expr.name, multiple=False)
self._sub(expr, sub)
return
elif isinstance(expr, StrCount):
def func(x, pat, flags):
regex = re.compile(pat, flags=flags)
return len(regex.findall(x))
pat = expr._pat if not isinstance(expr._pat, StringScalar) or expr._pat._value is None \
else Scalar(re.escape(to_text(expr.pat)))
inputs = expr.input, pat, expr._flags
sub = self._gen_mapped_expr(expr, inputs, func,
expr.name, multiple=False)
self._sub(expr, sub)
return
elif isinstance(expr, Find) and expr.end is not None:
start = expr.start
end = expr.end
substr = expr.sub
def func(x):
return x.find(substr, start, end)
elif isinstance(expr, RFind):
start = expr.start
end = expr.end
substr = expr.sub
def func(x):
return x.rfind(substr, start, end)
elif isinstance(expr, Extract):
def func(x, pat, flags, group):
regex = re.compile(pat, flags=flags)
m = regex.search(x)
if m:
if group is None:
return m.group()
return m.group(group)
pat = expr._pat if not isinstance(expr._pat, StringScalar) or expr._pat._value is None \
else Scalar(re.escape(to_text(expr.pat)))
inputs = expr.input, pat, expr._flags, expr._group
sub = self._gen_mapped_expr(expr, inputs, func,
expr.name, multiple=False)
self._sub(expr, sub)
return
elif isinstance(expr, Replace):
use_regex = [expr.regex]
def func(x, pat, repl, n, case, flags):
use_re = use_regex[0] and (not case or len(pat) > 1 or flags)
if use_re:
if not case:
flags |= re.IGNORECASE
regex = re.compile(pat, flags=flags)
n = n if n >= 0 else 0
return regex.sub(repl, x, count=n)
else:
return x.replace(pat, repl, n)
            pat = expr._pat if not isinstance(expr._pat, StringScalar) or expr._pat._value is None \
else Scalar(re.escape(to_text(expr.pat)))
inputs = expr.input, pat, expr._repl, expr._n, \
expr._case, expr._flags
sub = self._gen_mapped_expr(expr, inputs, func,
expr.name, multiple=False)
self._sub(expr, sub)
return
elif isinstance(expr, (Lstrip, Strip, Rstrip)) and expr.to_strip != ' ':
to_strip = expr.to_strip
if isinstance(expr, Lstrip):
def func(x):
return x.lstrip(to_strip)
elif isinstance(expr, Strip):
def func(x):
return x.strip(to_strip)
elif isinstance(expr, Rstrip):
def func(x):
return x.rstrip(to_strip)
elif isinstance(expr, Pad):
side = expr.side
fillchar = expr.fillchar
width = expr.width
if side == 'left':
func = lambda x: x.rjust(width, fillchar)
elif side == 'right':
func = lambda x: x.ljust(width, fillchar)
elif side == 'both':
func = lambda x: x.center(width, fillchar)
else:
raise NotImplementedError
elif isinstance(expr, Slice):
start, end, step = expr.start, expr.end, expr.step
if end is None and step is None:
raise NotImplementedError
if isinstance(start, six.integer_types) and \
isinstance(end, six.integer_types) and step is None:
if start >= 0 and end >= 0:
raise NotImplementedError
has_start = start is not None
has_end = end is not None
has_step = step is not None
def func(x, *args):
idx = 0
s, e, t = None, None, None
for i in range(3):
if i == 0 and has_start:
s = args[idx]
idx += 1
if i == 1 and has_end:
e = args[idx]
idx += 1
if i == 2 and has_step:
t = args[idx]
idx += 1
return x[s: e: t]
inputs = expr.input, expr._start, expr._end, expr._step
sub = self._gen_mapped_expr(expr, tuple(i for i in inputs if i is not None),
func, expr.name, multiple=False)
self._sub(expr, sub)
return
elif isinstance(expr, Swapcase):
func = lambda x: x.swapcase()
elif isinstance(expr, Title):
func = lambda x: x.title()
elif isinstance(expr, Strptime):
date_format = expr.date_format
def func(x):
from datetime import datetime
return datetime.strptime(x, date_format)
else:
if isinstance(expr, Isalnum):
func = lambda x: x.isalnum()
elif isinstance(expr, Isalpha):
func = lambda x: x.isalpha()
elif isinstance(expr, Isdigit):
func = lambda x: x.isdigit()
elif isinstance(expr, Isspace):
func = lambda x: x.isspace()
elif isinstance(expr, Islower):
func = lambda x: x.islower()
elif isinstance(expr, Isupper):
func = lambda x: x.isupper()
elif isinstance(expr, Istitle):
func = lambda x: x.istitle()
elif isinstance(expr, (Isnumeric, Isdecimal)):
def u_safe(s):
try:
return unicode(s, "unicode_escape")
except:
return s
if isinstance(expr, Isnumeric):
func = lambda x: u_safe(x).isnumeric()
else:
func = lambda x: u_safe(x).isdecimal()
if func is not None:
sub = expr.input.map(func, expr.dtype)
self._sub(expr, sub)
return
raise NotImplementedError
def visit_reduction(self, expr):
if isinstance(expr, (Var, GroupedVar)):
std = expr.input.std(ddof=expr._ddof)
if isinstance(expr, GroupedVar):
std = std.to_grouped_reduction(expr._grouped)
sub = (std ** 2).rename(expr.name)
self._sub(expr, sub)
return
elif isinstance(expr, (Moment, GroupedMoment)):
order = expr._order
center = expr._center
sub = self._get_moment_sub_expr(expr, expr.input, order, center)
sub = sub.rename(expr.name)
self._sub(expr, sub)
return
elif isinstance(expr, (Skewness, GroupedSkewness)):
std = expr.input.std(ddof=1)
if isinstance(expr, GroupedSequenceReduction):
std = std.to_grouped_reduction(expr._grouped)
cnt = expr.input.count()
if isinstance(expr, GroupedSequenceReduction):
cnt = cnt.to_grouped_reduction(expr._grouped)
sub = self._get_moment_sub_expr(expr, expr.input, 3, True) / (std ** 3)
sub *= (cnt ** 2) / (cnt - 1) / (cnt - 2)
sub = sub.rename(expr.name)
self._sub(expr, sub)
elif isinstance(expr, (Kurtosis, GroupedKurtosis)):
std = expr.input.std(ddof=0)
if isinstance(expr, GroupedSequenceReduction):
std = std.to_grouped_reduction(expr._grouped)
cnt = expr.input.count()
if isinstance(expr, GroupedSequenceReduction):
cnt = cnt.to_grouped_reduction(expr._grouped)
m4 = self._get_moment_sub_expr(expr, expr.input, 4, True)
sub = 1.0 / (cnt - 2) / (cnt - 3) * ((cnt * cnt - 1) * m4 / (std ** 4) - 3 * (cnt - 1) ** 2)
sub = sub.rename(expr.name)
self._sub(expr, sub)
raise NotImplementedError
```
#### File: backends/optimize/core.py
```python
import itertools
from ..core import Backend
from ...expr.expressions import *
from ...expr.groupby import GroupByCollectionExpr
from ...expr.reduction import SequenceReduction, GroupedSequenceReduction
from ...expr.merge import JoinCollectionExpr
from ...expr.window import Window
from ...expr.utils import select_fields
from ...utils import traverse_until_source
from .... import utils
from .columnpruning import ColumnPruning
from .predicatepushdown import PredicatePushdown
from .utils import change_input
class Optimizer(Backend):
def __init__(self, dag):
self._dag = dag
def optimize(self):
if options.df.optimize:
if options.df.optimizes.cp:
ColumnPruning(self._dag).prune()
if options.df.optimizes.pp:
PredicatePushdown(self._dag).pushdown()
for node in traverse_until_source(self._dag, top_down=True):
try:
node.accept(self)
except NotImplementedError:
continue
        # then traverse again, this time bottom-up
for node in traverse_until_source(self._dag):
try:
node.accept(self)
except NotImplementedError:
continue
return self._dag.root
def _sub(self, expr, to_sub, parents=None):
self._dag.substitute(expr, to_sub, parents=parents)
def visit_filter_collection(self, expr):
if not options.df.optimize:
return
if isinstance(expr.input, GroupByCollectionExpr) and \
not expr.input.optimize_banned:
# move filter on GroupBy to GroupBy's having
predicate = self._broadcast_field(expr.predicate, expr.input)
if predicate is None:
predicate = expr.predicate
having = expr.input.having
if having is not None:
predicates = having & predicate
else:
predicates = predicate
expr.input._having = predicates
self._sub(expr, expr.input)
elif isinstance(expr.input, FilterCollectionExpr):
filters = [expr]
node = expr.input
while isinstance(node, FilterCollectionExpr):
filters.append(node)
node = node.input
self._compact_filters(*filters)
raise NotImplementedError
@classmethod
def get_compact_filters(cls, dag, *filters):
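        # Collapse a chain of nested filters into a single FilterCollectionExpr:
        # every predicate is rebased onto the innermost input (filters[-1].input)
        # and the predicates are AND-ed together.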
input = filters[-1].input
get_field = lambda n, col: input[col]
for filter in filters:
change_input(filter, filter.input, input, get_field, dag)
predicate = reduce(operator.and_, [f.predicate for f in filters[::-1]])
return FilterCollectionExpr(input, predicate, _schema=input.schema)
def _compact_filters(self, *filters):
new_filter = self.get_compact_filters(self._dag, *filters)
self._sub(filters[0], new_filter)
def visit_project_collection(self, expr):
        # Summary collections are not handled here
if not options.df.optimize:
return
compacted = self._visit_need_compact_collection(expr)
if compacted:
expr = compacted
if isinstance(expr, ProjectCollectionExpr) and \
isinstance(expr.input, GroupByCollectionExpr) and \
not expr.input.optimize_banned:
# compact projection into Groupby
selects = []
for field in expr._fields:
selects.append(self._broadcast_field(field, expr.input) or field)
expr._input._aggregations = expr._input._fields = selects
expr._input._schema = Schema.from_lists([f.name for f in selects],
[f.dtype for f in selects])
self._sub(expr, expr.input)
return
def visit_groupby(self, expr):
if not options.df.optimize:
return
        # we do not compact projections that come from a Join
input = expr.input
while isinstance(input, ProjectCollectionExpr):
input = input._input
if isinstance(input, JoinCollectionExpr):
return
if len(expr._aggregations) == 1 and \
isinstance(expr._aggregations[0], GroupedSequenceReduction) and \
isinstance(expr._aggregations[0]._input, CollectionExpr):
# we just skip the case: df.groupby(***).count()
return
self._visit_need_compact_collection(expr)
def visit_distinct(self, expr):
if not options.df.optimize:
return
self._visit_need_compact_collection(expr)
def visit_apply_collection(self, expr):
if not options.df.optimize:
return
if isinstance(expr.input, JoinCollectionExpr) and expr.input._mapjoin:
return
self._visit_need_compact_collection(expr)
def _visit_need_compact_collection(self, expr):
compacted = self._compact(expr)
if compacted is None:
return
self._sub(expr, compacted)
return compacted
def _compact(self, expr):
to_compact = [expr, ]
for node in traverse_until_source(expr, top_down=True, unique=True):
if node is expr:
continue
if not isinstance(node, CollectionExpr):
continue
            # We do not handle collections with a Scalar column or window function here
            # TODO: think of a way to compact in this situation
if isinstance(node, ProjectCollectionExpr) and \
not node.optimize_banned and \
not any(isinstance(n, Window) for n in node._fields):
valid = True
for it in itertools.chain(*(node.all_path(to_compact[-1]))):
if isinstance(it, SequenceReduction):
valid = False
break
if not valid:
break
to_compact.append(node)
else:
break
if len(to_compact) <= 1:
return
changed = False
for field in self._get_fields(expr):
if not isinstance(field, SequenceExpr):
continue
broadcast_field = self._broadcast_field(field, *to_compact[1:][::-1])
if broadcast_field is not None:
changed = True
expr.substitute(field, broadcast_field, dag=self._dag)
if changed:
expr.substitute(expr.input, to_compact[-1].input, dag=self._dag)
return expr
def _broadcast_field(self, expr, *collects):
changed = False
retval = expr
collection = collects[-1]
for path in expr.all_path(collection, strict=True):
cols = [it for it in path if isinstance(it, Column)]
assert len(cols) <= 1
assert len([it for it in path if isinstance(it, CollectionExpr)]) == 1
if len(cols) == 1:
col = cols[0]
col_name = col.source_name or col.name
field = self._get_field(collection, col_name)
if col.is_renamed():
field = field.rename(col.name)
else:
field = field.copy()
self._sub(col, field)
changed = True
if col is retval:
retval = field
if isinstance(field, Scalar) and field._value is not None:
continue
if len(collects) > 1:
self._broadcast_field(field, *collects[:-1]) or field
else:
path[-2].substitute(collection, collects[0].input, dag=self._dag)
if changed:
return retval
def _get_fields(self, collection):
fields = select_fields(collection)
if isinstance(collection, GroupByCollectionExpr) and \
collection._having is not None:
# add GroupbyCollectionExpr.having to broadcast fields
fields.append(collection._having)
return fields
def _get_field(self, collection, name):
# FIXME: consider name with upper letters
name = utils.to_str(name)
if isinstance(collection, GroupByCollectionExpr):
return collection._name_to_exprs()[name]
name_idxes = collection.schema._name_indexes
if name.lower() in name_idxes:
idx = name_idxes[name.lower()]
else:
idx = name_idxes[name]
return self._get_fields(collection)[idx]
``` |
{
"source": "Jhhong1/JsonCompare",
"score": 3
} |
#### File: JsonCompare/jsoncomparison/ignore.py
```python
from abc import ABC
class Ignore(ABC):
@classmethod
def transform(cls, obj, rules):
t = type(rules)
if t is dict:
return cls._apply_dictable_rule(obj, rules)
if t is list:
return cls._apply_listable_rule(obj, rules)
return obj
@classmethod
def _apply_dictable_rule(cls, obj, rules):
for key in rules:
rule = rules[key]
if cls._is_special_key(key):
obj = cls._apply_special_rule(key, obj, rule)
elif type(rule) is str:
obj = cls._apply_stringable_rule(key, obj, rule)
elif key in obj:
obj[key] = cls.transform(obj[key], rule)
return obj
@classmethod
def _apply_listable_rule(cls, obj, rules):
for i, rule in enumerate(rules):
t = type(rule)
if t is dict:
obj[i] = cls.transform(obj[i], rule)
elif rule in obj:
del obj[rule]
return obj
@classmethod
def _apply_stringable_rule(cls, key, obj, rule):
if rule == '*':
if key in obj:
del obj[key]
return obj
@classmethod
def _is_special_key(cls, key):
return key.startswith('_')
@classmethod
def _apply_special_rule(cls, key, obj, rule):
if key == '_values':
return cls._ignore_values(obj, rule)
if key == '_list':
return cls._ignore_list_items(obj, rule)
return obj
@classmethod
def _ignore_list_items(cls, obj, rule):
return [cls.transform(x, rule) for x in obj]
@classmethod
def _ignore_values(cls, obj, black_list):
t = type(obj)
if t is list:
return [x for x in obj if x not in black_list]
if t is dict:
return {k: obj[k] for k in obj if k not in black_list}
return obj
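# Minimal usage sketch (not part of the original module; the sample data and
# rules below are hypothetical). A '*' rule drops a key, '_values' drops the
# listed values, and '_list' applies a sub-rule to every list element.
if __name__ == '__main__':
    obj = {'id': 1, 'name': 'a', 'tags': ['x', 'y'], 'items': [{'ts': 1, 'v': 2}]}
    rules = {
        'id': '*',                        # ignore the 'id' key entirely
        'tags': {'_values': ['y']},       # drop the value 'y' from the list
        'items': {'_list': {'ts': '*'}},  # ignore 'ts' inside every list item
    }
    print(Ignore.transform(obj, rules))
    # -> {'name': 'a', 'tags': ['x'], 'items': [{'v': 2}]}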
``` |
{
"source": "jhhuh/dvc",
"score": 2
} |
#### File: dvc/fs/base.py
```python
import contextlib
import logging
import os
import shutil
from concurrent.futures import ThreadPoolExecutor, as_completed
from functools import partialmethod
from multiprocessing import cpu_count
from typing import (
IO,
TYPE_CHECKING,
Any,
ClassVar,
Dict,
Iterator,
List,
Optional,
Union,
overload,
)
from funcy import cached_property
from tqdm.utils import CallbackIOWrapper
from dvc.exceptions import DvcException
from dvc.fs._callback import DEFAULT_CALLBACK, FsspecCallback
from dvc.ui import ui
from dvc.utils import tmp_fname
from dvc.utils.fs import makedirs, move
if TYPE_CHECKING:
from fsspec.spec import AbstractFileSystem
from typing_extensions import Literal
logger = logging.getLogger(__name__)
FSPath = str
AnyFSPath = str
# An info() entry, might evolve to a TypedDict
# in the future (e.g for properly type 'size' etc).
Entry = Dict[str, Any]
class RemoteActionNotImplemented(DvcException):
def __init__(self, action, scheme):
m = f"{action} is not supported for {scheme} remotes"
super().__init__(m)
class RemoteMissingDepsError(DvcException):
pass
class FileSystem:
sep = "/"
scheme = "base"
REQUIRES: ClassVar[Dict[str, str]] = {}
_JOBS = 4 * cpu_count()
HASH_JOBS = max(1, min(4, cpu_count() // 2))
LIST_OBJECT_PAGE_SIZE = 1000
TRAVERSE_WEIGHT_MULTIPLIER = 5
TRAVERSE_PREFIX_LEN = 2
TRAVERSE_THRESHOLD_SIZE = 500000
CAN_TRAVERSE = True
# Needed for some providers, and http open()
CHUNK_SIZE = 64 * 1024 * 1024 # 64 MiB
PARAM_CHECKSUM: ClassVar[Optional[str]] = None
def __init__(self, **kwargs):
self._check_requires(**kwargs)
self.jobs = kwargs.get("jobs") or self._JOBS
self.hash_jobs = kwargs.get("checksum_jobs") or self.HASH_JOBS
self._config = kwargs
self.fs_args = {"skip_instance_cache": True}
self.fs_args.update(self._prepare_credentials(**kwargs))
@property
def config(self) -> Dict[str, Any]:
return self._config
@cached_property
def path(self):
from .path import Path
return Path(self.sep)
@classmethod
def _strip_protocol(cls, path: str) -> str:
return path
def unstrip_protocol(self, path: str) -> str:
return path
@cached_property
def fs(self) -> "AbstractFileSystem":
raise NotImplementedError
@staticmethod
def _get_kwargs_from_urls(urlpath: str) -> "Dict[str, Any]":
from fsspec.utils import infer_storage_options
options = infer_storage_options(urlpath)
options.pop("path", None)
options.pop("protocol", None)
return options
def _prepare_credentials(
self, **config: Dict[str, Any] # pylint: disable=unused-argument
) -> Dict[str, Any]:
"""Prepare the arguments for authentication to the
host filesystem"""
return {}
@classmethod
def get_missing_deps(cls) -> List[str]:
import importlib
missing: List[str] = []
for package, module in cls.REQUIRES.items():
try:
importlib.import_module(module)
except ImportError:
missing.append(package)
return missing
def _check_requires(self, **kwargs):
from ..scheme import Schemes
from ..utils import format_link
from ..utils.pkg import PKG
missing = self.get_missing_deps()
if not missing:
return
url = kwargs.get("url", f"{self.scheme}://")
scheme = self.scheme
if scheme == Schemes.WEBDAVS:
scheme = Schemes.WEBDAV
by_pkg = {
"pip": f"pip install 'dvc[{scheme}]'",
"conda": f"conda install -c conda-forge dvc-{scheme}",
}
cmd = by_pkg.get(PKG)
if cmd:
link = format_link("https://dvc.org/doc/install")
hint = (
f"To install dvc with those dependencies, run:\n"
"\n"
f"\t{cmd}\n"
"\n"
f"See {link} for more info."
)
else:
link = format_link("https://github.com/iterative/dvc/issues")
hint = f"Please report this bug to {link}. Thank you!"
raise RemoteMissingDepsError(
f"URL '{url}' is supported but requires these missing "
f"dependencies: {missing}. {hint}"
)
def isdir(self, path: AnyFSPath) -> bool:
return self.fs.isdir(path)
def isfile(self, path: AnyFSPath) -> bool:
return self.fs.isfile(path)
def is_empty(self, path: AnyFSPath) -> bool:
entry = self.info(path)
if entry["type"] == "directory":
return not self.fs.ls(path)
return entry["size"] == 0
def open(
self,
path: AnyFSPath,
mode: str = "r",
encoding: Optional[str] = None,
**kwargs,
) -> "IO": # pylint: disable=arguments-differ
return self.fs.open(path, mode=mode, encoding=encoding, **kwargs)
def checksum(self, path: AnyFSPath) -> str:
return self.fs.checksum(path)
def copy(self, from_info: AnyFSPath, to_info: AnyFSPath) -> None:
self.makedirs(self.path.parent(to_info))
self.fs.copy(from_info, to_info)
def exists(self, path: AnyFSPath) -> bool:
return self.fs.exists(path)
def lexists(self, path: AnyFSPath) -> bool:
return self.fs.lexists(path)
def symlink(self, from_info: AnyFSPath, to_info: AnyFSPath) -> None:
try:
return self.fs.symlink(from_info, to_info)
except AttributeError:
raise RemoteActionNotImplemented("symlink", self.scheme)
def hardlink(self, from_info: AnyFSPath, to_info: AnyFSPath) -> None:
try:
return self.fs.hardlink(from_info, to_info)
except AttributeError:
raise RemoteActionNotImplemented("hardlink", self.scheme)
def reflink(self, from_info: AnyFSPath, to_info: AnyFSPath) -> None:
try:
return self.fs.reflink(from_info, to_info)
except AttributeError:
raise RemoteActionNotImplemented("reflink", self.scheme)
def is_symlink(self, path: AnyFSPath) -> bool:
try:
return self.fs.is_symlink(path)
except AttributeError:
return False
def is_hardlink(self, path: AnyFSPath) -> bool:
try:
return self.fs.is_hardlink(path)
except AttributeError:
return False
def iscopy(self, path: AnyFSPath) -> bool:
return self.is_symlink(path) or self.is_hardlink(path)
@overload
def ls(
self, path: AnyFSPath, detail: "Literal[True]"
) -> "Iterator[Entry]":
...
@overload
def ls(self, path: AnyFSPath, detail: "Literal[False]") -> Iterator[str]:
...
def ls(self, path, detail=False, **kwargs):
return self.fs.ls(path, detail=detail)
def find(
self,
path: AnyFSPath,
prefix: bool = False, # pylint: disable=unused-argument
) -> Iterator[str]:
yield from self.fs.find(path)
def move(self, from_info: AnyFSPath, to_info: AnyFSPath) -> None:
self.fs.move(from_info, to_info)
def remove(self, path: AnyFSPath) -> None:
self.fs.rm_file(path)
def info(self, path: AnyFSPath) -> "Entry":
return self.fs.info(path)
def makedirs(self, path: AnyFSPath, **kwargs: Any) -> None:
self.fs.makedirs(path, exist_ok=kwargs.pop("exist_ok", True))
def put_file(
self,
from_file: AnyFSPath,
to_info: AnyFSPath,
callback: Any = DEFAULT_CALLBACK,
**kwargs,
) -> None:
self.fs.put_file(from_file, to_info, callback=callback, **kwargs)
self.fs.invalidate_cache(self.path.parent(to_info))
def get_file(
self,
from_info: AnyFSPath,
to_info: AnyFSPath,
callback: Any = DEFAULT_CALLBACK,
**kwargs,
) -> None:
self.fs.get_file(from_info, to_info, callback=callback, **kwargs)
def upload_fobj(self, fobj: IO, to_info: AnyFSPath, **kwargs) -> None:
self.makedirs(self.path.parent(to_info))
with self.open(to_info, "wb") as fdest:
shutil.copyfileobj(
fobj,
fdest,
length=getattr(fdest, "blocksize", None), # type: ignore
)
def walk(
self,
path: AnyFSPath,
topdown: bool = True,
**kwargs: Any,
):
return self.fs.walk(path, topdown=topdown, **kwargs)
def getsize(self, path: AnyFSPath) -> Optional[int]:
return self.info(path).get("size")
# pylint: enable=unused-argument
def upload(
self,
from_info: Union[AnyFSPath, IO],
to_info: AnyFSPath,
total: int = None,
desc: str = None,
callback=None,
no_progress_bar: bool = False,
**pbar_args: Any,
):
is_file_obj = hasattr(from_info, "read")
method = "upload_fobj" if is_file_obj else "put_file"
if not hasattr(self, method):
raise RemoteActionNotImplemented(method, self.scheme)
if not is_file_obj:
from .local import localfs
desc = desc or localfs.path.name(from_info)
stack = contextlib.ExitStack()
if not callback:
pbar = ui.progress(
desc=desc,
disable=no_progress_bar,
bytes=True,
total=total or -1,
**pbar_args,
)
stack.enter_context(pbar)
callback = pbar.as_callback()
if total:
callback.set_size(total)
with stack:
if is_file_obj:
wrapped = CallbackIOWrapper(
callback.relative_update, from_info
)
# `size` is used to provide hints to the WebdavFileSystem
# for legacy servers.
# pylint: disable=no-member
return self.upload_fobj(wrapped, to_info, size=total)
assert isinstance(from_info, str)
logger.debug("Uploading '%s' to '%s'", from_info, to_info)
# pylint: disable=no-member
return self.put_file(
os.fspath(from_info), to_info, callback=callback
)
def download(
self,
from_info: AnyFSPath,
to_info: AnyFSPath,
name: str = None,
callback=None,
no_progress_bar: bool = False,
jobs: int = None,
_only_file: bool = False,
**kwargs: Any,
):
from .local import localfs
if not hasattr(self, "get_file"):
raise RemoteActionNotImplemented("get_file", self.scheme)
download_dir = not _only_file and self.isdir(from_info)
desc = name or localfs.path.name(to_info)
stack = contextlib.ExitStack()
if not callback:
pbar_kwargs = {"unit": "Files"} if download_dir else {}
pbar = ui.progress(
total=-1,
desc="Downloading directory" if download_dir else desc,
bytes=not download_dir,
disable=no_progress_bar,
**pbar_kwargs,
)
stack.enter_context(pbar)
callback = pbar.as_callback()
with stack:
if download_dir:
return self._download_dir(
from_info, to_info, callback=callback, jobs=jobs, **kwargs
)
return self._download_file(from_info, to_info, callback=callback)
download_file = partialmethod(download, _only_file=True)
def _download_dir(
self,
from_info: AnyFSPath,
to_info: AnyFSPath,
callback=DEFAULT_CALLBACK,
jobs: int = None,
**kwargs,
):
from .local import localfs
from_infos = list(self.find(from_info, **kwargs))
if not from_infos:
makedirs(to_info, exist_ok=True)
return None
to_infos = (
localfs.path.join(to_info, *self.path.relparts(info, from_info))
for info in from_infos
)
callback.set_size(len(from_infos))
download_files = FsspecCallback.wrap_fn(callback, self._download_file)
max_workers = jobs or self.jobs
with ThreadPoolExecutor(max_workers=max_workers) as executor:
futures = [
executor.submit(download_files, from_info, to_info)
for from_info, to_info in zip(from_infos, to_infos)
]
# NOTE: unlike pulling/fetching cache, where we need to
# download everything we can, not raising an error here might
# turn very ugly, as the user might think that he has
# downloaded a complete directory, while having a partial one,
# which might cause unexpected results in his pipeline.
for future in as_completed(futures):
# NOTE: executor won't let us raise until all futures that
# it has are finished, so we need to cancel them ourselves
# before re-raising.
exc = future.exception()
if exc:
for entry in futures:
entry.cancel()
raise exc
def _download_file(
self,
from_info: AnyFSPath,
to_info: AnyFSPath,
callback=DEFAULT_CALLBACK,
) -> None:
from .local import localfs
makedirs(localfs.path.parent(to_info), exist_ok=True)
tmp_file = tmp_fname(to_info)
logger.debug("Downloading '%s' to '%s'", from_info, to_info)
try:
# noqa, pylint: disable=no-member
self.get_file(from_info, tmp_file, callback=callback)
except Exception: # pylint: disable=broad-except
# do we need to rollback makedirs for previously not-existing
# directories?
with contextlib.suppress(FileNotFoundError):
os.unlink(tmp_file)
raise
move(tmp_file, to_info)
class ObjectFileSystem(FileSystem): # pylint: disable=abstract-method
TRAVERSE_PREFIX_LEN = 3
def makedirs(self, path: AnyFSPath, **kwargs: Any) -> None:
# For object storages make this method a no-op. The original
# fs.makedirs() method will only check if the bucket exists
# and create if it doesn't though we don't want to support
# that behavior, and the check will cost some time so we'll
# simply ignore all mkdir()/makedirs() calls.
return None
def _isdir(self, path: AnyFSPath) -> bool:
# Directory in object storages are interpreted differently
# among different fsspec providers, so this logic is a temporary
# measure for us to adapt as of now. It checks whether it is a
# directory (as in a prefix with contents) or whether it is an empty
# file where it's name ends with a forward slash
entry = self.info(path)
return entry["type"] == "directory" or (
entry["size"] == 0
and entry["type"] == "file"
and entry["name"].endswith("/")
)
def isdir(self, path: AnyFSPath) -> bool:
try:
return self._isdir(path)
except FileNotFoundError:
return False
def isfile(self, path: AnyFSPath) -> bool:
try:
return not self._isdir(path)
except FileNotFoundError:
return False
def find(self, path: AnyFSPath, prefix: bool = False) -> Iterator[str]:
if prefix:
with_prefix = self.path.parent(path)
files = self.fs.find(with_prefix, prefix=self.path.parts(path)[-1])
else:
with_prefix = path
files = self.fs.find(path)
# When calling find() on a file, it returns the same file in a list.
# For object-based storages, the same behavior applies to empty
# directories since they are represented as files. This condition
# checks whether we should yield an empty list (if it is an empty
# directory) or just yield the file itself.
if len(files) == 1 and files[0] == with_prefix and self.isdir(path):
return None
yield from files
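# Illustrative sketch (not part of dvc): concrete filesystems plug an fsspec
# implementation into the cached ``fs`` property and inherit the high-level
# helpers above. The class below is a hypothetical example, not a real dvc
# filesystem.
#
#   from fsspec.implementations.memory import MemoryFileSystem
#
#   class MemFS(FileSystem):
#       scheme = "memory"
#
#       @cached_property
#       def fs(self) -> "AbstractFileSystem":
#           return MemoryFileSystem()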
```
#### File: unit/remote/test_base.py
```python
import math
import posixpath
from unittest import mock
from dvc.fs.base import FileSystem
from dvc.objects.db import ObjectDB
class _CallableOrNone:
"""Helper for testing if object is callable() or None."""
def __eq__(self, other):
return other is None or callable(other)
CallableOrNone = _CallableOrNone()
@mock.patch.object(ObjectDB, "_list_hashes_traverse")
@mock.patch.object(ObjectDB, "list_hashes_exists")
def test_hashes_exist(object_exists, traverse, dvc):
odb = ObjectDB(FileSystem(), None)
# remote does not support traverse
odb.fs.CAN_TRAVERSE = False
with mock.patch.object(odb, "_list_hashes", return_value=list(range(256))):
hashes = set(range(1000))
odb.hashes_exist(hashes)
object_exists.assert_called_with(hashes, None, None)
traverse.assert_not_called()
odb.fs.CAN_TRAVERSE = True
# large remote, small local
object_exists.reset_mock()
traverse.reset_mock()
with mock.patch.object(
odb, "_list_hashes", return_value=list(range(2048))
):
hashes = list(range(1000))
odb.hashes_exist(hashes)
# verify that _odb_paths_with_max() short circuits
# before returning all 2048 remote hashes
max_hashes = math.ceil(
odb._max_estimation_size(hashes)
/ pow(16, odb.fs.TRAVERSE_PREFIX_LEN)
)
assert max_hashes < 2048
object_exists.assert_called_with(
frozenset(range(max_hashes, 1000)), None, None
)
traverse.assert_not_called()
# large remote, large local
object_exists.reset_mock()
traverse.reset_mock()
odb.fs._JOBS = 16
with mock.patch.object(odb, "_list_hashes", return_value=list(range(256))):
hashes = list(range(1000000))
odb.hashes_exist(hashes)
object_exists.assert_not_called()
traverse.assert_called_with(
256 * pow(16, odb.fs.TRAVERSE_PREFIX_LEN),
set(range(256)),
jobs=None,
name=None,
)
@mock.patch.object(ObjectDB, "_list_hashes", return_value=[])
@mock.patch.object(ObjectDB, "_path_to_hash", side_effect=lambda x: x)
def test_list_hashes_traverse(_path_to_hash, list_hashes, dvc):
odb = ObjectDB(FileSystem(), None)
odb.fs_path = "foo"
# parallel traverse
size = 256 / odb.fs._JOBS * odb.fs.LIST_OBJECT_PAGE_SIZE
list(odb._list_hashes_traverse(size, {0}))
for i in range(1, 16):
list_hashes.assert_any_call(f"{i:0{odb.fs.TRAVERSE_PREFIX_LEN}x}")
for i in range(1, 256):
list_hashes.assert_any_call(f"{i:02x}")
# default traverse (small remote)
size -= 1
list_hashes.reset_mock()
list(odb._list_hashes_traverse(size - 1, {0}))
list_hashes.assert_called_with(None)
def test_list_hashes(dvc):
odb = ObjectDB(FileSystem(), None)
odb.fs_path = "foo"
with mock.patch.object(
odb, "_list_paths", return_value=["12/3456", "bar"]
):
hashes = list(odb._list_hashes())
assert hashes == ["123456"]
def test_list_paths(dvc):
path = "foo"
odb = ObjectDB(FileSystem(), path)
with mock.patch.object(odb.fs, "find", return_value=[]) as walk_mock:
for _ in odb._list_paths():
pass
walk_mock.assert_called_with(path, prefix=False)
for _ in odb._list_paths(prefix="000"):
pass
walk_mock.assert_called_with(
posixpath.join(path, "00", "0"), prefix=True
)
``` |
{
"source": "jhhwang4195/lbaas",
"score": 2
} |
#### File: xos/synchronizer/lbaas_log.py
```python
import inspect
from xos.logger import Logger, logging
from os.path import basename
logger = Logger(level=logging.DEBUG)
logger.setLevel(logging.DEBUG)
def debug(msg):
logger.debug(basename(str(inspect.stack()[1][1])) + ':' +
str(inspect.stack()[1][2]) + ' ' +
str(inspect.stack()[1][3]) + '() ' +
str(msg))
def info(msg):
logger.info(basename(str(inspect.stack()[1][1])) + ':' +
str(inspect.stack()[1][2]) + ' ' +
str(inspect.stack()[1][3]) + '() ' +
str(msg))
def error(msg):
logger.error(basename(str(inspect.stack()[1][1])) + ':' +
str(inspect.stack()[1][2]) + ' ' +
str(inspect.stack()[1][3]) + '() ' +
str(msg))
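# Usage sketch (not part of the original module): each helper prefixes the
# message with the caller's file name, line number and function name, so
#     info("creating pool")
# called from line 42 of steps.py inside sync_record() logs roughly
#     "steps.py:42 sync_record() creating pool"
# (file, line and function here are hypothetical).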
``` |
{
"source": "JHibbard/rest-api-base",
"score": 3
} |
#### File: rab/utils/discover.py
```python
import pkgutil
import importlib
# Internal Libraries
import rab
def iter_namespace(ns_pkg):
# Specifying the second argument (prefix) to iter_modules makes the
# returned name an absolute name instead of a relative one. This allows
# import_module to work without having to do additional modification to
# the name.
return pkgutil.iter_modules(ns_pkg.__path__, ns_pkg.__name__ + ".")
try:
import rab.plugins
except ModuleNotFoundError as error:
pass
apis = (
{
name: importlib.import_module(name)
for finder, name, ispkg in iter_namespace(rab.plugins)
}
if hasattr(rab, "plugins")
else {}
)
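# Usage sketch (not part of the original module): any installed package that
# provides modules under the ``rab.plugins`` namespace is discovered at import
# time and exposed in ``apis`` keyed by dotted module name (which plugins exist
# depends on what is installed; the loop below just lists them).
if __name__ == "__main__":
    for name, module in sorted(apis.items()):
        print(name, module)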
``` |
{
"source": "jhickman-prominent/geowave",
"score": 2
} |
#### File: pygw/base/geowave_object.py
```python
from py4j.java_gateway import is_instance_of as _is_instance_of
from pygw.config import java_gateway
class GeoWaveObject:
"""
Base Class for pygw objects that wrap Java objects.
"""
def __init__(self, java_ref):
self._java_ref = java_ref
def __repr__(self):
return "pygw {} Object with JavaRef@{}".format(self.__class__, self._java_ref)
def __eq__(self, other):
        if not isinstance(other, GeoWaveObject):
return False
return self._java_ref == other._java_ref
def is_instance_of(self, java_class):
"""
Returns:
True if this object is of the type represented by the given java class.
"""
        # Check the wrapped Java object against the given Java class through py4j.
        return _is_instance_of(java_gateway, self._java_ref, java_class)
```
#### File: pygw/geotools/simple_feature_type_builder.py
```python
from pygw.config import java_pkg
from pygw.base import GeoWaveObject
from .simple_feature_type import SimpleFeatureType
from .attribute_descriptor import AttributeDescriptor
class SimpleFeatureTypeBuilder(GeoWaveObject):
"""
Builds `pygw.geotools.simple_feature_type.SimpleFeatureType` instances.
"""
def __init__(self):
self.attributes = []
super().__init__(java_pkg.org.geotools.feature.simple.SimpleFeatureTypeBuilder())
def set_name(self, name):
"""
Sets the name of the feature type.
Args:
name (str): The name to use.
Returns:
This feature type builder.
"""
self._java_ref.setName(name)
return self
def set_namespace_uri(self, namespace_uri):
"""
Sets the namespace URI of the feature type.
Args:
namespace_uri (str): The namespace URI to use.
Returns:
This feature type builder.
"""
self._java_ref.setNamespaceURI(namespace_uri)
return self
def set_srs(self, srs):
"""
Sets the spatial reference system of the feature type.
Args:
srs (str): The spatial reference system to use.
Returns:
This feature type builder.
"""
self._java_ref.setSRS(srs)
return self
def add(self, attribute_descriptor):
"""
Adds an attribute to the feature type.
Args:
attribute_descriptor (pygw.geotools.attribute_descriptor.AttributeDescriptor): The attribute to add.
Returns:
This feature type builder.
"""
if isinstance(attribute_descriptor, AttributeDescriptor):
self.attributes.append(attribute_descriptor)
self._java_ref.add(attribute_descriptor._java_ref)
return self
else:
raise ValueError("attribute_descriptor should be of type AttributeDescriptor")
def build_feature_type(self):
"""
Builds the configured feature type.
Returns:
A `pygw.geotools.simple_feature_type.SimpleFeatureType` with the given configuration.
"""
return SimpleFeatureType(self._java_ref.buildFeatureType(), self.attributes)
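# Usage sketch (not part of the original module): chain the setters, add
# pre-built AttributeDescriptor instances and build. ``point_attr`` below is
# assumed to be an AttributeDescriptor created elsewhere; building also
# requires a running py4j gateway, so this is illustration only.
#
#   builder = SimpleFeatureTypeBuilder()
#   feature_type = (builder
#                   .set_name("my_points")
#                   .set_srs("EPSG:4326")
#                   .add(point_attr)
#                   .build_feature_type())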
```
#### File: pygw/query/query_hint_key.py
```python
from enum import Enum
from pygw.config import geowave_pkg
class QueryHintKey(Enum):
"""
Keys for query hints.
"""
MAX_RANGE_DECOMPOSITION = 0
@classmethod
def get_key(cls, key):
"""
Gets the Java hint key from the given QueryHintKey.
Args:
key (pygw.query.query_hint_key.QueryHintKey): The enum value of QueryHintKey to get.
Returns:
The Java equivalent of the query hint key.
"""
return {
QueryHintKey.MAX_RANGE_DECOMPOSITION: geowave_pkg.core.store.util.DataStoreUtils.MAX_RANGE_DECOMPOSITION
}.get(key)
```
#### File: query/statistics/vector_statistics_query_builder.py
```python
from pygw.config import geowave_pkg
from ..query_builder import QueryBuilder
class VectorStatisticsQueryBuilder(QueryBuilder):
"""
A builder for creating statistics queries for vector data.
"""
def __init__(self):
j_stats_qbuilder = geowave_pkg.core.geotime.store.query.api.VectorStatisticsQueryBuilder.newBuilder()
super().__init__(j_stats_qbuilder)
def bbox(self):
# TODO: support FeatureBoundingBoxStatistics builder
return self._java_ref.bbox()
def time_range(self):
# TODO: support FeatureTimeRangeStatistics builder
return self._java_ref.timeRange()
```
#### File: store/accumulo/accumulo_options.py
```python
from pygw.config import geowave_pkg
from pygw.store import DataStoreOptions
class AccumuloOptions(DataStoreOptions):
"""
Accumulo data store options.
"""
def __init__(self):
super().__init__(geowave_pkg.datastore.accumulo.config.AccumuloRequiredOptions())
def set_zookeeper(self, zookeeper):
"""
        Sets the list of Zookeeper servers that the Accumulo instance uses as a comma-separated
string.
Args:
zookeeper (str): A comma-separated list of Zookeeper servers.
"""
self._java_ref.setZookeeper(zookeeper)
def get_zookeeper(self):
"""
Returns:
            A comma-separated list of Zookeeper servers.
"""
return self._java_ref.getZookeeper()
def set_instance(self, instance):
"""
Sets the Accumulo instance ID to use for the data store.
Args:
instance (str): The Accumulo instance ID to use.
"""
self._java_ref.setInstance(instance)
def get_instance(self):
"""
Returns:
The Accumulo instance ID.
"""
return self._java_ref.getInstance()
def set_user(self, user):
"""
Sets the Accumulo user ID.
Args:
user (str): The Accumulo user ID.
"""
self._java_ref.setUser(user)
def get_user(self):
"""
Returns:
The Accumulo user ID.
"""
return self._java_ref.getUser()
def set_password(self, password):
"""
Sets the Accumulo password.
Args:
password (str): The Accumulo password.
"""
self._java_ref.setPassword(password)
def get_password(self):
"""
Returns:
The Accumulo password.
"""
return self._java_ref.getPassword()
def set_use_locality_groups(self, use_locality_groups):
"""
Sets whether or not to use locality groups.
Args:
use_locality_groups (bool): Whether or not to use locality groups.
"""
self._base_options.setUseLocalityGroups(use_locality_groups)
def is_use_locality_groups(self):
"""
Returns:
True if locality groups are enabled, False otherwise.
"""
return self._base_options.isUseLocalityGroups()
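# Usage sketch (not part of the original module): typical configuration before
# handing the options to a data store factory. The connection values are
# hypothetical, and a running py4j gateway is required, so this is
# illustration only.
#
#   options = AccumuloOptions()
#   options.set_zookeeper("zk1:2181,zk2:2181")
#   options.set_instance("accumulo")
#   options.set_user("root")
#   options.set_password("secret")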
``` |
{
"source": "jhidalgocarrio/e2calib",
"score": 3
} |
#### File: python/conversion/format.py
```python
from pathlib import Path
dat_conversion_possible = True
raw_conversion_possible = True
ros_found = True
pocolog_found = True
try:
import conversion.ros
except ImportError:
print("Conversion from .bag is not possible. If you want to extract .bag files, please install the ROS packages specified in the README.md")
ros_found = False
try:
import conversion.prophesee
except ImportError:
print("Conversion from .raw is not possible. If you want to extract .raw files, please install Metavision 2.2")
raw_conversion_possible = False
try:
import conversion.prophesee_dat
except ImportError:
print("Conversion from .dat is not possible. If you want to extract .dat files, please install Metavision 2.0")
dat_conversion_possible = False
metavision_found = False
try:
import conversion.pocolog
except ImportError:
print("Conversion from .log is not possible. If you want to extract .log files, please install the ROCK packages specified in the README.md")
pocolog_found = False
def get_generator(input_file: Path, delta_t_ms: int=1000, topic: str='/dvs/events'):
if input_file.suffix == '.raw':
assert raw_conversion_possible, 'Could not find Metavision packages to read .raw file'
return lambda: conversion.prophesee.ev_generator(input_file, delta_t_ms=delta_t_ms)
if input_file.suffix == '.dat':
assert dat_conversion_possible, 'Could not find Metavision packages to read .dat file'
return lambda: conversion.prophesee_dat.ev_generator(input_file, delta_t_ms=delta_t_ms)
if input_file.suffix == '.log':
assert pocolog_found, 'Could not find Rock packages'
return lambda: conversion.pocolog.ev_generator(input_file, delta_t_ms=delta_t_ms, topic=topic)
assert input_file.suffix == '.bag', f'File format {input_file.suffix} is not supported'
    assert ros_found, 'Could not find ROS packages'
return lambda: conversion.ros.ev_generator(input_file, delta_t_ms=delta_t_ms, topic=topic)
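# Added usage sketch: 'recording.bag' is a placeholder path. get_generator returns a
# zero-argument callable; calling it yields event slices of delta_t_ms milliseconds.
if __name__ == '__main__':
    generator = get_generator(Path('recording.bag'), delta_t_ms=500, topic='/dvs/events')
    num_slices = sum(1 for _ in generator())
    print(f'read {num_slices} slices of 500 ms')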
```
#### File: python/conversion/prophesee_dat.py
```python
from pathlib import Path
import warnings
import numpy as np
from conversion.prophesee_utils import load_td_data
from data.format import Events
def ev_generator(rawfile: Path, delta_t_ms: int=1000) -> Events:
assert rawfile.exists()
assert rawfile.suffix == '.dat'
delta_t_us = delta_t_ms * 1000
cd_data = load_td_data(rawfile)
for t in range(np.min(cd_data['t']), np.max(cd_data['t']), delta_t_us):
ev = cd_data[cd_data['t']>=t]
ev = ev[ev['t']<t+delta_t_us]
is_sorted = np.all(ev['t'][:-1] <= ev['t'][1:])
if not is_sorted:
warnings.warn('Event timestamps are not sorted.', stacklevel=2)
events = Events(
ev['x'].astype('uint16'),
ev['y'].astype('uint16'),
ev['p'].astype('uint8'),
ev['t'].astype('int64'))
yield events
```
#### File: python/conversion/ros.py
```python
from pathlib import Path
import numpy as np
import tqdm
import rosbag
from data.accumulator import EventAccumulatorRos
def ev_generator(bagpath: Path, delta_t_ms: int=1000, topic: str='/dvs/events'):
assert bagpath.exists()
assert bagpath.suffix == '.bag'
delta_t_ns = delta_t_ms * 10**6
t_ev_acc_end_ns = None
ev_acc = EventAccumulatorRos()
init = False
last_time = 0
with rosbag.Bag(str(bagpath), 'r') as bag:
pbar = tqdm.tqdm(total=bag.get_message_count(topic))
for topic, msg, ros_time in bag.read_messages(topics=[topic]):
if not init:
init = True
t_start_ns = msg.events[0].ts.to_nsec()
t_ev_acc_end_ns = t_start_ns + delta_t_ns
for event in msg.events:
time = event.ts.to_nsec()
assert time >= last_time, 'event timestamps must be equal or greater than the previous one'
last_time = time
if time < t_ev_acc_end_ns:
ev_acc.add_event(event)
else:
events = ev_acc.get_events()
yield events
t_ev_acc_end_ns = t_ev_acc_end_ns + delta_t_ns
ev_acc = EventAccumulatorRos()
ev_acc.add_event(event)
pbar.update(1)
``` |
{
"source": "jhidding/blender-shader-dsl",
"score": 3
} |
#### File: jhidding/blender-shader-dsl/shader_dsl.py
```python
from __future__ import annotations
from dataclasses import dataclass
from typing import List, Dict, Tuple, Any, Union
# ~\~ end
# ~\~ begin <<docs/python_dsl.md|imports>>[1]
import functools
# ~\~ end
# ~\~ begin <<docs/python_dsl.md|imports>>[2]
import bpy
# ~\~ end
# ~\~ begin <<docs/python_dsl.md|graph>>[0]
@dataclass
class Graph:
nodes: List[Node]
links: List[Tuple[Output, Input]]
@property
def root(self):
return self.nodes[0]
# ~\~ begin <<docs/python_dsl.md|graph-getattr>>[0]
def __getattr__(self, name):
return Promise(self, Output(self.root, name))
# ~\~ end
# ~\~ end
# ~\~ begin <<docs/python_dsl.md|graph>>[1]
@dataclass
class Node:
name: str
properties: Dict[str, Any]
input_defaults: Dict[Union[int, str], Value]
# ~\~ end
# ~\~ begin <<docs/python_dsl.md|graph>>[2]
@dataclass
class Output:
node: Node
name: str
# ~\~ end
# ~\~ begin <<docs/python_dsl.md|graph>>[3]
@dataclass
class Input:
node: Node
name: Union[int, str]
# ~\~ end
# ~\~ begin <<docs/python_dsl.md|graph>>[4]
@dataclass
class Value:
value: Any
# ~\~ end
# ~\~ begin <<docs/python_dsl.md|graph>>[5]
@dataclass
class Promise:
graph: Graph
output: Output
# ~\~ end
# ~\~ begin <<docs/python_dsl.md|decorator>>[0]
def decorator(f):
"""Creates a paramatric decorator from a function. The resulting decorator
will optionally take keyword arguments."""
@functools.wraps(f)
def decoratored_function(*args, **kwargs):
if args and len(args) == 1:
return f(*args, **kwargs)
if args:
raise TypeError(
"This decorator only accepts extra keyword arguments.")
return lambda g: f(g, **kwargs)
return decoratored_function
# ~\~ end
# ~\~ begin <<docs/python_dsl.md|node>>[0]
@decorator
def node(f, properties=["location"]):
@functools.wraps(f)
def g(*args, **kwargs):
# ~\~ begin <<docs/python_dsl.md|node-body>>[0]
name = f.__name__
property_values = {}
input_defaults = {}
# ~\~ end
# ~\~ begin <<docs/python_dsl.md|node-body>>[1]
links = []
nodes = [Node(name, property_values, input_defaults)]
# ~\~ end
# ~\~ begin <<docs/python_dsl.md|node-body>>[2]
def merge_graph(g):
for n in g.nodes:
if n not in nodes:
nodes.append(n)
for link in g.links:
if link not in links:
links.append(link)
# ~\~ end
# ~\~ begin <<docs/python_dsl.md|node-body>>[3]
for i, a in enumerate(args):
if isinstance(a, Value):
input_defaults[i] = a
elif isinstance(a, Promise):
merge_graph(a.graph)
links.append((a.output, Input(nodes[0], i)))
# ~\~ end
# ~\~ begin <<docs/python_dsl.md|node-body>>[4]
for k, v in kwargs.items():
if k in properties:
property_values[k] = v
elif isinstance(v, Value):
input_defaults[k] = v
elif isinstance(v, Promise):
merge_graph(v.graph)
links.append((v.output, Input(nodes[0], k)))
# ~\~ end
# ~\~ begin <<docs/python_dsl.md|node-body>>[5]
return Graph(nodes, links)
# ~\~ end
return g
# ~\~ end
# ~\~ begin <<docs/python_dsl.md|make-material>>[0]
def demangle(name: Union[int, str]) -> Union[int, str]:
if isinstance(name, int):
return name
def cap(s):
return s[0].upper() + s[1:]
return ' '.join([cap(w) for w in name.split('_')])
# ~\~ end
# ~\~ begin <<docs/python_dsl.md|make-material>>[1]
def make_material(name: str, graph: Graph, **kwargs):
material = bpy.data.materials.new(name)
material.use_nodes = True
nodes = material.node_tree.nodes
nodes.clear()
nodemap = {}
for n in graph.nodes:
s = nodes.new(type=f"ShaderNode{n.name}")
nodemap[id(n)] = s
for k, v in n.properties.items():
setattr(s, k, v)
for q, v in n.input_defaults.items():
key = demangle(q)
s.inputs[key].default_value = v.value
links = material.node_tree.links
for (o, i) in graph.links:
node_out = nodemap[id(o.node)]
node_in = nodemap[id(i.node)]
links.new(node_out.outputs[demangle(o.name)],
node_in.inputs[demangle(i.name)])
for k, v in kwargs.items():
setattr(material, k, v)
return material
# ~\~ end
# ~\~ begin <<docs/python_dsl.md|shaders>>[0]
@node(properties=["location", "layer_name"])
def VertexColor(**kwargs):
pass
# ~\~ end
# ~\~ begin <<docs/python_dsl.md|shaders>>[1]
@node(properties=["location"])
def BsdfPrincipled(**kwargs):
pass
@node(properties=["location"])
def OutputMaterial(**kwargs):
pass
@node(properties=["location"])
def MixShader(*args, **kwargs):
pass
@node
def BsdfTransparent(**kwargs):
pass
@node
def BsdfDiffuse(**kwargs):
pass
@node
def Emission(**kwargs):
pass
# ~\~ end
# ~\~ begin <<docs/about.md|about>>[0]
bl_info = {
"name": "Shader DSL",
"blender": (2, 83, 0),
"category": "Development",
"author": "<NAME>",
"version": (0, 1),
"description": "DSL for scripting node materials."
}
def register():
pass
def unregister():
pass
# ~\~ end
# ~\~ end
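# Added usage sketch (not part of the literate source): node calls return Graph
# objects, and attribute access on a Graph yields a Promise for that output socket.
# The socket names "BSDF" and "surface" follow Blender's shader node naming and are
# assumptions here; make_material must run inside Blender because it uses bpy.
def example_material():
    transparent = BsdfTransparent()
    graph = OutputMaterial(surface=transparent.BSDF, location=(300, 0))
    return make_material("ExampleTransparent", graph)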
``` |
{
"source": "jhidding/ci-research",
"score": 3
} |
#### File: ci-research/boston/dataset.py
```python
from sklearn.datasets import load_boston
from bokeh.models import ColumnDataSource
FEATURE_INFO = {
"CRIM": "per capita crime rate by town",
"ZN": "proportion of residential land zoned for lots over 25,000 sq.ft.",
"INDUS": "proportion of non-retail business acres per town",
"CHAS": "Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)",
"NOX": "nitric oxides concentration (parts per 10 million)",
"RM": "average number of rooms per dwelling",
"AGE": "proportion of owner-occupied units built prior to 1940",
"DIS": "weighted distances to five Boston employment centres",
"RAD": "index of accessibility to radial highways",
"TAX": "full-value property-tax rate per $10,000",
"PTRATIO": "pupil-teacher ratio by town",
"B": "1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town",
"LSTAT": "%% lower status of the population",
}
class BostonDataset:
""" Encapsulates interactive Bokeh datasources for the Boston dataset """
def __init__(self):
# pylint: disable=no-member
batch = load_boston()
self._feature_names = batch.feature_names.tolist()
self._features = batch.data
self._prices = batch.target
self._x_feature = self._feature_names[0]
self._y_feature = self._feature_names[1]
self._source = ColumnDataSource()
self._update()
def _update(self):
x_index = self._feature_names.index(self._x_feature)
y_index = self._feature_names.index(self._y_feature)
x = self._features[:, x_index]
y = self._features[:, y_index]
self._source.data = dict(x=x, y=y, price=self._prices)
@property
def source(self):
""" The data sources for the different targets in the Boston dataset """
return self._source
@property
def prices(self):
""" The target house prices """
return self._prices
@property
def title(self):
""" The current plot title given the x and y features """
return "{} x {}".format(self._x_feature, self._y_feature)
@property
def num_features(self):
""" The number of feature dimensions """
return len(self._feature_names)
@property
def feature_names(self):
""" The names of the features """
return self._feature_names
@property
def x_feature(self):
""" The current x-axis feature """
return self._x_feature
@property
def x_title(self):
""" The readable title for the x-axis """
return FEATURE_INFO[self._x_feature]
@x_feature.setter
def x_feature(self, feature):
""" Used to set the current x-axis feature.
Description:
This method updates the data sources to reflect the new feature.
Args:
feature -- the feature name
"""
if feature != self._x_feature:
self._x_feature = feature
self._update()
@property
def y_feature(self):
""" The current y-axis feature """
return self._y_feature
@property
def y_title(self):
""" The readable title for the y-axis """
return FEATURE_INFO[self._y_feature]
@y_feature.setter
def y_feature(self, feature):
""" Used to set the current y-axis feature.
Description:
This method updates the data sources to reflect the new feature.
Args:
feature -- the feature name
"""
if feature != self._y_feature:
self._y_feature = feature
self._update()
@property
def name(self):
""" The correct name for the plot """
return "Boston Dataset"
```
#### File: ci-research/tests/test_iris.py
```python
import pytest
import numpy as np
from iris import IrisDataset
@pytest.fixture(scope="module")
def iris():
""" Module fixture for the IrisDataset class """
return IrisDataset()
def test_features(iris):
""" Test that the dataset exposes features correctly """
assert iris.num_features == 4
assert iris.feature_names == [
"sepal length (cm)",
"sepal width (cm)",
"petal length (cm)",
"petal width (cm)",
]
def test_targets(iris):
""" Test that the dataset exposes targets correctly """
assert iris.num_targets == 3
np.testing.assert_array_equal(
iris.target_names, ["setosa", "versicolor", "virginica"]
)
@pytest.mark.parametrize(
"name, x_feature, y_feature, x_vals, y_vals",
[
("setosa", "sepal length (cm)", "sepal width (cm)", [5.1, 4.9], [3.5, 3.0]),
("versicolor", "sepal length (cm)", "sepal width (cm)", [7, 6.4], [3.2, 3.2]),
("virginica", "sepal length (cm)", "sepal width (cm)", [6.3, 5.8], [3.3, 2.7]),
("setosa", "petal length (cm)", "petal width (cm)", [1.4, 1.4], [0.2, 0.2]),
("versicolor", "petal length (cm)", "petal width (cm)", [4.7, 4.5], [1.4, 1.5]),
("virginica", "petal length (cm)", "petal width (cm)", [6, 5.1], [2.5, 1.9]),
],
)
def test_feature_values(iris, name, x_feature, y_feature, x_vals, y_vals):
""" Test that the setting of feature values works as expected """
iris.x_feature = x_feature
iris.y_feature = y_feature
assert iris.title == "{} x {}".format(x_feature, y_feature)
data = iris.sources[name].data
np.testing.assert_array_almost_equal(data["x"][:2], x_vals)
np.testing.assert_array_almost_equal(data["y"][:2], y_vals)
``` |
{
"source": "jhidding/noodles-experiments",
"score": 3
} |
#### File: noodles-experiments/primes/multiprocessing_example.py
```python
from noodles import (
gather, schedule, run_process, serial, run_parallel)
import multiprocessing
import threading
import queue
from queue import Empty
import time
from utilities_cython import sum_primes
from utilities_python import sumPrimes_noodles
def worker(q):
while True:
try:
x = q.get(block=False)
print(sumPrimes_noodles(x))
except Empty:
break
if __name__ == "__main__":
range_of_values = range(int(1e6), int(2e6), int(5e4))
ncpus = multiprocessing.cpu_count()
print("First, one thread.")
start_st = time.time()
for i in range_of_values:
print(sum_primes(i))
end_st = time.time()
print()
print("Next, as many threads as there are logical cores, taken from multiprocessing.cpu_count().")
start_mt = time.time()
my_q = queue.Queue()
for i in range_of_values:
my_q.put(i)
procs = [
threading.Thread(target=worker, args=(my_q,))
for i in range(ncpus)]
for ps in procs:
ps.start()
for ps in procs:
ps.join()
end_mt = time.time()
print()
print("Now Noodles with as many threads as there are logical cores.")
start_noodles = time.time()
result = run_parallel(
gather(*(schedule(sumPrimes_noodles)(x) for x in range_of_values)),
n_threads=ncpus)
for item in result:
print(item)
end_noodles = time.time()
print()
print("A single thread takes {0:.2f} seconds".format(end_st - start_st))
print("Multithreading takes {0:.2f} seconds".format(end_mt - start_mt))
print("Noodles takes {0:.2f} seconds".format( end_noodles - start_noodles))
```
#### File: noodles-experiments/primes/utilities_python.py
```python
from utilities_cython import sum_primes
def sumPrimes_noodles(n):
return sum_primes(n)
``` |
{
"source": "jhidding/parallel-python-workshop",
"score": 2
} |
#### File: parallel-python-workshop/test/test_environment.py
```python
import numpy as np
import scipy
import dask
import snakemake
import numba
from packaging.version import parse as version
def test_library_versions():
assert version(np.__version__) >= version("1.15")
assert version(scipy.__version__) >= version("1.5")
assert version(dask.__version__) >= version("2.20")
assert version(snakemake.__version__) >= version("5.28")
assert version(numba.__version__) >= version("0.50")
```
#### File: parallel-python-workshop/test/test_pi.py
```python
import numpy as np
import numba
import random
def calc_pi(N):
M = 0
for i in range(N):
# Simulate impact coordinates
x = random.uniform(-1, 1)
y = random.uniform(-1, 1)
# True if impact happens inside the circle
if x**2 + y**2 < 1.0:
M += 1
return 4 * M / N
def calc_pi_numpy(N):
# Simulate impact coordinates
pts = np.random.uniform(-1, 1, (2, N))
# Count number of impacts inside the circle
M = np.count_nonzero((pts**2).sum(axis=0) < 1)
return 4 * M / N
@numba.jit
def sum_range_numba(a: int):
"""Compute the sum of the numbers in the range [0, a)."""
x = 0
for i in range(a):
x += i
return x
@numba.jit
def calc_pi_numba(N):
M = 0
for i in range(N):
# Simulate impact coordinates
x = random.uniform(-1, 1)
y = random.uniform(-1, 1)
# True if impact happens inside the circle
if x**2 + y**2 < 1.0:
M += 1
return 4 * M / N
def test_calc_pi():
assert round(calc_pi(10**6)) == 3
def test_calc_pi_numpy():
assert round(calc_pi_numpy(10**6)) == 3
def test_sum_range_numba():
for n in np.random.randint(1000, 10000, size=10):
assert sum_range_numba(n) == (n * (n - 1)) // 2
def test_calc_pi_numba():
assert round(calc_pi_numba(10**6)) == 3
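# Added illustration: all three estimators converge on pi as N grows; the exact
# output varies between runs because the sampling is random.
if __name__ == "__main__":
    for name, estimator in [("calc_pi", calc_pi),
                            ("calc_pi_numpy", calc_pi_numpy),
                            ("calc_pi_numba", calc_pi_numba)]:
        print(name, estimator(10**6))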
```
#### File: parallel-python-workshop/test/test_snakemake.py
```python
from subprocess import run
from shutil import copy
def test_snakemake(tmp_path):
copy("./test/hello/Snakefile", tmp_path)
run(["snakemake", "-j1"], cwd=tmp_path, check=True)
assert (tmp_path / "combined.txt").exists()
``` |
{
"source": "jhidding/qnoodles",
"score": 3
} |
#### File: qnoodles/qnoodles/nodebox.py
```python
import sys, os
from PySide import QtGui, QtCore
from PySide.QtCore import Qt
from .noodlet import Noodlet
class MySeparator(QtGui.QWidget):
"""
Qt doesn't have a `QSeparator` widget. This draws a horizontal line.
"""
def __init__(self, parent=None):
super(MySeparator, self).__init__(parent)
self.setAutoFillBackground(False)
self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
self.setFixedHeight(8)
def paintEvent(self, event):
pt = QtGui.QPainter(self)
pt.setRenderHints(pt.Antialiasing)
w, h = self.size().toTuple()
        pen = QtGui.QPen(QtGui.QBrush(Qt.black), 1)
        pt.setPen(pen)
        pt.drawLine(4, h/2, w-4, h/2)
class MyFrame(QtGui.QWidget):
"""
The standard `QFrame` doesn't have the fine-grained control over
appearance, different colors, rounded corners etc, that we need.
Maybe if I understand better how Qt+CSS works we can revert to using
`QFrame`.
"""
def __init__(self, parent=None):
super(MyFrame, self).__init__(parent)
self.setAutoFillBackground(False)
self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
def _alt_paintEvent(self, event):
pt = QtGui.QPainter(self)
pt.setRenderHints(pt.Antialiasing)
w, h = self.size().toTuple()
path = QtGui.QPainterPath()
brush = QtGui.QBrush(Qt.gray)
pen = QtGui.QPen(QtGui.QBrush(Qt.black), 3)
path.addRoundedRect(1, 1, w-2, h-2, 16, 16)
pt.fillPath(path, brush)
pt.strokePath(path, pen)
def paintEvent(self, event):
pt = QtGui.QPainter(self)
pt.setRenderHints(pt.Antialiasing)
w, h = self.size().toTuple()
path = QtGui.QPainterPath()
brush = QtGui.QBrush(Qt.gray)
pen = QtGui.QPen(QtGui.QBrush(Qt.black), 0.5)
path.addRoundedRect(1, 1, w-2, h-2, 8, 8)
pt.fillPath(path, brush)
pt.strokePath(path, pen)
def _make_widget(noodlet):
"""
Arguments:
noodlet - named tuple which should have `name`, `dtype`, `widget`
attributes.
Returns:
a QWidget
"""
w = QtGui.QLabel("{name} [{dtype}]".format(name=noodlet.name, dtype=noodlet.dtype.__name__))
if noodlet.direction == 'out':
w.setAlignment(QtCore.Qt.AlignRight)
w.setProperty('labelClass', 'noodlet')
w.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
w.setContentsMargins(5, 2, 5, 2)
return w
class NodeBox(MyFrame):
"""
    The NodeBox is the widget that displays a Node and its children. It also
    creates the Noodlets needed for this node, but those are added to the
    QGraphicsScene independently. This seems necessary for the noodlets to
    receive the mouse events.
    Only during the dragging of a node are the Noodlets grouped with the node
    so that they move in unison with the node. Once the mouse button is
    released the group is destroyed so that the noodlets receive their own
    events once more.
"""
def __init__(self, node, scene):
super(NodeBox, self).__init__()
self.scene = scene
self.data = node
style = str(open("static/qt-style.css", "r").read())
#self.setFrameStyle(self.StyledPanel | self.Plain)
self.box = QtGui.QVBoxLayout()
self.setLayout(self.box)
self.title = QtGui.QLabel(node.name, self)
self.title.setAlignment(Qt.AlignCenter | Qt.AlignTop)
self.title.setProperty('labelClass', 'title')
self.box.addWidget(self.title)
#self.items = [QtGui.QPushButton("Blah {0}".format(i), self) for i in range(3)]
self.input_items = [_make_widget(i) for i in self.data.input_noodlets()]
for i in self.input_items:
self.box.addWidget(i)
self._sep = MySeparator()
self.box.addWidget(self._sep)
self.output_items = [_make_widget(i) for i in self.data.output_noodlets()]
for i in self.output_items:
self.box.addWidget(i)
self.proxy = scene.addWidget(self)
self.proxy.setZValue(0)
self.move(*node.location)
#self.group = QtGui.QGraphicsItemGroup(self.proxy, scene)
#self.group.addToGroup(self.proxy)
self.noodlets = [Noodlet(*self.output_item_pos(i)) for i in self.output_items] \
+ [Noodlet(*self.input_item_pos(i)) for i in self.input_items]
for n in self.noodlets:
scene.addItem(n)
n.signal.pressed.connect(scene.noodletPressed)
n.signal.released.connect(scene.noodletReleased)
n.setZValue(10)
# self.group.addToGroup(n)
#scene.addItem(self.group)
#self.setProperty('frameClass', 'blue')
self.setStyleSheet(style)
self.dragging = False
self.show()
def input_item_pos(self, item):
x = self.x()
y = item.y() + item.height()/2 + self.y()
return x, y
def output_item_pos(self, item):
x = self.x() + self.width() - 2
y = item.y() + item.height()/2 + self.y()
return x, y
#class manual_drag:
def mousePressEvent(self, event):
self.group = QtGui.QGraphicsItemGroup(self.proxy, self.scene)
for n in self.noodlets:
n.setZValue(20)
self.group.addToGroup(n)
self.proxy.setZValue(19)
self.dragging = True
self.drag_pos = (event.x(), event.y())
#print("draging... ({0}, {1}) ".format(*self.drag_pos), end='', flush=True)
def mouseMoveEvent(self, event):
if self.dragging:
x = self.x(); y = self.y()
#self.scene.update(x-10, y-10, self.width() + 20, self.height()+20)
#for n in self.noodlets:
#n.update()
x += (event.x() - self.drag_pos[0])
y += (event.y() - self.drag_pos[1])
self.move(x, y)
def mouseReleaseEvent(self, event):
self.scene.destroyItemGroup(self.group)
for n in self.noodlets:
n.setZValue(10)
self.proxy.setZValue(0)
self.dragging = False
#rint("drop")
```
#### File: qnoodles/qnoodles/qnoodles.py
```python
import sys, os
from PySide import QtGui, QtCore
from PySide.QtCore import Qt
from .nodebox import NodeBox
#from .sourceview import SourceView
class NodeView(QtGui.QGraphicsView):
def __init__(self, scene):
super(NodeView, self).__init__(scene)
self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
self.show()
class NodeScene(QtGui.QGraphicsScene):
def __init__(self, data_model):
super(NodeScene, self).__init__()
self.nodes = [NodeBox(n, self) for i, n in data_model.all_nodes()]
def noodletPressed(self, i, s):
pass
#print("{0}-{1} pressed".format(i, s))
def noodletReleased(self, i, s):
pass
#print("{0}-{1} released".format(i, s))
class NoodlesWindow(QtGui.QMainWindow):
def __init__(self, data_model):
super(NoodlesWindow, self).__init__()
self.data_model = data_model
self.initUI()
def initUI(self):
style = str(open("static/qt-style.css", "r").read())
self.nodeScene = NodeScene(self.data_model)
self.nodeView = NodeView(self.nodeScene)
self.nodeView.setStyleSheet(style)
#self.sourceView = SourceView()
self.tabWidget = QtGui.QTabWidget()
self.tabWidget.addTab(self.nodeView, "Graph view")
#self.tabWidget.addTab(self.sourceView, "Source view")
self.setCentralWidget(self.tabWidget)
self.setGeometry(300, 300, 1024, 600)
self.setWindowTitle('Noodles')
self.setWindowIcon(QtGui.QIcon('static/noodles-icon.png'))
self.statusBar().showMessage('Ready')
exitAction = QtGui.QAction(QtGui.QIcon.fromTheme('application-exit'), '&Exit', self)
exitAction.setShortcut('Ctrl+Q')
exitAction.setStatusTip('Exit application')
exitAction.triggered.connect(self.close)
self.toolbar = self.addToolBar('Exit')
self.toolbar.addAction(exitAction)
menubar = self.menuBar()
fileMenu = menubar.addMenu('&File')
fileMenu.addAction(exitAction)
self.nodeRepository = QtGui.QToolBox()
self.flowNodeList = QtGui.QListWidget()
self.compositeNodeList = QtGui.QListWidget()
self.libraryNodeList = QtGui.QListWidget()
self.nodeRepository.addItem(self.flowNodeList, "flow control")
self.nodeRepository.addItem(self.libraryNodeList, "library nodes")
self.nodeRepository.addItem(self.compositeNodeList, "composite nodes")
dockWidget = QtGui.QDockWidget("Noodles node repository")
dockWidget.setWidget(self.nodeRepository)
self.addDockWidget(Qt.RightDockWidgetArea, dockWidget)
self.show()
def closeEvent(self, event):
pass
# reply = QtGui.QMessageBox.question(self, 'Message',
# "Are you sure to quit?", QtGui.QMessageBox.Yes |
# QtGui.QMessageBox.No, QtGui.QMessageBox.No)
# if reply == QtGui.QMessageBox.Yes:
# event.accept()
# else:
# event.ignore()
#self.sourceView.backend.stop()
def main(model):
app = QtGui.QApplication(sys.argv)
# Qode.backend.CodeCompletionWorker.providers.append(
# backend.DocumentWordsProvider())
# Qode.backend.serve_forever()
win = NoodlesWindow(model)
sys.exit(app.exec_())
```
#### File: qnoodles/qnoodles/sourceview.py
```python
import os
from PySide import QtGui, QtCore
from PySide.QtCore import Qt
os.environ['QT_API'] = 'pyside'
from pyqode.core import api as QodeApi
from pyqode.core import modes as QodeModes
from pyqode.core import panels as QodePanels
from pyqode.core import backend as QodeBackend
class SourceView(QodeApi.CodeEdit):
def __init__(self):
super(SourceView, self).__init__()
self.backend.start(QodeBackend.server.__file__)
self.modes.append(QodeModes.CodeCompletionMode())
self.modes.append(QodeModes.PygmentsSyntaxHighlighter(self.document()))
self.modes.append(QodeModes.CaretLineHighlighterMode())
self.panels.append(QodePanels.LineNumberPanel())
margin = self.modes.append(QodeModes.RightMarginMode())
margin.position = 80
margin.color = Qt.GlobalColor.gray
self.font_name = "Inconsolata"
self.font_size = 12
self.file.open(__file__)
``` |
{
"source": "jhietanen/stat-canard",
"score": 3
} |
#### File: stat-canard/tests/test_queries.py
```python
import unittest
import requests
import json
from src import settings
class QueryTestCase(unittest.TestCase):
def setUp(self):
self.queries = settings.DATA_QUERIES
def test_table_status_code(self):
for i in self.queries:
response = requests.get(self.queries[i]['URL'])
self.assertEqual(200, response.status_code)
def test_query_status_code(self):
for i in self.queries:
with open(self.queries[i]['JSON_QUERY']) as file:
json_query = json.load(file)
response = requests.post(self.queries[i]['URL'], json=json_query)
self.assertEqual(200, response.status_code)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "Jhiggin/DPS_Python_ETL",
"score": 3
} |
#### File: DPS_Python_ETL/Classes/transforms.py
```python
import pandas as pd
class transforms:
def __init__(self, dataset, param0 = '', param1 = '', param2 = ''):
self.dataset = dataset
self.param0 = param0
self.param1 = param1
self.param2 = param2
def split_columns(self):
df = self.dataset[self.param0].str.split(self.param1, expand=True)
return df
def transform_state(self):
df = pd.merge(self.dataset, self.param0, on=self.param1, how='inner')
return df
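# Added usage sketch: the DataFrame, column name and delimiter below are illustrative.
if __name__ == "__main__":
    frame = pd.DataFrame({"full_name": ["Ada Lovelace", "Alan Turing"]})
    split = transforms(frame, param0="full_name", param1=" ").split_columns()
    print(split)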
``` |
{
"source": "Jhiggin/Python_CICD",
"score": 4
} |
#### File: Python_CICD/functions/transformations.py
```python
from datetime import date
def create_fullName(firstname, lastname, middlename = ""):
if (middlename != ""):
full_name = firstname + " " + middlename + " " + lastname
else:
full_name = firstname + " " + lastname
return full_name
def calculate_age(DateOfBirth):
if (DateOfBirth != ""):
today = date.today()
age = today.year - DateOfBirth.year - ((today.month, today.day) < (DateOfBirth.month, DateOfBirth.day))
return age
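# Added usage sketch: calculate_age expects a datetime.date, so the birth date is
# constructed explicitly; the names are illustrative.
if __name__ == "__main__":
    print(create_fullName("Grace", "Hopper", "Brewster"))
    print(calculate_age(date(1906, 12, 9)))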
``` |
{
"source": "jhiggins-NZ/genielibs",
"score": 2
} |
#### File: iosxr/platform/execute.py
```python
import logging
# Genie
from genie.utils.timeout import Timeout
from genie.harness.utils import connect_device
from genie.metaparser.util.exceptions import SchemaEmptyParserError
# Logger
log = logging.getLogger(__name__)
def execute_install_pie(device, image_dir, image, server=None,
prompt_level="none", synchronous=True, install_timeout=600, _install=True):
''' Installs and activates given IOSXR pie on device
Args:
device (`obj`): Device object
image_dir (`str`): Directory where pie file is located in
image (`str`): Pie file name
server(`str`): Hostname or IP address of server to use for install command
Default None (Optional - uses testbed YAML reverse lookup for protocol server)
prompt_level(`str`): Prompt-level argument for install command
Default 'none' (Optional)
synchronous (`bool`): Synchronous option for install command
Default True (Optional)
install_timeout (`int`): Maximum time required for install command to complete
Default 600 seconds (Optional)
_install (`bool`): True to install, False to uninstall.
Not meant to be changed manually.
Raises:
Exception
'''
# Verify prompt_level type is correct
assert prompt_level in ['none', 'all']
# Get protocol and address from testbed YAML
protocol = 'tftp'
if not server:
if not hasattr(device.testbed, 'servers'):
raise Exception("Server not provided and testbed YAML is missing "
"servers block section")
else:
if not device.testbed.servers.get(protocol, {}).get('address', {}):
raise Exception("Unable to find valid {} server within testbed "
"YAML servers block".format(protocol))
server = device.testbed.servers.get(protocol, {}).address
# Build 'install' command
if _install:
cmd = "install add source {protocol}://{server}/{image_dir} {image} activate".\
format(protocol=protocol, server=server, image_dir=image_dir,
image=image)
else:
cmd = "install deactivate {image}".format(image=image)
if prompt_level:
cmd += " prompt-level {}".format(prompt_level)
if synchronous:
cmd += " synchronous"
elif not synchronous:
cmd += " asynchronous"
# Execute command
try:
device.admin_execute(cmd, timeout=install_timeout)
except Exception as e:
log.error(str(e))
raise Exception("Error while executing install command for pie {} "
"on device {}".format(image, device.name))
if _install:
log.info("Installed and activated pie {} on device {}".\
format(image, device.name))
else:
log.info("Deactivated pie {} on device {}".format(image, device.name))
def execute_deactivate_pie(device, image, server=None, prompt_level="none",
synchronous=True, install_timeout=600):
''' De-activates given IOSXR pie on device
Args:
device (`obj`): Device object
image (`str`): Pie file name
server(`str`): Hostname or IP address of server to use for install command
Default None (Optional - uses testbed YAML reverse lookup for protocol server)
prompt_level(`str`): Prompt-level argument for install command
Default 'none' (Optional)
synchronous (`bool`): Synchronous option for install command
Default True (Optional)
install_timeout (`int`): Maximum time required for install command to complete
Default 600 seconds (Optional)
Raises:
Exception
'''
execute_install_pie(device, None, image, server, prompt_level,
synchronous, install_timeout, _install=False)
def execute_remove_inactive_pies(device, remove_timeout=300):
''' Removes given IOSXR pie on device
Args:
device (`obj`): Device object
        remove_timeout (`int`): Maximum time to execute command
Default 300 seconds (Optional)
Raises:
Exception
'''
log.info("Removing inactive pies on device {}".format(device.name))
# Execute command to remove pie if uninstall specified
try:
device.admin_execute("install remove inactive", timeout=remove_timeout)
except Exception as e:
log.error(str(e))
raise Exception("Error while removing inactive pies on device {}".\
format(device.name))
else:
log.info("Successfully removed inactive pies on device {}".format(device.name))
```
#### File: iosxr/platform/verify.py
```python
import logging
# Genie
from genie.utils.timeout import Timeout
# Logger
log = logging.getLogger(__name__)
def verify_module_serial_num(device, module, expected_serial_num,
max_time=60, check_interval=20):
''' Verify module serial number is matched with expected number
Args:
device (`obj`): Device object
module (`str`): Module name
expected_serial_num (`str`): Expected serial number
max_time (`int`): Max time
check_interval (`int`): Check interval
Returns:
result (`bool`): Verified result
'''
timeout = Timeout(max_time, check_interval)
while timeout.iterate():
try:
sn = device.api.get_module_info(module, key='sn')
except Exception as e:
log.error(e)
timeout.sleep()
continue
log.info("Module {} serial number is {}, expected value is {}"
.format(module, sn, expected_serial_num))
if sn == expected_serial_num:
return True
timeout.sleep()
return False
def verify_installed_pies(device, installed_packages, max_time=300,
check_interval=60):
    ''' Verify given pies are installed and present under 'Active Packages'
Args:
device (`obj`): Device object
installed_packages (`list`): List of packages to verify that exist
max_time (`int`): Maximum time to wait while checking for pies installed
Default 300 seconds (Optional)
check_interval (`int`): Time interval while checking for pies installed
            Default 60 seconds (Optional)
Returns:
result (`bool`): Verified result
'''
timeout = Timeout(max_time, check_interval)
while timeout.iterate():
active_packages = device.api.get_current_active_pies()
# Trim out active_packages
if active_packages:
active_packages = [item.split(":")[1] for item in active_packages]
if set(installed_packages).intersection(active_packages):
log.info("Installed packages {} present under 'Active Packages'".\
format(installed_packages))
return True
log.warning("Installed packages {} *not* present in 'Active Packages'"
"\nRe-checking after {} seconds...".\
format(installed_packages, check_interval))
timeout.sleep()
return False
```
#### File: junos/interface/verify.py
```python
import re
import logging
# Genie
from genie.utils.timeout import Timeout
from genie.metaparser.util.exceptions import SchemaEmptyParserError
log = logging.getLogger(__name__)
def verify_interfaces_terse_state(device, interface, expected_admin_state=None, expected_link_state=None, expected_oper_status=None, max_time=30, check_interval=10, expected_result=True):
""" Verify interfaces terse
Args:
device (`obj`): Device object
interface (`str`): Interface name
expected_admin_state (`str`): Expected admin state for interface
ex.) expected_admin_state = 'up'
expected_link_state (`str`): Expected link state for interface
ex.) expected_link_state = 'down'
expected_oper_status (`str`): Expected oper state for interface
ex.) expected_oper_status = 'up'
Returns:
result (`bool`): Verified result
Raises:
N/A
"""
timeout = Timeout(max_time, check_interval)
interface_terse_out = None
result = True
while timeout.iterate():
try:
if interface:
interface_terse_out = device.parse('show interfaces {interface} terse'.format(
interface=interface))
else:
interface_terse_out = device.parse('show interfaces terse')
result = True
except SchemaEmptyParserError:
log.info('Failed to parse. Device output might contain nothing.')
if not expected_result:
return False
result = False
timeout.sleep()
continue
for intf, intf_dict in interface_terse_out.items():
admin_state = intf_dict.get('admin_state', None)
link_state = intf_dict.get('link_state', None)
oper_status = intf_dict.get('oper_status', None)
enabled = intf_dict.get('enabled', None)
if expected_admin_state and admin_state != expected_admin_state:
result = False
if expected_link_state and link_state != expected_link_state:
result = False
if expected_oper_status and oper_status != expected_oper_status:
result = False
if result == expected_result:
return expected_result
timeout.sleep()
return False
```
#### File: triggers/blitz/yangexec.py
```python
import logging
from time import sleep
from copy import deepcopy
from pyats.log.utils import banner
from .rpcbuilder import YSNetconfRPCBuilder
from .rpcverify import RpcVerify
log = logging.getLogger(__name__)
try:
from ncclient.operations import RaiseMode
except Exception:
log.error('Make sure you have ncclient installed in your virtual env')
lock_retry_errors = ['lock-denied', 'resource-denied', 'in-use']
def try_lock(uut, target, timer=30, sleeptime=1):
"""Tries to lock the datastore to perform edit-config operation.
Attempts to acquire the lock on the datastore. If exception thrown,
retries the lock on the datastore till the specified timer expires.
Helper function to :func:`lock_datastore`.
Args:
        uut (NetconfSession): active session
target (str): Datastore to be locked
timer: lock retry counter.
sleeptime: sleep timer.
Returns:
bool: True if datastore was successfully locked, else False.
"""
for counter in range(1, timer+1):
ret = uut.lock(target=target)
if ret.ok:
return True
retry = False
if ret.error.tag in lock_retry_errors:
retry = True
if not retry:
log.error(banner('ERROR - CANNOT ACQUIRE LOCK - {0}'.format(
ret.error.tag)))
break
elif counter < timer:
log.info("RETRYING LOCK - {0}".format(counter))
sleep(sleeptime)
else:
log.error(
banner('ERROR - LOCKING FAILED. RETRY TIMER EXCEEDED!!!')
)
return False
def netconf_send(uut, rpcs, ds_state, lock=True, lock_retry=40, timeout=30):
"""Handle NETCONF messaging with exceptions caught by pyATS."""
# TODO: handle edit-data and get-data
if not uut.connected:
uut.connect()
result = []
target_locked = False
running_locked = False
for nc_op, kwargs in rpcs:
try:
ret = ''
if nc_op == 'edit-config':
# default to running datastore
target_state = ds_state.get(
kwargs.get('target', 'running'),
[]
)
if lock and 'lock_ok' in target_state:
target_locked = try_lock(
uut, kwargs['target'],
timer=lock_retry
)
ret = uut.edit_config(**kwargs)
if ret.ok and 'commit' in target_state:
if target_locked and 'lock_running' in target_state:
running_locked = try_lock(
uut, 'running', timer=lock_retry
)
ret = uut.commit()
if not ret.ok and ret.error.tag in lock_retry_errors:
# writable-running not advertized but running is locked
running_locked = try_lock(
uut, 'running', timer=lock_retry
)
ret = uut.commit()
if running_locked:
uut.unlock(target='running')
running_locked = False
if running_locked:
uut.unlock(target='running')
running_locked = False
if target_locked:
uut.unlock(target=kwargs['target'])
target_locked = False
elif nc_op == 'commit':
ret = uut.commit()
elif nc_op == 'get-config':
ret = uut.get_config(**kwargs)
elif nc_op == 'get':
ret = uut.get(**kwargs)
elif nc_op == 'rpc':
target = 'running'
rpc = kwargs.get('rpc')
if 'edit-config' in rpc and lock:
if 'candidate/>' in rpc:
target = 'candidate'
target_locked = try_lock(uut, target, timer=lock_retry)
# raw return
reply = uut.request(rpc)
if target_locked:
uut.unlock(target)
target_locked = False
result.append((nc_op, reply))
continue
if ret.ok:
result.append((nc_op, str(ret)))
else:
log.error("NETCONF Reply with error(s):")
for rpcerror in ret.errors:
if rpcerror.message:
log.error("ERROR MESSAGE - {0}".format(
rpcerror.message))
if hasattr(ret, 'xml') and ret.xml is not None:
result.append((nc_op, ret.xml))
except Exception as exe:
msg = str(exe)
e = ''
if target_locked:
try:
uut.unlock(target=kwargs['target'])
except Exception as e:
msg += '\n' + str(e)
target_locked = False
if running_locked:
try:
uut.unlock(target='running')
except Exception as e:
msg += '\n' + str(e)
running_locked = False
result.append(('traceback', msg))
continue
return result
def gen_ncclient_rpc(rpc_data, prefix_type="minimal"):
"""Construct the XML Element(s) needed for the given config dict.
Helper function to :func:`gen_rpc_api`.
Creates lxml Element instances specific to what :mod:`ncclient` is looking
for per netconf protocol operation.
.. note::
Unlike :func:`gen_raw_rpc`, the XML generated here will NOT be declared
to the netconf 1.0 namespace but instead any NETCONF XML elements
will be left un-namespaced.
This is so that :mod:`ncclient` can select the appropriate
namespace (1.0, 1.1, etc.) as needed for the session.
Args:
        rpc_data (dict): Relevant keys - 'operation', 'datastore', 'namespace', 'nodes'.
        prefix_type (str): One of "always" (prefer namespace prefixes) or
            "minimal" (prefer unprefixed namespaces)
Returns:
list: of lists [protocol operation, kwargs], or None
Raises:
ysnetconf.RpcInputError: if cfgd is invalid;
see :meth:`YSNetconfRPCBuilder.get_payload`.
"""
if not rpc_data:
log.warning("No configuration sent for RPC generation")
return None
datastore = rpc_data.get('datastore')
prt_op = rpc_data['operation']
with_defaults = rpc_data.get('with-defaults', '')
# Add prefixes for all NETCONF containers
rpcbuilder = YSNetconfRPCBuilder(prefix_namespaces="always")
container = None
if prt_op == 'edit-config':
container = rpcbuilder.netconf_element('config')
elif prt_op == 'get-config':
container = rpcbuilder.netconf_element('filter')
elif prt_op == 'get':
container = rpcbuilder.netconf_element('filter')
elif prt_op == 'action':
container = rpcbuilder.yang_element('action')
elif prt_op == 'get-data':
container = rpcbuilder.get_data('get-data')
elif prt_op == 'edit-data':
container = rpcbuilder.edit_data('edit-data')
else:
container = rpcbuilder.netconf_element('TEMPORARY')
# Now create the builder for the payload
rpcbuilder = YSNetconfRPCBuilder(
prefix_namespaces=prefix_type,
nsmap=rpc_data.get('namespace', {}),
netconf_ns=None
)
# XML so all the values must be string or bytes type
nodes = []
for node in rpc_data.get('nodes', []):
if 'value' in node:
node['value'] = str(node.get('value', ''))
nodes.append(node)
rpcbuilder.get_payload(nodes, container)
kwargs = {}
if prt_op == "rpc":
# The outer container is temporary - the child element(s) created
# should be the actual raw RPC(s), which is what we want to return
return [[prt_op, {'rpc_command': elem}] for elem in container]
if prt_op == 'edit-config':
kwargs['target'] = datastore
if len(container):
kwargs['config'] = container
elif prt_op == 'get-config':
kwargs['source'] = datastore
if len(container):
kwargs['filter'] = container
if with_defaults:
kwargs['with_defaults'] = with_defaults
elif prt_op == 'get':
if len(container):
kwargs['filter'] = container
if with_defaults:
kwargs['with_defaults'] = with_defaults
elif prt_op in ['get-data', 'edit-data', 'action']:
kwargs['rpc_command'] = container
return prt_op, kwargs
def get_datastore_state(target, device):
"""Apply datastore rules according to device and desired datastore.
- If no target is passed in and device has candidate, choose candidate.
- If candidate is chosen, allow commit.
- If candidate is chosen and writable-running exists, allow lock on running
prior to commit.
- If running, allow lock, no commit.
- If startup, allow lock, no commit.
- If intent, no lock, no commit.
- If operational, no lock, no commit.
- Default: running
Args:
target (str): Target datastore for YANG interaction.
device (rpcverify.RpcVerify): Class containing runtime capabilities.
Returns:
(tuple): Target datastore (str): assigned according to capabilities
Datastore state (dict):
commit - can apply a commit to datastore
lock_ok - can apply a lock to datastore
lock_running - apply lock to running datastore prior to commit
"""
target_state = {}
for store in device.datastore:
if store == 'candidate':
if not target:
target = 'candidate'
target_state['candidate'] = ['commit', 'lock_ok']
if 'running' in target_state:
target_state['candidate'].append('lock_running')
continue
if store == 'running':
if 'candidate' in target_state:
target_state['candidate'].append('lock_running')
target_state['running'] = ['lock_ok']
continue
if store == 'startup':
target_state['startup'] = ['lock_ok']
continue
if store == 'intent':
# read only
target_state['intent'] = []
continue
if store == 'operational':
# read only
target_state['operational'] = []
continue
if not target:
target = 'running'
return target, target_state
def in_capabilities(caps, returns={}):
"""Find capabilities in expected returns."""
result = True
if returns:
includes = returns.get('includes', [])
excludes = returns.get('excludes', [])
if includes and isinstance(includes, (bytes, str)):
includes = [includes]
if excludes and isinstance(excludes, (bytes, str)):
excludes = [excludes]
include_caps = _included_excluded(caps, includes)
exclude_caps = _included_excluded(caps, excludes)
if not include_caps or excludes and exclude_caps:
result = False
return result
def _included_excluded(caps, returns=[]):
result = True
for item in returns:
if item not in caps:
if isinstance(caps, list):
log.warning("{0} not in capabilities".format(
item
))
result = False
continue
log.warning("{0} not in capabilities {1}".format(
item, caps.keys()
))
result = False
elif isinstance(caps, list):
log.info("{0} in capabilities".format(
item
))
continue
elif isinstance(returns[item], (bytes, str)):
if returns[item] != caps[item]:
log.warning("{0} != {1} in capabilities".format(
item, returns[item]
))
result = False
else:
log.info("{0} == {1} in capabilities".format(
item, returns[item]
))
elif isinstance(returns[item], list):
for value in returns[item]:
if value in caps[item]:
log.info("{0}: {1} in capabilities".format(
item, value
))
else:
log.warning("{0}: {1} not in capabilities".format(
item, value
))
result = False
return result
def run_netconf(operation, device, steps, datastore, rpc_data, returns, **kwargs):
"""Form NETCONF message and send to testbed."""
log.debug('NETCONF MESSAGE')
try:
device.raise_mode = RaiseMode.NONE
except NameError:
log.error('Make sure you have ncclient installed in your virtual env')
return
if operation == 'capabilities':
if not returns:
log.error(banner('No NETCONF data to compare capability.'))
return False
return in_capabilities(
list(device.server_capabilities),
returns
)
rpc_verify = RpcVerify(
log=log,
capabilities=list(device.server_capabilities)
)
if not rpc_data:
log.error('NETCONF message data not present')
return False
if not datastore:
log.warning('"datastore" variables not set so choosing:\n'
'datastore:\n type: running\n lock: True\n retry: 10\n')
datastore = {}
ds = datastore.get('type', '')
lock = datastore.get('lock', True)
retry = datastore.get('retry', 10)
actual_ds, ds_state = get_datastore_state(ds, rpc_verify)
if not ds:
log.info('USING DEVICE DATASTORE: {0}'.format(actual_ds))
ds = actual_ds
else:
log.info('USING TEST DATASTORE: {0}'.format(ds))
rpc_data['datastore'] = ds
rpc_data['operation'] = operation
if operation == 'rpc':
# Custom RPC represented in raw string form
result = netconf_send(
device,
[('rpc', {'rpc': rpc_data['rpc']})],
ds_state,
lock=lock,
lock_retry=retry
)
else:
prt_op, kwargs = gen_ncclient_rpc(rpc_data)
result = netconf_send(
device,
[(prt_op, kwargs)],
ds_state,
lock=lock,
lock_retry=retry
)
# rpc-reply should show up in NETCONF log
if not result:
        log.error(banner('NETCONF rpc-reply NOT RECEIVED'))
return False
errors = []
for op, res in result:
if '<rpc-error>' in res:
errors.append(res)
elif op == 'traceback':
errors.append(res)
if errors:
log.error(
banner('NETCONF MESSAGE ERRORED\n{0}'.format('\n'.join(errors)))
)
return False
if rpc_data['operation'] == 'edit-config':
# Verify the get-config TODO: what do we do with custom rpc's?
rpc_clone = deepcopy(rpc_data)
rpc_clone['operation'] = 'get-config'
rpc_clone['datastore'] = 'running'
for node in rpc_clone.get('nodes'):
node.pop('value', '')
node.pop('edit-op', '')
prt_op, kwargs = gen_ncclient_rpc(rpc_clone)
resp_xml = netconf_send(
device,
[(prt_op, kwargs)],
ds_state,
lock=False
)
resp_elements = rpc_verify.process_rpc_reply(resp_xml)
return rpc_verify.verify_rpc_data_reply(resp_elements, rpc_data)
elif rpc_data['operation'] in ['get', 'get-config']:
if not returns:
log.error(banner('No NETCONF data to compare rpc-reply to.'))
return False
# should be just one result
if len(result) >= 1:
op, resp_xml = result[0]
resp_elements = rpc_verify.process_rpc_reply(resp_xml)
return rpc_verify.process_operational_state(
resp_elements, returns
)
else:
log.error(banner('NO XML RESPONSE'))
return False
elif rpc_data['operation'] == 'edit-data':
# TODO: get-data return may not be relevent depending on datastore
log.debug('Use "get-data" yang action to verify this "edit-data".')
elif rpc_data['operation'] == 'subscribe':
log.info(banner('Subscribed to {0}'.format('TODO: device name')))
return True
def run_gnmi(operation, device, steps,
datastore, rpc_data, returns, **kwargs):
"""Form gNMI message and send to testbed."""
log.debug('gNMI MESSAGE')
result = True
rpc_verify = RpcVerify(log=log, capabilities=[])
if operation == 'edit-config':
result = device.set(rpc_data)
elif operation == 'get':
if not returns:
log.error(banner('No gNMI data to compare to GET'))
return False
response = device.get(rpc_data)
if not response:
return False
for resp in response:
update = resp.get('update')
if not update:
result = False
continue
if not rpc_verify.process_operational_state(update, returns):
result = False
return result
elif operation == 'get-config':
response = device.get_config(rpc_data)
deletes = False
updates = False
result = True
for resp in response:
if 'update' in resp:
updates = True
if not rpc_verify.process_operational_state(
resp['update'], returns):
result = False
if 'delete' in resp:
deletes = True
if not updates and deletes:
log.info('All configs were deleted')
return True
return result
elif operation == 'subscribe':
format = kwargs.get('format', {})
rpc_data['format'] = format
if format.get('request_mode', 'STREAM') == 'ONCE':
response = device.subscribe(rpc_data)
else:
rpc_data['returns'] = returns
rpc_data['verifier'] = rpc_verify.process_operational_state
return device.subscribe(rpc_data)
elif operation == 'capabilities':
if not returns:
log.error(banner('No gNMI data to compare to GET'))
return False
resp = device.capabilities()
result = in_capabilities(resp, returns)
else:
log.warning(banner('OPERATION: {0} not allowed'.format(operation)))
return result
def notify_wait(steps, device):
if hasattr(device, 'notify_wait'):
return device.notify_wait(steps)
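# Added illustration: in_capabilities() can be exercised with a plain list of
# capability strings; the URNs below are examples only.
def _example_in_capabilities():
    caps = [
        'urn:ietf:params:netconf:capability:candidate:1.0',
        'urn:ietf:params:netconf:capability:writable-running:1.0',
    ]
    return in_capabilities(caps, {
        'includes': ['urn:ietf:params:netconf:capability:candidate:1.0'],
        'excludes': ['urn:ietf:params:netconf:capability:startup:1.0'],
    })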
``` |
{
"source": "jhihruei/sqlakeyset",
"score": 2
} |
#### File: sqlakeyset/sqlakeyset/columns.py
```python
from __future__ import (absolute_import, division, print_function, unicode_literals)
import sys
from copy import copy
from sqlalchemy import asc, column
from sqlalchemy.sql.expression import UnaryExpression
from sqlalchemy.sql.operators import asc_op, desc_op
PY2 = sys.version_info.major <= 2
if not PY2:
unicode = str
def parse_clause(clause):
return [OC(c) for c in clause]
class OC(object):
def __init__(self, x):
if isinstance(x, unicode):
x = column(x)
if not isinstance(x, UnaryExpression):
x = asc(x)
self.uo = x
self.full_name = str(self.element)
try:
table_name, name = self.full_name.split('.', 1)
except ValueError:
table_name = None
name = self.full_name
self.table_name = table_name
self.name = name
@property
def quoted_full_name(self):
return str(self).split()[0]
@property
def element(self):
x = self.uo
while isinstance(x, UnaryExpression):
x = x.element
return x
@property
def is_ascending(self):
x = self.uo
while isinstance(x, UnaryExpression):
if x.modifier in (asc_op, desc_op):
return x.modifier == asc_op
else:
x = x.element
raise ValueError # pragma: no cover
@property
def reversed(self):
x = copied = copy(self.uo)
while isinstance(x, UnaryExpression):
if x.modifier in (asc_op, desc_op):
if x.modifier == asc_op:
x.modifier = desc_op
else:
x.modifier = asc_op
return OC(copied)
else:
x = x.element
raise ValueError # pragma: no cover
def __str__(self):
return str(self.uo)
def __repr__(self):
return '<OC: {}>'.format(str(self))
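# Added usage sketch: OC normalizes plain column names (or SQLAlchemy expressions)
# into ordered expressions that can be inspected and reversed.
if __name__ == '__main__':
    oc = OC('published_at')
    print(oc.full_name, oc.is_ascending)  # published_at True
    print(oc.reversed)                    # published_at DESC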
```
#### File: sqlakeyset/serial/serial.py
```python
from __future__ import unicode_literals
import decimal
import datetime
import base64
import uuid
import dateutil.parser
from .compat import csvreader, csvwriter, sio, text_type, binary_type
NONE = 'x'
TRUE = 'true'
FALSE = 'false'
STRING = 's'
BINARY = 'b'
INTEGER = 'i'
FLOAT = 'f'
DECIMAL = 'n'
DATE = 'd'
DATETIME = 'dt'
TIME = 't'
UUID = 'uuid'
class Serial(object):
def __init__(self, *args, **kwargs):
self.kwargs = kwargs
self.custom_serializations = {}
self.custom_unserializations = {}
def split(self, joined):
s = sio(joined)
r = csvreader(s, **self.kwargs)
row = next(r)
return row
def join(self, string_list):
s = sio()
w = csvwriter(s, **self.kwargs)
w.writerow(string_list)
return s.getvalue()
def serialize_values(self, values):
if values is None:
return ''
return self.join(self.serialize_value(_) for _ in values)
def unserialize_values(self, s):
if s == '':
return None
return [self.unserialize_value(_) for _ in self.split(s)]
def serialize_value(self, x):
if x is None:
return NONE
elif x is True:
return TRUE
elif x is False:
return FALSE
t = type(x)
if t in self.custom_serializations:
c, x = self.custom_serializations[t](x)
elif t == text_type:
c = STRING
elif t == binary_type:
c = BINARY
x = base64.b64encode(x).decode('utf-8')
elif t == int:
c = INTEGER
elif t == float:
c = FLOAT
elif t == decimal.Decimal:
c = DECIMAL
elif t == datetime.date:
c = DATE
elif t == datetime.datetime:
c = DATETIME
elif t == datetime.time:
c = TIME
elif t == uuid.UUID:
c = UUID
else:
raise NotImplementedError(
"don't know how to serialize type of {} ({})".format(x, type(x)))
return '{}:{}'.format(c, x)
def unserialize_value(self, x):
try:
c, v = x.split(':', 1)
except ValueError:
c = x
v = None
if c in self.custom_unserializations:
return self.custom_unserializations[c](v)
elif c == NONE:
return None
elif c == TRUE:
return True
elif c == FALSE:
return False
elif c == STRING:
pass
elif c == BINARY:
v = base64.b64decode(v.encode('utf-8'))
elif c == INTEGER:
v = int(v)
elif c == FLOAT:
v = float(v)
elif c == DECIMAL:
v = decimal.Decimal(v)
elif c == DATE:
v = dateutil.parser.parse(v)
v = v.date()
elif c == DATETIME:
v = dateutil.parser.parse(v)
elif c == UUID:
v = uuid.UUID(v)
else:
raise ValueError('unrecognized value {}'.format(x))
return v
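# Added usage sketch: each value is tagged with a short type code so it can be
# round-tripped; serialize_values/unserialize_values additionally CSV-join rows.
if __name__ == '__main__':
    s = Serial()
    token = s.serialize_value(datetime.date(2020, 1, 31))
    print(token)                       # d:2020-01-31
    print(s.unserialize_value(token))  # 2020-01-31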
``` |
{
"source": "jhihwei/eshop",
"score": 2
} |
#### File: eshop/payment/ecpay_payment.py
```python
import hashlib
from urllib import parse
import collections
from dotenv import load_dotenv
import os
from datetime import datetime
import importlib.util
spec = importlib.util.spec_from_file_location(
"ecpay_payment_sdk",
"payment/ecpay_payment_sdk.py"
)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
load_dotenv()
params = {}
if os.getenv('web_type') == 'offical':
    # Production environment
params = {
'MerchantID': 'ID隱藏',
'HashKey': 'Key 隱藏',
'HashIV': 'IV 隱藏',
'action_url':
'https://payment.ecpay.com.tw/Cashier/AioCheckOut/V5'
}
else:
    # Test environment
params = {
'MerchantID': '2000132',
'HashKey': '<KEY>',
'HashIV': 'v77hoKGq4kWxNNIS',
'action_url': 'https://payment-stage.ecpay.com.tw/Cashier/AioCheckOut/V5'
}
def get_mac_value(get_request_form):
    form_params = dict(get_request_form)
    if form_params.get('CheckMacValue'):
        form_params.pop('CheckMacValue')
    ordered_params = collections.OrderedDict(
        sorted(form_params.items(), key=lambda k: k[0].lower()))
    # Read the merchant HashKey/HashIV from the module-level settings above
    HashKey = params['HashKey']
    HashIV = params['HashIV']
    encoding_lst = []
    encoding_lst.append('HashKey=%s&' % HashKey)
encoding_lst.append(''.join([
'{}={}&'.format(key, value)
for key, value in ordered_params.items()
]))
encoding_lst.append('HashIV=%s' % HashIV)
safe_characters = '-_.!*()'
encoding_str = ''.join(encoding_lst)
encoding_str = parse.quote_plus(str(encoding_str),
safe=safe_characters).lower()
check_mac_value = ''
check_mac_value = hashlib.sha256(
encoding_str.encode('utf-8')).hexdigest().upper()
return check_mac_value
def process(order_params, inv_params):
    # Create the SDK instance
ecpay_payment_sdk = module.ECPayPaymentSdk(
MerchantID='2000132',
HashKey='<KEY>',
HashIV='v77hoKGq4kWxNNIS'
)
    # Merge extended parameters
# order_params.update(extend_params_1)
# order_params.update(extend_params_2)
# order_params.update(extend_params_3)
# order_params.update(extend_params_4)
    # Merge invoice parameters
order_params.update(inv_params)
try:
        # Generate the parameters required for the ECPay order
final_order_params = ecpay_payment_sdk.create_order(order_params)
        # Generate the HTML form
        action_url = 'https://payment-stage.ecpay.com.tw/Cashier/AioCheckOut/V5'  # Test environment
        # action_url = 'https://payment.ecpay.com.tw/Cashier/AioCheckOut/V5'  # Production environment
html = ecpay_payment_sdk.gen_html_post_form(
action_url, final_order_params)
return html
except Exception as error:
print('An exception happened: ' + str(error))
```
#### File: eshop/payment/models.py
```python
from django.db import models
from orders.models import Order
class Ecpay_Payment(models.Model):
MerchantID = models.CharField(null=False, verbose_name='特店編號', max_length=30)
MerchantTradeNo = models.CharField(null=False, verbose_name='特店交易編號', max_length=30)
RtnCode = models.IntegerField(null=False, verbose_name='交易狀態')
TradeNo = models.CharField(null=False, verbose_name='綠界交易編號', max_length=30)
TradeAmt = models.IntegerField(null=False, verbose_name='交易金額')
PaymentDate = models.DateTimeField()
order = models.ForeignKey(Order, related_name='order', on_delete=models.CASCADE, default=0)
class Meta:
verbose_name = '綠界訂單'
verbose_name_plural = verbose_name
def __str__(self):
        return str(self.id)
```
#### File: eshop/payment/payment.py
```python
from .models import Ecpay_Payment
from .forms import Ecpay_Return_Form
def save_ecpay_return(form_data):
form = Ecpay_Return_Form(form_data)
if form.is_valid():
result = form.save()
else:
print(form.errors)
return form.errors
```
#### File: eshop/shop/tools.py
```python
from orders.models import OrderItem
def get_product_string(order_item:OrderItem):
products = [i.product.name+"#" for i in order_item]
return "".join(products)
```
#### File: eshop/shop/views.py
```python
from django.shortcuts import render, get_object_or_404
from cart.forms import CartAddProductForm
from .models import Category, Page_Content, Product
from vip.models import VIP_User
from django.views.decorators.csrf import csrf_exempt
def category_list(request, category_slug=None):
category = None
categories = Category.objects.all()
products = Product.objects.filter(available=True)
if category_slug:
category = get_object_or_404(Category, slug=category_slug)
products = products.filter(category=category)
return render(request,
'shop_v2/category/list.html',
{'category': category,
'categories': categories,
'products': products})
@csrf_exempt
def product_list(request, category_slug='enzyme', vip_class='z', ):
products = Product.objects.filter(available=True)
category = get_object_or_404(Category, slug=category_slug)
products = products.filter(category=category)
if request.method == 'POST':
print(request.POST.get('line_id'))
vip_user = VIP_User.objects.filter(line_id = request.POST.get('line_id')).exists()
if vip_user:
for i, p in enumerate(products):
products[i].price = int(products[i].price) *0.2
return render(request, 'shop_v2/category/product/list.html',
{'products': products})
def vip(request):
return render(request, 'shop_v2/vip.html')
def vip_product_list(request, category_slug='enzyme', vip_class='a'):
products = Product.objects.filter(available=True)
category = get_object_or_404(Category, slug=category_slug)
products = products.filter(category=category)
for i, p in enumerate(products):
products[i].price = int(products[i].price) *0.2
return render(request, 'shop_v2/category/product/list.html',
{'products': products,
'discount': 0.3})
def product_detail(request, product_id, slug, vip_class='z'):
product = get_object_or_404(Product,
id=product_id,
slug=slug,
available=True)
    if vip_class != 'z':
product.price = int(product.price) *0.2
cart_product_form = CartAddProductForm()
return render(request,
'shop_v2/category/product/detail.html',
{'product': product,
'cart_product_form': cart_product_form})
def page_content(request, slug=1):
page_content = get_object_or_404(Page_Content, slug=slug)
return render(request,
'shop_v2/page_content.html',
{'page_content': page_content})
``` |
{
"source": "jhildreth/falcon-jwt-checker",
"score": 3
} |
#### File: falcon-jwt-checker/falcon_jwt_checker/falcon_jwt_checker.py
```python
from falcon import HTTPUnauthorized
import jwt
class JwtChecker:
"""A middleware for the Falcon web framework.
It will verify that a valid, signed jwt is present on requests to any
resource, except for the specified exempt routes and the specified
exempt methods.
"""
def __init__(self, secret='', algorithm='', exempt_routes=None,
exempt_methods=None, issuer=None, audience=None, leeway=0):
"""Set up the JwtChecker, including the expected secret,
algorithm, audience, and any exempted routes and exempted methods
for which a jwt shall not be required.
"""
self.secret = secret
self.algorithm = algorithm
self.exempt_routes = exempt_routes or []
self.exempt_methods = exempt_methods or []
self.issuer = issuer
self.audience = audience
self.leeway = leeway
algorithms = [
'HS256', 'HS384', 'HS512',
'ES256', 'ES384', 'ES512',
'RS256', 'RS384', 'RS512',
'PS256', 'PS384', 'PS512'
]
if self.algorithm not in algorithms:
raise RuntimeError('Unsupported algorithm')
def process_resource(self, req, resp, resource, params):
"""If this is not an exempt route or exempt method, verify that
a valid, signed jwt is present.
"""
if req.path in self.exempt_routes or req.method in self.exempt_methods:
return
token = req.headers.get('AUTHORIZATION', '').partition('Bearer ')[2]
try:
claims = jwt.decode(token,
key=self.secret,
issuer=self.issuer,
audience=self.audience,
leeway=self.leeway)
params['jwt_claims'] = {}
for claim in claims:
params['jwt_claims'][claim] = claims[claim]
except jwt.InvalidTokenError as err:
raise HTTPUnauthorized('Authentication Required',
'Please provide a valid auth token.',
None)
``` |
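A minimal wiring sketch, assuming the package exposes `JwtChecker` at the top level; the resource, secret and routes below are purely illustrative (Falcon 3 renames `falcon.API` to `falcon.App`).
```python
# Hypothetical usage; resource, secret and routes are illustrative only.
import falcon
from falcon_jwt_checker import JwtChecker  # assumed top-level export

class ThingsResource:
    def on_get(self, req, resp, jwt_claims=None):
        # jwt_claims is injected by JwtChecker.process_resource on protected routes
        resp.media = {'user': jwt_claims.get('sub') if jwt_claims else None}

jwt_checker = JwtChecker(
    secret='example-secret',      # assumed HS256 shared secret
    algorithm='HS256',
    exempt_routes=['/health'],    # no token required here
    exempt_methods=['OPTIONS'],
)

app = falcon.API(middleware=[jwt_checker])  # falcon.App(...) on Falcon 3
app.add_route('/things', ThingsResource())
```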
{
"source": "jhill1/thetis",
"score": 3
} |
#### File: examples/sediment_meander_2d/meander_example.py
```python
from thetis import *
# import bathymetry and mesh for meander
from meander_setup import *
# Note it is necessary to run meander_hydro first to get the hydrodynamics simulation
def update_forcings_bnd(t_new):
if t_new != t_old.dat.data[:]:
# update boundary condtions
if t_new*morfac <= 6000:
elev_constant.assign(gradient_elev*t_new*morfac + elev_init_const)
flux_constant.assign((gradient_flux*t_new*morfac) - 0.02)
else:
flux_constant.assign((gradient_flux2*(t_new*morfac-6000)) - 0.053)
elev_constant.assign(gradient_elev2*(t_new*morfac-18000) + elev_init_const)
t_old.assign(t_new)
t_old = Constant(0.0)
# define function spaces
DG_2d = FunctionSpace(mesh2d, 'DG', 1)
vector_dg = VectorFunctionSpace(mesh2d, 'DG', 1)
# choose directory to output results
outputdir = 'outputs'
morfac = 50
dt = 2
end_time = 5*3600
viscosity_hydro = Constant(5*10**(-2))
if os.getenv('THETIS_REGRESSION_TEST') is not None:
# the example is being run as a test
# run the spin-up by importing it
import meander_hydro # NOQA
end_time = 1800.
# initialise velocity and elevation
with DumbCheckpoint("hydrodynamics_meander/elevation", mode=FILE_READ) as chk:
elev = Function(DG_2d, name="elevation")
chk.load(elev)
chk.close()
with DumbCheckpoint('hydrodynamics_meander/velocity', mode=FILE_READ) as chk:
uv = Function(vector_dg, name="velocity")
chk.load(uv)
chk.close()
# set up solver
solver_obj = solver2d.FlowSolver2d(mesh2d, bathymetry_2d)
options = solver_obj.options
# this test case only uses bedload transport but using all slope effect corrections and secondary current
options.sediment_model_options.solve_suspended_sediment = False
options.sediment_model_options.use_bedload = True
options.sediment_model_options.solve_exner = True
options.sediment_model_options.use_angle_correction = True
options.sediment_model_options.use_slope_mag_correction = True
options.sediment_model_options.use_secondary_current = True
options.sediment_model_options.use_advective_velocity_correction = False
options.sediment_model_options.morphological_viscosity = Constant(1e-6)
options.sediment_model_options.average_sediment_size = Constant(10**(-3))
options.sediment_model_options.bed_reference_height = Constant(0.003)
options.sediment_model_options.morphological_acceleration_factor = Constant(morfac)
options.simulation_end_time = end_time/morfac
options.simulation_export_time = options.simulation_end_time/45
options.output_directory = outputdir
options.check_volume_conservation_2d = True
options.fields_to_export = ['uv_2d', 'elev_2d', 'bathymetry_2d']
# using nikuradse friction
options.nikuradse_bed_roughness = Constant(3*options.sediment_model_options.average_sediment_size)
# set horizontal viscosity parameter
options.horizontal_viscosity = Constant(viscosity_hydro)
# crank-nicholson used to integrate in time system of ODEs resulting from application of galerkin FEM
options.timestepper_type = 'CrankNicolson'
options.timestepper_options.implicitness_theta = 1.0
if not hasattr(options.timestepper_options, 'use_automatic_timestep'):
options.timestep = dt
left_bnd_id = 1
right_bnd_id = 2
# set boundary conditions
gradient_flux = (-0.053 + 0.02)/6000
gradient_flux2 = (-0.02+0.053)/(18000-6000)
gradient_elev = (10.04414-9.9955)/6000
gradient_elev2 = (9.9955-10.04414)/(18000-6000)
elev_init_const = (-max(bathymetry_2d.dat.data[:]) + 0.05436)
flux_constant = Constant(-0.02)
elev_constant = Constant(elev_init_const)
swe_bnd = {}
swe_bnd[3] = {'un': Constant(0.0)}
swe_bnd[left_bnd_id] = {'flux': flux_constant}
swe_bnd[right_bnd_id] = {'elev': elev_constant}
solver_obj.bnd_functions['shallow_water'] = swe_bnd
# set initial conditions
solver_obj.assign_initial_conditions(uv=uv, elev=elev)
# run model
solver_obj.iterate(update_forcings=update_forcings_bnd)
```
#### File: test/tracerEq/test_bcs_2d.py
```python
from thetis import *
import pytest
def fourier_series_solution(mesh, lx, diff_flux, **model_options):
r"""
Consider a diffusion problem with a inhomogeneous Neumann condition and zero initial condition:
.. math::
c_t = \nu c_{xx}, c_x(0, t) = D, c_x(l, t) = 0, c(x, 0) = 0
where :math:`D` is the diffusive flux boundary condition imposed, `diff_flux`.
In order to solve it analytically, we decompose it into two diffusion problems:
- a diffusion problem with homogeneous Neumann conditions and a nonzero initial condition,
.. math::
z_t = \nu z_{xx}, z_x(0, t) = 0, z_x(l, t) = 0, z(x, 0) = -I;
- and a diffusion problem with homogeneous Neumann conditions and a nonzero source term,
.. math::
w_t = \nu w_{xx} + S, w_x(0, t) = 0, w_x(l, t) = 0, w(x, 0) = 0.
Here :math:`I = I(x)` is set as :math:`I(x) = \alpha(x) D`, where
:math:`\alpha(x) = -\frac{(l - x)^2}{2l}` and :math:`l` is the horizontal length of the domain.
The source term :math:`S = S(x, t)` is given by :math:`S = I_t - \nu I_xx = -\nu\frac Dl`.
Solving the inhomogeneous problem amounts to summing the solutions of the homogeneous problems
and subtracting :math:`I`.
The exact solution takes the form of a Fourier series, which we truncate appropriately.
"""
x, y = SpatialCoordinate(mesh)
nu = model_options['horizontal_diffusivity']
time = model_options['simulation_end_time']
# Initial condition and source term for two homogeneous Neumann problems
P1 = get_functionspace(mesh, 'CG', 1)
ic = Function(P1).interpolate(diff_flux*0.5*(lx - x)*(lx - x)/lx)
source = Constant(-nu*diff_flux/lx, domain=mesh)
# The solution uses truncated Fourier expansions, meaning we need the following...
def phi(n):
return cos(n*pi*x/lx)
def ic_fourier_coeff(n):
return assemble(2/lx*ic*phi(n)*dx)
def source_fourier_coeff(n):
return assemble(2/lx*source*phi(n)*dx)
def source_term(n):
"""
A simple quadrature routine is used to approximate the time integral.
"""
I = 0
tau = 0
dt = 0.05
while tau < time - 0.5*dt:
            I += exp(-nu*(n*pi/lx)**2*(time-tau))
tau += dt
I *= source_fourier_coeff(n)
return I*phi(n)
def ic_term(n):
return ic_fourier_coeff(n)*exp(-nu*(n*pi/lx)**2*time)*phi(n)
# Assemble truncated Fourier expansion
sol = Function(P1, name='Fourier expansion')
num_terms_source = 1 # Only one needed since source is constant
num_terms_ic = 100
expr = Constant(0.5*source_fourier_coeff(0)*time)
expr = expr + Constant(0.5*ic_fourier_coeff(0))
for k in range(1, num_terms_source):
expr = expr + source_term(k)
for k in range(1, num_terms_ic):
expr = expr + ic_term(k)
expr -= ic
sol.interpolate(-expr)
return sol
def run(refinement, **model_options):
# Domain
lx = 10
ly = 1
nx = 40*refinement
ny = 4
mesh2d = RectangleMesh(nx, ny, lx, ly)
depth = 40.0
# Time interval
dt = 0.1/refinement
t_end = 1.0
t_export = 0.1
model_options['simulation_end_time'] = t_end
# Bathymetry
P1_2d = get_functionspace(mesh2d, 'CG', 1)
bathy_2d = Function(P1_2d, name='Bathymetry')
bathy_2d.assign(depth)
# Diffusivity and diffusive flux BC to impose
nu = Constant(0.1)
diff_flux = 0.2
# Solver
solver_obj = solver2d.FlowSolver2d(mesh2d, bathy_2d)
options = solver_obj.options
options.output_directory = 'outputs'
options.no_exports = True
options.timestep = dt
options.simulation_export_time = t_export
options.solve_tracer = True
options.tracer_only = True
options.horizontal_diffusivity = nu
options.horizontal_diffusivity_scale = nu
options.horizontal_velocity_scale = Constant(0.0)
options.fields_to_export = ['tracer_2d']
options.update(model_options)
options.use_limiter_for_tracers = options.tracer_element_family == 'dg'
options.use_supg_tracer = options.tracer_element_family == 'cg'
options.simulation_end_time = t_end - 0.5*dt
# Boundary conditions
solver_obj.bnd_functions['tracer_2d'] = {1: {'diff_flux': diff_flux*nu}}
# NOTE: Zero diff_flux boundaries are enforced elsewhere by default
# Run model
solver_obj.assign_initial_conditions()
solver_obj.iterate()
sol = solver_obj.fields.tracer_2d
# Get truncated Fourier series solution
fsol = fourier_series_solution(mesh2d, lx, diff_flux, **model_options)
if not options.no_exports:
File('outputs/fourier_series_solution.pvd').write(fsol)
return errornorm(sol, fsol)
def run_convergence(**model_options):
errors = []
for refinement in (1, 2, 4):
errors.append(run(refinement, **model_options))
msg = "Wrong convergence rate {:.4f}, expected 2.0000."
slope = errors[0]/errors[1]
assert slope > 2, msg.format(slope)
slope = errors[1]/errors[2]
assert slope > 2, msg.format(slope)
@pytest.fixture(params=['dg', 'cg'])
def family(request):
return request.param
@pytest.fixture(params=[1])
def polynomial_degree(request):
return request.param
@pytest.fixture(params=['CrankNicolson', 'SSPRK33', 'ForwardEuler', 'BackwardEuler', 'DIRK22', 'DIRK33'])
def stepper(request):
return request.param
@pytest.mark.parametrize(('diffusivity'),
[(Constant(0.1))])
def test_horizontal_advection(polynomial_degree, stepper, diffusivity, family):
run_convergence(polynomial_degree=polynomial_degree,
timestepper_type=stepper,
horizontal_diffusivity=diffusivity,
tracer_element_family=family)
if __name__ == '__main__':
run_convergence(polynomial_degree=1,
timestepper_type='CrankNicolson',
horizontal_diffusivity=Constant(0.1),
tracer_element_family='cg',
no_exports=False)
```
#### File: thetis/thetis/tracer_eq.py
```python
r"""
3D advection diffusion equation for tracers.
The advection-diffusion equation of tracer :math:`T` in conservative form reads
.. math::
\frac{\partial T}{\partial t}
+ \nabla_h \cdot (\textbf{u} T)
+ \frac{\partial (w T)}{\partial z}
= \nabla_h \cdot (\mu_h \nabla_h T)
+ \frac{\partial}{\partial z} \Big(\mu \frac{\partial T}{\partial z}\Big)
:label: tracer_eq
where :math:`\nabla_h` denotes horizontal gradient, :math:`\textbf{u}` and
:math:`w` are the horizontal and vertical velocities, respectively, and
:math:`\mu_h` and :math:`\mu` denote horizontal and vertical diffusivity.
"""
from __future__ import absolute_import
from .utility import *
from .equation import Term, Equation
__all__ = [
'TracerEquation',
'TracerTerm',
'HorizontalAdvectionTerm',
'VerticalAdvectionTerm',
'HorizontalDiffusionTerm',
'VerticalDiffusionTerm',
'SourceTerm',
]
class TracerTerm(Term):
"""
Generic tracer term that provides commonly used members and mapping for
boundary functions.
"""
def __init__(self, function_space,
bathymetry=None, v_elem_size=None, h_elem_size=None,
use_symmetric_surf_bnd=True, use_lax_friedrichs=True,
sipg_factor=Constant(1.0),
sipg_factor_vertical=Constant(1.0)):
"""
:arg function_space: :class:`FunctionSpace` where the solution belongs
:kwarg bathymetry: bathymetry of the domain
:type bathymetry: 3D :class:`Function` or :class:`Constant`
:kwarg v_elem_size: scalar :class:`Function` that defines the vertical
element size
:kwarg h_elem_size: scalar :class:`Function` that defines the horizontal
element size
:kwarg bool use_symmetric_surf_bnd: If True, use symmetric surface boundary
condition in the horizontal advection term
:kwarg sipg_factor: :class: `Constant` or :class: `Function` horizontal SIPG penalty scaling factor
:kwarg sipg_factor_vertical: :class: `Constant` or :class: `Function` vertical SIPG penalty scaling factor
"""
super(TracerTerm, self).__init__(function_space)
self.bathymetry = bathymetry
self.h_elem_size = h_elem_size
self.v_elem_size = v_elem_size
continuity = element_continuity(self.function_space.ufl_element())
self.horizontal_dg = continuity.horizontal == 'dg'
self.vertical_dg = continuity.vertical == 'dg'
self.use_symmetric_surf_bnd = use_symmetric_surf_bnd
self.use_lax_friedrichs = use_lax_friedrichs
self.sipg_factor = sipg_factor
self.sipg_factor_vertical = sipg_factor_vertical
# define measures with a reasonable quadrature degree
p, q = self.function_space.ufl_element().degree()
self.quad_degree = (2*p + 1, 2*q + 1)
self.dx = dx(degree=self.quad_degree)
self.dS_h = dS_h(degree=self.quad_degree)
self.dS_v = dS_v(degree=self.quad_degree)
self.ds = ds(degree=self.quad_degree)
self.ds_surf = ds_surf(degree=self.quad_degree)
self.ds_bottom = ds_bottom(degree=self.quad_degree)
def get_bnd_functions(self, c_in, uv_in, elev_in, bnd_id, bnd_conditions):
"""
Returns external values of tracer and uv for all supported
boundary conditions.
Volume flux (flux) and normal velocity (un) are defined positive out of
the domain.
:arg c_in: Internal value of tracer
:arg uv_in: Internal value of horizontal velocity
:arg elev_in: Internal value of elevation
:arg bnd_id: boundary id
:type bnd_id: int
:arg bnd_conditions: dict of boundary conditions:
``{bnd_id: {field: value, ...}, ...}``
"""
funcs = bnd_conditions.get(bnd_id)
if 'elev' in funcs:
elev_ext = funcs['elev']
else:
elev_ext = elev_in
if 'value' in funcs:
c_ext = funcs['value']
else:
c_ext = c_in
if 'uv' in funcs:
uv_ext = funcs['uv']
elif 'flux' in funcs:
assert self.bathymetry is not None
h_ext = elev_ext + self.bathymetry
area = h_ext*self.boundary_len # NOTE using external data only
uv_ext = funcs['flux']/area*self.normal
elif 'un' in funcs:
uv_ext = funcs['un']*self.normal
else:
uv_ext = uv_in
return c_ext, uv_ext, elev_ext
class HorizontalAdvectionTerm(TracerTerm):
r"""
Horizontal advection term :math:`\nabla_h \cdot (\textbf{u} T)`
The weak formulation reads
.. math::
\int_\Omega \nabla_h \cdot (\textbf{u} T) \phi dx
= -\int_\Omega T\textbf{u} \cdot \nabla_h \phi dx
+ \int_{\mathcal{I}_h\cup\mathcal{I}_v}
T^{\text{up}} \text{avg}(\textbf{u}) \cdot
\text{jump}(\phi \textbf{n}_h) dS
where the right hand side has been integrated by parts;
:math:`\mathcal{I}_h,\mathcal{I}_v` denote the set of horizontal and
vertical facets,
:math:`\textbf{n}_h` is the horizontal projection of the unit normal vector,
:math:`T^{\text{up}}` is the upwind value, and :math:`\text{jump}` and
:math:`\text{avg}` denote the jump and average operators across the
interface.
"""
def residual(self, solution, solution_old, fields, fields_old, bnd_conditions=None):
if fields_old.get('uv_3d') is None:
return 0
elev = fields_old['elev_3d']
uv = fields_old['uv_3d']
uv_depth_av = fields_old['uv_depth_av']
if uv_depth_av is not None:
uv = uv + uv_depth_av
# FIXME is this an option?
lax_friedrichs_factor = fields_old.get('lax_friedrichs_tracer_scaling_factor')
f = 0
f += -solution*inner(uv, nabla_grad(self.test))*self.dx
if self.horizontal_dg:
# add interface term
uv_av = avg(uv)
un_av = (uv_av[0]*self.normal('-')[0]
+ uv_av[1]*self.normal('-')[1])
s = 0.5*(sign(un_av) + 1.0)
c_up = solution('-')*s + solution('+')*(1-s)
f += c_up*(uv_av[0]*jump(self.test, self.normal[0])
+ uv_av[1]*jump(self.test, self.normal[1])
+ uv_av[2]*jump(self.test, self.normal[2]))*(self.dS_v)
f += c_up*(uv_av[0]*jump(self.test, self.normal[0])
+ uv_av[1]*jump(self.test, self.normal[1])
+ uv_av[2]*jump(self.test, self.normal[2]))*(self.dS_h)
# Lax-Friedrichs stabilization
if self.use_lax_friedrichs:
gamma = 0.5*abs(un_av)*lax_friedrichs_factor
f += gamma*dot(jump(self.test), jump(solution))*(self.dS_v + self.dS_h)
if bnd_conditions is not None:
for bnd_marker in self.boundary_markers:
funcs = bnd_conditions.get(bnd_marker)
ds_bnd = ds_v(int(bnd_marker), degree=self.quad_degree)
if funcs is None:
continue
else:
c_in = solution
c_ext, uv_ext, eta_ext = self.get_bnd_functions(c_in, uv, elev, bnd_marker, bnd_conditions)
# add interior tracer flux
f += c_in*(uv[0]*self.normal[0]
+ uv[1]*self.normal[1])*self.test*ds_bnd
# add boundary contribution if inflow
uv_av = 0.5*(uv + uv_ext)
un_av = self.normal[0]*uv_av[0] + self.normal[1]*uv_av[1]
s = 0.5*(sign(un_av) + 1.0)
f += (1-s)*(c_ext - c_in)*un_av*self.test*ds_bnd
if self.use_symmetric_surf_bnd:
f += solution*(uv[0]*self.normal[0] + uv[1]*self.normal[1])*self.test*ds_surf
return -f
class VerticalAdvectionTerm(TracerTerm):
r"""
Vertical advection term :math:`\partial (w T)/(\partial z)`
The weak form reads
.. math::
\int_\Omega \frac{\partial (w T)}{\partial z} \phi dx
= - \int_\Omega T w \frac{\partial \phi}{\partial z} dx
+ \int_{\mathcal{I}_v} T^{\text{up}} \text{avg}(w) \text{jump}(\phi n_z) dS
where the right hand side has been integrated by parts;
:math:`\mathcal{I}_v` denotes the set of vertical facets,
:math:`n_z` is the vertical projection of the unit normal vector,
:math:`T^{\text{up}}` is the
upwind value, and :math:`\text{jump}` and :math:`\text{avg}` denote the
jump and average operators across the interface.
In the case of ALE moving mesh we substitute :math:`w` with :math:`w - w_m`,
:math:`w_m` being the mesh velocity.
"""
def residual(self, solution, solution_old, fields, fields_old, bnd_conditions=None):
w = fields_old.get('w')
if w is None:
return 0
w_mesh = fields_old.get('w_mesh')
lax_friedrichs_factor = fields_old.get('lax_friedrichs_tracer_scaling_factor')
vertvelo = w[2]
if w_mesh is not None:
vertvelo = w[2] - w_mesh
f = 0
f += -solution*vertvelo*Dx(self.test, 2)*self.dx
if self.vertical_dg:
w_av = avg(vertvelo)
s = 0.5*(sign(w_av*self.normal[2]('-')) + 1.0)
c_up = solution('-')*s + solution('+')*(1-s)
f += c_up*w_av*jump(self.test, self.normal[2])*self.dS_h
if self.use_lax_friedrichs:
# Lax-Friedrichs
gamma = 0.5*abs(w_av*self.normal('-')[2])*lax_friedrichs_factor
f += gamma*dot(jump(self.test), jump(solution))*self.dS_h
# NOTE Bottom impermeability condition is naturally satisfied by the definition of w
# NOTE imex solver fails with this in tracerBox example
f += solution*vertvelo*self.normal[2]*self.test*self.ds_surf
return -f
class HorizontalDiffusionTerm(TracerTerm):
r"""
Horizontal diffusion term :math:`-\nabla_h \cdot (\mu_h \nabla_h T)`
Using the symmetric interior penalty method the weak form becomes
.. math::
\int_\Omega \nabla_h \cdot (\mu_h \nabla_h T) \phi dx
=& -\int_\Omega \mu_h (\nabla_h \phi) \cdot (\nabla_h T) dx \\
&+ \int_{\mathcal{I}_h\cup\mathcal{I}_v} \text{jump}(\phi \textbf{n}_h)
\cdot \text{avg}(\mu_h \nabla_h T) dS
+ \int_{\mathcal{I}_h\cup\mathcal{I}_v} \text{jump}(T \textbf{n}_h)
\cdot \text{avg}(\mu_h \nabla \phi) dS \\
&- \int_{\mathcal{I}_h\cup\mathcal{I}_v} \sigma \text{avg}(\mu_h) \text{jump}(T \textbf{n}_h) \cdot
\text{jump}(\phi \textbf{n}_h) dS
where :math:`\sigma` is a penalty parameter, see Hillewaert (2013).
    Hillewaert, Koen (2013). Development of the discontinuous Galerkin method
for high-resolution, large scale CFD and acoustics in industrial
geometries. PhD Thesis. Université catholique de Louvain.
https://dial.uclouvain.be/pr/boreal/object/boreal:128254/
"""
def residual(self, solution, solution_old, fields, fields_old, bnd_conditions=None):
if fields_old.get('diffusivity_h') is None:
return 0
diffusivity_h = fields_old['diffusivity_h']
sipg_factor = self.sipg_factor
diff_tensor = as_matrix([[diffusivity_h, 0, 0],
[0, diffusivity_h, 0],
[0, 0, 0]])
grad_test = grad(self.test)
diff_flux = dot(diff_tensor, grad(solution))
f = 0
f += inner(grad_test, diff_flux)*self.dx
if self.horizontal_dg:
h_cell = self.mesh.ufl_cell().sub_cells()[0]
p, q = self.function_space.ufl_element().degree()
cp = (p + 1) * (p + 2) / 2 if h_cell == triangle else (p + 1)**2
# by default the factor is multiplied by 2 to ensure convergence
sigma = cp * FacetArea(self.mesh) / CellVolume(self.mesh)
sp = sigma('+')
sm = sigma('-')
sigma_max = sipg_factor * conditional(sp > sm, sp, sm)
ds_interior = (self.dS_h + self.dS_v)
f += sigma_max * inner(
jump(self.test, self.normal),
dot(avg(diff_tensor), jump(solution, self.normal))
)*ds_interior
f += -inner(avg(dot(diff_tensor, grad(self.test))),
jump(solution, self.normal))*ds_interior
f += -inner(jump(self.test, self.normal),
avg(dot(diff_tensor, grad(solution))))*ds_interior
# symmetric bottom boundary condition
# NOTE introduces a flux through the bed - breaks mass conservation
f += - inner(diff_flux, self.normal)*self.test*self.ds_bottom
f += - inner(diff_flux, self.normal)*self.test*self.ds_surf
return -f
class VerticalDiffusionTerm(TracerTerm):
r"""
    Vertical diffusion term :math:`-\frac{\partial}{\partial z} \Big(\mu \frac{\partial T}{\partial z}\Big)`
Using the symmetric interior penalty method the weak form becomes
.. math::
        \int_\Omega \frac{\partial}{\partial z} \Big(\mu \frac{\partial T}{\partial z}\Big) \phi dx
=& -\int_\Omega \mu \frac{\partial T}{\partial z} \frac{\partial \phi}{\partial z} dz \\
&+ \int_{\mathcal{I}_{h}} \text{jump}(\phi n_z) \text{avg}\Big(\mu \frac{\partial T}{\partial z}\Big) dS
+ \int_{\mathcal{I}_{h}} \text{jump}(T n_z) \text{avg}\Big(\mu \frac{\partial \phi}{\partial z}\Big) dS \\
&- \int_{\mathcal{I}_{h}} \sigma \text{avg}(\mu) \text{jump}(T n_z) \cdot
\text{jump}(\phi n_z) dS
where :math:`\sigma` is a penalty parameter, see Hillewaert (2013).
Hillewaert, Koen (2013). Development of the discontinuous Galerkin method
for high-resolution, large scale CFD and acoustics in industrial
geometries. PhD Thesis. Université catholique de Louvain.
https://dial.uclouvain.be/pr/boreal/object/boreal:128254/
"""
def residual(self, solution, solution_old, fields, fields_old, bnd_conditions=None):
if fields_old.get('diffusivity_v') is None:
return 0
diffusivity_v = fields_old['diffusivity_v']
sipg_factor = self.sipg_factor_vertical
grad_test = Dx(self.test, 2)
diff_flux = dot(diffusivity_v, Dx(solution, 2))
f = 0
f += inner(grad_test, diff_flux)*self.dx
if self.vertical_dg:
p, q = self.function_space.ufl_element().degree()
cp = (q + 1)**2
l_normal = CellVolume(self.mesh) / FacetArea(self.mesh)
# by default the factor is multiplied by 2 to ensure convergence
sigma = sipg_factor * cp / l_normal
sp = sigma('+')
sm = sigma('-')
sigma_max = conditional(sp > sm, sp, sm)
ds_interior = (self.dS_h)
f += sigma_max*inner(
jump(self.test, self.normal[2]),
dot(avg(diffusivity_v), jump(solution, self.normal[2]))
)*ds_interior
f += -inner(avg(dot(diffusivity_v, Dx(self.test, 2))),
jump(solution, self.normal[2]))*ds_interior
f += -inner(jump(self.test, self.normal[2]),
avg(dot(diffusivity_v, Dx(solution, 2))))*ds_interior
return -f
class SourceTerm(TracerTerm):
r"""
Generic source term
The weak form reads
.. math::
F_s = \int_\Omega \sigma \phi dx
where :math:`\sigma` is a user defined scalar :class:`Function`.
"""
def residual(self, solution, solution_old, fields, fields_old, bnd_conditions=None):
f = 0
source = fields_old.get('source')
if source is not None:
f += inner(source, self.test)*self.dx
return f
class TracerEquation(Equation):
"""
3D tracer advection-diffusion equation :eq:`tracer_eq` in conservative form
"""
def __init__(self, function_space,
bathymetry=None, v_elem_size=None, h_elem_size=None,
use_symmetric_surf_bnd=True, use_lax_friedrichs=True,
sipg_factor=Constant(10.0),
sipg_factor_vertical=Constant(10.0)):
"""
:arg function_space: :class:`FunctionSpace` where the solution belongs
:kwarg bathymetry: bathymetry of the domain
:type bathymetry: 3D :class:`Function` or :class:`Constant`
:kwarg v_elem_size: scalar :class:`Function` that defines the vertical
element size
:kwarg h_elem_size: scalar :class:`Function` that defines the horizontal
element size
:kwarg bool use_symmetric_surf_bnd: If True, use symmetric surface boundary
condition in the horizontal advection term
:kwarg sipg_factor: :class: `Constant` or :class: `Function` horizontal SIPG penalty scaling factor
:kwarg sipg_factor_vertical: :class: `Constant` or :class: `Function` vertical SIPG penalty scaling factor
"""
super(TracerEquation, self).__init__(function_space)
args = (function_space, bathymetry,
v_elem_size, h_elem_size, use_symmetric_surf_bnd, use_lax_friedrichs,
sipg_factor, sipg_factor_vertical)
self.add_term(HorizontalAdvectionTerm(*args), 'explicit')
self.add_term(VerticalAdvectionTerm(*args), 'explicit')
self.add_term(HorizontalDiffusionTerm(*args), 'explicit')
self.add_term(VerticalDiffusionTerm(*args), 'explicit')
self.add_term(SourceTerm(*args), 'source')
``` |
{
"source": "jhillairet/fusion_plots",
"score": 3
} |
#### File: fusion_plots/plasma_zoo/plasma_zoo.py
```python
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__copyright__ = 'University of Stuttgart'
__license__ = 'MIT'
# import standard modules
import matplotlib.pyplot as plt
import numpy as np
import scipy.constants as consts
from matplotlib.colors import LogNorm
from matplotlib import ticker
# credit string to include at top of plot, to ensure people know they can use the plot
# (someone once told me, every plot appearing somewhere in the internet
# should contain information on how to use it, otherwise it is useless)
# note that the license refers only to that specific plot
# the license for the code is mentioned in the LICENSE file (and above)
credit_str = f'{__author__}, CC BY-SA 4.0'
def calc_debye( n=1e20, T=1, unit='eV' ):
#;{{{
"""
Calculate the Debye length.
Parameters
----------
n: float
plasma density in m^-3
T: float
plasma temperature in K (or eV, see parameter 'unit')
unit: str
if set to 'eV', plasma temperature is assumed to be in eV
Returns
-------
float
Debye length in meters.
"""
if unit == 'eV':
T *= consts.e/consts.k
return np.sqrt( consts.epsilon_0 * consts.k * T / (consts.e**2 * n) )
#;}}}
def calc_ND( n=1e20, T=1, unit='eV' ):
#;{{{
"""
Calculate the plasma parameter (number of particles in Debye sphere).
Parameters
----------
n: float
plasma density in m^-3
T: float
plasma temperature in K (or eV, see parameter 'unit')
unit: str
if set to 'eV', plasma temperature is assumed to be in eV
Returns
-------
float
Number of particles in Debye sphere.
"""
lambda_D = calc_debye(n,T,unit=unit)
return n * 4./3. * np.pi * lambda_D**3
#;}}}
def calc_Trel():
#;{{{
"""
Calculate the temperature when a plasma becomes relativistic.
Parameters
----------
Returns
-------
float
Temperature in eV above which the plasma becomes relativitic.
"""
return consts.m_e*consts.c**2 / consts.e
#;}}}
def calc_Tdeg( plasma_density ):
#;{{{
"""
Calculate the plasma temperature, when the plasma becomes degenerated.
Parameters
----------
plasma_density: float
plasma density in m^-3
Returns
-------
float
temperature in eV
"""
return consts.hbar**2/(2.*consts.m_e) * (3.*np.pi**2*plasma_density)**(2./3.) / consts.e
#;}}}
def calc_Tnonideal( plasma_density ):
#;{{{
"""
Calculate the plasma temperature, when the plasma becomes non-ideal
Parameters
----------
plasma_density: float
plasma density in m^-3
Returns
-------
float
temperature in eV
"""
# non-ideal plasmas with strong coupling parameter
return consts.e**2/(4.*np.pi*consts.epsilon_0) * plasma_density**(1./3.) / consts.e
#;}}}
def build_plasma_zoo():
#;{{{
"""
Return a dictionary containing the plasma zoo.
The keys of the dictionary are strings labelling the plasma type.
For each key, a numpy array with two elements is returned,
where the first element corresponds to the plasma density,
the second to the plasma temperature.
Parameters
----------
Returns
-------
dictionary
"""
plasma_zoo = {
'interstellar\nmedium': np.array([1e7, .8e0]), #rather:ne=1e6
'solar\ncore': np.array([1e30, 1e3]), #ok
'ionosphere': np.array([1e11, 1e-1]), #ok
'flames': np.array([1e16, 1e-1]), #ok
r'e$^{-}$'+'gas in\nmetals':np.array([1e29, 5e-2]), #ok
'solar\nwind': np.array([1e7, 1e1]), #ok
# 'interplanetary': np.array([1e11,1e1]), #
'gas\ndischarge': np.array([5e16, 1e0]), #ok
'lightning': np.array([1e20, 1e0]), #ok
'white\ndwarf': np.array([1e33, 2e0]), #ok
'solar\ncorona': np.array([1e15, 1e2]), #ok
'magnetic\nfusion': np.array([1e20, 1e4]), #ok
'inertial\nfusion': np.array([1e30, 1e4]), #300-1000g/cm^3 in burn phase = 1e32
'magnetosphere\nof pulsars':np.array([1e10, 1e6]), #ok
}
return plasma_zoo
#;}}}
def str_fmt(x):
power = int(round(np.log10(x)))
return r'$N_D=10^{{{0}}}$'.format(power)
def write_plasma_zoo_into_plot( ax, plasma_zoo,
plot__lambda_D=False,
silent=True
):
#;{{{
"""
Write plasma zoo into plot.
Parameters
----------
ax: Axes object
Axes object into which the plasma zoo will be written
plasma_zoo: dict
dict object which contains the plasma zoo, expected to consist
of a key (well, obviously, otherwise no dict...) and a two-element
numpy array: [plasma density in m^-3, plasma temperature in eV]
plot__lambda_D: bool
set if also the Debye-length is included in the Axes object,
a few formatting things will be different then
silent: bool
if False, some useful (?) output will be printed to console
Returns
-------
"""
if not silent:
print( 'write_plasma_zoo_into_plot' )
if plot__lambda_D:
plasma_zoo_col = 'dimgrey'
else:
plasma_zoo_col = 'black'
for key in plasma_zoo.keys():
if not silent:
print( ' {0}: {1:8.2e} m^-3, {2:8.2e} eV'.format(
key.replace('\n', ' '), plasma_zoo[key][0], plasma_zoo[key][1]) )
ax.text( plasma_zoo[key][0], plasma_zoo[key][1],
key,
color=plasma_zoo_col,
horizontalalignment='center', verticalalignment='center'
)
#;}}}
def make_lambda_D_contours( fig, ax,
T_vals=[], n_vals=[],
silent=True,
):
#;{{{
"""
Plot filled contours of Debye length into plot.
Parameters
----------
fig: Figure object
Figure object belonging to 'ax' (see parameter below)
ax: Axes object
Axes object into which the plasma zoo will be written
T_vals: numpy array of floats
plasma temperature in eV, corresponding to y-axis
n_vals: numpy array of floats
plasma density in m^-3, corresponding to x-axis
silent: bool
if False, some useful (?) output will be printed to console
Returns
-------
"""
fct_name = 'make_lambda_D_contours'
# check if temperature and density arrays were provided
# if not, create them
if len(T_vals) == 0:
# plasma temperature in eV
T_vals = np.logspace( np.log10(1e-2), np.log10(1e7), num=1000 )
if len(n_vals) == 0:
# plasma density in m^-3
n_vals = np.logspace( np.log10(1e5), np.log10(1e35), num=2000 )
# spatial coordinates (2D) for contour plot
nn, TT = np.meshgrid( n_vals, T_vals )
# caclulate the Debye length
lambda_D = np.empty( (T_vals.shape[0], n_vals.shape[0] ) )
for ii in np.arange(n_vals.shape[0]):
for jj in np.arange(T_vals.shape[0]):
# print( 'ii={0:d}, jj={1:d}, n={2:13.6e}, T={3:13.6e}, lambda_D={4:13.6e}'.
# format( ii, jj, n_vals[ii], T_vals[jj], calc_debye(n=n_vals[ii],T=T_vals[jj]) )
# )
lambda_D[jj,ii] = calc_debye( n=n_vals[ii], T=T_vals[jj] )
# identify non-ideal plasma
# relativistic plasmas
T_rel = calc_Trel()
# degenerated plasmas
TT_deg = calc_Tdeg( nn )
# non-ideal plasmas with strong coupling parameter
T_nonideal = calc_Tnonideal( nn )
# get indices of non-ideal plasmas in spatial coordinates
TT_rel_ids = (TT >= T_rel)
TT_deg_ids = (TT <= TT_deg)
TT_nonideal_ids = (TT <= T_nonideal)
# set lambda_D at non-ideal plasma to NaN in order to not plot it
lambda_D[TT_rel_ids] = np.nan
lambda_D[TT_deg_ids] = np.nan
lambda_D[TT_nonideal_ids] = np.nan
# contour levels are logarithmic due to large range
lD_contLevels = np.logspace( np.log10(1e-12),
np.log10(1e4),
9 )
if not silent:
print( '{0}: lambda_D contour levels:'.format(fct_name) )
print( lD_contLevels )
cont_lD = ax.contourf( nn, TT, lambda_D,
levels=lD_contLevels,
norm=LogNorm()
)
locator = ticker.LogLocator(base=10)
# add colorbar
cbar = fig.colorbar( cont_lD, fraction=0.046, pad=0.04, ticks=locator )
cbar.ax.tick_params( direction='in' )
cbar.set_label( 'Debye length in m' )
#;}}}
def make_N_D_contours( fig, ax,
T_vals=[], n_vals=[],
silent=True,
):
#;{{{
"""
Plot contour levels of plasma parameter into plot.
Parameters
----------
fig: Figure object
Figure object belonging to 'ax' (see parameter below)
ax: Axes object
Axes object into which the plasma zoo will be written
T_vals: numpy array of floats
plasma temperature in eV, corresponding to y-axis
n_vals: numpy array of floats
plasma density in m^-3, corresponding to x-axis
silent: bool
if False, some useful (?) output will be printed to console
Returns
-------
"""
fct_name = 'make_N_D_contours'
# check if temperature and density arrays were provided
# if not, create them
if len(T_vals) == 0:
# plasma temperature in eV
T_vals = np.logspace( np.log10(1e-2), np.log10(1e7), num=1000 )
if len(n_vals) == 0:
# plasma density in m^-3
n_vals = np.logspace( np.log10(1e5), np.log10(1e35), num=2000 )
# spatial coordinates (2D) for contour plot
nn, TT = np.meshgrid( n_vals, T_vals )
# calculate plasma parameter
N_D = np.empty( (T_vals.shape[0], n_vals.shape[0] ) )
for ii in np.arange(n_vals.shape[0]):
for jj in np.arange(T_vals.shape[0]):
N_D[jj,ii] = calc_ND( n=n_vals[ii], T=T_vals[jj] )
# identify non-ideal plasma
# relativistic plasmas
T_rel = calc_Trel()
# degenerated plasmas
TT_deg = calc_Tdeg( nn )
# non-ideal plasmas with strong coupling parameter
T_nonideal = calc_Tnonideal( nn )
# get indices of non-ideal plasmas in spatial coordinates
TT_rel_ids = (TT >= T_rel)
TT_deg_ids = (TT <= TT_deg)
TT_nonideal_ids = (TT <= T_nonideal)
# set N_D at non-ideal plasma to NaN in order to not plot it
N_D[TT_rel_ids] = np.nan
N_D[TT_deg_ids] = np.nan
N_D[TT_nonideal_ids] = np.nan
# contour levels are logarithmic due to large range covered
ND_contLevels = np.logspace( np.log10(1e0),
np.log10(1e15),
6 )
if not silent:
print( '{0}: N_D contour levels:'.format(fct_name) )
print( ND_contLevels )
# manually set position for labels of contour levels
ND_contLabelsPos = [ (1e26,6e0),
(1e31,3e4),
(1e25,3e4),
(1e19,3e4),
(1e13,3e4),
(1e7, 3e4)
]
cont_ND = ax.contour( nn, TT, N_D,
levels=ND_contLevels,
colors='darkgrey', linestyles='dashed',
)
# NOTE: EVIL HACK to manually write contour label
# reason was that clabels was not working properly
# probably due to setting some areas to NaN
for ii in np.arange(len(ND_contLabelsPos)):
ax.text( ND_contLabelsPos[ii][0], ND_contLabelsPos[ii][1],
str_fmt(ND_contLevels[ii]),
rotation=40,
fontsize=10, color='darkgrey'
)
if not silent:
print( '{0}: {1}, contour-level = {2}, formatted string-label = {3}'.format(
fct_name, ii, ND_contLevels[ii], str_fmt(ND_contLevels[ii])) )
#;}}}
def write_plasma_limits_into_plot( ax,
plot__lambda_D=False, xkcd_style=True,
T_vals=[], n_vals=[],
silent=True
):
#;{{{
"""
Mark (and label) limit of ideal plasmas in plot.
Parameters
----------
ax: Axes object
Axes object into which the plasma zoo will be written
plot__lambda_D: bool
set if also the Debye-length is included in the Axes object,
a few formatting things will be different then
xkcd_style: bool
set True, if xkcd plot style is used,
a few formatting things will be different then
T_vals: numpy array of floats
plasma temperature in eV, corresponding to y-axis
n_vals: numpy array of floats
plasma density in m^-3, corresponding to x-axis
silent: bool
if False, some useful (?) output will be printed to console
Returns
-------
"""
fct_name = 'write_plasma_limits_into_plot'
# check if temperature and density arrays were provided
# if not, create them
if len(T_vals) == 0:
# plasma temperature in eV
T_vals = np.logspace( np.log10(1e-2), np.log10(1e7), num=1000 )
if len(n_vals) == 0:
# plasma density in m^-3
n_vals = np.logspace( np.log10(1e5), np.log10(1e35), num=2000 )
# spatial coordinates (2D) for contour plot
nn, TT = np.meshgrid( n_vals, T_vals )
# label boundary for relativistic plasmas
ax.hlines( y=calc_Trel(), xmin=np.nanmin(nn), xmax=np.nanmax(nn),
linestyles='solid', linewidth=3, colors='grey' )
ax.text( 1e20, 9e5, 'relativistic plasmas', color='grey' )
# label boundary for degenerated plasmas
ax.plot( n_vals, calc_Tdeg(n_vals),
linestyle='solid', linewidth=3, color='grey' )
label_deg_n = 5e30
label_deg_T = 8e0
# failed attemp to make rotation fit to T_deg-function
label_deg_n_id = np.where( np.abs(n_vals-label_deg_n) == np.abs(n_vals-label_deg_n).min() )
label_deg_n_id = label_deg_n_id[0][0]
T_deg_vals = calc_Tdeg(n_vals)
## angle in data coordinates
label_deg_angle_data = np.rad2deg( np.arctan2( T_deg_vals[label_deg_n_id] - T_deg_vals[(label_deg_n_id-1)],
n_vals[label_deg_n_id] - n_vals[(label_deg_n_id-1)]) )
## angle in screen coordinates
label_deg_angle_screen = ax.transData.transform_angles( np.array((label_deg_angle_data,)),
np.array([n_vals[(label_deg_n_id-1)],
T_deg_vals[(label_deg_n_id-1)]]).reshape((1,2)))[0]
# ax.annotate( 'Text',
# xy=(n_vals[label_deg_n_id-1], T_deg_vals[label_deg_n_id-1]),
# rotation_mode='anchor', rotation=label_deg_angle_screen
# )
if not silent:
print( fct_name )
print( label_deg_n, label_deg_n_id, n_vals[label_deg_n_id], T_deg_vals[label_deg_n_id] )
print( n_vals[label_deg_n_id-1], T_deg_vals[label_deg_n_id-1] )
print( label_deg_angle_data, label_deg_angle_screen )
if plot__lambda_D:
label_deg_angle = 62.
label_nonideal_angle = 42.
else:
label_deg_angle = 59.
label_nonideal_angle = 39.
if xkcd_style:
label_deg_n = 3e29
label_nonideal_T = 1.2e-2
else:
label_nonideal_T = 5e-1
label_nonideal_n = 3e21
ax.text( label_deg_n, label_deg_T,
'degenerated plasmas',
rotation=label_deg_angle,
color='grey' )
# label boundary to non-ideal plasmas with strong coupling
ax.plot( n_vals, calc_Tnonideal( n_vals ),
linestyle='solid', linewidth=3, color='grey' )
ax.text( label_nonideal_n, label_nonideal_T,
'non-ideal plasmas',
rotation=label_nonideal_angle,
color='grey' )
#;}}}
def main():
#;{{{
print( '\n' )
print( 'Let me know if you have questions, requests or found some bugs.')
print( ' -- <NAME>, April 2020\n' )
plot__lambda_D = True
plot__N_D = True
plot__limits = True
label_plasmas = True
# plasma temperature in eV, plasma density in m^-3
T_vals = np.logspace( np.log10(1e-2), np.log10(1e7), num=1000 )
n_vals = np.logspace( np.log10(1e5), np.log10(1e35), num=2000 )
#fname_plot = ''
fname_plot = 'plasma_zoo.png'
xkcd_style = True
# plot configuration
# optionally acitvate xkcd-style plot
if xkcd_style:
plt.xkcd()
fig1 = plt.figure( figsize=(8,6) )
ax1 = fig1.add_subplot( 1,1,1 )
if plot__lambda_D:
make_lambda_D_contours( fig1, ax1,
T_vals=T_vals, n_vals=n_vals,
silent=True,
)
if plot__N_D:
make_N_D_contours( fig1, ax1,
T_vals=T_vals, n_vals=n_vals,
silent=True,
)
if label_plasmas:
# get the plasma zoo
plasma_zoo = build_plasma_zoo()
# for xkcd-style, a small correction is necessary
# otherwise, the following label would overlap with another
# (due to different font size)
if xkcd_style:
plasma_zoo['lightning'][0] = 5e21
write_plasma_zoo_into_plot( ax1, plasma_zoo, plot__lambda_D )
if plot__limits:
write_plasma_limits_into_plot( ax1,
plot__lambda_D=plot__lambda_D, xkcd_style=xkcd_style,
T_vals=T_vals, n_vals=n_vals,
)
ax1.set_xscale('log')
ax1.set_yscale('log')
ax1.set_xlim( np.nanmin(n_vals), np.nanmax(n_vals) )
ax1.set_ylim( np.nanmin(T_vals), np.nanmax(T_vals) )
ax1.set_xticks([1e5,1e10,1e15,1e20,1e25,1e30,1e35])
ax1.set_yticks([1e-2,1e0,1e2,1e4,1e6])
ax1.set_xlabel( r'plasma density in m$^{-3}$' )
ax1.set_ylabel( r'temperature in eV' )
# force ticks to point inwards
ax1.tick_params( axis='both', which='both', direction='in',
top=True, right=True
)
ax1.minorticks_off()
# write credits into plot, to ensure that people know they can use the plot
# (somebody once told me, every plot appearing somewhere in the internet
# should contain information on how to use it, otherwise it is useless)
# you probably want to remove it when you make you own plot
# attribution would still be gratefully acknowledged :)
# also note that the licence refers only to that specific plot
# the licence for the code is mentioned above and in the LICENCE file
credit_str = u'<NAME>, CC BY-SA 4.0'
fig1.text( .65, .89, credit_str, fontsize=7 )
if len( fname_plot ):
plt.savefig( fname_plot, bbox_inches='tight', dpi=600 )
print( ' plot written into {0}'.format( fname_plot ) )
else:
plt.show()
#;}}}
if __name__ == '__main__':
main()
``` |
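As a quick sanity check of the helper functions, a short sketch (assuming the module is importable as `plasma_zoo`); the chosen n and T are typical magnetic-fusion values and the numbers in the comments are rough orders of magnitude.
```python
# Usage sketch for the helpers above; the module name and quoted magnitudes are assumptions.
from plasma_zoo import calc_debye, calc_ND, calc_Tdeg

n, T = 1e20, 1e4                 # density in m^-3, temperature in eV
print(calc_debye(n=n, T=T))      # Debye length, roughly 7e-5 m
print(calc_ND(n=n, T=T))         # particles per Debye sphere, roughly 1e8
print(calc_Tdeg(n))              # degeneracy temperature in eV, far below 1e4 eV
```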
{
"source": "jhillairet/Multipactor",
"score": 2
} |
#### File: Multipactor/Work_Spark3D/spark3d.py
```python
import os
import numpy as np
import subprocess # used instead of os.system()
import shlex # used to convert Spark3d command line into a list
import logging
class Spark3d(object):
"""
SPARK3D simulation object
"""
DIR_PATH = '/Applications/SPARK3D/1.6.3/dist'
BIN_PATH = '/Applications/SPARK3D/1.6.3/dist/spark3d'
def __init__(self, project_path, EMfields_file, file_type='hfss',
output_path='results/', tmp_path='.', config_file='config.min'):
"""
Constructor.
Arguments
---------
project_path : absolute path (important!) of the project.
data_file : relative path of the data file
[file_type : {'hfss' (default, .dsp), 'cst', 'fest'(.mfe), 'csv'}]
[output_path: relative path of the output dir (default: "results/")]
[tmp_path: temporary file absolute path (default: '/tmp/SPKTMP')]
[config_file: name of the config file (default: 'config.min')]
"""
self.project_path = project_path
self.data_file = EMfields_file
self.file_type = file_type
self.tmp_path = tmp_path
self.config_file = config_file
self.output_path = output_path
        logging.basicConfig(filename='spark3drun.log', filemode='w')
        self.log = logging.getLogger(__name__)
def run(self, show_stdout=False):
"""
Run the SPARK3D modeling.
"""
cmd = self.__get_run_command__()
print(cmd)
try:
print('Running Spark3D...')
p = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE)
# print the output of Spark3D in the standard output
# untill the end of the simulation
if show_stdout:
while p.poll() is None:
print(p.stdout.readline())
else:
p.wait()
retcode = p.returncode
if retcode < 0:
print('Child was terminated by signal', retcode)
else:
print('Child returned', retcode)
except OSError as e:
print('Non existent file ? (OSError):', e)
except ValueError as e:
print('Invalid Argument', e)
except FileNotFoundError as e:
print('file not found', e)
except Exception as e:
print('Something else occured:', e)
def __get_run_command__(self):
cmd = self.BIN_PATH + \
' --mode=multipactor' + \
' --project_path='+self.project_path + \
' --tmp_path='+self.tmp_path + \
' --config_file='+self.config_file + \
' --output_path='+self.output_path + \
' --data_file='+self.data_file + \
' --file_type='+self.file_type
#cmd = '/Applications/SPARK3D/1.6.3/dist/spark3d --config_file=config.min --output_path=results/ --HFSS_units="m" --mode=multipactor --project_path=/home/JH218595/Multipactor/Spark3D/Simple_Waveguide_WR284_3.7GHz --tmp_path=. --data_file=data_HFSS/SimpleWaveguideFields_72.14x22mm_50mm_V2.dsp --file_type=hfss'
return(cmd)
def get_results(self):
"""
Returns the SPARK3D run results
Arguments
----------
none
Returns
----------
freq: array of frequency
power: array of breakdown power
"""
result_file = self.project_path+'/'+self.output_path+'general_results.txt'
try:
freq, power = np.loadtxt(result_file,
skiprows=1,
delimiter='\t',
usecols=(3,4), # use only columns 3 and 4
unpack=True)
return freq, power
except Exception as e:
print(e)
return None, None
def read_config(self):
"""
Returns the content of the configuration file
into a python dictionary
"""
config = {}
with open(self.project_path+'/'+self.config_file) as config_lines:
for line in config_lines:
# creates a new section if line start with begin
if 'begin' in line:
dummy, section_name = line.split()
config[section_name] = dict()
elif 'end' in line:
pass
else:
dummy = line.split()
if len(dummy) ==2: # parameter value couple
parameter, value = line.split()
config[parameter] = value
return config
def write_config(self):
"""
TODO
"""
pass
def get_config_parameter(self, parameter):
"""
Return the value of a parameter defined in the configuration file.
"""
config = self.read_config()
return config[parameter]
def set_config_parameter(self, parameter, new_value):
"""
Replace a parameter value in the configuration file
"""
current_config = self.read_config()
config_file = self.project_path+'/'+self.config_file
with open(config_file, 'r') as config:
config_contents = config.read()
# replace the previous parameter value with new one
new_config_contents = config_contents.replace(current_config[parameter], new_value)
if new_config_contents is not None:
with open(config_file, 'w') as config:
print(new_config_contents)
config.write(new_config_contents)
if __name__ == "__main__":
project_path = '/home/JH218595/Multipactor/Work_Spark3D/Projects/Simple_Waveguide_WR284_3.7GHz'
data_file = '../../EM_Fields/SimpleWaveguideFields_72.14x22mm/SimpleWaveguideFields_72.14x22mm_100mm_V2.dsp'
spk = Spark3d(project_path, data_file)
spk.run()
freq, power = spk.get_results()
print(freq, power)
# with open('RESULTS.txt','ab') as f_handle:
# np.savetxt(f_handle, np.array([power]))
``` |
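The configuration helpers can be exercised without launching SPARK3D; a short sketch in which the project path, field file and parameter name are placeholders, since the layout of `config.min` depends on the project.
```python
# Hypothetical use of the config helpers; paths and the parameter name are placeholders.
spk = Spark3d('/path/to/project', 'fields/example.dsp')
cfg = spk.read_config()                            # dict of parameter/value pairs from config.min
print(spk.get_config_parameter('num_electrons'))   # placeholder parameter name
spk.set_config_parameter('num_electrons', '2000')  # rewrites config.min in place
```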
{
"source": "jhillairet/posCouche",
"score": 3
} |
#### File: posCouche/posCouche/formeTS.py
```python
import pywed as pw
import numpy as np
import os
def vacuum_vessel(shot):
"""
Get the coordinates of the Tore Supra / WEST vacuum vessel
R_wall, Z_wall = vacuum_vessel(shot)
Arguments:
- shot: Tore Supra or WEST shot number
Returns:
- R_wall: radius of the vacuum chamber walls [m]
- Z_wall: height of the vacuum chamber walls [m]
TODO: once WEST will have started, get the final vacuum vessel coordinates
"""
if (shot <= 0) or (not isinstance(shot, int)):
raise ValueError('Shot number should be a positive integer')
elif shot < 50000: # Tore Supra vacuum chamber profile
wall = pw.tsmat(shot, 'APOLO;+Parametres;Paroi')
R_wall = wall[:,0]
Z_wall = wall[:,1]
else: # WEST vacuum chamber profile
# get the absolute path of the filename, in order to work even if launched from other dir
filename = os.path.dirname(__file__) + os.sep + 'WEST_vacuum_vessel.txt'
R_wall, Z_wall = np.loadtxt(filename, skiprows=1, unpack=True)
return R_wall, Z_wall
def LCFS(shot):
"""
Get the coordinates of the LCFS as a function of time.
R_ext, Z_ext, t = LCFS(shot)
Arguments:
shot: Tore Supra or WEST shot number
Returns:
R_ext: radius of LCFS [m]
Z_ext: height of LCFS [m]
t: time [s]
"""
if (shot <= 0) or (not isinstance(shot, int)):
raise ValueError('Shot number should be a positive integer')
if shot < 28540:
raise ValueError('Shot number should be larger than 28540')
# small radius vs time
y, t = pw.tsbase(shot, 'GRHO', nargout=2)
t = t[:,0]
# poloidal profile (assumed circular)
theta = np.arange(0, 24*15, 15) * np.pi/180
R0 = 2.42
R_ext = R0 + y*np.cos(theta)
Z_ext = y*np.sin(theta)
# trick to have a full profile
R_ext = np.column_stack((R_ext, R_ext[:,0]))
Z_ext = np.column_stack((Z_ext, Z_ext[:,0]))
return R_ext, Z_ext, t
# Below a test code which is run only if this file is executed directly
if __name__ == '__main__':
import matplotlib.pyplot as plt
R_wall, Z_wall = vacuum_vessel(50001)
R_ext, Z_ext, t = LCFS(47979)
fig, ax = plt.subplots(1,1)
ax.plot(R_wall, Z_wall, 'k', lw=2)
ax.axis('equal')
# plasma profile at the middle of the shot
R_e = R_ext[int(len(R_ext)/2)]
Z_e = Z_ext[int(len(R_ext)/2)]
ax.plot(R_e, Z_e, 'b')
``` |
{
"source": "JHillard/SmartScope",
"score": 2
} |
#### File: JHillard/SmartScope/OscopePlayground.py
```python
import numpy
import numpy as np
import matplotlib.pyplot as plot
import time as TIME
import csv
import usbtmc
#CONSTANTS
TIME_LENGTH = 600
# In[ ]:
r = usbtmc.usb_instrument()
s1 = r.sample_norm("CHAN1")
print("Sample Captured")
# In[ ]:
data = numpy.frombuffer(s1, 'B')
print(data)
voltscale = float( r.ask(":CHAN1:SCAL?", length=20))
voltageOffset = float( r.ask(":CHAN1:OFFS?", length=20))
timescale = float( r.ask(":TIM:SCAL?", length = 20))
timeOffset = float( r.ask(":TIM:OFFS?", length =20))
# In[2]:
def sample(channel="CHAN1"):
dtemp = r.sample_norm(channel)
if len(dtemp) < TIME_LENGTH:
raise "Device unresponsive. Please Try again."
voltscale = float( r.ask(":CHAN1:SCAL?", length=20))
voltageOffset = float( r.ask(":CHAN1:OFFS?", length=20))
timescale = float( r.ask(":TIM:SCAL?", length = 20))
timeOffset = float( r.ask(":TIM:OFFS?", length =20))
weird_offset = 11
data = data*-1+255
data = data[weird_offset:]
data = (data - 130.0 - voltageOffset/voltscale*25) / 25 * voltscale
return data
def writeSample(filename, data, time):
    with open(filename, 'w', newline='') as csvfile:
cartographer = csv.writer(csvfile, delimiter = " ",
quotechar='|', quoting=csv.QUOTE_MINIMAL)
for i in range(0,len(data)):
cartographer.writerow([str(data[i]), str(time[i])])
def graphSample(anex, save = False, img_name = str(TIME.strftime("%H%M%S"))): #anex=(data,time)
data = anex[0,:]
t = anex[1,:]
# # See if we should use a different time axis
if (t[599] < 1e-3):
t = t * 1e6
tUnit = "uS"
    elif (t[599] < 1):
t = t * 1e3
tUnit = "mS"
else:
tUnit = "S"
# Plot the data
newFig = plot.figure()
plot.plot(t, data)
plot.title("Oscilloscope Channel 1")
plot.ylabel("Voltage (V)")
plot.xlabel("Time (" + tUnit + ")") #Relabel tUnit if re-enabling scale
plot.xlim(t[0], t[599])
if(save): plot.savefig(img_name)
plot.show()
# In[ ]:
weird_offset = 11
data = data*-1+255
data = data[weird_offset:]
data = (data - 130.0 - voltageOffset/voltscale*25) / 25 * voltscale
#
# In[3]:
timescale = 1
time = numpy.arange(-300.0/50*timescale, 300.0/50*timescale, timescale/50.0)
fake_data = numpy.arange(1000.0/50*timescale, 1600.0/50*timescale, timescale/50.0)
package = np.vstack((fake_data,time))
np.savetxt("test.csv", package, delimiter=",")
# writeSample("Test.csv", fake_data, time)
new_pk = np.loadtxt("test.csv", delimiter=",")
print(new_pk)
graphSample(package, save=True)
```
#### File: JHillard/SmartScope/usbtmc2.py
```python
import usb.core
import usb.util
import struct
import time
import os
import re
import sys
# constants
USBTMC_bInterfaceClass = 0xFE
USBTMC_bInterfaceSubClass = 3
USBTMC_bInterfaceProtocol = 0
USB488_bInterfaceProtocol = 1
USBTMC_MSGID_DEV_DEP_MSG_OUT = 1
USBTMC_MSGID_REQUEST_DEV_DEP_MSG_IN = 2
USBTMC_MSGID_DEV_DEP_MSG_IN = 2
USBTMC_MSGID_VENDOR_SPECIFIC_OUT = 126
USBTMC_MSGID_REQUEST_VENDOR_SPECIFIC_IN = 127
USBTMC_MSGID_VENDOR_SPECIFIC_IN = 127
USB488_MSGID_TRIGGER = 128
USBTMC_STATUS_SUCCESS = 0x01
USBTMC_STATUS_PENDING = 0x02
USBTMC_STATUS_FAILED = 0x80
USBTMC_STATUS_TRANSFER_NOT_IN_PROGRESS = 0x81
USBTMC_STATUS_SPLIT_NOT_IN_PROGRESS = 0x82
USBTMC_STATUS_SPLIT_IN_PROGRESS = 0x83
USB488_STATUS_INTERRUPT_IN_BUSY = 0x20
USBTMC_REQUEST_INITIATE_ABORT_BULK_OUT = 1
USBTMC_REQUEST_CHECK_ABORT_BULK_OUT_STATUS = 2
USBTMC_REQUEST_INITIATE_ABORT_BULK_IN = 3
USBTMC_REQUEST_CHECK_ABORT_BULK_IN_STATUS = 4
USBTMC_REQUEST_INITIATE_CLEAR = 5
USBTMC_REQUEST_CHECK_CLEAR_STATUS = 6
USBTMC_REQUEST_GET_CAPABILITIES = 7
USBTMC_REQUEST_INDICATOR_PULSE = 64
USB488_READ_STATUS_BYTE = 128
USB488_REN_CONTROL = 160
USB488_GOTO_LOCAL = 161
USB488_LOCAL_LOCKOUT = 162
USBTMC_HEADER_SIZE = 12
RIGOL_QUIRK_PIDS = [0x04ce, 0x0588]
def parse_visa_resource_string(resource_string):
# valid resource strings:
# USB::1234::5678::INSTR
# USB::1234::5678::SERIAL::INSTR
# USB0::0x1234::0x5678::INSTR
# USB0::0x1234::0x5678::SERIAL::INSTR
m = re.match('^(?P<prefix>(?P<type>USB)\d*)(::(?P<arg1>[^\s:]+))'
'(::(?P<arg2>[^\s:]+(\[.+\])?))(::(?P<arg3>[^\s:]+))?'
'(::(?P<suffix>INSTR))$', resource_string, re.I)
if m is not None:
return dict(
type=m.group('type').upper(),
prefix=m.group('prefix'),
arg1=m.group('arg1'),
arg2=m.group('arg2'),
arg3=m.group('arg3'),
suffix=m.group('suffix')
)
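# Illustrative example (not from the original source): a Rigol-style resource string
#   parse_visa_resource_string('USB0::0x1ab1::0x04ce::INSTR')
# would yield
#   {'type': 'USB', 'prefix': 'USB0', 'arg1': '0x1ab1',
#    'arg2': '0x04ce', 'arg3': None, 'suffix': 'INSTR'}
# i.e. arg1/arg2 carry the vendor/product ids and arg3 the optional serial number.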
# Exceptions
class UsbtmcException(Exception):
em = {0: "No error"}
def __init__(self, err=None, note=None):
self.err = err
self.note = note
self.msg = ''
if err is None:
self.msg = note
else:
if type(err) is int:
if err in self.em:
self.msg = "%d: %s" % (err, self.em[err])
else:
self.msg = "%d: Unknown error" % err
else:
self.msg = err
if note is not None:
self.msg = "%s [%s]" % (self.msg, note)
def __str__(self):
return self.msg
def list_devices():
"List all connected USBTMC devices"
def is_usbtmc_device(dev):
for cfg in dev:
d = usb.util.find_descriptor(cfg, bInterfaceClass=USBTMC_bInterfaceClass,
bInterfaceSubClass=USBTMC_bInterfaceSubClass)
is_advantest = dev.idVendor == 0x1334
return d is not None or is_advantest
return list(usb.core.find(find_all=True, custom_match=is_usbtmc_device))
def find_device(idVendor=None, idProduct=None, iSerial=None):
"Find USBTMC instrument"
devs = list_devices()
if len(devs) == 0:
return None
for dev in devs:
if dev.idVendor != idVendor or dev.idProduct != idProduct:
continue
if iSerial is None:
return dev
else:
s = ''
# try reading serial number
try:
s = dev.serial_number
except:
pass
if iSerial == s:
return dev
return None
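# Illustrative discovery flow (not part of the original module):
#   devs = list_devices()               # every USBTMC-capable device on the bus
#   dev = find_device(0x1ab1, 0x04ce)   # vendor/product ids of a Rigol scope (see quirk list above)
#   inst = Instrument(dev)
#   inst.open()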
class Instrument(object):
"USBTMC instrument interface client"
def __init__(self, *args, **kwargs):
"Create new USBTMC instrument object"
self.idVendor = 0
self.idProduct = 0
self.iSerial = None
self.device = None
self.cfg = None
self.iface = None
self.term_char = None
self.advantest_quirk = False
self.advantest_locked = False
self.rigol_quirk = False
self.rigol_quirk_ieee_block = False
self.bcdUSBTMC = 0
self.support_pulse = False
self.support_talk_only = False
self.support_listen_only = False
self.support_term_char = False
self.bcdUSB488 = 0
self.support_USB4882 = False
self.support_remote_local = False
self.support_trigger = False
self.support_scpi = False
self.support_SR = False
self.support_RL = False
self.support_DT = False
self.max_transfer_size = 1024*1024
self.timeout = 1.0
self.bulk_in_ep = None
self.bulk_out_ep = None
self.interrupt_in_ep = None
self.last_btag = 0
self.last_rstb_btag = 0
self.connected = False
self.reattach = []
self.old_cfg = None
resource = None
# process arguments
if len(args) == 1:
if type(args[0]) == str:
resource = args[0]
else:
self.device = args[0]
if len(args) >= 2:
self.idVendor = args[0]
self.idProduct = args[1]
if len(args) >= 3:
self.iSerial = args[2]
for op in kwargs:
val = kwargs[op]
if op == 'idVendor':
self.idVendor = val
elif op == 'idProduct':
self.idProduct = val
elif op == 'iSerial':
self.iSerial = val
elif op == 'device':
self.device = val
elif op == 'dev':
self.device = val
elif op == 'term_char':
self.term_char = val
elif op == 'resource':
resource = val
if resource is not None:
res = parse_visa_resource_string(resource)
if res is None:
raise UsbtmcException("Invalid resource string", 'init')
if res['arg1'] is None and res['arg2'] is None:
raise UsbtmcException("Invalid resource string", 'init')
self.idVendor = int(res['arg1'], 0)
self.idProduct = int(res['arg2'], 0)
self.iSerial = res['arg3']
# find device
if self.device is None:
if self.idVendor is None or self.idProduct is None:
raise UsbtmcException("No device specified", 'init')
else:
self.device = find_device(self.idVendor, self.idProduct, self.iSerial)
if self.device is None:
raise UsbtmcException("Device not found", 'init')
def __del__(self):
if self.connected:
self.close()
def open(self):
if self.connected:
return
# initialize device
# find first USBTMC interface
for cfg in self.device:
for iface in cfg:
if (self.device.idVendor == 0x1334) or \
(iface.bInterfaceClass == USBTMC_bInterfaceClass and
iface.bInterfaceSubClass == USBTMC_bInterfaceSubClass):
self.cfg = cfg
self.iface = iface
break
else:
continue
break
if self.iface is None:
raise UsbtmcException("Not a USBTMC device", 'init')
self.old_cfg = self.device.get_active_configuration()
if self.old_cfg.bConfigurationValue == self.cfg.bConfigurationValue:
# already set to correct configuration
# release kernel driver on USBTMC interface
self._release_kernel_driver(self.iface.bInterfaceNumber)
else:
# wrong configuration
# release all kernel drivers
for iface in self.old_cfg:
self._release_kernel_driver(iface.bInterfaceNumber)
# set proper configuration
self.device.set_configuration(self.cfg)
# don't need to set altsetting - USBTMC devices have 1 altsetting as per the spec
# find endpoints
for ep in self.iface:
ep_dir = usb.util.endpoint_direction(ep.bEndpointAddress)
ep_type = usb.util.endpoint_type(ep.bmAttributes)
if (ep_type == usb.util.ENDPOINT_TYPE_BULK):
if (ep_dir == usb.util.ENDPOINT_IN):
self.bulk_in_ep = ep
elif (ep_dir == usb.util.ENDPOINT_OUT):
self.bulk_out_ep = ep
elif (ep_type == usb.util.ENDPOINT_TYPE_INTR):
if (ep_dir == usb.util.ENDPOINT_IN):
self.interrupt_in_ep = ep
if self.bulk_in_ep is None or self.bulk_out_ep is None:
raise UsbtmcException("Invalid endpoint configuration", 'init')
# set quirk flags if necessary
if self.device.idVendor == 0x1334:
# Advantest/ADCMT devices have a very odd USBTMC implementation
# which requires max 63 byte reads and never signals EOI on read
self.max_transfer_size = 63
self.advantest_quirk = True
if self.device.idVendor == 0x1ab1 and self.device.idProduct in RIGOL_QUIRK_PIDS:
self.rigol_quirk = True
if self.device.idProduct == 0x04ce:
self.rigol_quirk_ieee_block = True
self.connected = True
self.clear()
self.get_capabilities()
def close(self):
if not self.connected:
return
usb.util.dispose_resources(self.device)
try:
# reset configuration
if self.cfg.bConfigurationValue != self.old_cfg.bConfigurationValue:
self.device.set_configuration(self.old_cfg)
# try to reattach kernel driver
for iface in self.reattach:
try:
self.device.attach_kernel_driver(iface)
except:
pass
except:
pass
self.reattach = []
self.connected = False
def is_usb488(self):
return self.iface.bInterfaceProtocol == USB488_bInterfaceProtocol
def get_capabilities(self):
if not self.connected:
self.open()
b = self.device.ctrl_transfer(
usb.util.build_request_type(usb.util.CTRL_IN, usb.util.CTRL_TYPE_CLASS, usb.util.CTRL_RECIPIENT_INTERFACE),
USBTMC_REQUEST_GET_CAPABILITIES,
0x0000,
self.iface.index,
0x0018,
timeout=int(self.timeout*1000))
if (b[0] == USBTMC_STATUS_SUCCESS):
self.bcdUSBTMC = (b[3] << 8) + b[2]
self.support_pulse = b[4] & 4 != 0
self.support_talk_only = b[4] & 2 != 0
self.support_listen_only = b[4] & 1 != 0
self.support_term_char = b[5] & 1 != 0
if self.is_usb488():
self.bcdUSB488 = (b[13] << 8) + b[12]
                # Per the USB488 subclass spec, the USB488 capability flags are
                # reported in bytes 14 and 15 of the GET_CAPABILITIES response
                self.support_USB4882 = b[14] & 4 != 0
                self.support_remote_local = b[14] & 2 != 0
                self.support_trigger = b[14] & 1 != 0
                self.support_scpi = b[15] & 8 != 0
                self.support_SR = b[15] & 4 != 0
                self.support_RL = b[15] & 2 != 0
                self.support_DT = b[15] & 1 != 0
else:
raise UsbtmcException("Get capabilities failed", 'get_capabilities')
def pulse(self):
"""
Send a pulse indicator request, this should blink a light
for 500-1000ms and then turn off again. (Only if supported)
"""
if not self.connected:
self.open()
if self.support_pulse:
b = self.device.ctrl_transfer(
usb.util.build_request_type(usb.util.CTRL_IN, usb.util.CTRL_TYPE_CLASS, usb.util.CTRL_RECIPIENT_INTERFACE),
USBTMC_REQUEST_INDICATOR_PULSE,
0x0000,
self.iface.index,
0x0001,
timeout=int(self.timeout*1000))
if (b[0] != USBTMC_STATUS_SUCCESS):
raise UsbtmcException("Pulse failed", 'pulse')
# message header management
def pack_bulk_out_header(self, msgid):
self.last_btag = btag = (self.last_btag % 255) + 1
return struct.pack('BBBx', msgid, btag, ~btag & 0xFF)
def pack_dev_dep_msg_out_header(self, transfer_size, eom = True):
hdr = self.pack_bulk_out_header(USBTMC_MSGID_DEV_DEP_MSG_OUT)
return hdr+struct.pack("<LBxxx", transfer_size, eom)
def pack_dev_dep_msg_in_header(self, transfer_size, term_char = None):
hdr = self.pack_bulk_out_header(USBTMC_MSGID_DEV_DEP_MSG_IN)
transfer_attributes = 0
if term_char is None:
term_char = 0
else:
transfer_attributes = 2
term_char = self.term_char
return hdr+struct.pack("<LBBxx", transfer_size, transfer_attributes, term_char)
def pack_vendor_specific_out_header(self, transfer_size):
hdr = self.pack_bulk_out_header(USBTMC_MSGID_VENDOR_SPECIFIC_OUT)
return hdr+struct.pack("<Lxxxx", transfer_size)
def pack_vendor_specific_in_header(self, transfer_size):
hdr = self.pack_bulk_out_header(USBTMC_MSGID_VENDOR_SPECIFIC_IN)
return hdr+struct.pack("<Lxxxx", transfer_size)
def pack_usb488_trigger(self):
hdr = self.pack_bulk_out_header(USB488_MSGID_TRIGGER)
return hdr+b'\x00'*8
def unpack_bulk_in_header(self, data):
msgid, btag, btaginverse = struct.unpack_from('BBBx', data)
return (msgid, btag, btaginverse)
def unpack_dev_dep_resp_header(self, data):
msgid, btag, btaginverse = self.unpack_bulk_in_header(data)
transfer_size, transfer_attributes = struct.unpack_from('<LBxxx', data, 4)
data = data[USBTMC_HEADER_SIZE:transfer_size+USBTMC_HEADER_SIZE]
return (msgid, btag, btaginverse, transfer_size, transfer_attributes, data)
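    # Worked example (illustrative, not part of the original module): for the
    # first transfer after connecting (so bTag becomes 1),
    # pack_dev_dep_msg_out_header(5) returns the 12-byte USBTMC header
    #     01 01 FE 00 | 05 00 00 00 | 01 00 00 00
    # i.e. MsgID=DEV_DEP_MSG_OUT, bTag=1, bTagInverse=0xFE, a reserved byte,
    # TransferSize=5 (little-endian), EOM=1 and three reserved zero bytes.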
def write_raw(self, data):
"Write binary data to instrument"
if not self.connected:
self.open()
eom = False
num = len(data)
offset = 0
while num > 0:
if num <= self.max_transfer_size:
eom = True
block = data[offset:offset+self.max_transfer_size]
size = len(block)
req = self.pack_dev_dep_msg_out_header(size, eom) + block + b'\0'*((4 - (size % 4)) % 4)
self.bulk_out_ep.write(req)
offset += size
num -= size
def read_raw(self, num=-1):
"Read binary data from instrument"
if not self.connected:
self.open()
read_len = self.max_transfer_size
if 0 < num < read_len:
read_len = num
eom = False
term_char = None
if self.term_char is not None:
term_char = self.term_char
read_data = b''
while not eom:
if not self.rigol_quirk or read_data == b'':
# if the rigol sees this again, it will restart the transfer
# so only send it the first time
req = self.pack_dev_dep_msg_in_header(read_len, term_char)
self.bulk_out_ep.write(req)
resp = self.bulk_in_ep.read(read_len+USBTMC_HEADER_SIZE+3, timeout = int(self.timeout*1000))
if sys.version_info >= (3, 0):
resp = resp.tobytes()
else:
resp = resp.tostring()
if self.rigol_quirk and read_data:
pass # do nothing, the packet has no header if it isn't the first
else:
msgid, btag, btaginverse, transfer_size, transfer_attributes, data = self.unpack_dev_dep_resp_header(resp)
if self.rigol_quirk:
# rigol devices only send the header in the first packet, and they lie about whether the transaction is complete
if read_data:
read_data += resp
else:
if self.rigol_quirk_ieee_block and data.startswith(b"#"):
# ieee block incoming, the transfer_size usbtmc header is lying about the transaction size
l = int(chr(data[1]))
n = int(data[2:l+2])
transfer_size = n + (l+2) # account for ieee header
read_data += data
if len(read_data) >= transfer_size:
read_data = read_data[:transfer_size] # as per usbtmc spec section 3.2 note 2
eom = True
else:
eom = False
else:
eom = transfer_attributes & 1
read_data += data
# Advantest devices never signal EOI and may only send one read packet
if self.advantest_quirk:
break
if num > 0:
num = num - len(data)
if num <= 0:
break
if num < read_len:
read_len = num
return read_data
def ask_raw(self, data, num=-1):
"Write then read binary data"
# Advantest/ADCMT hardware won't respond to a command unless it's in Local Lockout mode
was_locked = self.advantest_locked
try:
if self.advantest_quirk and not was_locked:
self.lock()
self.write_raw(data)
return self.read_raw(num)
finally:
if self.advantest_quirk and not was_locked:
self.unlock()
def write(self, message, encoding='utf-8'):
"Write string to instrument"
if type(message) is tuple or type(message) is list:
# recursive call for a list of commands
for message_i in message:
self.write(message_i, encoding)
return
self.write_raw(str(message).encode(encoding))
def read(self, num=-1, encoding='utf-8'):
"Read string from instrument"
return self.read_raw(num).decode(encoding).rstrip('\r\n')
def ask(self, message, num=-1, encoding='utf-8'):
"Write then read string"
if type(message) is tuple or type(message) is list:
# recursive call for a list of commands
val = list()
for message_i in message:
val.append(self.ask(message_i, num, encoding))
return val
# Advantest/ADCMT hardware won't respond to a command unless it's in Local Lockout mode
was_locked = self.advantest_locked
try:
if self.advantest_quirk and not was_locked:
self.lock()
self.write(message, encoding)
return self.read(num, encoding)
finally:
if self.advantest_quirk and not was_locked:
self.unlock()
def read_stb(self):
"Read status byte"
if not self.connected:
self.open()
if self.is_usb488():
rstb_btag = (self.last_rstb_btag % 128) + 1
if rstb_btag < 2:
rstb_btag = 2
self.last_rstb_btag = rstb_btag
b = self.device.ctrl_transfer(
usb.util.build_request_type(usb.util.CTRL_IN, usb.util.CTRL_TYPE_CLASS, usb.util.CTRL_RECIPIENT_INTERFACE),
USB488_READ_STATUS_BYTE,
rstb_btag,
self.iface.index,
0x0003,
timeout=int(self.timeout*1000))
if (b[0] == USBTMC_STATUS_SUCCESS):
# check btag
if rstb_btag != b[1]:
raise UsbtmcException("Read status byte btag mismatch", 'read_stb')
if self.interrupt_in_ep is None:
# no interrupt channel, value is here
return b[2]
else:
# read response from interrupt channel
resp = self.interrupt_in_ep.read(2, timeout=int(self.timeout*1000))
if resp[0] != rstb_btag + 128:
raise UsbtmcException("Read status byte btag mismatch", 'read_stb')
else:
return resp[1]
else:
raise UsbtmcException("Read status failed", 'read_stb')
else:
return int(self.ask("*STB?"))
def trigger(self):
"Send trigger command"
if not self.connected:
self.open()
if self.support_trigger:
data = self.pack_usb488_trigger()
print(repr(data))
self.bulk_out_ep.write(data)
else:
self.write("*TRG")
def clear(self):
"Send clear command"
if not self.connected:
self.open()
# Send INITIATE_CLEAR
b = self.device.ctrl_transfer(
usb.util.build_request_type(usb.util.CTRL_IN, usb.util.CTRL_TYPE_CLASS, usb.util.CTRL_RECIPIENT_INTERFACE),
USBTMC_REQUEST_INITIATE_CLEAR,
0x0000,
self.iface.index,
0x0001,
timeout=int(self.timeout*1000))
if (b[0] == USBTMC_STATUS_SUCCESS):
# Initiate clear succeeded, wait for completion
while True:
# Check status
b = self.device.ctrl_transfer(
usb.util.build_request_type(usb.util.CTRL_IN, usb.util.CTRL_TYPE_CLASS, usb.util.CTRL_RECIPIENT_INTERFACE),
USBTMC_REQUEST_CHECK_CLEAR_STATUS,
0x0000,
self.iface.index,
0x0002,
timeout=int(self.timeout*1000))
if (b[0] == USBTMC_STATUS_PENDING):
time.sleep(0.1)
else:
break
# Clear halt condition
self.bulk_out_ep.clear_halt()
else:
raise UsbtmcException("Clear failed", 'clear')
def remote(self):
"Send remote command"
raise NotImplementedError()
def local(self):
"Send local command"
raise NotImplementedError()
def lock(self):
"Send lock command"
if not self.connected:
self.open()
if self.advantest_quirk:
# This Advantest/ADCMT vendor-specific control command enables remote control and must be sent before any commands are exchanged
# (otherwise READ commands will only retrieve the latest measurement)
self.advantest_locked = True
self.device.ctrl_transfer(bmRequestType=0xA1, bRequest=0xA0, wValue=0x0001, wIndex=0x0000, data_or_wLength=1)
else:
raise NotImplementedError()
def unlock(self):
"Send unlock command"
if not self.connected:
self.open()
if self.advantest_quirk:
            # This Advantest/ADCMT vendor-specific control command disables the remote-control
            # mode enabled by lock(), returning the device to local operation
self.advantest_locked = False
self.device.ctrl_transfer(bmRequestType=0xA1, bRequest=0xA0, wValue=0x0000, wIndex=0x0000, data_or_wLength=1)
else:
raise NotImplementedError()
    def advantest_read_myid(self):
        "Read MyID value from Advantest and ADCMT devices"
        if not self.connected:
            self.open()
if self.advantest_quirk:
# This Advantest/ADCMT vendor-specific control command reads the "MyID" identifier
try:
return int(self.device.ctrl_transfer(bmRequestType=0xC1, bRequest=0xF5, wValue=0x0000, wIndex=0x0000, data_or_wLength=1)[0])
except:
return None
else:
raise NotImplementedError()
def _release_kernel_driver(self, interface_number):
if os.name == 'posix':
if self.device.is_kernel_driver_active(interface_number):
self.reattach.append(interface_number)
try:
self.device.detach_kernel_driver(interface_number)
except usb.core.USBError as e:
sys.exit(
"Could not detach kernel driver from interface({0}): {1}".format(interface_number,
str(e)))
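# Minimal usage sketch (illustrative, not part of the original module). The
# vendor/product IDs below are placeholders; substitute the values reported by
# lsusb for a real instrument, or pass a VISA resource string instead.
if __name__ == '__main__':
    instr = Instrument(0x1234, 0x5678)
    try:
        print(instr.ask("*IDN?"))  # standard IEEE 488.2 identification query
    finally:
        instr.close()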
``` |
{
"source": "jhinAza/python-flask-microservice-template",
"score": 2
} |
#### File: {{cookiecutter.project}}/{{cookiecutter.project_slug}}/__init__.py
```python
from flask import Flask
from {{cookiecutter.project_slug}}.{{cookiecutter.project_slug}}_root import {{cookiecutter.project_slug}}
__author__ = """{{cookiecutter.mantainer_name}}"""
__email__ = '{{cookiecutter.mantainer_email}}'
__version__ = '0.1.0'
def create_app():
app = Flask(__name__)
app.register_blueprint({{cookiecutter.project_slug}})
return app
```
#### File: python-flask-microservice-template/{{cookiecutter.project}}/main.py
```python
import {{cookiecutter.project_slug}}
app = {{cookiecutter.project_slug}}.create_app()
def main():
app.run(host="127.0.0.1", port={{cookiecutter.port}}, debug=True)
if __name__ == '__main__':
main()
``` |
{
"source": "jhinsdale/arduino-rgb-matrix",
"score": 3
} |
#### File: arduino-rgb-matrix/font/horiz_text.py
```python
from collections import deque
import ledfont
from font_util import *
# Panel array of scrolling lines
class TextPanel:
# Init with message, color pairs
def __init__(self, pairs = []):
self._lines = []
self._nlines = len(pairs)
for i in range(self._nlines):
self._lines.append(HorizText(pairs[i][0], pairs[i][1]))
# Get line
def get_line(self, i):
return self._lines[i]
# Scroll all lines left
def scroll(self):
for i in range(self._nlines):
self._lines[i].scroll()
# Horizontal text scroll object. Streams horizontally, maintaining
# a FIFO queue of pixel bit columns. Other state: the message text,
# color, replacement (next) message.
class HorizText:
# Init with message
def __init__(self, msg = None, color = None):
self._height = 8
self._message = msg
self._color = color
self._wrapped_message_space = 5
self._fontmap = ledfont.FONT_MAP
self._cursor = 0
self._bitcols = deque()
self._curline = deque()
# Init current line
for i in range(32):
self._curline.append(self.get_bitcol())
# Return current line bit columns
def get_line_bitcols(self):
return self._curline
# Get the color
def get_color(self):
return self._color
# Scroll current line left
def scroll(self):
self._curline.popleft()
self._curline.append(self.get_bitcol())
# Shift pixel column for this scrolling line and return next one
def get_bitcol(self):
if not self._message:
self._bitcols.append(0)
# If need to get more pixel columns, get and append
elif not self._bitcols:
(letter, wrapped) = self._fetch_letter()
bitcol_arr = get_letter_bitcols(letter)
for bc in bitcol_arr:
self._bitcols.append(bc)
# Also append inter-letter blank column
self._bitcols.append(0)
if wrapped:
for wrapcol in range(self._wrapped_message_space):
self._bitcols.append(0)
return self._bitcols.popleft()
# Set the message for line #i
def set_message(self, msg):
self._message = msg
self._cursor = 0
# Fetch the next letter from message #i
def _fetch_letter(self):
result = self._message[self._cursor]
self._cursor += 1
# Wrap around to beginning of message
wrapped = False
if self._cursor >= len(self._message):
self._cursor = 0
wrapped = True
return (result, wrapped)
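# Illustrative usage sketch (not part of the original module; the messages and
# colour values below are placeholders whose exact format depends on the caller):
#
#     panel = TextPanel([("HELLO", (255, 0, 0)), ("WORLD", (0, 255, 0))])
#     while True:                       # once per display refresh
#         panel.scroll()                # shift every line left by one pixel column
#         for i in range(2):
#             line = panel.get_line(i)
#             bitcols = line.get_line_bitcols()   # 32 column bitmasks to draw
#             colour = line.get_color()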
```
#### File: arduino-rgb-matrix/py/usb_button.py
```python
import select
from evdev import InputDevice, list_devices, categorize, ecodes, KeyEvent
# USB device needs to have this, uniquely, in its name
BUTTON_DEVICE_DISTINGUISHING_NAME = "Delcom"
# The InputDevice object
BUTTON_DEVICE = None
# Currently pressed button
BUTTON_CURRENTLY_DOWN = None
# Map key code to color
CODE_TO_COLOR = {
'KEY_Z': 'red',
'KEY_C': 'green',
'KEY_COMMA': 'blue',
'KEY_SLASH': 'yellow',
}
# Button state codes 0=up 1=down 2=hold
BUTTON_STATES = ["Up", "Down", "Hold"]
# Get a button event, timing out, filtering out lots of stuff and only
# returning a strict sequence of button-down, button-up events for a
# specific button at a time. Overlapping events for multiple buttons
# are discarded.
def get_button_event(timeout):
global BUTTON_DEVICE
global BUTTON_CURRENTLY_DOWN
dev = BUTTON_DEVICE
if not BUTTON_DEVICE:
devices = map(InputDevice, list_devices())
for d in devices:
if BUTTON_DEVICE_DISTINGUISHING_NAME in d.name:
if dev:
raise Exception("More than one device name containing '" + BUTTON_DEVICE_DISTINGUISHING_NAME + "'")
dev = d
# Grab the device so we are the only one using it
        dev.grab()
        # Remember the device globally so subsequent calls skip re-enumeration and re-grabbing
        BUTTON_DEVICE = dev
fileno = dev.fileno()
while True:
r, w, e = select.select([fileno], [], [], timeout)
if fileno not in r:
# Timed out
break
# Delcom sends EV_KEY, EV_SYN and EV_MSC
event = dev.read_one()
if event.type != ecodes.EV_KEY:
continue
kev = KeyEvent(event)
state = kev.keystate # 0=up 1=down 2=hold
# Ignore hold events
if state == 2:
continue
# Ignore a down-press when another is already down
if BUTTON_CURRENTLY_DOWN and state == 1:
continue
# Ignore an up event if nothing is known to be down
if not BUTTON_CURRENTLY_DOWN and state == 0:
continue
color = CODE_TO_COLOR[kev.keycode]
# Ignore an up event if it is not the one currently down
if state == 0 and BUTTON_CURRENTLY_DOWN and color != BUTTON_CURRENTLY_DOWN:
continue
# Record button currently down
if state == 1:
BUTTON_CURRENTLY_DOWN = color
# Record button released
if state == 0:
BUTTON_CURRENTLY_DOWN = None
        return { 'button': color, 'action': BUTTON_STATES[state] }
# Timed out
return None
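# Illustrative polling loop (not part of the original module): wait up to one
# second per call and print each Down/Up event for the four coloured buttons.
if __name__ == '__main__':
    while True:
        button_event = get_button_event(1.0)
        if button_event is not None:
            print("{button} {action}".format(**button_event))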
``` |
{
"source": "jhintr/lxgw-fonttools",
"score": 3
} |
#### File: jhintr/lxgw-fonttools/unicode.py
```python
import json
def supplement_range(unicode_range: str):
"""Returns supplemented range.
Parameters:
unicode_range (str): string looks like `U+1f1f7-1f1ff, U+1f21a`,
some items use `-` to omit middle characters
Returns:
List of supplemented characters
"""
range_list = unicode_range.split(",")
range_list = [r.strip()[2:] for r in range_list]
result = ["U+" + i for i in range_list if "-" not in i]
for u in [i for i in range_list if "-" in i]:
chars = u.split("-")
c0 = int(chars[0], base=16)
if len(chars[0]) > len(chars[1]):
diff = len(chars[0]) - len(chars[1])
chars[1] = chars[0][:diff] + chars[1]
c1 = int(chars[1], base=16)
t_list = ["U+" + hex(j)[2:] for j in range(c0, c1 + 1)]
result += t_list
return result
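# Worked example (illustrative): supplement_range("U+4e00-4e03, U+9fff") keeps the
# single code point and expands the dash-compressed item, returning
# ["U+9fff", "U+4e00", "U+4e01", "U+4e02", "U+4e03"].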
def get_missing_cjk():
"""Get missing characters in `U+4e00 ~ U+9fff`.
Returns:
sorted list of cjk characters.
"""
cjk_dict = {hex(i).replace("0x", "U+"): 0 for i in range(0x4E00, 0x9FFF + 1)}
with open("unicode.json") as f:
range_dict = json.load(f)
for subset, unicode_range in range_dict.items():
sr = supplement_range(unicode_range)
for i in sr:
if i in cjk_dict:
cjk_dict[i] = 1
missing = sorted([k for k, v in cjk_dict.items() if v == 0])
return missing
def make_ex_json():
"""Make extended json from `missing_cjk`
Returns:
json file for missing cjk characters
"""
missing_cjk = get_missing_cjk()
missing_dict = {}
range_num = 150
count = 0
while count * range_num < len(missing_cjk):
_cjk = missing_cjk[count * range_num : (count + 1) * range_num]
missing_dict[f"ex{count}"] = ", ".join(_cjk)
count += 1
with open("unicode_ex.json", "w") as outfile:
json.dump(missing_dict, outfile, indent=2)
if __name__ == "__main__":
make_ex_json()
``` |
{
"source": "jhird11/SimpleSwarm",
"score": 4
} |
#### File: jhird11/SimpleSwarm/SimulationWorld.py
```python
import copy
import time
import sys
import os
import pickle
from functools import partial
import numpy as np
import scipy.spatial
from SimulationRobot import SimulationRobot
from matplotlib import pyplot as plt
import matplotlib.animation as animation
import matplotlib.patches as patches
class SimulationWorld:
"""
The main class which represents the simulation enviroment.
"""
def __init__(self):
# Time between each simulation step
self.dt = 0.01
#Counts the number of simulation steps executed
self.t_step = 0
#List of all robots in the simulation world
self.robot_list = []
        #Determines the outer bounds of the world area in the form of (lowest x, highest x, lowest y, highest y)
self.barriers = np.array((-10.0,10.0,-10.0,10.0))
        #Size of bins used to divide up the space for collision detection (used by init_physics and the collision grid)
        self.bin_size = 1.0
        #How many times should we execute binary search to resolve collisions, high numbers mean higher accuracy but will take longer
self.max_col_resolve_depth = 2
#Enable collision detection between robots
self.robot_collisions = True
#Enable calculation of neighbours of robots
self.calculate_neighbours = True
#How long the simulation will run for in time steps
self.total_steps_num = 0
#Log data with this period, should be a multiple of dt
self.data_log_period = 0.1
#Log used to store simulation information (currently robot positions and rotations)
self.data_log = None
def check_barriers(self,robot_radius,robot_position):
"""
Checks if a robot is in collision with the world's outer barriers
Parameters
----------
        robot_radius : float
Radius of robot
robot_position : np.array
x,y position of robot
Returns
------
bool
True if robot is in collision with outer barriers
"""
        #calculates the configuration space where the robot can be without being in collision with the barriers
#can be precalculated if all robots are the same size
self.c_space_top_y = self.barriers[3] - robot_radius
self.c_space_bottom_y = self.barriers[2] + robot_radius
self.c_space_right_x = self.barriers[1] - robot_radius
self.c_space_left_x = self.barriers[0] + robot_radius
return (robot_position[0] <= self.c_space_left_x or robot_position[0] >= self.c_space_right_x or robot_position[1] >= self.c_space_top_y or robot_position[1] <= self.c_space_bottom_y)
def check_robot_collision(self,robot1_pos,robot2_pos,robot1_radius,robot2_radius):
"""Checks if robots 1 and 2 are in collision
Parameters
----------
robot1_pos : np.array
x,y position of robot 1
robot2_pos : np.array
x,y position of robot 2
robot1_radius : float
Radius of robot 1
robot2_radius : float
Radius of robot 2
Returns
------
bool
True if robots are in collision with each other, otherwise False
"""
return np.sum(np.power((robot2_pos-robot1_pos),2.0)) < np.power(robot1_radius + robot2_radius,2.0)
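        # Worked example (illustrative): two robots of radius 0.1 at (0, 0) and
        # (0.15, 0) are in collision, since 0.15**2 = 0.0225 < (0.1 + 0.1)**2 = 0.04.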
def solve_collison(self,robot1,collision_list,last_free_dt,last_collision_dt,depth = 0):
"""
Determines the latest time between time steps a robot wasn't in collision
Parameters
----------
robot1 : SimulationRobot
The robot we're solving collisions for
collision_list : list
The list of robots this robot could be in collision with
last_free_dt : float
            The latest time we know the robot isn't in collision (starts at 0, i.e. the end of the previous timestep)
last_collision_dt : float
The earliest time we know the robot is in collision (starts at dt ie. end of the current time step)
depth : float
The number of times we have called this function for a given robot, will terminate the binary search after max_col_resolve_depth iterations
Returns
------
float
The latest time the robot wasn't in collision relative to the start of the timestep
"""
depth+=1
#Terminate the search if we've reached max number of iterations of the search
if depth >= self.max_col_resolve_depth:
return last_free_dt
#test dt is midway between the times we know the robot isn't in collision and the time we known is in collision
test_dt = ((last_collision_dt-last_free_dt)/2.0+last_free_dt)
        #prev_position is the position of the robot at the start of the time step
robot1_tpos = robot1.prev_position + test_dt*robot1.velocity
#check new robot's position if it is in collision with robots or barriers
robot_check = False
if self.robot_collisions:
for robot2_index in collision_list:
robot_check |= self.check_robot_collision(robot1_tpos,self.robot_list[robot2_index].position,robot1.robot_params["radius"] ,self.robot_list[robot2_index].robot_params["radius"] )
if (robot_check or self.check_barriers(robot1.robot_params["radius"] ,robot1_tpos)):
last_collision_dt = test_dt
else:
last_free_dt = test_dt
return self.solve_collison(robot1,collision_list,last_free_dt,last_collision_dt,depth)
def populate(self,num_robots,r_template):
"""
Adds robots into the world, each robot is a deepcopy of r_template
Parameters
----------
num_robots : int
Number of robots to add to the world
r_template : SimulationRobot
The robot the world will be populated with
"""
self.num_robots = num_robots
robot_index = 0
for i in range(num_robots):
r = copy.deepcopy(r_template)
r.robot_index = robot_index
self.robot_list.append(r)
robot_index+=1
def init_data_log(self,steps_num):
"""
Initialises data log to length determined by steps_num
Data log is initialised to zero so if not all simulation steps are executed then data log after t_step will not be valid
        The data log is of the shape (num_robots, data_log_length, 4)
        For a given robot at a given data index eg. data_log[0,0,:] = [robot x, robot y, robot rotation, robot state]
        Data is logged every data_log_period in simulation time. If this is smaller than the simulation timestep (dt) then data_log_period will equal dt
        and data will be logged every timestep. You may wish to make data_log_period greater than dt for large numbers of simulation steps or if you are only interested in the final state of the simulation
        data_log_period will be rounded down to the nearest multiple of dt and should be set before this function is called
Parameters
----------
steps_num : int
Number of steps the simulation is going to run for
"""
self.total_steps_num = steps_num
self.log_period_steps = int(np.max((1.0,np.floor(self.data_log_period /self.dt))))
self.data_log_period = self.log_period_steps*self.dt
log_length = int(np.ceil(float(self.total_steps_num)/self.log_period_steps))
self.data_log = np.zeros((self.num_robots,log_length,4))
if self.calculate_neighbours:
self.current_robot_states = np.zeros((self.num_robots,4))
robot_index = 0
for r in self.robot_list:
self.data_log[robot_index,0,:2] = r.position[:2]
self.data_log[robot_index,0,2] = r.rotation
self.data_log[robot_index,0,3] = r.robot_state
if self.calculate_neighbours:
self.current_robot_states[robot_index,:2] = r.position[:2]
self.current_robot_states[robot_index,2] = r.rotation
self.current_robot_states[robot_index,3] = r.robot_state
r.on_sim_start()
robot_index+=1
self.data_log_index = 1
self.t_step=1
def get_data_log_index_at_time(self,time):
"""
Returns the index of the data_log closest to "time"
NOTE:
time is rounded down to the nearest time when data was logged. So the data log entry will never occur after the specified time
Parameters
----------
time : float
Simulation time in seconds to get the data index for
Returns
----------
int
data_log index
"""
return np.floor(time/self.data_log_period).astype('int')
def get_data_log_at_time(self,time):
"""
Returns the data_log entry coresponding to the simulation time specified by time
NOTE
time is rounded down to the nearest time when data was logged. So the data log entry will never occur after the specified time
Parameters
----------
time : float
Simulation time in seconds to get the data index for
Returns
----------
np.array
data_log entry at "time"
"""
return self.get_data_log_by_index(self.get_data_log_index_at_time(time),self.get_data_log_index_at_time(time)+1)
def get_data_log_by_index(self,index_start,index_end):
"""
Accesses the data log between index_start and index_end (non inclusive). Indexes are clipped so they remain in the simulation period.
This means that accessing the data log using this function at indexes beyond the end of the simulation will result in accessing the data log at the last data_log entry.
See get_data_log_index_at_time to convert between simulation time and data log indexes
Parameters
----------
index_start : index of the data log to begin access at
index_end : index of the data log to end access at (not included)
Returns
----------
np.array
datalog between these indexes (None if indexes are invalid or equal)
"""
if index_end > index_start:
clipped_indexes = np.clip(range(index_start,index_end),0,self.data_log.shape[1]-1)
return self.data_log[:,clipped_indexes,:]
else:
return None
def init_physics(self,maximum_neighbour_size = 1.0):
"""
Initialises the collision grid. Should be called after setting bin_size and barriers
"""
self.neighbour_bin_half_size = maximum_neighbour_size/self.bin_size
self.bin_layout = np.array((np.ceil((self.barriers[1]-self.barriers[0])/self.bin_size),np.ceil((self.barriers[3]-self.barriers[2])/self.bin_size)),dtype = 'int')
def assign_robot_to_bin(self,robot):
"""
        Assigns a robot to the collision grid. Each robot's position in the collision grid is stored in the robot's bin_index attribute
Returns
------
tuple(int,int)
The robot's position the collision grid
"""
bin_num_x = np.floor((robot.position[0]-self.barriers[0])/self.bin_size)
bin_num_y = np.floor((robot.position[1]-self.barriers[2])/self.bin_size)
robot.bin_index = np.array((bin_num_x,bin_num_y))
robot.bin_index = np.clip(robot.bin_index,np.zeros(2),self.bin_layout-1)
robot.bin_index = (int(robot.bin_index[0]),int(robot.bin_index[1]))
return robot.bin_index
def get_robots_collision_list(self,robot):
"""
Compiles a list of robots the robot could be in collision with
Parameters
----------
robot : SimulationRobot
            Robot to compile a collision list for
        Returns
        ---------
        list
            List of robot indexes the robot could be in collision with
"""
collison_list = []
        for bin_x_index in [robot.bin_index[0]-1,robot.bin_index[0],robot.bin_index[0]+1]: # check the 3x3 block of bins around the robot's own bin
for bin_y_index in [robot.bin_index[1]-1,robot.bin_index[1],robot.bin_index[1]+1]:
if (bin_x_index >= 0 and bin_y_index >= 0 and bin_x_index < self.bin_layout[0] and bin_y_index < self.bin_layout[1]):
collison_list+=(self.robot_bins[bin_x_index][bin_y_index][:])
return collison_list
def arrange(self,mode = "smallest_square", **kwargs):
"""
Arranges the robots into a starting configuration. Should be called just before the first time_step()
NOTE:
        This method does not prevent invalid configurations e.g. robots outside the arena, but it will print an error message if robots start off in collision
        Parameters
        ---------
        mode : str
            Determines how the robots are arranged
            "smallest_square" will organise the robots in the smallest possible square centered on center_pos
            You can also specify the size of the area robots are distributed in using the "uniform_box" mode and box_size
            Robots are separated by at least robot_separation. added_noise can be used to make the robot arrangement less regular by adding a random offset to each robot's position.
        **kwargs
            center_pos : tuple(float,float)
                Centre of the box the robots are to be arranged in
            robot_separation : float
                Distance between the edges of the robots rather than their centres. For each mode this is the minimum guaranteed distance not accounting for added_noise
            added_noise : float
                Magnitude of the random offset added to each robot. Should be less than robot_separation to avoid collisions
            box_size : tuple(float,float)
                Only required for "uniform_box" mode, determines the size of the area to arrange robots in
        NOTE:
            Both "smallest_square" and "uniform_box" modes assume all robots are the same size, based on the first robot in robot_list
            Robots are given uniformly distributed random rotations in both modes
"""
if mode == "smallest_square" or mode == "uniform_box":
boxpos = kwargs["center_pos"]
robot_spacing = kwargs["robot_separation"]
rand_amount = kwargs.setdefault("added_noise",0.0)
robot_index = 0
robot_spacing/=2
robot_spacing+= self.robot_list[0].robot_params["radius"]
robot_spacing*=2
if mode == "smallest_square":
boxsize = robot_spacing*np.ceil(np.sqrt(self.num_robots))*np.ones(2) + robot_spacing/2
elif mode == "uniform_box":
self.sep_dist = self.robot_list[0].robot_params["radius"]*2
boxsize = kwargs["box_size"]
grid_width = int(np.floor(boxsize[0]/robot_spacing))#Assumes same size of robots
grid_height = int(np.floor(boxsize[1]/robot_spacing))
if (grid_width*grid_height) < self.num_robots:
print("Box not big enough {} spaces {} robots".format((grid_width*grid_height),self.num_robots))
#print("{} spaces in box of size {}".format(grid_width*grid_height,boxsize))
grid_points = [ (row,col) for row in range(grid_width) for col in range(grid_height)]
grid_points = np.array(grid_points)
extra_space = boxsize - np.array((grid_width-1,grid_height-1),dtype='float')*robot_spacing
            np.random.shuffle(grid_points)
for r in self.robot_list:
r.rotation = np.random.uniform(-np.pi,np.pi)
r.position = boxpos - np.array(boxsize)/2.0 + grid_points[robot_index]*robot_spacing +extra_space/2 + np.random.uniform(-rand_amount,rand_amount,(2,))
robot_index+=1
        ##################Assign robots to collision bins####################
self.robot_bins = [ [ [] for i in range(int(self.bin_layout[1])) ] for i in range(int(self.bin_layout[0])) ]
robot_index = 0
for r in self.robot_list:
self.assign_robot_to_bin(r)
self.robot_bins[r.bin_index[0]][r.bin_index[1]].append(robot_index)
robot_index+=1
#Robots must not start off in collision or collision resolution won't work
in_collision_r = False
in_collision_b = False
for r in self.robot_list:
in_collision_b = self.check_barriers(r.robot_params["radius"] ,r.position)
for r2 in self.robot_list:
if not r is r2:
in_collision_r = in_collision_r or self.check_robot_collision(r.position,r2.position,r.robot_params["radius"],r2.robot_params["radius"])
if in_collision_r or in_collision_b:
break
if in_collision_r or in_collision_b:
print("After arranging robots in the world, they are in collision!")
print("In collision with robots? {} Outside world bounds? {}".format(in_collision_r,in_collision_b))
# self.init_data_log(1)
# self.plot_world(0,physics_debug = True)
# plt.show()
def time_step(self):
"""
        Executes one time step of the simulation
"""
if self.t_step >= self.total_steps_num:
print("t_step > {} too large for data log".format(self.total_steps_num))
logging_step = self.t_step%self.log_period_steps == 0
robot_index = 0
###############Update position of each robot#################
        #This dictionary could hold any data from the world when it makes sense to pre-calculate it once for all robots, rather than have each robot query the world class during its control update
self.world_sense_data = {}
if self.calculate_neighbours:
current_robot_state = self.current_robot_states.copy()
self.world_sense_data["current_robot_poses"] = current_robot_state[:,:3]#We copy this as it is changed in the loop below.
            #calculates the distance matrix where element i,j represents the distance between robot i and robot j. This matrix is symmetric so that element i,j is equal to element j,i
#eg1. To get this distance between robot 0 and robot 5 this would be self.world_sense_data["robot_distances"][0,5]
#eg2. To get the distance between robot 0 and all other robots this would be self.world_sense_data["robot_distances"][0,:]
self.world_sense_data["robot_distances"] = scipy.spatial.distance.cdist( self.world_sense_data["current_robot_poses"][:,:2],self.world_sense_data["current_robot_poses"][:,:2])
self.world_sense_data["current_robot_states"] = current_robot_state[:,3]
for r in self.robot_list:
#Move each robot according to velocity
r.movement_update(self.dt)
#Update robot logic
r.control_update(self.dt,self)
#collision detection
in_collision_b = self.check_barriers(r.robot_params["radius"] ,r.position)
in_collision_r = False
            #create a list of robots this robot might be in collision with, if collisions are enabled
if self.robot_collisions:
collision_list = self.get_robots_collision_list(r)
collision_list.remove(robot_index)
else:
collision_list = []
            #check the list of robots we might be in collision with, unless we're already in collision with a barrier
if not in_collision_b and self.robot_collisions:
for i in collision_list:
r2 = self.robot_list[i]
in_collision_r = self.check_robot_collision(r.position,r2.position,r.robot_params["radius"] ,r2.robot_params["radius"])
if in_collision_r:
break
in_collision = in_collision_b or in_collision_r
if in_collision:
#resolve the collision using binary search
solved_dt = self.solve_collison(r,collision_list,0.0,self.dt,depth = 0)
r.position = r.prev_position + r.velocity*solved_dt
            #### Reassign robots to their new collision bin based on new location###
self.robot_bins[r.bin_index[0]][r.bin_index[1]].remove(robot_index)
bin_index = self.assign_robot_to_bin(r)
self.robot_bins[r.bin_index[0]][r.bin_index[1]].append(robot_index)
#Log data, could add other measures here such as robot states etc.
#The data log doesn't log every time step allowing for smaller filesizes for long simulations
if logging_step:
self.data_log[robot_index,self.data_log_index,:2] = r.position[:2]
self.data_log[robot_index,self.data_log_index,2] = r.rotation
self.data_log[robot_index,self.data_log_index,3] = r.robot_state
if self.calculate_neighbours:
self.current_robot_states[robot_index,:2] = r.position[:2]
self.current_robot_states[robot_index,2] = r.rotation
self.current_robot_states[robot_index,3] = r.robot_state
robot_index+=1
if logging_step:
self.data_log_index+=1
self.t_step+=1
def save(self,file_path):
"""
Saves the world to a pickle file at the filepath
Parameters
----------
file_path : str
The file path to where the world should be saved. Should include the file extension
"""
with open(file_path, "wb+") as handle:
pickle.dump(self,handle)
def load(self,file_path):
"""
Loads the world from a pickle file at the filepath
Parameters
----------
file_path : str
The file path to where the world will be loaded from. Should include the file extension
Returns
---------
SimulationWorld
The loaded SimulationWorld
"""
with open(file_path, 'rb') as handle:
return pickle.load(handle)
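# Illustrative end-to-end sketch (not part of the original module). The
# SimulationRobot template below is schematic, since its constructor is defined
# elsewhere; only the SimulationWorld calls reflect the API above.
#
#     world = SimulationWorld()
#     world.barriers = np.array((-5.0, 5.0, -5.0, 5.0))
#     world.bin_size = 1.0
#     world.populate(25, template_robot)        # template_robot: a configured SimulationRobot
#     world.init_physics()
#     world.arrange(mode="smallest_square", center_pos=(0.0, 0.0),
#                   robot_separation=0.1, added_noise=0.02)
#     steps = 1000
#     world.init_data_log(steps)
#     for _ in range(steps - 1):
#         world.time_step()
#     world.save("sim_run.pkl")                 # hypothetical output path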
class WorldAnimation():
"""
Class for producing animations of simulations
    Press u while the animation is running to pause it, press again to resume playback
    Press y and i to skip one timestep backwards/forwards when paused
    Animations can also be exported to .mp4 by passing a "save_path" to start_animation, but this requires that the codecs are installed on the machine
"""
def __init__(self,world,**kwargs):
"""
Initialises the WorldAnimation class
NOTE:
This class will create its own figure
Parameters
---------
world : SimulationWorld
The world you want to animate
**kwargs
robot_trail_length : str
Controls the length of the trail behind each robot in terms of timesteps (default 0)
robot_trail_width : float
Controls the width of robot trails (default = 0.1)
robot_state_cmap : dict
                Maps robot states (integers) to valid matplotlib colour codes. dict should be in the form {robot_state : colour}
            robot_labels : bool
                If true will label each robot with its robot_index (Default = False)
            fast_plot : bool
                If true will disable robot trails, robot labels and simplify the shape used to represent each robot
view_collision_bins : bool
If true will plot the collision bins as dotted lines on the world (Default = False)
viewing_bounds : tuple
Sets the viewing window of the plot in world co-ordinates in the form (min_x,max_x,min_y,max_y) defaults to 10% larger than the world's barriers
"""
self.world = world
self.pause_toggle = False
self.internal_time_step = 0
self.saving_to_file = False
self.figure = plt.figure()
self.figure.canvas.mpl_connect('key_press_event', self.key_press_handler)
blank_arr = np.zeros((1,self.world.num_robots))
self.robot_artists = []
#Set values for **kwargs if they are not specified
if "robot_trail_length" in kwargs and kwargs["robot_trail_length"]!=0:
self.trail_length = int(kwargs["robot_trail_length"])
self.enable_trails = True
else:
self.enable_trails = False
self.trail_length = 0
if "robot_state_cmap" in kwargs:
self.r_state_cmap = kwargs["robot_state_cmap"]
else:
self.r_state_cmap = { 0 : 'dimgrey',
1 : 'palegreen',
2 : 'lightcoral',
3 : 'blue'}
self.enable_labels = kwargs.setdefault("robot_labels",False)
#Creates patches and trails for each robot based on if they are enabled or not
robot_index = 0
for r in self.world.robot_list:
if "fast_plot" in kwargs and kwargs["fast_plot"] == True:
self.fast_plot = True
body_patch = patches.CirclePolygon((0,0),self.world.robot_list[robot_index].robot_params["radius"],resolution = 5,linewidth = kwargs.setdefault("robot_body_width",0.1))
direction_patch = None
robot_trail = None
robot_label = None
self.enable_trails = False
self.enable_labels = False
else:
self.fast_plot = False
body_patch = patches.Circle((0,0),self.world.robot_list[robot_index].robot_params["radius"],linewidth = kwargs.setdefault("robot_body_width",0.1))
direction_patch = patches.Wedge((0,0), self.world.robot_list[robot_index].robot_params["radius"], -15, 15,color = 'black')
if self.enable_trails:
robot_trail = plt.plot([], [],linewidth = kwargs.setdefault("robot_trail_width",0.1))[0]
else:
robot_trail = None
if self.enable_labels:
robot_label = plt.text(0.0, 0.0, "r{}".format(robot_index),clip_on = True)
else:
robot_label = None
self.robot_artists.append((body_patch,direction_patch,robot_trail,robot_label))
robot_index+=1
self.ax = plt.gca()
self.ax.set_aspect('equal')
for artist_group in self.robot_artists:
self.ax.add_artist(artist_group[0])
if not artist_group[1] is None:
self.ax.add_artist(artist_group[1])
#Plot static elements such as barrier lines and colision bins
p1 = np.array((world.barriers[0],world.barriers[2]))
p2 = np.array((world.barriers[1],world.barriers[2]))
p3 = np.array((world.barriers[1],world.barriers[3]))
p4 = np.array((world.barriers[0],world.barriers[3]))
barrier_line = np.array((p1,p2,p3,p4,p1))
plt.plot(barrier_line[:,0],barrier_line[:,1])
if "view_collision_bins" in kwargs and kwargs["view_collision_bins"] == True:
for x in range(self.world.bin_layout[0]):
plt.plot((x*self.world.bin_size+self.world.barriers[0],x*self.world.bin_size+world.barriers[0]),(self.world.barriers[2],self.world.barriers[3]),linestyle= '--',linewidth = 0.5,color = 'black')
for y in range(self.world.bin_layout[1]):
plt.plot((self.world.barriers[0],self.world.barriers[1]),(y*self.world.bin_size+world.barriers[2],y*self.world.bin_size+self.world.barriers[2]),linestyle= '--',linewidth = 0.5,color = 'black')
if "viewing_bounds" in kwargs:
plt.xlim((kwargs["viewing_bounds"][0],kwargs["viewing_bounds"][1]))
plt.ylim((kwargs["viewing_bounds"][2],kwargs["viewing_bounds"][3]))
else:
plt.xlim((self.world.barriers[0]*1.1,self.world.barriers[1]*1.1))
plt.ylim((self.world.barriers[2]*1.1,self.world.barriers[3]*1.1))
self.time_text = plt.text(0.025, 1.01, "t = 0.0s", transform = self.ax.transAxes, color = 'black')
self.rendering_stats = False
def update_robot_patch(self,robot_pose,body_patch,direction_patch):
"""
Updates the plotting elements representing the robot
Parameters
---------
robot_pose : np,array
Robot's pose in the form (x,y,rotation)
body_patch : matplotlib.patches.Circle or matplotlib.patches.CirclePolygon depending on fast_plot
Plotting element representing robot's body
direction_patch : matplotlib.patches.Wedge or None depending on fast_plot
Plotting element representing robot's direction
"""
if not self.fast_plot:
body_patch.center = tuple(robot_pose[:2])
dir_patch_angle = robot_pose[2]
dir_patch_pos = robot_pose[:2] + 0.5*body_patch.radius*np.array((np.cos(dir_patch_angle),np.sin(dir_patch_angle)))
direction_patch.set_center(tuple(dir_patch_pos))
direction_patch.theta1 = np.rad2deg(dir_patch_angle+np.pi) - 15
direction_patch.theta2 = np.rad2deg(dir_patch_angle+np.pi) + 15
else:
body_patch.xy = tuple(robot_pose[:2])
def key_press_handler(self,event):
"""
Key Handler call back for the figure
Parameters
---------
event : KeyEvent
Event passed from the figure
"""
sys.stdout.flush()
if event.key == 'u':
self.pause_toggle = not self.pause_toggle
print("pause_toggle {}".format(self.pause_toggle))
if event.key == 'y':
if self.pause_toggle:
self.increase_internal_time(-self.world.data_log_period)
if event.key == 'i':
if self.pause_toggle:
self.increase_internal_time(self.world.data_log_period)
def update_plot(self,time):
"""
Updates the plot elements to reflect the world at time_step
Parameters
---------
time : float
Simulation time to plot the world at
"""
self.time_text.set_text("t = {:4.2f}s".format(time))
robot_index = 0
trail_start_index = np.clip(self.world.get_data_log_index_at_time(time - self.trail_length),0,None)
current_data_log_index = self.world.get_data_log_index_at_time(time)
trail_data = self.world.get_data_log_by_index(trail_start_index,current_data_log_index)
for artist_group in self.robot_artists:
robot_data = self.world.get_data_log_at_time(time)[robot_index][0,:]
self.update_robot_patch(robot_data[:3],artist_group[0],artist_group[1])
if not self.r_state_cmap is None:
artist_group[0].set_facecolor(self.r_state_cmap.setdefault(robot_data[3],'red'))
if self.enable_trails and not trail_data is None:
artist_group[2].set_data(trail_data[robot_index,:,:2].T)
if self.enable_labels:
artist_group[3].set_position(tuple(robot_data[:2]))
robot_index+=1
def plot_snapshot(self,time):
"""
        Plots a snapshot of the world at a particular simulation time. Equivalent to calling WorldAnimation.update_plot(time)
        Parameters
        ---------
        time : float
            Simulation time to plot the world at
"""
self.update_plot(time)
def increase_internal_time(self,increase):
"""
Increases the internal time counter and performs wrapping between start and end times of the animation
Parameters
---------
increase : float
Amount to increase the counter by (can be negative)
"""
self.internal_time_counter += increase
if self.internal_time_counter > self.final_time:
self.internal_time_counter = self.start_time
if self.internal_time_counter < self.start_time:
self.internal_time_counter = self.final_time
def animation_callback(self,frame_time):
"""
Callback for FuncAnimation
Parameters
---------
frame_time : float
            UNUSED - Required argument from FuncAnimation; WorldAnimation uses an internal counter to allow for pausing and skipping functionality.
"""
self.update_plot(self.internal_time_counter)
#Increases internal counter if not paused
if not self.pause_toggle:
self.increase_internal_time(self.time_inc)
if self.saving_to_file:
print("Saving animation... {:4.2f}s out of {:4.2f}s = {:4.2f}%".format(self.internal_time_counter,self.final_time,((self.internal_time_counter-self.start_time)/self.time_inc)/self.rendering_times.shape[0]*100.0))
elif self.rendering_stats:
print("Animating drawing speed = {:4.2f} * desired_speed".format((time.time()-self.frame_start_time)/self.interval))
self.frame_start_time = time.time()
def start_animation(self,start_time = None,final_time = None, time_between_frames = None, speed = 1.0,save_path = None):
"""
        Starts the animation by calling FuncAnimation, which then repeatedly calls animation_callback to animate the plot
Parameters
----------
start_time : float
            Start time of the animation, will default to zero (the start of the simulation)
        final_time : float
            End time of the animation, will default to the end of the simulation
        time_between_frames : float
            The time between each rendered frame (in terms of simulation time); if None, defaults to the world's data log period. For best results this should be a multiple of the world's data log period
        speed : float
            Playback speed of the animation relative to simulation time, i.e. 1.0 plays back in real time, while 2.0 plays back at double speed.
            NOTE - The figure animation may run slower than this due to the large number of plotting elements to update, but when saving to mp4 the animation will play back correctly
            It might be best to increase time_between_frames (rendering fewer frames) at higher playback speeds
        save_path : str
            Saves the animation in .mp4 format to this path
"""
if start_time is None:
self.start_time = 0.0
else:
self.start_time = start_time
if final_time is None:
self.final_time = self.world.t_step*self.world.dt
else:
self.final_time = final_time
time_invalid = False
if self.start_time < 0.0:
print("Start time less than zero!")
time_invalid = True
if self.final_time < 0.0:
print("Final time less than zero!")
time_invalid = True
if self.start_time > self.final_time:
print("Start time before final time!")
time_invalid = True
if time_invalid:
print("Invalid start or end time for animation! Defaulting to full simulaton period")
self.start_time = 0.0
self.final_time = self.world.t_step*self.world.dt
if time_between_frames is None:
self.time_inc = self.world.data_log_period
else:
self.time_inc = time_between_frames
self.internal_time_counter = self.start_time
self.rendering_times = np.arange(self.start_time,self.final_time,self.time_inc)
self.interval = self.time_inc/speed
init_func = partial(self.update_plot,0.0)
sim_ani = animation.FuncAnimation(self.figure,self.animation_callback,self.rendering_times, init_func= init_func,fargs=None,interval = self.interval*1000, blit=False)
if save_path is not None:
FFwriter = animation.FFMpegWriter(fps = speed/self.time_inc,extra_args=['-vcodec', 'libx264'])
self.saving_to_file = True
sim_ani.save(save_path, writer = FFwriter)
self.saving_to_file = False
if self.rendering_stats:
self.frame_start_time = time.time()
print("Starting Animation...")
print("Press t while animation is running to pause it, press again to resume playback")
print("Press r and y to skip one timestep forwards/backwards when paused")
plt.show()
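# Illustrative usage sketch (not part of the original module), following on from a
# completed SimulationWorld run held in `world`:
#
#     anim = WorldAnimation(world, robot_trail_length=100, robot_labels=True)
#     anim.start_animation(speed=2.0)                         # interactive playback
#     # anim.start_animation(speed=2.0, save_path="run.mp4")  # needs ffmpeg/libx264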
``` |
{
"source": "jhirner/boiling-pt-prediction",
"score": 3
} |
#### File: deployment/scripts/smilestools.py
```python
from rdkit.Chem import Lipinski, Descriptors, MolFromMolBlock, MolFromSmarts, Draw
import numpy as np
from io import BytesIO
import base64
class SmilesTransformer():
def __init__(self, mol_file):
self.mol_file = mol_file
# Check the validity of the MOL file input before proceeding.
self.molecule = MolFromMolBlock(self.mol_file)
self.valid_structure = True if self.molecule is not None else False
# Function to calculate fraction of aliphatic carbons that are branch points.
# Called by the instance's parse_smiles function.
def calc_branch_frac(self):
# Use SMARTS substructure codes to count the total number of carbon atoms
# & the total number of branched aliphatic carbons (i.e.: tertiary or quaternary carbon atoms)
self.carbon_count = len(self.molecule.GetSubstructMatches(MolFromSmarts("[#6]")))
self.branch_carbon_count = len(self.molecule.GetSubstructMatches(MolFromSmarts("[$([C;H1,H0]([#6])([#6])([#6]))]")))
# Prevent division by zero.
if self.carbon_count !=0:
branch_frac = self.branch_carbon_count / self.carbon_count
else:
branch_frac = np.nan
return branch_frac
# This function is responsible for feature generation.
# It calls calc_branch_frac as needed.
def gen_features(self):
# If the SMILES code was properly interpreted, compute the descriptors of interest.
# If not, return an array of zeroes instead.
        # (Note: Because frontend.py only generates predictions after valid_structure
# is confirmed True, this if statement should never actually be false.)
if self.valid_structure:
# Invoke the function for SMARTS-based substructure searching
self.branching_frac = self.calc_branch_frac()
# Determine other features.
self.h_bond_donors = Lipinski.NumHDonors(self.molecule)
self.mol_wt = Descriptors.ExactMolWt(self.molecule)
self.rings_aromatic = Lipinski.NumAromaticRings(self.molecule)
# Compile the prediction features as a tuple, then convert to array.
pred_tup = (self.branching_frac, self.h_bond_donors, self.mol_wt, self.rings_aromatic)
pred_array = np.ascontiguousarray(pred_tup)
pred_array = pred_array.reshape((1, -1))
# Returned the transformed data as a numpy array.
return pred_array
else:
return np.zeros((1,4))
# This function generates a list of unique atomic numbers present in the molecule.
# It is used for validation purposes by frontend.py via predict.py to ensure that
# only atoms included in the training set are present in the user-inputted structure.
def get_atoms(self):
atom_list = []
for atom in self.molecule.GetAtoms():
atom_list.append(atom.GetSymbol())
return set(atom_list)
# This function is used to draw a structure using the inputted SMILES code.
# The image is returned as a string of bytes to be rendered by Flask.
def draw_structure(self):
# Generate the structure as a PIL image
struct_pil = Draw.MolToImage(self.molecule, size = (300, 150))
imageBox = struct_pil.getbbox()
cropped=struct_pil.crop(imageBox)
# Convert the PIL image to bytes
struct_img = BytesIO()
cropped.save(struct_img, format = "GIF", transparency = 0)
struct_img_encoded = base64.b64encode(struct_img.getvalue())
return struct_img_encoded
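# Illustrative usage sketch (not part of the original module; the file path is a
# placeholder): generate the four prediction features from a MOL block.
#
#     with open("molecule.mol") as f:              # hypothetical MOL file
#         transformer = SmilesTransformer(f.read())
#     if transformer.valid_structure:
#         features = transformer.gen_features()    # shape (1, 4): branching fraction,
#                                                  # H-bond donors, mol. weight, aromatic rings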
``` |
{
"source": "jhirner/patent-tools",
"score": 3
} |
#### File: patent-tools/patenttools/frontend.py
```python
from flask import Flask, render_template, request
from lookup import USPTOLookup
from distiller import TextDistiller
from requests.exceptions import ConnectionError
app = Flask(__name__)
# Define routes
@app.route("/")
def build_search():
display = render_template("search_form.html")
return display
@app.route("/results", methods = ["POST", "GET"])
def display_results():
# This route should be accessed only from the search form at /.
# If it is accessed directly via GET, provide a link to / instead.
if request.method == "POST":
# Capture the user's search parameters
form_submission = request.form
raw_pat_num = form_submission["raw_pat_num"]
bigram_freq_filter = int(form_submission["bigram_freq_filter"])
# If any query at all is present, try to proceed.
if raw_pat_num != "":
try:
# Instantiate a USPTOLookup to parse basic patent information
patent_info = USPTOLookup(raw_pat_num)
if patent_info.number == "unrecognized input":
error = """The patent number you provided could not be interpreted.<p>
Please <a href = '/'>enter a new query</a>."""
return error
# Instantiate a TextDistiller to extract bigrams
pat_distiller = TextDistiller(" ".join(patent_info.claims) + patent_info.description)
bigrams = pat_distiller.gen_bigrams(min_freq = bigram_freq_filter)
display = render_template("results.html",
pat_num = patent_info.number,
pat_title = patent_info.title,
pat_url = patent_info.url,
pat_class = patent_info.primary_class,
pat_assignee = patent_info.assignee,
pat_file_date = patent_info.filing_date,
citations_us = patent_info.cited_us,
citations_for = patent_info.cited_for,
pat_bigrams = bigrams,
wordcloud = pat_distiller.wordcloud)
return display
except ConnectionError:
conn_err1 = "<b>Error: The USPTO server is unreachable.</b><p>"
conn_err2 = "Please try again later."
            return conn_err1 + conn_err2
except:
error = """An error occured.<p>
Please <a href = '/'>enter a new query</a><br>
<a href = "https://forms.gle/nZT9JLJbA9akGpeE8" target = "_blank">or share feedback about the problem.</a>"""
return error
else:
error = """No query entered.<p>
Please <a href = '/'>enter a new query</a>."""
return error
else:
error = """No query entered.<p>
Please <a href = '/'>enter a new query</a>."""
return error
@app.route("/<unspecified_str>")
def handle_unknown(unspecified_str):
error = """Invalid path.<p>Please <a href = '/'>enter a new query</a>."""
return error
# Run the app
if __name__ == "__main__":
app.run()
``` |
{
"source": "jhj0411jhj/soln-ml",
"score": 2
} |
#### File: transformations/generator/__init__.py
```python
import os
from mindware.components.feature_engineering.transformations.base_transformer import Transformer
from mindware.components.utils.class_loader import find_components, ThirdPartyComponents
"""
Load the built-in generators.
"""
generator_directory = os.path.split(__file__)[0]
_generator = find_components(__package__, generator_directory, Transformer)
"""
Load third-party generators.
"""
_addons = ThirdPartyComponents(Transformer)
def add_generator(generator):
_addons.add_component(generator)
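# Illustrative registration sketch (not part of the original module): a user-defined
# transformer (hypothetical MyGenerator, subclassing Transformer) can be made
# available alongside the built-in ones via
#
#     add_generator(MyGenerator)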
```
#### File: transformations/preprocessor/onehot_encoder.py
```python
from mindware.components.feature_engineering.transformations.base_transformer import *
class OneHotTransformation(Transformer):
type = 2
def __init__(self):
super().__init__("onehot_encoder")
self.input_type = CATEGORICAL
def operate(self, input_datanode: DataNode, target_fields=None):
import pandas as pd
import numpy as np
from sklearn.preprocessing import OneHotEncoder
if target_fields is None:
target_fields = collect_fields(input_datanode.feature_types, self.input_type)
X, y = input_datanode.data
# Fetch the fields to transform.
self.target_fields = target_fields
if isinstance(X, pd.DataFrame):
X = X.values
X_input = X[:, target_fields]
if self.model is None:
self.model = OneHotEncoder(handle_unknown='ignore')
self.model.fit(X_input)
new_X = self.model.transform(X_input).toarray()
X_output = X.copy()
# Delete the original columns.
X_output = np.delete(X_output, np.s_[target_fields], axis=1)
X_output = np.hstack((X_output, new_X))
feature_types = input_datanode.feature_types.copy()
feature_types = list(np.delete(feature_types, target_fields))
feature_types.extend([CATEGORICAL] * new_X.shape[1])
output_datanode = DataNode((X_output, y), feature_types, input_datanode.task_type)
output_datanode.trans_hist = input_datanode.trans_hist.copy()
output_datanode.trans_hist.append(self.type)
return output_datanode
```
#### File: transformations/rescaler/quantile_transformer.py
```python
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import UniformIntegerHyperparameter, \
CategoricalHyperparameter
from mindware.components.feature_engineering.transformations.base_transformer import *
class QuantileTransformation(Transformer):
type = 5
def __init__(self, n_quantiles=1000, output_distribution='uniform', random_state=1):
super().__init__("quantile_transformer")
self.input_type = [NUMERICAL, DISCRETE]
self.compound_mode = 'in_place'
self.output_type = NUMERICAL
self.output_distribution = output_distribution
self.n_quantiles = n_quantiles
self.random_state = random_state
@ease_trans
def operate(self, input_datanode, target_fields=None):
from mindware.components.feature_engineering.transformations.utils import QuantileTransformer
X, y = input_datanode.data
X_new = X[:, target_fields]
if not self.model:
self.model = QuantileTransformer(output_distribution=self.output_distribution,
n_quantiles=self.n_quantiles, copy=False,
random_state=self.random_state)
self.model.fit(X_new)
_X = self.model.transform(X_new)
return _X
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None, optimizer='smac'):
if optimizer == 'smac':
cs = ConfigurationSpace()
# TODO parametrize like the Random Forest as n_quantiles = n_features^param
n_quantiles = UniformIntegerHyperparameter(
'n_quantiles', lower=10, upper=2000, default_value=1000
)
output_distribution = CategoricalHyperparameter(
'output_distribution', ['uniform', 'normal'], default_value="uniform"
)
cs.add_hyperparameters([n_quantiles, output_distribution])
return cs
elif optimizer == 'tpe':
from hyperopt import hp
space = {'n_quantiles': hp.randint('quantile_n_quantiles', 1990) + 10,
'output_distribution': hp.choice('quantile_output_distribution', ['uniform', 'normal'])}
return space
```
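For reference, the SMAC-style search space declared above can be sampled directly. A minimal sketch follows; the import path is inferred from this file's location under `transformations/rescaler/`, so treat it as an assumption.
```python
# Minimal sketch, assuming mindware and ConfigSpace are installed; the import
# path is inferred from this file's location under transformations/rescaler/.
from mindware.components.feature_engineering.transformations.rescaler.quantile_transformer import (
    QuantileTransformation,
)

cs = QuantileTransformation.get_hyperparameter_search_space(optimizer='smac')
print(cs)  # two hyperparameters: n_quantiles, output_distribution

# Sample one configuration and build the transformer from it.
config = cs.sample_configuration().get_dictionary()
qt = QuantileTransformation(**config)
print(qt.n_quantiles, qt.output_distribution)
```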
#### File: transformations/rescaler/standard.py
```python
from mindware.components.feature_engineering.transformations.base_transformer import *
class StandardScaler(Transformer):
type = 43
def __init__(self, **kwargs):
super().__init__("standard_scaler")
self.input_type = [DISCRETE, NUMERICAL]
self.compound_mode = 'in_place'
self.output_type = NUMERICAL
@ease_trans
def operate(self, input_data, target_fields):
from sklearn.preprocessing import StandardScaler
X, y = input_data.data
X_new = X[:, target_fields]
if not self.model:
self.model = StandardScaler()
self.model.fit(X_new)
_X = self.model.transform(X_new)
return _X
```
#### File: meta_learning/algorithm_recomendation/ranknet_advisor_torch.py
```python
import os
import numpy as np
import pickle as pk
from torch import nn, optim, from_numpy
import torch
from torch.utils.data import Dataset, DataLoader
from mindware.utils.logging_utils import get_logger
from mindware.components.meta_learning.algorithm_recomendation.base_advisor import BaseAdvisor
class CategoricalHingeLoss(nn.Module):
def forward(self, input, target):
pos = (1. - target) * (1. - input) + target * input
neg = target * (1. - input) + (1. - target) * input
return torch.sum(torch.max(torch.zeros_like(neg - pos + 1.), neg - pos + 1.)) / len(input)
class PairwiseDataset(Dataset):
def __init__(self, X1, X2, y):
self.X1_array, self.X2_array, self.y_array = X1, X2, y.reshape(y.shape[0], 1)
def __getitem__(self, index):
data1 = from_numpy(self.X1_array[index]).float()
data2 = from_numpy(self.X2_array[index]).float()
y_true = from_numpy(self.y_array[index]).float()
return data1, data2, y_true
def __len__(self):
return self.X1_array.shape[0]
class RankNet(nn.Module):
def __init__(self, input_shape, hidden_layer_sizes, activation):
super(RankNet, self).__init__()
self.model = nn.Sequential()
self.input_shape = input_shape
self.output_sigmoid = nn.Sigmoid()
self.act_func_dict = {'relu': nn.ReLU(inplace=True), 'tanh': nn.Tanh()}
self.model.add_module('BatchNorm', nn.BatchNorm1d(input_shape))
self.model.add_module('linear_' + str(hidden_layer_sizes[0]), nn.Linear(input_shape, hidden_layer_sizes[0]))
self.model.add_module('act_func_' + str(0), self.act_func_dict[activation[0]])
for i in range(1, len(hidden_layer_sizes)):
self.model.add_module('linear_' + str(hidden_layer_sizes[i]),
nn.Linear(hidden_layer_sizes[i - 1], hidden_layer_sizes[i]))
self.model.add_module('act_func_' + str(i),
self.act_func_dict[activation[i]])
self.model.add_module('output', nn.Linear(hidden_layer_sizes[-1], 1))
def forward(self, input1, input2):
s1 = self.model(input1)
s2 = self.model(input2)
return self.output_sigmoid(s1 - s2)
def predict(self, input):
return self.model(input).detach()
class RankNetAdvisor(BaseAdvisor):
def __init__(self,
rep=3,
metric='acc',
n_algorithm=3,
task_type=None,
total_resource=1200,
exclude_datasets=None,
meta_dir=None):
self.logger = get_logger(self.__module__ + "." + self.__class__.__name__)
super().__init__(n_algorithm, task_type, metric, rep, total_resource,
'ranknet', exclude_datasets, meta_dir)
self.model = None
@staticmethod
def create_pairwise_data(X, y):
X1, X2, labels = list(), list(), list()
n_algo = y.shape[1]
for _X, _y in zip(X, y):
if np.isnan(_X).any():
continue
meta_vec = _X
for i in range(n_algo):
for j in range(i + 1, n_algo):
if (_y[i] == -1) or (_y[j] == -1):
continue
vector_i, vector_j = np.zeros(n_algo), np.zeros(n_algo)
vector_i[i] = 1
vector_j[j] = 1
meta_x1 = list(meta_vec.copy())
meta_x1.extend(vector_i.copy())
meta_x2 = list(meta_vec.copy())
meta_x2.extend(vector_j.copy())
X1.append(meta_x1)
X1.append(meta_x2)
X2.append(meta_x2)
X2.append(meta_x1)
_label = 1 if _y[i] > _y[j] else 0
labels.append(_label)
labels.append(1 - _label)
return np.asarray(X1), np.asarray(X2), np.asarray(labels)
@staticmethod
def create_model(input_shape, hidden_layer_sizes, activation):
return RankNet(input_shape, hidden_layer_sizes, activation)
def weights_init(self, model):
if isinstance(model, nn.Linear):
nn.init.xavier_uniform_(model.weight.data) # use xavier instead of default he_normal
model.bias.data.zero_()
def fit(self, **kwargs):
l1_size = kwargs.get('layer1_size', 256)
l2_size = kwargs.get('layer2_size', 128)
act_func = kwargs.get('activation', 'tanh')
batch_size = kwargs.get('batch_size', 128)
epochs = 200
_X, _y, _ = self.metadata_manager.load_meta_data()
X1, X2, y = self.create_pairwise_data(_X, _y)
train_data = PairwiseDataset(X1, X2, y)
train_loader = DataLoader(
dataset=train_data,
batch_size=batch_size,
shuffle=True,
num_workers=2
)
self.input_shape = X1.shape[1]
meta_learner_filename = os.path.join(self.meta_dir, "meta_learner", 'ranknet_model_%s_%s_%s.pth' % (
self.meta_algo, self.metric, self.hash_id))
if os.path.exists(meta_learner_filename):
# print("load model...")
self.model = torch.load(meta_learner_filename)
else:
# print("fit model...")
self.model = RankNet(X1.shape[1], (l1_size, l2_size,), (act_func, act_func,))
self.model.apply(self.weights_init)
optimizer = optim.Adam(self.model.parameters(), lr=1e-3)
loss_fun = CategoricalHingeLoss()
self.model.train()
for epoch in range(epochs):
train_loss = 0
train_samples = 0
train_acc = 0
for i, (data1, data2, y_true) in enumerate(train_loader):
optimizer.zero_grad()
y_pred = self.model(data1, data2)
loss = loss_fun(y_pred, y_true)
loss.backward()
optimizer.step()
train_loss += loss.item() * len(data1)
train_samples += len(data1)
train_acc += sum(y_pred.detach().numpy().round() == y_true.detach().numpy())
print('Epoch{}, loss : {}, acc : {}'.format(epoch, train_loss / len(train_data),
train_acc / len(train_data)))
# print("save model...")
torch.save(self.model, meta_learner_filename)
def predict(self, dataset_meta_feat):
n_algo = self.n_algo_candidates
_X = list()
for i in range(n_algo):
vector_i = np.zeros(n_algo)
vector_i[i] = 1
_X.append(list(dataset_meta_feat.copy()) + list(vector_i))
X = np.asarray(_X)
X = from_numpy(X).float()
self.model.eval()
pred = self.model.predict(X).numpy()
return pred.ravel()
``` |
{
"source": "jhj2713/qufafeat",
"score": 3
} |
#### File: featuretools/mkfeat/feat_importance.py
```python
import os.path
import pandas as pd
import xgboost as xgb
from sklearn.model_selection import train_test_split
from .error import Error
from .columnspec import ColumnSpec
from .qufa_csv import QufaCsv
class TrainCallback(xgb.callback.TrainingCallback):
def __init__(self, proghandler: callable, n_epochs):
self.proghandler = proghandler
self.n_epochs = n_epochs
def after_iteration(self, model, epoch: int, evals_log) -> bool:
prog = int(100.0 * epoch / self.n_epochs)
if prog >= 100:
prog = 99
if self.proghandler:
return self.proghandler(prog)
return False
class FeatureImportance:
def __init__(self):
self.data = None
self.label = None
self.model = None
self.n_epochs = 300
self._colspec_data: ColumnSpec = None
def load(self, path_data: str, columns_data: dict, path_label: str, columns_label: dict) -> Error:
if path_data is None or columns_data is None:
return Error.ERR_INVALID_ARG
if not os.path.isfile(path_data):
return Error.ERR_DATA_NOT_FOUND
if path_label is not None:
if columns_label is None:
return Error.ERR_INVALID_ARG
if not os.path.isfile(path_label):
return Error.ERR_LABEL_NOT_FOUND
self._colspec_data = colspec_data = ColumnSpec(columns_data)
if path_label is None:
if colspec_data.get_label_colname() is None:
return Error.ERR_LABEL_NOT_FOUND
csv_data = QufaCsv(path_data, colspec_data)
exclude_label = True if path_label is None else False
data = csv_data.load(exclude_label=exclude_label, numeric_only=True)
if isinstance(data, Error):
return data
self.data = data
if path_label is None:
label = csv_data.load(label_only=True)
else:
colspec_label = ColumnSpec(columns_label)
csv_label = QufaCsv(path_label, colspec_label)
label = csv_label.load()
if isinstance(label, Error):
return label
self.label = label
return Error.OK
def analyze(self, proghandler: callable = None):
xtr, xv, ytr, yv = train_test_split(self.data.values, self.label, test_size=0.2, random_state=0)
dtrain = xgb.DMatrix(xtr, label=ytr)
dvalid = xgb.DMatrix(xv, label=yv)
evals = [(dtrain, 'train'), (dvalid, 'valid')]
params = {
'min_child_weight': 1, 'eta': 0.166,
'colsample_bytree': 0.4, 'max_depth': 9,
'subsample': 1.0, 'lambda': 57.93,
'booster': 'gbtree', 'gamma': 0.5,
'silent': 1, 'eval_metric': 'rmse',
'objective': 'reg:linear',
}
callback = TrainCallback(proghandler, self.n_epochs)
self.model = xgb.train(params=params, dtrain=dtrain, num_boost_round=self.n_epochs,
callbacks=[callback],
evals=evals, early_stopping_rounds=60, maximize=False, verbose_eval=10)
if proghandler is not None:
proghandler(100)
def get_importance(self):
fscores = self.model.get_fscore()
fscore_sum = 0
for i in range(len(self.data.columns)):
colname = 'f' + str(i)
if colname in fscores:
fscore_sum += fscores[colname]
importances = []
for i in range(len(self.data.columns)):
colname = 'f' + str(i)
if colname in fscores:
importances.append(fscores[colname] / fscore_sum)
else:
importances.append(0.0)
idx = 0
for is_numeric in self._colspec_data.get_is_numerics():
if not is_numeric:
importances.insert(idx, 0)
idx += 1
return importances
```
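A hedged usage sketch of the class above: the CSV paths are placeholders, and the column dictionaries must follow the project's ColumnSpec schema (not shown here), so both are assumptions.
```python
# Minimal sketch of the intended call sequence: load -> analyze -> get_importance.
# Paths are placeholders and the column dictionaries must follow the ColumnSpec
# schema defined elsewhere in this package (treated as an assumption here).
from featuretools.mkfeat.feat_importance import FeatureImportance
from featuretools.mkfeat.error import Error

columns_data = {}    # placeholder: fill in per ColumnSpec
columns_label = {}   # placeholder: fill in per ColumnSpec

def on_progress(prog):
    print("importance analysis: %d%%" % prog)
    return False     # returning True would stop training early

fi = FeatureImportance()
err = fi.load("train.csv", columns_data, "labels.csv", columns_label)
if err == Error.OK:
    fi.analyze(proghandler=on_progress)
    print(fi.get_importance())   # one weight per input column, summing to ~1
```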
#### File: featuretools/mkfeat/featsel.py
```python
from sklearn.feature_selection import VarianceThreshold
from sklearn.preprocessing import MinMaxScaler
from pandas import DataFrame
from featuretools import selection
from elapsed_time import ElapsedTime
THRESHOLD = 0.02
def select_features(df: DataFrame, features, elapsed_time: ElapsedTime):
df_new = DataFrame()
features_new = []
f_names_check = []
for feat in features:
name = feat.get_name()
if feat.variable_type.type_string == "numeric" or feat.variable_type.type_string == "boolean":
sel = VarianceThreshold()
scaler = MinMaxScaler()
try:
arr = df[name].values.reshape(len(df), 1)
scaler.fit(arr)
sel.fit(scaler.transform(arr))
if sel.variances_[0] >= THRESHOLD:
f_names_check.append(name)
df_new[name] = df[name]
features_new.append(feat)
except ValueError:
pass
else:
df_new[name] = df[name]
features_new.append(feat)
elapsed_time.mark()
df_new, features_new = selection.remove_highly_correlated_features(df_new, features_new,
features_to_check=f_names_check)
return df_new, features_new
```
#### File: featuretools/mkfeat/normalize.py
```python
from pandas import DataFrame
import autonormalize as an
def normalize(df: DataFrame, key_colname):
if len(df) > 1000:
df = df.sample(n=1000)
es = an.auto_entityset(df, index=key_colname, accuracy=0.98)
norminfos = []
    # For entities other than the first one; the first entity is assumed to be the main table.
entities = es.entities[1:]
for et in entities:
norminfo = []
for var in et.variables:
norminfo.append(var.name)
norminfos.append(norminfo)
for norminfo in norminfos:
parent_ids = _get_parent_entity_ids(es, norminfo[0])
for parent_id in parent_ids:
vars = es[parent_id].variables
for var in vars[1:]:
norminfo.append(var.name)
return norminfos
def _get_parent_entity_ids(es, child_id):
parent_ids = []
for rel in es.relationships:
if child_id == rel.child_entity.id:
parent_ids.append(rel.parent_entity.id)
parent_ids += _get_parent_entity_ids(es, rel.parent_entity.id)
return parent_ids
```
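A small, hedged illustration of `normalize()` on a toy denormalized table; the column names and the detected dependency are illustrative only.
```python
# Minimal sketch on a toy table; assumes autonormalize is installed and that the
# functional dependency customer_id -> customer_city is detected. The output in
# the final comment is illustrative, not guaranteed.
import pandas as pd
from normalize import normalize  # module name as in this repo (an assumption)

df = pd.DataFrame({
    "order_id": range(8),
    "customer_id": [1, 1, 2, 2, 3, 3, 4, 4],
    "customer_city": ["Seoul", "Seoul", "Busan", "Busan",
                      "Daegu", "Daegu", "Incheon", "Incheon"],
    "amount": [10, 20, 15, 5, 7, 9, 12, 3],
})

norminfos = normalize(df, "order_id")
print(norminfos)   # e.g. [["customer_id", "customer_city"]]
```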
#### File: featuretools/mkfeat/qufa_csv.py
```python
import pandas as pd
from .columnspec import ColumnSpec
from .error import Error
# Whether the CSV data includes a header row. This is determined by the data-integration service; the current implementation is meant to handle both cases.
csv_has_header = True
class QufaCsv:
def __init__(self, path: str, colspec: ColumnSpec):
self._path = path
self._colspec = colspec
self._skiprows = 1 if csv_has_header else None
def get_row(self, path):
with open(path, "r") as f:
lines = f.readlines()
return len(lines)
    def load(self, callback=None, label_only: bool = False, exclude_label: bool = False, numeric_only: bool = False):
usecols = None
colnames = self._colspec.get_colnames()
if len(colnames) != self._guess_n_columns():
return Error.ERR_COLUMN_COUNT_MISMATCH
usecols = self._colspec.get_usecols(label_only=label_only, exclude_label=exclude_label,
numeric_only=numeric_only)
row_count = self.get_row(self._path)
try:
chunk_size = 10000
chunk_prog = chunk_size / row_count * 100
prog = 0
data_arr = []
for data in pd.read_csv(self._path, header=None, names=colnames, converters=self._colspec.get_converters(),
skiprows=self._skiprows, usecols=usecols, dtype=self._colspec.get_dtypes(),
true_values=['Y', 'true', 'T'], false_values=['N', 'false', 'F'],
chunksize=chunk_size):
data_arr.append(data)
prog += chunk_prog
                if callback:
                    callback(0, prog, 0, True)
data_concat = pd.concat([data for data in data_arr])
except ValueError:
return Error.ERR_COLUMN_TYPE
return data_concat
def _guess_n_columns(self):
data = pd.read_csv(self._path, header=0, skiprows=self._skiprows, nrows=1)
return len(data.columns)
``` |
{
"source": "jhjacobsen/pytorch-i-revnet",
"score": 2
} |
#### File: pytorch-i-revnet/models/iRevNet.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from .model_utils import split, merge, injective_pad, psi
class irevnet_block(nn.Module):
def __init__(self, in_ch, out_ch, stride=1, first=False, dropout_rate=0.,
affineBN=True, mult=4):
""" buid invertible bottleneck block """
super(irevnet_block, self).__init__()
self.first = first
self.pad = 2 * out_ch - in_ch
self.stride = stride
self.inj_pad = injective_pad(self.pad)
self.psi = psi(stride)
if self.pad != 0 and stride == 1:
in_ch = out_ch * 2
print('')
print('| Injective iRevNet |')
print('')
layers = []
if not first:
layers.append(nn.BatchNorm2d(in_ch//2, affine=affineBN))
layers.append(nn.ReLU(inplace=True))
layers.append(nn.Conv2d(in_ch//2, int(out_ch//mult), kernel_size=3,
stride=stride, padding=1, bias=False))
layers.append(nn.BatchNorm2d(int(out_ch//mult), affine=affineBN))
layers.append(nn.ReLU(inplace=True))
layers.append(nn.Conv2d(int(out_ch//mult), int(out_ch//mult),
kernel_size=3, padding=1, bias=False))
layers.append(nn.Dropout(p=dropout_rate))
layers.append(nn.BatchNorm2d(int(out_ch//mult), affine=affineBN))
layers.append(nn.ReLU(inplace=True))
layers.append(nn.Conv2d(int(out_ch//mult), out_ch, kernel_size=3,
padding=1, bias=False))
self.bottleneck_block = nn.Sequential(*layers)
def forward(self, x):
""" bijective or injective block forward """
if self.pad != 0 and self.stride == 1:
x = merge(x[0], x[1])
x = self.inj_pad.forward(x)
x1, x2 = split(x)
x = (x1, x2)
x1 = x[0]
x2 = x[1]
Fx2 = self.bottleneck_block(x2)
if self.stride == 2:
x1 = self.psi.forward(x1)
x2 = self.psi.forward(x2)
y1 = Fx2 + x1
return (x2, y1)
def inverse(self, x):
""" bijective or injecitve block inverse """
x2, y1 = x[0], x[1]
if self.stride == 2:
x2 = self.psi.inverse(x2)
Fx2 = - self.bottleneck_block(x2)
x1 = Fx2 + y1
if self.stride == 2:
x1 = self.psi.inverse(x1)
if self.pad != 0 and self.stride == 1:
x = merge(x1, x2)
x = self.inj_pad.inverse(x)
x1, x2 = split(x)
x = (x1, x2)
else:
x = (x1, x2)
return x
class iRevNet(nn.Module):
def __init__(self, nBlocks, nStrides, nClasses, nChannels=None, init_ds=2,
dropout_rate=0., affineBN=True, in_shape=None, mult=4):
super(iRevNet, self).__init__()
self.ds = in_shape[2]//2**(nStrides.count(2)+init_ds//2)
self.init_ds = init_ds
self.in_ch = in_shape[0] * 2**self.init_ds
self.nBlocks = nBlocks
self.first = True
print('')
print(' == Building iRevNet %d == ' % (sum(nBlocks) * 3 + 1))
if not nChannels:
nChannels = [self.in_ch//2, self.in_ch//2 * 4,
self.in_ch//2 * 4**2, self.in_ch//2 * 4**3]
self.init_psi = psi(self.init_ds)
self.stack = self.irevnet_stack(irevnet_block, nChannels, nBlocks,
nStrides, dropout_rate=dropout_rate,
affineBN=affineBN, in_ch=self.in_ch,
mult=mult)
self.bn1 = nn.BatchNorm2d(nChannels[-1]*2, momentum=0.9)
self.linear = nn.Linear(nChannels[-1]*2, nClasses)
def irevnet_stack(self, _block, nChannels, nBlocks, nStrides, dropout_rate,
affineBN, in_ch, mult):
""" Create stack of irevnet blocks """
block_list = nn.ModuleList()
strides = []
channels = []
for channel, depth, stride in zip(nChannels, nBlocks, nStrides):
strides = strides + ([stride] + [1]*(depth-1))
channels = channels + ([channel]*depth)
for channel, stride in zip(channels, strides):
block_list.append(_block(in_ch, channel, stride,
first=self.first,
dropout_rate=dropout_rate,
affineBN=affineBN, mult=mult))
in_ch = 2 * channel
self.first = False
return block_list
def forward(self, x):
""" irevnet forward """
n = self.in_ch//2
if self.init_ds != 0:
x = self.init_psi.forward(x)
out = (x[:, :n, :, :], x[:, n:, :, :])
for block in self.stack:
out = block.forward(out)
out_bij = merge(out[0], out[1])
out = F.relu(self.bn1(out_bij))
out = F.avg_pool2d(out, self.ds)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out, out_bij
def inverse(self, out_bij):
""" irevnet inverse """
out = split(out_bij)
for i in range(len(self.stack)):
out = self.stack[-1-i].inverse(out)
out = merge(out[0],out[1])
if self.init_ds != 0:
x = self.init_psi.inverse(out)
else:
x = out
return x
if __name__ == '__main__':
model = iRevNet(nBlocks=[6, 16, 72, 6], nStrides=[2, 2, 2, 2],
nChannels=[24, 96, 384, 1536], nClasses=1000, init_ds=2,
dropout_rate=0., affineBN=True, in_shape=[3, 224, 224],
mult=4)
    logits, out_bij = model(Variable(torch.randn(1, 3, 224, 224)))
    print(logits.size())
``` |
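Since every block exposes an inverse, the bijection can be sanity-checked end to end. Below is a minimal sketch reusing the constructor arguments from the `__main__` block above; the import path is an assumption.
```python
# Minimal sketch: reconstruct the input from out_bij and measure the error.
# Import path is an assumption; constructor arguments mirror the __main__ above.
import torch
from models.iRevNet import iRevNet

model = iRevNet(nBlocks=[6, 16, 72, 6], nStrides=[2, 2, 2, 2],
                nChannels=[24, 96, 384, 1536], nClasses=1000, init_ds=2,
                dropout_rate=0., affineBN=True, in_shape=[3, 224, 224], mult=4)
model.eval()  # fixed BatchNorm statistics, so forward and inverse match exactly

x = torch.randn(2, 3, 224, 224)
with torch.no_grad():
    logits, out_bij = model(x)
    x_rec = model.inverse(out_bij)

print(logits.shape)                      # torch.Size([2, 1000])
print((x - x_rec).abs().max().item())    # ~0 up to floating-point error
```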
{
"source": "jhja/RFNN",
"score": 2
} |
#### File: RFNN/theano-rfnn/mnist_loader.py
```python
import numpy as np
import os
from random import shuffle
datasets_dir = './../data/'
def one_hot(x,n):
if type(x) == list:
x = np.array(x)
x = x.flatten()
o_h = np.zeros((len(x),n))
o_h[np.arange(len(x)),x] = 1
return o_h
def mnist(ntrain=60000,ntest=10000,onehot=True):
ntrain=np.array(ntrain).astype(int).squeeze()
data_dir = os.path.join(datasets_dir,'mnist/')
    fd = open(os.path.join(data_dir,'train-images-idx3-ubyte'), 'rb')
loaded = np.fromfile(file=fd,dtype=np.uint8)
trX = loaded[16:].reshape((60000,28*28)).astype(float)
    fd = open(os.path.join(data_dir,'train-labels-idx1-ubyte'), 'rb')
loaded = np.fromfile(file=fd,dtype=np.uint8)
trY = loaded[8:].reshape((60000))
    fd = open(os.path.join(data_dir,'t10k-images-idx3-ubyte'), 'rb')
loaded = np.fromfile(file=fd,dtype=np.uint8)
teX = loaded[16:].reshape((10000,28*28)).astype(float)
    fd = open(os.path.join(data_dir,'t10k-labels-idx1-ubyte'), 'rb')
loaded = np.fromfile(file=fd,dtype=np.uint8)
teY = loaded[8:].reshape((10000))
trY_shuffle = []
trX_shuffle = []
    index_shuf = list(range(len(trY)))
shuffle(index_shuf)
for i in index_shuf:
trY_shuffle.append(trY[i])
trX_shuffle.append(trX[i])
trX = np.asarray(trX_shuffle)
trY = np.asarray(trY_shuffle)
trX = trX/255.
teX = teX/255.
trX = trX[:ntrain]
trY = trY[:ntrain]
teX = teX[:ntest]
teY = teY[:ntest]
if onehot:
trY = one_hot(trY, 10)
teY = one_hot(teY, 10)
else:
trY = np.asarray(trY)
teY = np.asarray(teY)
return trX,teX,trY,teY
``` |
{
"source": "jh-jeong/selective-convolution",
"score": 2
} |
#### File: jh-jeong/selective-convolution/datasets.py
```python
import os
import torchvision
import torchvision.transforms as transforms
DATA_DIR = './data'
IMAGENET_PATH = ''
TIMAGENET_PATH = ''
def get_dataset(dataset):
if dataset == 'cifar10' or dataset == 'cifar100':
if dataset == 'cifar10':
normalize = transforms.Normalize(mean=[0.4914, 0.4824, 0.4467],
std=[0.2471, 0.2435, 0.2616])
data = torchvision.datasets.CIFAR10
else:
normalize = transforms.Normalize(mean=[0.5071, 0.4867, 0.4408],
std=[0.2675, 0.2565, 0.2761])
data = torchvision.datasets.CIFAR100
train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
])
test_transform = transforms.Compose([
transforms.ToTensor(),
normalize
])
train_set = data(DATA_DIR, train=True, transform=train_transform, download=True)
test_set = data(DATA_DIR, train=False, transform=test_transform, download=True)
return train_set, test_set
elif dataset == 'fmnist':
normalize = transforms.Normalize((0.1307,), (0.3081,))
data = torchvision.datasets.FashionMNIST
train_transform = transforms.Compose([
transforms.RandomCrop(28, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
test_transform = transforms.Compose([
transforms.ToTensor(),
normalize,
])
train_set = data(DATA_DIR, train=True, transform=train_transform, download=True)
test_set = data(DATA_DIR, train=False, transform=test_transform, download=True)
return train_set, test_set
elif dataset == 'tinyimg':
train_dir = os.path.join(TIMAGENET_PATH, 'train')
val_dir = os.path.join(TIMAGENET_PATH, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_transform = transforms.Compose([
transforms.RandomCrop(64, padding=8),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
val_transform = transforms.Compose([
transforms.ToTensor(),
normalize,
])
train_set = torchvision.datasets.ImageFolder(train_dir, train_transform)
val_set = torchvision.datasets.ImageFolder(val_dir, val_transform)
return train_set, val_set
elif dataset == 'imagenet':
train_dir = os.path.join(IMAGENET_PATH, 'train')
val_dir = os.path.join(IMAGENET_PATH, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_transform = transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
val_transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])
train_set = torchvision.datasets.ImageFolder(train_dir, train_transform)
val_set = torchvision.datasets.ImageFolder(val_dir, val_transform)
return train_set, val_set
```
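The returned dataset pairs plug straight into PyTorch data loaders; a minimal sketch for CIFAR-10 follows (batch size and worker count are illustrative).
```python
# Minimal sketch; CIFAR-10 is downloaded into DATA_DIR on first use.
import torch
from datasets import get_dataset

train_set, test_set = get_dataset('cifar10')
train_loader = torch.utils.data.DataLoader(train_set, batch_size=128,
                                           shuffle=True, num_workers=4)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=128,
                                          shuffle=False, num_workers=4)

images, labels = next(iter(train_loader))
print(images.shape, labels.shape)   # torch.Size([128, 3, 32, 32]) torch.Size([128])
```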
#### File: jh-jeong/selective-convolution/main.py
```python
from __future__ import division
import sys
import json
import os
import time
import math
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.optim
import models
from datasets import get_dataset
from utils import Logger
from utils import AverageMeter
from utils import save_checkpoint
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
cudnn.benchmark = True
def update_learning_rate(optimizer, epoch, args, cur_batch, num_batches):
lr_init = args.get('lr_init', 0.1)
num_epochs = args['num_epochs']
T_total = num_epochs * num_batches
T_cur = (epoch % num_epochs) * num_batches + cur_batch
lr = 0.5 * lr_init * (1 + math.cos(math.pi * T_cur / T_total))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
def error_k(output, target, ks=(1,)):
"""Computes the precision@k for the specified values of k"""
max_k = max(ks)
batch_size = target.size(0)
_, pred = output.topk(max_k, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
results = []
for k in ks:
correct_k = correct[:k].view(-1).float().sum(0)
results.append(100.0 - correct_k.mul_(100.0 / batch_size))
return results
def train(epoch, model, criterion, optimizer, dataloader, logger, args):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
error_top1 = AverageMeter()
error_top5 = AverageMeter()
# Switch to train mode
model.train()
num_batches = len(dataloader)
check = time.time()
for n, (images, labels) in enumerate(dataloader):
images, labels = images.to(device), labels.to(device)
data_time.update(time.time() - check)
lr = update_learning_rate(optimizer, epoch, args, n, num_batches)
check = time.time()
outputs = model(images)
loss = criterion(outputs, labels)
# Compute gradient and do SGD step
model.zero_grad()
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Measure elapsed time
batch_time.update(time.time() - check)
# Measure accuracy and record loss
top1, top5 = error_k(outputs.data, labels, ks=(1, 5))
batch_size = images.size(0)
losses.update(loss.item(), batch_size)
error_top1.update(top1.item(), batch_size)
error_top5.update(top5.item(), batch_size)
if n % 10 == 0:
logger.log('[Epoch %3d; %3d] [Time %.3f] [Data %.3f] [Loss %f] [LR %.3f]' %
(epoch, n, batch_time.value, data_time.value, losses.value, lr))
check = time.time()
logger.log('[DONE] [Time %.3f] [Data %.3f] [Loss %f] [Train@1 %.3f] [Train@5 %.3f]' %
(batch_time.average, data_time.average, losses.average,
error_top1.average, error_top5.average))
logger.scalar_summary('loss', losses.average, epoch)
logger.scalar_summary('train_1', error_top1.average, epoch)
logger.scalar_summary('batch_time', batch_time.average, epoch)
def test(epoch, model, criterion, dataloader, logger=None):
batch_time = AverageMeter()
losses = AverageMeter()
error_top1 = AverageMeter()
error_top5 = AverageMeter()
# Switch to eval mode
model.eval()
with torch.no_grad():
for n, (images, labels) in enumerate(dataloader):
images, labels = images.to(device), labels.to(device)
check = time.time()
outputs = model(images)
loss = criterion(outputs, labels)
# Measure elapsed time
batch_time.update(time.time() - check)
# Measure accuracy and record loss
top1, top5 = error_k(outputs.data, labels, ks=(1, 5))
batch_size = images.size(0)
losses.update(loss.item(), batch_size)
error_top1.update(top1.item(), batch_size)
error_top5.update(top5.item(), batch_size)
if n % 10 == 0:
if logger:
logger.log('[Test %3d] [Time %.3f] [Loss %f] [Test@1 %.3f] [Test@5 %.3f]' %
(n, batch_time.value, losses.value, error_top1.value, error_top5.value))
else:
print('[Test %3d] [Time %.3f] [Loss %f] [Test@1 %.3f] [Test@5 %.3f]' %
(n, batch_time.value, losses.value, error_top1.value, error_top5.value))
if logger:
logger.log(' * [Error@1 %.3f] [Error@5 %.3f] [Loss %.3f]' %
(error_top1.average, error_top5.average, losses.average))
logger.scalar_summary('error_1', error_top1.average, epoch)
logger.scalar_summary('error_5', error_top5.average, epoch)
logger.scalar_summary('loss_test', losses.average, epoch)
return error_top1.average
def main(args, fn):
logger = Logger(fn)
hparams = args['model_hparams']
if args['dataset'] in ['cifar10', 'fmnist']:
hparams['n_classes'] = 10
elif args['dataset'] == 'cifar100':
hparams['n_classes'] = 100
elif args['dataset'] == 'tinyimg':
hparams['n_classes'] = 200
elif args['dataset'] == 'imagenet':
hparams['n_classes'] = 1000
logger.log(args)
hparams['dataset'] = args['dataset']
model = models.__dict__[args['model']](hparams)
logger.log(model)
if torch.cuda.is_available():
n_gpus = torch.cuda.device_count()
if n_gpus > 1:
logger.log('Multi-GPU mode: using %d GPUs for training.' % n_gpus)
model = nn.DataParallel(model).cuda()
else:
logger.log('Single-GPU mode.')
model = model.cuda()
else:
n_gpus = 0
# Configure parameters to optimize
pg_normal = []
pg_small = []
for p in model.parameters():
if not p.requires_grad:
continue
elif hasattr(p, 'wd_small') and p.wd_small:
pg_small.append(p)
else:
pg_normal.append(p)
params = [
{'params': pg_normal, 'weight_decay': 1e-4},
{'params': pg_small, 'weight_decay': 1e-5}
]
# Define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().to(device)
optimizer = torch.optim.SGD(params,
lr=args.get('lr_init', 0.1),
momentum=args.get('momentum', 0.9),
nesterov=True)
train_set, test_set = get_dataset(args['dataset'])
n_workers = max(8*n_gpus, 4)
train_loader = torch.utils.data.DataLoader(train_set,
shuffle=True,
pin_memory=True,
batch_size=args['batch_size'],
num_workers=n_workers)
test_loader = torch.utils.data.DataLoader(test_set,
shuffle=False,
pin_memory=True,
batch_size=args['batch_size'],
num_workers=n_workers)
best = 100.0
for epoch in range(args['num_epochs']):
train(epoch, model, criterion, optimizer, train_loader, logger, args)
error = test(epoch, model, criterion, test_loader, logger)
# Perform dealloc/realloc for SelectiveConv2d modules
for m in model.modules():
if type(m).__name__ in ['SelectiveConv2d']:
if epoch < 0.5 * args['num_epochs']:
m.dealloc()
m.realloc()
if isinstance(model, nn.DataParallel):
save_states = model.module.state_dict()
else:
save_states = model.state_dict()
is_best = (best > error)
if is_best:
best = error
save_checkpoint(epoch, args, best,
save_states, optimizer.state_dict(),
logger.logdir, is_best)
logger.scalar_summary('best', best, epoch)
logger.log('[Epoch %3d] [Test %5.2f] [Best %5.2f]' % (epoch, error, best))
if __name__ == '__main__':
config_path = sys.argv[1]
with open(config_path) as file:
config = file.read()
print(config)
args = json.loads(config)
config_fn = os.path.split(config_path)[-1].split('.')[0]
main(args, config_fn)
```
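`main.py` expects a single JSON config path on the command line. Below is a hedged sketch of such a config; the hyperparameter values are illustrative, and the `model` key must name an entry in `models.__dict__`, e.g. the `condensenet_sconv182` defined in the next file.
```python
# Minimal sketch of a config consumed by main.py; values are illustrative and
# model_hparams must match what the chosen model expects (here: gamma, dropout_rate).
# n_classes and dataset are filled in by main() itself.
import json

config = {
    "dataset": "cifar100",
    "model": "condensenet_sconv182",        # must be a key in models.__dict__
    "model_hparams": {"gamma": 0.001, "dropout_rate": 0.0},
    "batch_size": 64,
    "num_epochs": 300,
    "lr_init": 0.1,
    "momentum": 0.9,
}

with open("condensenet_sconv182_c100.json", "w") as f:
    json.dump(config, f, indent=2)

# Then launch training with:
#   python main.py condensenet_sconv182_c100.json
```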
#### File: selective-convolution/models/condensenet_sconv.py
```python
from collections import OrderedDict
import math
import torch
import torch.nn as nn
from selective_convolution import SelectiveConv2d
from models import BRC
class _CondenseSConvLayer(nn.Module):
def __init__(self, n_channels, growth_rate, args):
super(_CondenseSConvLayer, self).__init__()
gamma = args['gamma']
dropout_rate = args['dropout_rate']
self.brc_1 = SelectiveConv2d(n_channels, 4*growth_rate, dropout_rate=dropout_rate,
gamma=gamma, K=0, N_max=None)
self.brc_2 = BRC(4*growth_rate, growth_rate,
kernel_size=3, padding=1, groups=4)
def forward(self, x):
x_ = self.brc_1(x)
x_ = self.brc_2(x_)
return torch.cat([x, x_], 1)
class _CondenseSConvBlock(nn.Sequential):
def __init__(self, args, n_layers, n_channels, growth_rate):
super(_CondenseSConvBlock, self).__init__()
for i in range(n_layers):
layer = _CondenseSConvLayer(n_channels + i*growth_rate, growth_rate, args)
self.add_module('layer%d' % (i + 1), layer)
class _Transition(nn.Module):
def __init__(self):
super(_Transition, self).__init__()
self.pool = nn.AvgPool2d(kernel_size=2, stride=2)
def forward(self, x):
x = self.pool(x)
return x
class CondenseNetSConv(nn.Module):
def __init__(self, args, block_config, growth_rates):
# Network-level hyperparameters
self.block_config = block_config
self.growth_rates = growth_rates
self.dataset = args['dataset']
self.n_classes = args['n_classes']
# Layer-level hyperparameters
self.args = args
super(CondenseNetSConv, self).__init__()
i_channels = 2 * self.growth_rates[0]
self.conv0 = nn.Conv2d(3, i_channels, kernel_size=3, stride=1, padding=1, bias=False)
self.features = nn.Sequential()
n_channels = i_channels
for i, n_layers in enumerate(self.block_config):
growth_rate = self.growth_rates[i]
block = _CondenseSConvBlock(args=args, n_layers=n_layers,
n_channels=n_channels, growth_rate=growth_rate)
self.features.add_module('block%d' % (i + 1), block)
n_channels = n_channels + n_layers * growth_rate
if i != len(self.block_config) - 1:
trans = _Transition()
self.features.add_module('trans%d' % (i + 1), trans)
self.features.add_module('norm_last', nn.BatchNorm2d(n_channels))
self.features.add_module('relu_last', nn.ReLU(inplace=True))
self.features.add_module('pool_last', nn.AvgPool2d(8))
self.classifier = nn.Linear(n_channels, self.n_classes)
self.reset()
def reset(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
if m.weight is not None:
m.weight.data.fill_(1)
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
y = self.conv0(x)
features = self.features(y)
out = features.view(features.size(0), -1)
out = self.classifier(out)
return out
def condensenet_sconv182(hparams):
return CondenseNetSConv(hparams, block_config=[30, 30, 30], growth_rates=[12, 24, 48])
``` |
{
"source": "jh-jeong/smoothing-consistency",
"score": 3
} |
#### File: code/archs/densenet.py
```python
from collections import OrderedDict
import math
import torch
import torch.nn as nn
class BRC(nn.Sequential):
"""Abbreviation of BN-ReLU-Conv"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
dropout_rate=0.0, groups=1):
super(BRC, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.add_module('norm', nn.BatchNorm2d(in_channels))
self.add_module('relu', nn.ReLU(inplace=True))
if dropout_rate > 0:
self.add_module('drop', nn.Dropout(dropout_rate))
self.add_module('conv', nn.Conv2d(in_channels, out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding, bias=False,
groups=groups))
class _DenseLayer(nn.Module):
def __init__(self, n_channels, growth_rate):
super(_DenseLayer, self).__init__()
self.brc_1 = BRC(n_channels, 4*growth_rate, kernel_size=1)
self.brc_2 = BRC(4*growth_rate, growth_rate, kernel_size=3, padding=1)
def forward(self, x):
x_ = self.brc_1(x)
x_ = self.brc_2(x_)
return torch.cat([x, x_], 1)
class _DenseBlock(nn.Sequential):
def __init__(self, n_layers, n_channels, growth_rate):
super(_DenseBlock, self).__init__()
for i in range(n_layers):
layer = _DenseLayer(n_channels + i*growth_rate, growth_rate)
self.add_module('layer%d' % (i + 1), layer)
class _Transition(nn.Module):
def __init__(self, in_channels, out_channels):
super(_Transition, self).__init__()
if in_channels != out_channels:
self.brc = BRC(in_channels, out_channels, kernel_size=1)
self.pool = nn.AvgPool2d(kernel_size=2, stride=2)
def forward(self, x):
if hasattr(self, 'brc'):
x = self.brc(x)
x = self.pool(x)
return x
class DenseNet(nn.Module):
def __init__(self, block_config, num_classes=10, growth_rate=12, compression=1.0):
# Network-level hyperparameters
self.block_config = block_config
self.n_classes = num_classes
self.growth_rate = growth_rate
self.compression = compression
assert 0 < self.compression <= 1, '0 < compression <= 1'
super(DenseNet, self).__init__()
i_channels = 2 * self.growth_rate
i_features = [
('conv0', nn.Conv2d(3, i_channels, kernel_size=3, stride=1, padding=1, bias=False)),
]
last_pool = 8
self.features = nn.Sequential(OrderedDict(i_features))
n_channels = i_channels
for i, n_layers in enumerate(self.block_config):
block = _DenseBlock(n_layers=n_layers, n_channels=n_channels, growth_rate=self.growth_rate)
self.features.add_module('block%d' % (i + 1), block)
n_channels = n_channels + n_layers * self.growth_rate
if i != len(self.block_config) - 1:
trans = _Transition(in_channels=n_channels, out_channels=int(n_channels * self.compression))
self.features.add_module('trans%d' % (i + 1), trans)
n_channels = int(n_channels * self.compression)
self.features.add_module('norm_last', nn.BatchNorm2d(n_channels))
self.features.add_module('relu_last', nn.ReLU(inplace=True))
self.features.add_module('pool_last', nn.AvgPool2d(last_pool))
self.classifier = nn.Linear(n_channels, self.n_classes)
self.reset()
def reset(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
if m.weight is not None:
m.weight.data.fill_(1)
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
features = self.features(x)
out = features.view(features.size(0), -1)
out = self.classifier(out)
return out
def densenet40(**kwargs):
return DenseNet(block_config=[6, 6, 6], growth_rate=12, compression=1.0, **kwargs)
```
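A quick shape check for the 40-layer CIFAR variant above; the import path is an assumption based on this file's location.
```python
# Minimal sketch; import path is an assumption based on this file's location.
import torch
from archs.densenet import densenet40

model = densenet40(num_classes=10)
x = torch.randn(4, 3, 32, 32)        # CIFAR-sized inputs
logits = model(x)
print(logits.shape)                   # torch.Size([4, 10])
print(sum(p.numel() for p in model.parameters()))   # parameter count
```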
#### File: code/third_party/smoothadv.py
```python
from abc import ABCMeta, abstractmethod
import numpy as np
import torch
from torch.autograd import Variable
from torch.nn import CrossEntropyLoss
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from typing import Optional
class Attacker(metaclass=ABCMeta):
@abstractmethod
def attack(self, model, inputs, labels):
raise NotImplementedError
# Modification of the code from https://github.com/jeromerony/fast_adversarial
class PGD_L2(Attacker):
"""
PGD attack
Parameters
----------
steps : int
Number of steps for the optimization.
max_norm : float or None, optional
If specified, the norms of the perturbations will not be greater than this value which might lower success rate.
device : torch.device, optional
Device on which to perform the attack.
"""
def __init__(self,
steps: int,
random_start: bool = True,
max_norm: Optional[float] = None,
device: torch.device = torch.device('cpu')) -> None:
super(PGD_L2, self).__init__()
self.steps = steps
self.random_start = random_start
self.max_norm = max_norm
self.device = device
def attack(self, model: nn.Module, inputs: torch.Tensor, labels: torch.Tensor,
noise: torch.Tensor = None, num_noise_vectors=1, targeted: bool = False, no_grad=False) -> torch.Tensor:
if num_noise_vectors == 1:
return self._attack(model, inputs, labels, noise, targeted)
else:
if no_grad:
with torch.no_grad():
return self._attack_mutlinoise_no_grad(model, inputs, labels, noise, num_noise_vectors, targeted)
else:
return self._attack_mutlinoise(model, inputs, labels, noise, num_noise_vectors, targeted)
def _attack(self, model: nn.Module, inputs: torch.Tensor, labels: torch.Tensor,
noise: torch.Tensor = None, targeted: bool = False) -> torch.Tensor:
"""
Performs the attack of the model for the inputs and labels.
Parameters
----------
model : nn.Module
Model to attack.
inputs : torch.Tensor
Batch of samples to attack. Values should be in the [0, 1] range.
labels : torch.Tensor
Labels of the samples to attack if untargeted, else labels of targets.
targeted : bool, optional
Whether to perform a targeted attack or not.
Returns
-------
torch.Tensor
Batch of samples modified to be adversarial to the model.
"""
if inputs.min() < 0 or inputs.max() > 1: raise ValueError('Input values should be in the [0, 1] range.')
batch_size = inputs.shape[0]
multiplier = 1 if targeted else -1
delta = torch.zeros_like(inputs, requires_grad=True)
# Setup optimizers
optimizer = optim.SGD([delta], lr=self.max_norm / self.steps * 2)
for i in range(self.steps):
adv = inputs + delta
if noise is not None:
adv = adv + noise
logits = model(adv)
pred_labels = logits.argmax(1)
ce_loss = F.cross_entropy(logits, labels, reduction='sum')
loss = multiplier * ce_loss
optimizer.zero_grad()
loss.backward()
# renorming gradient
grad_norms = delta.grad.view(batch_size, -1).norm(p=2, dim=1)
delta.grad.div_(grad_norms.view(-1, 1, 1, 1))
# avoid nan or inf if gradient is 0
if (grad_norms == 0).any():
delta.grad[grad_norms == 0] = torch.randn_like(delta.grad[grad_norms == 0])
optimizer.step()
delta.data.add_(inputs)
delta.data.clamp_(0, 1).sub_(inputs)
delta.data.renorm_(p=2, dim=0, maxnorm=self.max_norm)
return inputs + delta
def _attack_mutlinoise(self, model: nn.Module, inputs: torch.Tensor, labels: torch.Tensor,
noise: torch.Tensor = None, num_noise_vectors: int = 1,
targeted: bool = False) -> torch.Tensor:
"""
Performs the attack of the model for the inputs and labels.
Parameters
----------
model : nn.Module
Model to attack.
inputs : torch.Tensor
Batch of samples to attack. Values should be in the [0, 1] range.
labels : torch.Tensor
Labels of the samples to attack if untargeted, else labels of targets.
targeted : bool, optional
Whether to perform a targeted attack or not.
Returns
-------
torch.Tensor
Batch of samples modified to be adversarial to the model.
"""
if inputs.min() < 0 or inputs.max() > 1: raise ValueError('Input values should be in the [0, 1] range.')
batch_size = labels.shape[0]
multiplier = 1 if targeted else -1
delta = torch.zeros((len(labels), *inputs.shape[1:]), requires_grad=True, device=self.device)
# Setup optimizers
optimizer = optim.SGD([delta], lr=self.max_norm / self.steps * 2)
for i in range(self.steps):
adv = inputs + delta.repeat(1, num_noise_vectors, 1, 1).view_as(inputs)
if noise is not None:
adv = adv + noise
logits = model(adv)
pred_labels = logits.argmax(1).reshape(-1, num_noise_vectors).mode(1)[0]
            # safe softmax
softmax = F.softmax(logits, dim=1)
# average the probabilities across noise
average_softmax = softmax.reshape(-1, num_noise_vectors, logits.shape[-1]).mean(1, keepdim=True).squeeze(1)
logsoftmax = torch.log(average_softmax.clamp(min=1e-20))
ce_loss = F.nll_loss(logsoftmax, labels)
loss = multiplier * ce_loss
optimizer.zero_grad()
loss.backward()
# renorming gradient
grad_norms = delta.grad.view(batch_size, -1).norm(p=2, dim=1)
delta.grad.div_(grad_norms.view(-1, 1, 1, 1))
# avoid nan or inf if gradient is 0
if (grad_norms == 0).any():
delta.grad[grad_norms == 0] = torch.randn_like(delta.grad[grad_norms == 0])
optimizer.step()
delta.data.add_(inputs[::num_noise_vectors])
delta.data.clamp_(0, 1).sub_(inputs[::num_noise_vectors])
delta.data.renorm_(p=2, dim=0, maxnorm=self.max_norm)
return inputs + delta.repeat(1, num_noise_vectors, 1, 1).view_as(inputs)
def _attack_mutlinoise_no_grad(self, model: nn.Module, inputs: torch.Tensor, labels: torch.Tensor,
noise: torch.Tensor = None, num_noise_vectors: int = 1,
targeted: bool = False) -> torch.Tensor:
"""
Performs the attack of the model for the inputs and labels.
Parameters
----------
model : nn.Module
Model to attack.
inputs : torch.Tensor
Batch of samples to attack. Values should be in the [0, 1] range.
labels : torch.Tensor
Labels of the samples to attack if untargeted, else labels of targets.
targeted : bool, optional
Whether to perform a targeted attack or not.
Returns
-------
torch.Tensor
Batch of samples modified to be adversarial to the model.
"""
if inputs.min() < 0 or inputs.max() > 1: raise ValueError('Input values should be in the [0, 1] range.')
batch_size = labels.shape[0]
multiplier = 1 if targeted else -1
delta = torch.zeros((len(labels), *inputs.shape[1:]), requires_grad=True, device=self.device)
# Setup optimizers
optimizer = optim.SGD([delta], lr=self.max_norm / self.steps * 2)
for i in range(self.steps):
adv = inputs + delta.repeat(1, num_noise_vectors, 1, 1).view_as(inputs)
if noise is not None:
adv = adv + noise
logits = model(adv)
pred_labels = logits.argmax(1).reshape(-1, num_noise_vectors).mode(1)[0]
            # safe softmax
softmax = F.softmax(logits, dim=1)
grad = F.nll_loss(softmax,
labels.unsqueeze(1).repeat(1, 1, num_noise_vectors).view(batch_size * num_noise_vectors),
reduction='none').repeat(*noise.shape[1:], 1).permute(3, 0, 1, 2) * noise
grad = grad.reshape(-1, num_noise_vectors, *inputs.shape[1:]).mean(1)
# average the probabilities across noise
grad_norms = grad.view(batch_size, -1).norm(p=2, dim=1)
grad.div_(grad_norms.view(-1, 1, 1, 1))
# avoid nan or inf if gradient is 0
if (grad_norms == 0).any():
grad[grad_norms == 0] = torch.randn_like(grad[grad_norms == 0])
# optimizer.step()
delta = delta + grad * self.max_norm / self.steps * 2
delta.data.add_(inputs[::num_noise_vectors])
delta.data.clamp_(0, 1).sub_(inputs[::num_noise_vectors])
delta.data.renorm_(p=2, dim=0, maxnorm=self.max_norm)
return inputs + delta.repeat(1, num_noise_vectors, 1, 1).view_as(inputs)
# Source code from https://github.com/jeromerony/fast_adversarial
class DDN(Attacker):
"""
DDN attack: decoupling the direction and norm of the perturbation to achieve a small L2 norm in few steps.
Parameters
----------
steps : int
Number of steps for the optimization.
gamma : float, optional
Factor by which the norm will be modified. new_norm = norm * (1 + or - gamma).
init_norm : float, optional
Initial value for the norm.
quantize : bool, optional
If True, the returned adversarials will have quantized values to the specified number of levels.
levels : int, optional
Number of levels to use for quantization (e.g. 256 for 8 bit images).
max_norm : float or None, optional
If specified, the norms of the perturbations will not be greater than this value which might lower success rate.
device : torch.device, optional
Device on which to perform the attack.
callback : object, optional
Visdom callback to display various metrics.
"""
def __init__(self,
steps: int,
gamma: float = 0.05,
init_norm: float = 1.,
quantize: bool = True,
levels: int = 256,
max_norm: Optional[float] = None,
device: torch.device = torch.device('cpu'),
callback: Optional = None) -> None:
super(DDN, self).__init__()
self.steps = steps
self.gamma = gamma
self.init_norm = init_norm
self.quantize = quantize
self.levels = levels
self.max_norm = max_norm
self.device = device
self.callback = callback
def attack(self, model: nn.Module, inputs: torch.Tensor, labels: torch.Tensor,
noise: torch.Tensor = None, num_noise_vectors=1, targeted: bool = False, no_grad=False) -> torch.Tensor:
if num_noise_vectors == 1:
return self._attack(model, inputs, labels, noise, targeted)
# return self._attack_mutlinoise(model, inputs, labels, noise, num_noise_vectors, targeted)
else:
if no_grad:
raise NotImplementedError
else:
return self._attack_mutlinoise(model, inputs, labels, noise, num_noise_vectors, targeted)
def _attack(self, model: nn.Module, inputs: torch.Tensor, labels: torch.Tensor,
noise: torch.Tensor = None, targeted: bool = False) -> torch.Tensor:
"""
Performs the attack of the model for the inputs and labels.
Parameters
----------
model : nn.Module
Model to attack.
inputs : torch.Tensor
Batch of samples to attack. Values should be in the [0, 1] range.
labels : torch.Tensor
Labels of the samples to attack if untargeted, else labels of targets.
targeted : bool, optional
Whether to perform a targeted attack or not.
Returns
-------
torch.Tensor
Batch of samples modified to be adversarial to the model.
"""
if inputs.min() < 0 or inputs.max() > 1: raise ValueError('Input values should be in the [0, 1] range.')
batch_size = inputs.shape[0]
multiplier = 1 if targeted else -1
delta = torch.zeros_like(inputs, requires_grad=True)
norm = torch.full((batch_size,), self.init_norm, device=self.device, dtype=torch.float)
worst_norm = torch.max(inputs, 1 - inputs).view(batch_size, -1).norm(p=2, dim=1)
# Setup optimizers
optimizer = optim.SGD([delta], lr=1)
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=self.steps, eta_min=0.01)
best_l2 = worst_norm.clone()
best_delta = torch.zeros_like(inputs)
adv_found = torch.zeros(inputs.size(0), dtype=torch.uint8, device=self.device)
for i in range(self.steps):
scheduler.step()
l2 = delta.data.view(batch_size, -1).norm(p=2, dim=1)
adv = inputs + delta
if noise is not None:
adv = adv + noise
logits = model(adv)
pred_labels = logits.argmax(1)
ce_loss = F.cross_entropy(logits, labels, reduction='sum')
loss = multiplier * ce_loss
is_adv = (pred_labels == labels) if targeted else (pred_labels != labels)
is_smaller = l2 < best_l2
is_both = is_adv * is_smaller
adv_found[is_both] = 1
best_l2[is_both] = l2[is_both]
best_delta[is_both] = delta.data[is_both]
optimizer.zero_grad()
loss.backward()
# renorming gradient
grad_norms = delta.grad.view(batch_size, -1).norm(p=2, dim=1)
delta.grad.div_(grad_norms.view(-1, 1, 1, 1))
# avoid nan or inf if gradient is 0
if (grad_norms == 0).any():
delta.grad[grad_norms == 0] = torch.randn_like(delta.grad[grad_norms == 0])
if self.callback:
cosine = F.cosine_similarity(-delta.grad.view(batch_size, -1),
delta.data.view(batch_size, -1), dim=1).mean().item()
self.callback.scalar('ce', i, ce_loss.item() / batch_size)
self.callback.scalars(
['max_norm', 'l2', 'best_l2'], i,
[norm.mean().item(), l2.mean().item(),
best_l2[adv_found].mean().item() if adv_found.any() else norm.mean().item()]
)
self.callback.scalars(['cosine', 'lr', 'success'], i,
[cosine, optimizer.param_groups[0]['lr'], adv_found.float().mean().item()])
optimizer.step()
norm.mul_(1 - (2 * is_adv.float() - 1) * self.gamma)
norm = torch.min(norm, worst_norm)
delta.data.mul_((norm / delta.data.view(batch_size, -1).norm(2, 1)).view(-1, 1, 1, 1))
delta.data.add_(inputs)
if self.quantize:
delta.data.mul_(self.levels - 1).round_().div_(self.levels - 1)
delta.data.clamp_(0, 1).sub_(inputs)
if self.max_norm is not None:
best_delta.renorm_(p=2, dim=0, maxnorm=self.max_norm)
if self.quantize:
best_delta.mul_(self.levels - 1).round_().div_(self.levels - 1)
return inputs + best_delta
def _attack_mutlinoise(self, model: nn.Module, inputs: torch.Tensor, labels: torch.Tensor,
noise: torch.Tensor = None, num_noise_vectors: int = 1,
targeted: bool = False) -> torch.Tensor:
"""
Performs the attack of the model for the inputs and labels.
Parameters
----------
model : nn.Module
Model to attack.
inputs : torch.Tensor
Batch of samples to attack. Values should be in the [0, 1] range.
labels : torch.Tensor
Labels of the samples to attack if untargeted, else labels of targets.
targeted : bool, optional
Whether to perform a targeted attack or not.
Returns
-------
torch.Tensor
Batch of samples modified to be adversarial to the model.
"""
if inputs.min() < 0 or inputs.max() > 1: raise ValueError('Input values should be in the [0, 1] range.')
batch_size = labels.shape[0]
multiplier = 1 if targeted else -1
delta = torch.zeros((len(labels), *inputs.shape[1:]), requires_grad=True, device=self.device)
norm = torch.full((batch_size,), self.init_norm, device=self.device, dtype=torch.float)
worst_norm = torch.max(inputs[::num_noise_vectors], 1 - inputs[::num_noise_vectors]).view(batch_size, -1).norm(
p=2, dim=1)
# Setup optimizers
optimizer = optim.SGD([delta], lr=1)
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=self.steps, eta_min=0.01)
best_l2 = worst_norm.clone()
best_delta = torch.zeros_like(inputs[::num_noise_vectors])
adv_found = torch.zeros(inputs[::num_noise_vectors].size(0), dtype=torch.uint8, device=self.device)
for i in range(self.steps):
scheduler.step()
l2 = delta.data.view(batch_size, -1).norm(p=2, dim=1)
adv = inputs + delta.repeat(1, num_noise_vectors, 1, 1).view_as(inputs)
if noise is not None:
adv = adv + noise
logits = model(adv)
pred_labels = logits.argmax(1).reshape(-1, num_noise_vectors).mode(1)[0]
            # safe softmax
softmax = F.softmax(logits, dim=1)
# average the probabilities across noise
average_softmax = softmax.reshape(-1, num_noise_vectors, logits.shape[-1]).mean(1, keepdim=True).squeeze(1)
logsoftmax = torch.log(average_softmax.clamp(min=1e-20))
ce_loss = F.nll_loss(logsoftmax, labels)
loss = multiplier * ce_loss
is_adv = (pred_labels == labels) if targeted else (pred_labels != labels)
is_smaller = l2 < best_l2
is_both = is_adv * is_smaller
adv_found[is_both] = 1
best_l2[is_both] = l2[is_both]
best_delta[is_both] = delta.data[is_both]
optimizer.zero_grad()
loss.backward()
# renorming gradient
grad_norms = delta.grad.view(batch_size, -1).norm(p=2, dim=1)
delta.grad.div_(grad_norms.view(-1, 1, 1, 1))
# avoid nan or inf if gradient is 0
if (grad_norms == 0).any():
delta.grad[grad_norms == 0] = torch.randn_like(delta.grad[grad_norms == 0])
if self.callback:
cosine = F.cosine_similarity(-delta.grad.view(batch_size, -1),
delta.data.view(batch_size, -1), dim=1).mean().item()
self.callback.scalar('ce', i, ce_loss.item() / batch_size)
self.callback.scalars(
['max_norm', 'l2', 'best_l2'], i,
[norm.mean().item(), l2.mean().item(),
best_l2[adv_found].mean().item() if adv_found.any() else norm.mean().item()]
)
self.callback.scalars(['cosine', 'lr', 'success'], i,
[cosine, optimizer.param_groups[0]['lr'], adv_found.float().mean().item()])
optimizer.step()
norm.mul_(1 - (2 * is_adv.float() - 1) * self.gamma)
norm = torch.min(norm, worst_norm)
delta.data.mul_((norm / delta.data.view(batch_size, -1).norm(2, 1)).view(-1, 1, 1, 1))
delta.data.add_(inputs[::num_noise_vectors])
if self.quantize:
delta.data.mul_(self.levels - 1).round_().div_(self.levels - 1)
delta.data.clamp_(0, 1).sub_(inputs[::num_noise_vectors])
if self.max_norm is not None:
best_delta.renorm_(p=2, dim=0, maxnorm=self.max_norm)
if self.quantize:
best_delta.mul_(self.levels - 1).round_().div_(self.levels - 1)
return inputs + best_delta.repeat(1, num_noise_vectors, 1, 1).view_as(inputs)
```
#### File: smoothing-consistency/code/train_cohen.py
```python
import argparse
import time
import torch
from torch.optim import Optimizer
from torch.utils.data import DataLoader
from architectures import ARCHITECTURES
from datasets import DATASETS
from train_utils import AverageMeter, accuracy, log, test
from train_utils import prologue
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('dataset', type=str, choices=DATASETS)
parser.add_argument('arch', type=str, choices=ARCHITECTURES)
parser.add_argument('--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--batch', default=256, type=int, metavar='N',
help='batchsize (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
help='initial learning rate', dest='lr')
parser.add_argument('--lr_step_size', type=int, default=30,
help='How often to decrease learning by gamma.')
parser.add_argument('--gamma', type=float, default=0.1,
help='LR is multiplied by gamma on schedule.')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--noise_sd', default=0.0, type=float,
help="standard deviation of Gaussian noise for data augmentation")
parser.add_argument('--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--id', default=None, type=int,
help='experiment id, `randint(10000)` if None')
#####################
# Options added by Salman et al. (2019)
parser.add_argument('--resume', action='store_true',
help='if true, tries to resume training from existing checkpoint')
parser.add_argument('--pretrained-model', type=str, default='',
help='Path to a pretrained model')
args = parser.parse_args()
args.outdir = f"logs/{args.dataset}/cohen/noise_{args.noise_sd}"
def main():
train_loader, test_loader, criterion, model, optimizer, scheduler, \
starting_epoch, logfilename, model_path, device, writer = prologue(args)
for epoch in range(starting_epoch, args.epochs):
before = time.time()
train_loss, train_acc = train(train_loader, model, criterion, optimizer, epoch, args.noise_sd, device, writer)
test_loss, test_acc = test(test_loader, model, criterion, epoch, args.noise_sd, device, writer, args.print_freq)
after = time.time()
log(logfilename, "{}\t{:.3}\t{:.3}\t{:.3}\t{:.3}\t{:.3}\t{:.3}".format(
epoch, after - before,
scheduler.get_lr()[0], train_loss, train_acc, test_loss, test_acc))
# In PyTorch 1.1.0 and later, you should call `optimizer.step()` before `lr_scheduler.step()`.
# See more details at https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate
scheduler.step(epoch)
torch.save({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}, model_path)
def train(loader: DataLoader, model: torch.nn.Module, criterion, optimizer: Optimizer,
epoch: int, noise_sd: float, device: torch.device, writer=None):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
end = time.time()
# switch to train mode
model.train()
for i, (inputs, targets) in enumerate(loader):
# measure data loading time
data_time.update(time.time() - end)
inputs, targets = inputs.to(device), targets.to(device)
batch_size = inputs.size(0)
# augment inputs with noise
inputs = inputs + torch.randn_like(inputs, device=device) * noise_sd
# compute output
outputs = model(inputs)
loss = criterion(outputs, targets)
# measure accuracy and record loss
acc1, acc5 = accuracy(outputs, targets, topk=(1, 5))
losses.update(loss.item(), batch_size)
top1.update(acc1.item(), batch_size)
top5.update(acc5.item(), batch_size)
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.avg:.3f}\t'
'Data {data_time.avg:.3f}\t'
'Loss {loss.avg:.4f}\t'
'Acc@1 {top1.avg:.3f}\t'
'Acc@5 {top5.avg:.3f}'.format(
epoch, i, len(loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
if writer:
writer.add_scalar('loss/train', losses.avg, epoch)
writer.add_scalar('batch_time', batch_time.avg, epoch)
writer.add_scalar('accuracy/train@1', top1.avg, epoch)
writer.add_scalar('accuracy/train@5', top5.avg, epoch)
return (losses.avg, top1.avg)
if __name__ == "__main__":
main()
``` |
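The heart of the Cohen-style training script above is the single augmentation line `inputs + torch.randn_like(inputs) * noise_sd`; the rest is a standard classification loop. A minimal standalone sketch of that step, using a stand-in model and random tensors rather than anything from the repository:
```python
import torch
import torch.nn as nn

noise_sd = 0.25                      # corresponds to the --noise_sd argument
model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10))  # stand-in classifier
inputs = torch.rand(8, 3, 32, 32)    # fake batch of CIFAR-sized images
targets = torch.randint(0, 10, (8,))

# Gaussian data augmentation: every image is perturbed with N(0, noise_sd^2) noise
noisy_inputs = inputs + torch.randn_like(inputs) * noise_sd
loss = nn.CrossEntropyLoss()(model(noisy_inputs), targets)
loss.backward()
```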
{
"source": "jh-jeong/smoothmix",
"score": 2
} |
#### File: smoothmix/code/train_consistency.py
```python
import argparse
import time
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Optimizer
from torch.utils.data import DataLoader
from architectures import ARCHITECTURES
from datasets import DATASETS
from third_party.smoothadv import Attacker
from train_utils import AverageMeter, accuracy, log, requires_grad_, test
from train_utils import prologue
from third_party.consistency import consistency_loss
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('dataset', type=str, choices=DATASETS)
parser.add_argument('arch', type=str, choices=ARCHITECTURES)
parser.add_argument('--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=150, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--batch', default=256, type=int, metavar='N',
help='batchsize (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
help='initial learning rate', dest='lr')
parser.add_argument('--lr_step_size', type=int, default=50,
help='How often to decrease learning by gamma.')
parser.add_argument('--gamma', type=float, default=0.1,
help='LR is multiplied by gamma on schedule.')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--noise_sd', default=0.0, type=float,
help="standard deviation of Gaussian noise for data augmentation")
parser.add_argument('--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--id', default=None, type=int,
help='experiment id, `randint(10000)` if None')
#####################
# Options added by Salman et al. (2019)
parser.add_argument('--resume', action='store_true',
help='if true, tries to resume training from existing checkpoint')
parser.add_argument('--pretrained-model', type=str, default='',
help='Path to a pretrained model')
#####################
parser.add_argument('--num-noise-vec', default=1, type=int,
help="number of noise vectors. `m` in the paper.")
parser.add_argument('--lbd', default=10., type=float)
parser.add_argument('--eta', default=0.5, type=float)
# Options when SmoothAdv is used (Salman et al., 2019)
parser.add_argument('--adv-training', action='store_true')
parser.add_argument('--epsilon', default=512, type=float)
parser.add_argument('--num-steps', default=4, type=int)
parser.add_argument('--warmup', default=10, type=int, help="Number of epochs over which "
"the maximum allowed perturbation increases linearly "
"from zero to args.epsilon.")
args = parser.parse_args()
if args.adv_training:
mode = f"salman_{args.epsilon}_{args.num_steps}_{args.warmup}"
else:
mode = f"cohen"
args.outdir = f"logs/{args.dataset}/consistency/{mode}/num_{args.num_noise_vec}/lbd_{args.lbd}/eta_{args.eta}/noise_{args.noise_sd}"
args.epsilon /= 256.0
def main():
train_loader, test_loader, criterion, model, optimizer, scheduler, \
starting_epoch, logfilename, model_path, device, writer = prologue(args)
if args.adv_training:
attacker = SmoothAdv_PGD(steps=args.num_steps, device=device, max_norm=args.epsilon)
else:
attacker = None
for epoch in range(starting_epoch, args.epochs):
if args.adv_training:
attacker.max_norm = np.min([args.epsilon, (epoch + 1) * args.epsilon / args.warmup])
before = time.time()
train_loss, train_acc = train(train_loader, model, criterion, optimizer, epoch,
args.noise_sd, attacker, device, writer)
test_loss, test_acc = test(test_loader, model, criterion, epoch, args.noise_sd, device, writer, args.print_freq)
after = time.time()
log(logfilename, "{}\t{:.3}\t{:.3}\t{:.3}\t{:.3}\t{:.3}\t{:.3}".format(
epoch, after - before,
scheduler.get_lr()[0], train_loss, train_acc, test_loss, test_acc))
# In PyTorch 1.1.0 and later, you should call `optimizer.step()` before `lr_scheduler.step()`.
# See more details at https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate
scheduler.step(epoch)
torch.save({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}, model_path)
def _chunk_minibatch(batch, num_batches):
X, y = batch
batch_size = len(X) // num_batches
for i in range(num_batches):
yield X[i*batch_size : (i+1)*batch_size], y[i*batch_size : (i+1)*batch_size]
def train(loader: DataLoader, model: torch.nn.Module, criterion, optimizer: Optimizer, epoch: int, noise_sd: float,
attacker: Attacker, device: torch.device, writer=None):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
losses_reg = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
end = time.time()
# switch to train mode
model.train()
requires_grad_(model, True)
for i, batch in enumerate(loader):
# measure data loading time
data_time.update(time.time() - end)
mini_batches = _chunk_minibatch(batch, args.num_noise_vec)
for inputs, targets in mini_batches:
inputs, targets = inputs.to(device), targets.to(device)
batch_size = inputs.size(0)
noises = [torch.randn_like(inputs, device=device) * noise_sd
for _ in range(args.num_noise_vec)]
if args.adv_training:
requires_grad_(model, False)
model.eval()
inputs = attacker.attack(model, inputs, targets, noises=noises)
model.train()
requires_grad_(model, True)
# augment inputs with noise
inputs_c = torch.cat([inputs + noise for noise in noises], dim=0)
targets_c = targets.repeat(args.num_noise_vec)
logits = model(inputs_c)
loss_xent = criterion(logits, targets_c)
logits_chunk = torch.chunk(logits, args.num_noise_vec, dim=0)
loss_con = consistency_loss(logits_chunk, args.lbd, args.eta)
loss = loss_xent + loss_con
acc1, acc5 = accuracy(logits, targets_c, topk=(1, 5))
losses.update(loss_xent.item(), batch_size)
losses_reg.update(loss_con.item(), batch_size)
top1.update(acc1.item(), batch_size)
top5.update(acc5.item(), batch_size)
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.avg:.3f}\t'
'Data {data_time.avg:.3f}\t'
'Loss {loss.avg:.4f}\t'
'Acc@1 {top1.avg:.3f}\t'
'Acc@5 {top5.avg:.3f}'.format(
epoch, i, len(loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
writer.add_scalar('loss/train', losses.avg, epoch)
writer.add_scalar('loss/consistency', losses_reg.avg, epoch)
writer.add_scalar('batch_time', batch_time.avg, epoch)
writer.add_scalar('accuracy/train@1', top1.avg, epoch)
writer.add_scalar('accuracy/train@5', top5.avg, epoch)
return (losses.avg, top1.avg)
class SmoothAdv_PGD(Attacker):
"""
SmoothAdv PGD L2 attack
Parameters
----------
steps : int
Number of steps for the optimization.
max_norm : float or None, optional
If specified, the norms of the perturbations will not be greater than this value which might lower success rate.
device : torch.device, optional
Device on which to perform the attack.
"""
def __init__(self,
steps: int,
random_start: bool = True,
max_norm: Optional[float] = None,
device: torch.device = torch.device('cpu')) -> None:
super(SmoothAdv_PGD, self).__init__()
self.steps = steps
self.random_start = random_start
self.max_norm = max_norm
self.device = device
def attack(self, model, inputs, labels, noises=None):
"""
Performs SmoothAdv PGD L2 attack of the model for the inputs and labels.
Parameters
----------
model : nn.Module
Model to attack.
inputs : torch.Tensor
Batch of samples to attack. Values should be in the [0, 1] range.
labels : torch.Tensor
Labels of the samples to attack.
noises : List[torch.Tensor]
Lists of noise samples to attack.
Returns
-------
torch.Tensor
Batch of samples modified to be adversarial to the model.
"""
if inputs.min() < 0 or inputs.max() > 1: raise ValueError('Input values should be in the [0, 1] range.')
def _batch_l2norm(x):
x_flat = x.reshape(x.size(0), -1)
return torch.norm(x_flat, dim=1)
adv = inputs.detach()
alpha = self.max_norm / self.steps * 2
for i in range(self.steps):
adv.requires_grad_()
logits = [model(adv + noise) for noise in noises]
softmax = [F.softmax(logit, dim=1) for logit in logits]
avg_softmax = sum(softmax) / len(noises)
logsoftmax = torch.log(avg_softmax.clamp(min=1e-20))
loss = F.nll_loss(logsoftmax, labels)
grad = torch.autograd.grad(loss, [adv])[0]
grad_norm = _batch_l2norm(grad).view(-1, 1, 1, 1)
grad = grad / (grad_norm + 1e-8)
adv = adv + alpha * grad
eta_x_adv = adv - inputs
eta_x_adv = eta_x_adv.renorm(p=2, dim=0, maxnorm=self.max_norm)
adv = inputs + eta_x_adv
adv = torch.clamp(adv, 0, 1)
adv = adv.detach()
return adv
if __name__ == "__main__":
main()
``` |
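A shape-only sketch of how one loaded batch flows through `_chunk_minibatch` and the noise-vector tiling inside `train` above; the tensors are random stand-ins and the sizes are arbitrary:
```python
import torch

num_noise_vec = 2
noise_sd = 0.5
X, y = torch.rand(8, 3, 32, 32), torch.randint(0, 10, (8,))   # one loaded batch

# _chunk_minibatch: split the batch into num_noise_vec mini-batches
chunk = len(X) // num_noise_vec
inputs, targets = X[:chunk], y[:chunk]                        # first mini-batch (4 images)

# one Gaussian noise tensor per noise vector, then tile the mini-batch
noises = [torch.randn_like(inputs) * noise_sd for _ in range(num_noise_vec)]
inputs_c = torch.cat([inputs + n for n in noises], dim=0)     # shape (8, 3, 32, 32)
targets_c = targets.repeat(num_noise_vec)                     # shape (8,)

# after the forward pass, the logits are split back per noise vector
logits = torch.rand(8, 10)                                    # stand-in for model(inputs_c)
logits_chunk = torch.chunk(logits, num_noise_vec, dim=0)      # 2 tensors of shape (4, 10)
```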
{
"source": "jhjguxin/blogserver",
"score": 2
} |
#### File: blogserver/api/handlers.py
```python
from piston.handler import BaseHandler, AnonymousBaseHandler
from piston.utils import rc, require_mime, require_extended
from blogserver.apps.blog.models import Post
import pdb
class PostHandler(BaseHandler):
allowed_methods = ('GET',)
model = Post
#anonymous = 'AnonymousPostHandler'
fields = ('title', 'content', 'created_on', )
def read(self, request, title=None):
base = Post.objects
if title:
return base.get(title=title)
else:
return base.all()
class AddPostHandler(BaseHandler):
allowed_methods = ('POST', )
def create(self, request):
#pdb.set_trace()
post=Post()
        post.title = request.POST.get("title")
        post.content = request.POST.get("content")
# post = Post(title, content, author=request.user)
post.save()
return post
"""
attrs = self.flatten_dict(request.POST)
if self.exists(**attrs):
return rc.DUPLICATE_ENTRY
else:
post = Post(title=attrs['title'], content=attrs['content'], author=request.user)
post.save()
return post
"""
"""
class AnonymousPostHandler(PostHandler, AnonymousBaseHandler):
# Anonymous entrypoint for blogposts.
fields = ('id', 'title', 'content', 'created_on')
"""
```
#### File: apps/blog/editer_user.py
```python
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.utils.translation import ugettext as _
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.contrib.auth.forms import PasswordResetForm,PasswordChangeForm,AdminPasswordChangeForm
#from django.contrib.auth.models import User
from dynamicresponse.response import *
from forms import *
from models import *
from django.views.decorators.csrf import csrf_exempt
import pdb
"""
def users(request):
users = Users.objects.all()
return render_to_response("users.html", {
'users': users },
RequestContext(request))
def test_js(request):
return render_to_response('test_js.html', {}, RequestContext(request))
"""
"""
@ csrf_exempt
def index_user(request):
"Lists all blog user."
if request.method == 'POST':
user = User.objects.create(title=request.POST.get("title"), reviewer=request.POST.get("reviewer"), email=request.POST.get("email"),content=request.POST.get("content") )
user.save()
form = RegisterForm(request.POST, instance=user)
#users = Users.objects.all()
else:
form = RegisterForm(instance=None)
users = User.objects.all()
#pdb.set_trace()
return SerializeOrRender('blog/index_user.html', { 'users': users }, extra={ 'form': form })
"""
def users_list(request):
"Lists all blog user."
users = User.objects.all()
return SerializeOrRender('blog/users_list.html', { 'users': users })
"""
def delete_user(request, user_id):
"Deletes the blog user."
user = get_object_or_404(User.objects.all(), pk=user_id)
if request.method == 'POST':
user.delete()
return SerializeOrRedirect(reverse('list_users'), {}, status=CR_DELETED)
else:
return SerializeOrRender('blog/delete_user.html', { 'user': user }, status=CR_CONFIRM)
"""
def register(request, user_id=None):
"""Displays, creates or updates a blog users."""
user = None
if user_id:
user = get_object_or_404(User.objects.all(), pk=user_id)
if request.method == 'POST':
form = RegisterForm(request.POST, instance=user)
if form.is_valid():
user = form.save()
return SerializeOrRedirect(reverse('users_list'), { 'user': user })
else:
form = RegisterForm(instance=user)
return SerializeOrRender('blog/user.html', { 'user': user }, extra={ 'form': form })
def u_change(request, user_id=None):
"""Displays, creates or updates a blog users."""
user = None
if user_id:
user = get_object_or_404(User.objects.all(), pk=user_id)
if request.method == 'POST':
form = U_ChangeForm(request.POST, instance=user)
if form.is_valid():
user = form.save()
return SerializeOrRedirect(reverse('users_list'), { 'user': user })
else:
form = U_ChangeForm(instance=user)
return SerializeOrRender('blog/user.html', { 'user': user }, extra={ 'form': form })
def passwordchange(request, user_id=None):
password_change_form=PasswordChangeForm
user = None
if user_id:
        user = get_object_or_404(User.objects.all(), pk=user_id)
if request.method == 'POST':
form = PasswordChangeForm(user, request.POST)
if form.is_valid():
user = form.save()
return SerializeOrRedirect(reverse('list_users'), { 'user': user })
else:
form = password_change_form(user)
return SerializeOrRender('blog/user.html', { 'user': user }, extra={ 'form': form })
"""
def passwordchange(request, user_id=None):
"Displays, creates or updates a blog users."
user = None
if user_id:
user = get_object_or_404(User.objects.all(), pk=user_id)
olduser=User.objects.get(id=user_id)
if request.method == 'POST':
form = U_PasswordChangeForm(request.POST, instance=user)
if form.is_valid():
user = form.save()
return SerializeOrRedirect(reverse('list_users'), { 'user': user })
# else:
# form = U_PasswordChangeForm(instance=user)
return SerializeOrRender('blog/user.html', { 'user': user }, extra={ 'form': form })
"""
"""
def passwordchange(request, is_admin_site=False, template_name='blog/user.html',
email_template_name='registration/password_reset_email.html',
password_reset_form=PasswordResetForm, token_generator=default_token_generator,
post_reset_redirect=None):
if post_reset_redirect is None:
post_reset_redirect = reverse('django.contrib.auth.views.password_reset_done')
if request.method == "POST":
form = password_reset_form(request.POST)
if form.is_valid():
opts = {}
opts['use_https'] = request.is_secure()
opts['token_generator'] = token_generator
if is_admin_site:
opts['domain_override'] = request.META['HTTP_HOST']
else:
opts['email_template_name'] = email_template_name
if not Site._meta.installed:
opts['domain_override'] = RequestSite(request).domain
form.save(**opts)
return HttpResponseRedirect(post_reset_redirect)
else:
form = password_reset_form()
return render_to_response(template_name, {
'form': form,
}, context_instance=RequestContext(request))
"""
```
#### File: jhjguxin/blogserver/views.py
```python
from django.shortcuts import render_to_response
import pdb
from datetime import *
from calendar import monthrange
from blogserver.apps.blog.models import Post
from django.template import RequestContext
from django.utils.html import conditional_escape as esc
from django.utils.safestring import mark_safe
from itertools import groupby
from calendar import HTMLCalendar, monthrange
##### Here's code for the view to look up the event objects to put in
# the context for the template. It goes in your app's views.py file (or
# wherever you put your views).
#####
def named_month(month_number):
"""
Return the name of the month, given the number.
"""
return date(1900, month_number, 1).strftime("%B")
def this_month(request):
"""
Show calendar of events this month.
"""
today = datetime.now()
return calendar(request, today.year, today.month)
def calendar(request, year, month, series_id=None):
"""
Show calendar of events for a given month of a given year.
``series_id``
The event series to show. None shows all event series.
"""
my_year = int(year)
my_month = int(month)
my_calendar_from_month = datetime(my_year, my_month, 1)
my_calendar_to_month = datetime(my_year, my_month, monthrange(my_year, my_month)[1])
my_events = Post.objects.filter(date_published__year=my_year, date_published__month=my_month)
if series_id:
my_events = my_events.filter(series=series_id)
# Calculate values for the calendar controls. 1-indexed (Jan = 1)
my_previous_year = my_year
my_previous_month = my_month - 1
if my_previous_month == 0:
my_previous_year = my_year - 1
my_previous_month = 12
my_next_year = my_year
my_next_month = my_month + 1
if my_next_month == 13:
my_next_year = my_year + 1
my_next_month = 1
my_year_after_this = my_year + 1
my_year_before_this = my_year - 1
#pdb.set_trace()
return render_to_response("blog/calendar.html", { 'events_list': my_events,
'month': my_month,
'month_name': named_month(my_month),
'year': my_year,
'previous_month': my_previous_month,
'previous_month_name': named_month(my_previous_month),
'previous_year': my_previous_year,
'next_month': my_next_month,
'next_month_name': named_month(my_next_month),
'next_year': my_next_year,
'year_before_this': my_year_before_this,
'year_after_this': my_year_after_this,
}, context_instance=RequestContext(request))
def post_calendar(request):
#pdb.set_trace()
# return render_to_response("base.html",)
lToday = datetime.now()
return calendar(request, lToday.year, lToday.month)
def base_page(request):
#pdb.set_trace()
return render_to_response("base.html",)
#lToday = datetime.now()
#return calendar(request, lToday.year, lToday.month)
##### Here's code for the view to look up the event objects to put in
# the context for the template. It goes in your app's views.py file (or
# wherever you put your views).
#####
``` |
{
"source": "jhjiangcs/PaddleFL",
"score": 2
} |
#### File: core/dygraph/customer.py
```python
import paddle
import json
import time
import os
import numpy as np
import grpc
import yaml
import logging
from core.proto import common_pb2_grpc, common_pb2
from .layer_handler import CustomerLayerHandler
from core import util
_LOGGER = logging.getLogger(__name__)
class CustomerExecutor(object):
def __init__(self, endpoints):
self._connect(endpoints)
self.run_type = None # TRAIN or INFER
def load_layer_handler(self, layer, optimizer, common_vars):
self.run_type = "TRAIN"
self.layer_handler = CustomerLayerHandler(layer, optimizer)
self.common_vars = common_vars
self.token = "<PASSWORD>"
def load_persistables(self, path):
layer_state_dict = paddle.load(
os.path.join(path, "layer.pdparams"))
opt_state_dict = paddle.load(
os.path.join(path, "optimizer.pdopt"))
self.layer_handler.layer.set_state_dict(layer_state_dict)
self.layer_handler.optimizer.set_state_dict(opt_state_dict)
# load token info
with open(os.path.join(path, "model_info")) as f:
model_info = json.load(f)
self.token = model_info["token"]
def _connect(self, endpoints):
options = [('grpc.max_receive_message_length', 512 * 1024 * 1024),
('grpc.max_send_message_length', 512 * 1024 * 1024)]
g_endpoint = 'ipv4:{}'.format(','.join(endpoints))
self.channel = grpc.insecure_channel(g_endpoint, options=options)
self.stub = common_pb2_grpc.FLExecutorStub(self.channel)
def _parse_vars_from_host(self, resp, required_common_vars):
vars_map = util.parse_proto_to_tensor(resp)
# check common in
for name in required_common_vars:
if name not in vars_map:
raise KeyError("Failed to calc: {} not found in query response.".format(name))
return vars_map
def _pack_vars_to_host(self, grad_vars, required_common_vars):
vars_map = {name: grad_vars[name] for name in required_common_vars}
req = util.pack_tensor_to_proto(vars_map)
return req
def _inner_cancel_current_step(self, err_msg):
_LOGGER.error(err_msg, exc_info=True)
self.layer_handler.cancel()
def cancel_host_current_step(self, err_msg):
self.stub.cancel_current_step(
common_pb2.NilRequest(
token=self.token,
state=common_pb2.State(
succ=False,
error_message=err_msg)))
def run(self, usr_key, feed):
if self.run_type == "TRAIN":
return self._run_for_train(usr_key, feed)
elif self.run_type == "INFER":
# TODO
pass
else:
raise ValueError("Failed to execute program: "
"unknown run type({})".format(self.run_type))
def _execute_forward_host_part(self, usr_key):
# query for user feature
user_info = common_pb2.UserInfo(
uid=usr_key, token=self.token)
resp = self.stub.execute_forward_host_part(user_info)
if not resp.state.succ:
raise RuntimeError(resp.state.error_message)
return resp
def _generate_feed_for_customer_part(self, feed, vars_from_host):
for in_name in self.common_vars["in"]:
feed[in_name] = vars_from_host[in_name]
return feed
def _execute_backward_host_part(self, req):
resp = self.stub.execute_backward_host_part(req)
if not resp.state.succ:
raise RuntimeError(resp.state.error_message)
def _run_for_train(self, usr_key, feed):
try:
resp = self._execute_forward_host_part(usr_key)
except Exception as e:
err_msg = "Failed to execute forward host part: {}".format(e)
self._inner_cancel_current_step(err_msg)
return None
try:
vars_from_host = self._parse_vars_from_host(
resp, self.common_vars["in"])
for tensor in vars_from_host.values():
tensor.stop_gradient = False # allow gradient
except Exception as e:
err_msg = "Failed to parse vars from host: {}".format(e)
self._inner_cancel_current_step(err_msg)
self.cancel_host_current_step("[Customer] {}".format(err_msg))
return None
for name in self.common_vars["in"]:
_LOGGER.debug("Get params {}: {}".format(
name, vars_from_host[name]))
try:
feed = self._generate_feed_for_customer_part(feed, vars_from_host)
except Exception as e:
err_msg = "Failed to generate feed data: {}".format(e)
self._inner_cancel_current_step(err_msg)
self.cancel_host_current_step("[Customer] {}".format(err_msg))
return None
try:
# forward and calc grad
self.layer_handler.call_for_forward(feed)
fetch_vars = self.layer_handler.get_fetch_vars()
except Exception as e:
err_msg = "Failed to run middle program: {}".format(e)
self._inner_cancel_current_step(err_msg)
self.cancel_host_current_step("[Customer] {}".format(err_msg))
return None
try:
grad_vars = {
"{}@GRAD".<EMAIL>(name): tensor.grad.numpy()
for name, tensor in vars_from_host.items()}
for name, tensor in vars_from_host.items():
_LOGGER.debug("Send grad {}: {}".format(name, tensor.grad))
req = self._pack_vars_to_host(
grad_vars, self.common_vars["out"])
req.token = self.token
except Exception as e:
err_msg = "Failed to pack vars to host: {}".format(e)
self._inner_cancel_current_step(err_msg)
self.cancel_host_current_step("[Customer] {}".format(err_msg))
return None
try:
# update params
self.layer_handler.call_for_backward()
except Exception as e:
err_msg = "Failed to update params: {}".format(e)
self._inner_cancel_current_step(err_msg)
self.cancel_host_current_step("[Customer] {}".format(err_msg))
return None
try:
self._execute_backward_host_part(req)
except Exception as e:
err_msg = "Failed to execute backward host part: {}".format(e)
self._inner_cancel_current_step(err_msg)
self.cancel_host_current_step("[Customer] {}".format(err_msg))
return None
return fetch_vars
def save_persistables(self, local_path, remote_path):
token = CustomerProgramSaver.save_persistables(
local_path, self.layer_handler)
resp = self.stub.save_persistables(
common_pb2.SaveInfo(
path=remote_path,
token=self.token,
save_token=token))
if not resp.state.succ:
err_msg = "Failed to save vars in host side: {}".format(
resp.state.error_message)
raise RuntimeError(err_msg)
return True
class CustomerProgramSaver(object):
def __init__(self):
pass
@staticmethod
def save_persistables(
dirpath, layer_handler):
layer = layer_handler.layer
optimizer = layer_handler.optimizer
paddle.save(layer.state_dict(), os.path.join(dirpath, "layer.pdparams"))
paddle.save(optimizer.state_dict(), os.path.join(dirpath, "optimizer.pdopt"))
# token
token = str(time.time())
model_info = {
"token": token,
}
with open(os.path.join(dirpath, "model_info"), "w") as f:
f.write(json.dumps(model_info))
return token
```
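For orientation, a rough usage sketch of the `CustomerExecutor` defined above. The endpoint, sub-model, optimizer, variable names and feed keys are made-up placeholders, and the `CustomerLayerHandler` that ultimately consumes them is not shown in this excerpt, so treat this as an assumption about the intended calling pattern rather than the project's documented API:
```python
import paddle

# hypothetical customer-side sub-model and optimizer
layer = paddle.nn.Linear(16, 2)
optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=layer.parameters())

executor = CustomerExecutor(["127.0.0.1:7858"])   # placeholder gRPC endpoint of the host party
executor.load_layer_handler(
    layer, optimizer,
    common_vars={"in": ["Host|out_embedding"],
                 "out": ["Host|out_embedding@GRAD"]})

# one training step: the host runs its forward part for this user id,
# the customer finishes the forward/backward pass and sends the grads back
fetch_vars = executor.run("uid-0001", feed={"Customer|label": paddle.to_tensor([[1]])})
```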
#### File: core/dygraph/host.py
```python
import paddle
import numpy as np
import os
import json
from concurrent import futures
import contextlib
import socket
import grpc
import logging
from core.proto import common_pb2_grpc, common_pb2
from .layer_handler import HostLayerHandler
from core import util
_LOGGER = logging.getLogger(__name__)
class FLExecutorServicer(common_pb2_grpc.FLExecutorServicer):
def __init__(self, program_loader, lookup_table, reader):
super(FLExecutorServicer, self).__init__()
self.run_type = program_loader.run_type
self.common_vars = program_loader.common_vars
self.token = program_loader.token
self.layer_handler = program_loader.layer_handler
self.table = lookup_table
self.reader = reader
def execute_forward_host_part(self, request, context):
if request.token != self.token:
err_msg = "Failed: token({}) is not valid.".format(request.token)
_LOGGER.error(err_msg)
return self.__generate_err_features("[Host] {}".format(err_msg))
uid = request.uid
try:
value = self.table.lookup(uid)
inputs = self.reader.parse(value)
except Exception as e:
err_msg = "Failed to lookup for input: {}".format(e)
self._inner_cancel_current_step(err_msg)
return self.__generate_err_features("[Host] {}".format(err_msg))
feed_data = {name: tensor for name, tensor in inputs.items()}
fetch_vars = None
try:
if self.run_type == "TRAIN":
# forward only
fetch_vars = self.layer_handler.call_for_forward(feed_data)
elif self.run_type == "INFER":
# TODO
pass
else:
raise ValueError("Failed to execute program: "
"unknown run type({})".format(self.run_type))
except Exception as e:
err_msg = "Failed to run forward program: {}".format(e)
self._inner_cancel_current_step(err_msg)
return self.__generate_err_features("[Host] {}".format(err_msg))
for name in self.common_vars["out"]:
_LOGGER.debug("Send params {}: {}".format(
name, fetch_vars[name]))
try:
resp = self._pack_vars_to_client(
fetch_vars, self.common_vars["out"])
except Exception as e:
err_msg = "Failed to pack vars to client: {}".format(e)
self._inner_cancel_current_step(err_msg)
return self.__generate_err_features("[Host] {}".format(err_msg))
return resp
def execute_backward_host_part(self, request, context):
if request.token != self.token:
err_msg = "Failed: token({}) is not valid.".format(req_token)
_LOGGER.error(err_msg, exc_info=True)
return self.__generate_nil_response("[Host] {}".format(err_msg))
try:
common_map = self._parse_vars_from_client(
request, self.common_vars["in"])
except Exception as e:
err_msg = "Failed to parse vars from client: {}".format(e)
self._inner_cancel_current_step(err_msg)
return self.__generate_nil_response("[Host] {}".format(err_msg))
for name, tensor in common_map.items():
_LOGGER.debug("Get grad {}: {}".format(name, tensor))
try:
# backward and minimize
fetch_vars = self.layer_handler.call_for_backward(common_map)
except Exception as e:
err_msg = "Failed to run backward program: {}".format(e)
self._inner_cancel_current_step(err_msg)
return self.__generate_nil_response("[Host] {}".format(err_msg))
return self.__generate_nil_response()
def save_persistables(self, request, context):
if request.token != self.token:
err_msg = "Failed: token({}) is not valid.".format(req_token)
_LOGGER.error(err_msg, exc_info=True)
return self.__generate_nil_response("[Host] {}".format(err_msg))
try:
HostProgramSaver.save_persistables(
request.path, self.layer_handler, request.save_token)
except Exception as e:
err_msg = "Failed to save vars: {}".format(e)
_LOGGER.error(err_msg, exc_info=True)
return self.__generate_nil_response("[Host] {}".format(err_msg))
return self.__generate_nil_response()
def cancel_current_step(self, request, context):
if request.token != self.token:
err_msg = "Failed: token({}) is not valid.".format(req_token)
_LOGGER.error(err_msg, exc_info=True)
return self.__generate_nil_response("[Host] {}".format(err_msg))
self._inner_cancel_current_step(request.state.error_message)
return self.__generate_nil_response()
def _parse_vars_from_client(self, request, required_common_vars):
vars_map = util.parse_proto_to_tensor(request)
# check common in
for name in required_common_vars:
if name not in vars_map:
raise KeyError(
"Failed to parse vars from client: {} not found in response.".format(name))
return vars_map
def _pack_vars_to_client(self, fetch_vars, required_common_vars):
vars_map = {name: fetch_vars[name] for name in required_common_vars}
req = util.pack_tensor_to_proto(vars_map)
req.token = self.token
return req
def _inner_cancel_current_step(self, err_msg):
_LOGGER.error(err_msg, exc_info=True)
self.layer_handler.cancel()
def __generate_nil_response(self, error_message=None):
if error_message:
return common_pb2.NilResponse(
state=common_pb2.State(
succ=False,
error_message=error_message))
else:
return common_pb2.NilResponse(
state=common_pb2.State(succ=True))
def __generate_err_features(self, error_message):
return common_pb2.Features(
token=self.token,
state=common_pb2.State(
succ=False,
error_message=error_message))
class HostExecutor(object):
def __init__(self, table, reader, max_workers=1):
self.program_loader = HostProgramLoader()
self.table = table
self.reader = reader
self.max_workers = max_workers
def load_layer_handler(self, layer, optimizer, common_vars):
self.program_loader.load_layer_handler(layer, optimizer, common_vars)
def load_persistables(self, path):
self.program_loader.load_persistables(path)
def _is_port_available(self, port):
with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.settimeout(2)
result = sock.connect_ex(('0.0.0.0', port))
return result != 0
def start(self, port):
if not self._is_port_available(port):
raise ValueError("Failed to start: port {} not available".format(port))
server = grpc.server(
futures.ThreadPoolExecutor(max_workers=self.max_workers),
options=[('grpc.max_send_message_length', 256 * 1024 * 1024),
('grpc.max_receive_message_length', 256 * 1024 * 1024)])
common_pb2_grpc.add_FLExecutorServicer_to_server(
FLExecutorServicer(self.program_loader, self.table, self.reader), server)
server.add_insecure_port('[::]:{}'.format(port))
_LOGGER.info("Run service in port: {}".format(port))
server.start()
server.wait_for_termination()
class HostProgramLoader(object):
def __init__(self):
self.run_type = None # TRAIN or INFER
self.common_vars = None
self.layer_handler = None
self.token = None
def load_layer_handler(self, layer, optimizer, common_vars):
self.run_type = "TRAIN"
self.layer_handler = HostLayerHandler(layer, optimizer)
self.common_vars = common_vars
self.token = "<PASSWORD>"
def load_persistables(self, path):
layer_state_dict = paddle.load(
os.path.join(path, "layer.pdparams"))
opt_state_dict = paddle.load(
os.path.join(path, "optimizer.pdopt"))
self.layer_handler.layer.set_state_dict(layer_state_dict)
self.layer_handler.optimizer.set_state_dict(opt_state_dict)
# load token info
with open(os.path.join(path, "model_info")) as f:
model_info = json.load(f)
self.token = model_info["token"]
class HostProgramSaver(object):
def __init__(self):
pass
@staticmethod
def save_persistables(
dirpath, layer_handler, save_token):
layer = layer_handler.layer
optimizer = layer_handler.optimizer
paddle.save(layer.state_dict(), os.path.join(dirpath, "layer.pdparams"))
paddle.save(optimizer.state_dict(), os.path.join(dirpath, "optimizer.pdopt"))
model_info = {
"token": save_token,
}
with open(os.path.join(dirpath, "model_info"), "w") as f:
f.write(json.dumps(model_info))
```
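Similarly, the host side appears to be wired up roughly as below. The lookup table, reader, sub-model, variable names and port are all hypothetical stand-ins (the real `LookupTable` and `HostLayerHandler` are not shown in this excerpt), so this is only a sketch of the intended startup sequence:
```python
import paddle

class DictLookupTable:
    """Toy stand-in for the lookup table: maps a user id to its stored feature record."""
    def __init__(self, records):
        self.records = records
    def lookup(self, uid):
        return self.records[uid]

class PassThroughReader:
    """Toy stand-in for the reader: turns a stored record into a feed dict."""
    def parse(self, value):
        return {"Host|input": paddle.to_tensor(value)}

layer = paddle.nn.Linear(4, 16)                      # hypothetical host-side sub-model
optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=layer.parameters())

table = DictLookupTable({"uid-0001": [[0.1, 0.2, 0.3, 0.4]]})
executor = HostExecutor(table=table, reader=PassThroughReader(), max_workers=4)
executor.load_layer_handler(
    layer, optimizer,
    common_vars={"in": ["Host|out_embedding@GRAD"], "out": ["Host|out_embedding"]})
executor.start(port=7858)                            # blocks and serves gRPC requests
```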
#### File: core/reader/reader_base.py
```python
import numpy as np
import paddle.fluid as fluid
class ReaderBase(object):
def __init__(self):
pass
def parse(self, db_value):
raise NotImplementedError("Failed to parse db_value")
class TmpReader(ReaderBase):
def __init__(self, place):
        super(TmpReader, self).__init__()
self.place = place
def parse(self, db_value):
data_dict = {}
data = {}
data_dict["Host|input"] = np.random.randint(2, size=( 1, 1)).astype('int64')
shapes = [[len(c) for c in data_dict["Host|input"]]]
data["Host|input"] = fluid.create_lod_tensor(data_dict["Host|input"].reshape(-1,1), shapes, self.place)
data_dict["Customer|label"] = [1] #np.array([1]).astype('int64')
data["Customer|label"] = data_dict["Customer|label"]
return data
```
#### File: split_learning/core/util.py
```python
import os
import subprocess
import time
import json
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.framework import Parameter, Program
from .proto import common_pb2_grpc, common_pb2
from . import reformer
def parse_proto_to_lod_tensor(proto, place=paddle.fluid.CPUPlace()):
vars_map = {}
for pb_var in proto.vars:
dtype = pb_var.dtype
name = pb_var.name
data = getattr(pb_var, "data_{}".format(dtype))
shape = pb_var.shape
recursive_seq_lens = []
for sequence_length in pb_var.recursive_sequence_lengths.sequence_lengths:
recursive_seq_lens.append(sequence_length)
np_data = np.array(data).astype(dtype)
np_data = np_data.reshape(shape)
tensor = fluid.create_lod_tensor(np_data, recursive_seq_lens, place)
vars_map[name] = tensor
return vars_map
def parse_proto_to_tensor(proto, place=paddle.fluid.CPUPlace()):
vars_map = {}
for pb_var in proto.tensors:
dtype = pb_var.dtype
name = pb_var.name
data = getattr(pb_var, "data_{}".format(dtype))
shape = pb_var.shape
np_data = np.array(data).astype(dtype)
np_data = np_data.reshape(shape)
tensor = paddle.to_tensor(np_data, dtype, place)
vars_map[name] = tensor
return vars_map
def pack_lod_tensor_to_proto(vars_map):
proto = common_pb2.Features()
for name, tensor in vars_map.items():
np_data = np.array(tensor)
pb_var = common_pb2.Variable()
pb_var.name = name
pb_var.shape.extend(list(np_data.shape))
np_data = np_data.reshape(-1)
pb_var.dtype = np_data.dtype.name
data_handler = getattr(pb_var, "data_{}".format(np_data.dtype))
data_handler.extend(np_data.tolist())
recursive_seq_lens = tensor.recursive_sequence_lengths()
for seq_lens in recursive_seq_lens:
pb_seq_lens = common_pb2.Variable.RecursiveSequenceLength.SequenceLength()
pb_seq_lens.lengths.extend(seq_lens)
pb_var.recursive_sequence_lengths.append(pb_seq_lens)
proto.vars.append(pb_var)
proto.state.succ = True
return proto
def pack_tensor_to_proto(vars_map):
proto = common_pb2.Features()
for name, tensor in vars_map.items():
np_data = np.array(tensor)
pb_var = common_pb2.Tensor()
pb_var.name = name
pb_var.shape.extend(list(np_data.shape))
np_data = np_data.reshape(-1)
pb_var.dtype = np_data.dtype.name
data_handler = getattr(pb_var, "data_{}".format(np_data.dtype))
data_handler.extend(np_data.tolist())
proto.tensors.append(pb_var)
proto.state.succ = True
return proto
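# A rough round-trip sketch of the two helpers above (illustrative only, not part of the original file):
#   vars_map = {"Host|out_embedding": np.ones((2, 3), dtype="float32")}
#   proto = pack_tensor_to_proto(vars_map)       # -> common_pb2.Features holding one Tensor message
#   restored = parse_proto_to_tensor(proto)      # -> {"Host|out_embedding": paddle.Tensor of shape [2, 3]}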
def save_whole_program(main_prog, startup_prog, program_path):
if not os.path.exists(program_path):
os.makedirs(program_path)
main_program_str = main_prog.desc.serialize_to_string()
startup_program_str = startup_prog.desc.serialize_to_string()
params = main_prog.global_block().all_parameters()
with open(program_path + '/para_info', 'w') as fout:
for item in params:
fout.write("%s:%s\n" % (item.name, item.trainable))
with open(program_path + '/startup_program', "wb") as fout:
fout.write(startup_program_str)
with open(program_path + '/main_program', "wb") as fout:
fout.write(main_program_str)
stop_vars = []
for check_stop in main_prog.list_vars():
if check_stop.stop_gradient == True:
stop_vars.append(check_stop.name)
with open(program_path + '/stop_gradient', 'w') as fout:
for stop_item in stop_vars:
fout.write("%s\n" % stop_item)
def load_whole_program(program_input):
with open(program_input + '/startup_program', "rb") as fin:
new_startup = Program().parse_from_string(fin.read())
with open(program_input + '/main_program', "rb") as fin:
new_main = Program().parse_from_string(fin.read())
para_list = []
with open(program_input + '/para_info', 'r') as fin:
for line in fin:
current_para = {}
para = line[:-1].split(":")
current_para["name"] = para[0]
if para[1] == 'True':
current_para['trainable'] = True
else:
current_para['trainable'] = False
para_list.append(current_para)
with open(program_input + '/stop_gradient', 'r') as fin:
for line in fin:
stop_name = line[:-1]
stop_var = new_main.global_block().var(stop_name)
stop_var.stop_gradient = True
for item in para_list:
main_para = new_main.global_block().var(item['name'])
main_para.__class__ = Parameter
main_para.regularizer = None
main_para.optimize_attr = {'learning_rate': 1.0}
main_para.trainable = item['trainable']
main_para.is_distributed = False
startup_para = new_startup.global_block().var(item['name'])
startup_para.__class__ = Parameter
startup_para.regularizer = None
startup_para.optimize_attr = {'learning_rate': 1.0}
startup_para.trainable = item['trainable']
startup_para.is_distributed = False
return new_startup, new_main
def split_program_by_name_and_save(
startup_program,
main_program,
save_path,
feeded_var_names,
target_var_names):
split_program_by_key_prefix_and_save(
startup_program,
main_program,
"Host|",
save_path,
feeded_var_names,
target_var_names)
def split_program_by_key_prefix_and_save(
startup_program,
main_program,
key_prefix,
save_path,
feeded_var_names,
target_var_names):
if not os.path.exists(save_path):
os.makedirs(save_path)
token = str(time.time())
# split program by key_prefix
splited_programs = reformer.Reformer.split_program_by_key_prefix(main_program, key_prefix)
model_infos = []
# common var name
for i in range(len(splited_programs)):
model_infos.append({"common": {"in": [], "out": []}})
common_vars = intersection_vars(splited_programs[0], splited_programs[1])
for name in common_vars:
model_infos[0]["common"]["out"].append(name)
model_infos[1]["common"]["in"].append(name)
common_vars = intersection_vars(splited_programs[1], splited_programs[2])
for name in common_vars:
model_infos[1]["common"]["out"].append(name)
model_infos[2]["common"]["in"].append(name)
# save splited_program
for i, program in enumerate(splited_programs):
program_save_path = os.path.join(save_path, "part{}".format(i))
if not os.path.exists(program_save_path):
os.makedirs(program_save_path)
# save startup_program
with open(os.path.join(program_save_path, 'startup_program'), "wb") as fout:
fout.write(startup_program.desc.serialize_to_string())
# save main_pargram part
with open(os.path.join(program_save_path, "main_program"), "wb") as fout:
fout.write(program.desc.serialize_to_string())
model_info = {
"token": token,
"params": [],
"stop_gradient_vars": [],
"target_var_names": [],
"feeded_var_names": [],
"persistable_vars": [],
}
# param name with trainable
for param in program.global_block().all_parameters():
model_info["params"].append({"name": param.name, "trainable": param.trainable})
# stop_gradient var name
for check_stop in program.list_vars():
if check_stop.stop_gradient == True:
model_info["stop_gradient_vars"].append(check_stop.name)
# target_var_names
for name in target_var_names:
if find_var(program, name) is not None:
model_info["target_var_names"].append(name)
# feeded_var_names
for name in feeded_var_names:
if find_var(program, name) is not None:
model_info["feeded_var_names"].append(name)
# persistable var names
for var in program.list_vars():
if var.persistable == True:
model_info["persistable_vars"].append(var.name)
model_infos[i].update(model_info)
with open(os.path.join(program_save_path, "model_info"), "w") as fout:
fout.write(json.dumps(model_infos[i]))
def load_splited_program(save_path):
startup_program, main_program = None, None
with open(os.path.join(save_path, "startup_program"), "rb") as fin:
startup_program = Program().parse_from_string(fin.read())
with open(os.path.join(save_path, 'main_program'), "rb") as fin:
main_program = Program().parse_from_string(fin.read())
with open(os.path.join(save_path, "model_info"), "r") as fin:
model_info = json.loads(fin.read())
# params
for item in model_info["params"]:
main_para = main_program.global_block().var(item['name'])
main_para.__class__ = Parameter
main_para.regularizer = None
main_para.optimize_attr = {'learning_rate': 1.0}
main_para.trainable = item['trainable']
main_para.is_distributed = False
startup_para = startup_program.global_block().var(item['name'])
startup_para.__class__ = Parameter
startup_para.regularizer = None
startup_para.optimize_attr = {'learning_rate': 1.0}
startup_para.trainable = item['trainable']
startup_para.is_distributed = False
# stop_gradient
for stop_name in model_info["stop_gradient_vars"]:
stop_var = main_program.global_block().var(stop_name)
stop_var.stop_gradient = True
return startup_program, main_program, model_info
def intersection_vars(p1_program, p2_program):
p1_whole_vars = [var.name for var in p1_program.list_vars()]
p2_whole_vars = [var.name for var in p2_program.list_vars()]
return set(p1_whole_vars) & set(p2_whole_vars)
def make_vars_persistable(program, var_names):
for name in var_names:
var = find_var(program, name)
var.persistable = True
def find_var(program, var_name):
whole_vars = program.list_vars()
for var in whole_vars:
if var.name == var_name:
return var
return None
def parse_bns_by_name(bns_name='', default_ip_port=''):
"""
return proxy ip list
"""
final_ip_port = default_ip_port
(s, o) = subprocess.getstatusoutput(
'get_instance_by_service -ip %s' % bns_name)
if int(s) == 0:
lns = o.split('\n')
final_ip_port = list()
for line in lns:
ll = line.split(' ')
ip_port = ""
if len(ll) == 3:
ip_port = (ll[1], ll[2])
elif len(ll) == 2:
ip_port = (ll[0], ll[1])
final_ip_port.append(ip_port)
return final_ip_port
``` |
{
"source": "jhji-soochow/MAIN",
"score": 3
} |
#### File: src/data/__init__.py
```python
from importlib import import_module
from torch.utils.data.dataloader import DataLoader
from torch.utils.data import ConcatDataset
import pdb
# This is a simple wrapper class for ConcatDataset
class MyConcatDataset(ConcatDataset):
def __init__(self, datasets):
super(MyConcatDataset, self).__init__(datasets)
self.train = datasets[0].train
class Data:
def __init__(self, args):
self.loader_train = None
if not args.test_only:
datasets = []
for d in args.data_train:
module_name = d if d.find('DIV2K-Q') < 0 else 'DIV2KJPEG'
                m = import_module('data.' + module_name.lower())  # dynamically import the dataset module
datasets.append(getattr(m, module_name)(args, name=d))
self.loader_train = DataLoader(
MyConcatDataset(datasets),
batch_size=args.batch_size,
shuffle=True,
pin_memory=not args.cpu,
num_workers=args.n_threads
)
self.loader_test = []
for d in args.data_test:
if d in ['mySet5', 'mySet15', 'mySet18', 'myBSDS100', 'myUrban12', 'myUrban100', 'myManga109']:
m = import_module('data.mybenchmark')
testset = getattr(m, 'Benchmark')(args, train=False, name=d)
self.loader_test.append(DataLoader(
testset,
batch_size=1,
shuffle=False,
pin_memory=not args.cpu,
num_workers=args.n_threads
))
```
#### File: src/model/common.py
```python
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
def default_conv(in_channels, out_channels, kernel_size, bias=True, dilation=1):
return nn.Conv2d(
in_channels, out_channels, kernel_size,
padding=(kernel_size//2), bias=bias, dilation=dilation)
class ChannelZeroPad(nn.Module):
def __init__(self, prepadding=1, postpadding=0, value=0):
super(ChannelZeroPad, self).__init__()
self.prepadding = prepadding
self.postpadding = postpadding
        self.value = value
def forward(self, input):
return F.pad(input, (0, 0, 0, 0, self.prepadding, self.postpadding))
class MyUpsampler(nn.Module):
def __init__(self, conv, upscale_factor, n_feats, bias=True):
super(MyUpsampler, self).__init__()
self.upscale_factor = upscale_factor
self.conv1 = conv(n_feats, n_feats // 2, 3, bias)
self.conv2 = conv(n_feats // 2, self.upscale_factor ** 2 - 1, 3, bias)
self.ChannelZeroPad = ChannelZeroPad(1, 0, 0)
self.positionupscale = nn.PixelShuffle(self.upscale_factor)
self.relu = nn.ReLU(True)
def forward(self, x, preintp_x):
x = self.relu(self.conv1(x))
x = self.conv2(x)
x = self.ChannelZeroPad(x)
x += preintp_x.repeat(1, self.upscale_factor**2, 1, 1)
x = self.positionupscale(x)
return x
```
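To make the channel arithmetic in `MyUpsampler` concrete: with `upscale_factor=2` the second conv emits `2**2 - 1 = 3` channels, `ChannelZeroPad` prepends one zero channel to make 4, the pre-interpolated single-channel image is added at every position, and `PixelShuffle(2)` folds the 4 channels into one channel at twice the spatial resolution. A small shape check (the feature width and sizes are arbitrary choices, not values from the repository):
```python
import torch

up = MyUpsampler(default_conv, upscale_factor=2, n_feats=64)
feats = torch.rand(1, 64, 24, 24)        # deep features
preintp = torch.rand(1, 1, 24, 24)       # pre-interpolated low-resolution image (1 channel)
out = up(feats, preintp)
print(out.shape)                         # torch.Size([1, 1, 48, 48])
```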
#### File: MAIN/src/template.py
```python
def set_template(args):
# Set the templates here
if args.template.find('AIN') >=0:
args.model = 'AIN'
if args.template.find('AIN2') >= 0:
args.model = 'AIN2'
``` |
{
"source": "JHJohny/CryptoBuyInBot",
"score": 3
} |
#### File: CryptoBuyInBot/Bot/BinanceExchange.py
```python
import time
from binance.client import Client
from Bot.ExchangeBase import Exchange
class Binance(Exchange):
def __init__(self, api_key, api_secret):
self.api_key = api_key
self.api_secret = api_secret
self.client = Client(self.api_key, self.api_secret)
def get_current_minute_candle(self, symbol):
"""Takes symbol of market - example BTCUSDT and returns current minute candle dictionary"""
last_minute_candle = next(
self.client.get_historical_klines_generator(symbol=symbol, interval=Client.KLINE_INTERVAL_1MINUTE,
start_str="1 minute ago UTC"))
candle = {"open": float(last_minute_candle[1]),
"high": float(last_minute_candle[2]),
"low": float(last_minute_candle[3]),
"close": float(last_minute_candle[4])} # close is current status - current minute is not closed yet
change = ((100 / candle["open"]) * candle["close"]) - 100
candle["change"] = change
return candle
def create_buy_order(self, amount, crypto, timeout=10):
order = self.client.order_market_buy(symbol=crypto, quantity=amount)
order = self.wait_till_order_is_filled(order["orderId"])
# Nah block below looks weird, think about refactoring
completed_order = {
"symbol": crypto,
"amount": self.__amount_after_comission(order),
"price": order["fills"][0]["price"] # TODO - make average of price
}
return completed_order
def set_stop_loss(self, *, crypto, amount, stop_loss_price):
order = self.client.create_order(symbol=crypto,
side="SELL",
type="STOP_LOSS",
timeInForce="GTC",
quantity=amount,
price=stop_loss_price)
return order
def set_stop_profit(self, *, crypto, amount, profit_price):
order = self.client.create_order(symbol=crypto,
side="SELL",
type="TAKE_PROFIT",
timeInForce="GTC",
quantity=amount,
price=profit_price)
return order
    def wait_till_order_is_filled(self, *orderIds, timeout=15):
        """Wait until an order is filled or the timeout expires - on timeout, cancel the orders and return the last one.
        If multiple orders are given, stop waiting as soon as the first one is filled (or the timeout expires)."""
        elapsed_time = 0
        while timeout is None or elapsed_time < timeout:
            for orderId in orderIds:
                order = self.client.get_order(orderId=orderId)
                if order["status"] == "FILLED":
                    return order
            time.sleep(1)
            elapsed_time += 1
        # Timed out - cancel the outstanding orders and return the last one checked
        for orderId in orderIds:
            order = self.client.get_order(orderId=orderId)
            self.client.cancel_order(symbol=order["symbol"], orderId=orderId)
        return self.client.get_order(orderId=orderId)
    def __amount_after_comission(self, order):  # Calculates the final bought amount with commissions removed
amount = 0
comissions = 0
for fill in order["fills"]:
amount += float(fill["qty"])
comissions += float(fill["commission"])
return amount - comissions
```
#### File: CryptoBuyInBot/Bot/BotOne.py
```python
import time
from Bot.BotBase import Bot
from utils.mathematics import get_x_percent_of_y
from utils.decorator.Decorators import block_argument
"""Bot One - logic
Bot one is the simplest concept. It checks the prices of the main cryptocurrencies for a rapid move to the upside.
If the bot detects a massive move in one crypto, it will try to buy the other currencies while they are still at a low price.
The whole idea is that when Bitcoin moves up, we expect all other cryptos to move up as well.
"""
class BotOne(Bot):
__list_of_cryptos = ["BTCUSDT", "ETHUSDT", "BCHUSDT", "LTCUSDT", "XRPUSDT"]
@block_argument("", None)
def __init__(self, *, exchange_client, buy_in_sum, list_of_cryptos=__list_of_cryptos, min_pump=4, min_oppor=3,
check_interval=10):
self.exchange_client = exchange_client
self.buy_in_sum = buy_in_sum
self.__list_of_cryptos = list_of_cryptos
self.min_pump = min_pump # % that crypto must reach, to consider it as pump
self.min_opportunity = min_oppor # % difference between already pumping crypto and not yet pumping crypto
self.check_interval = check_interval
def check_for_opportunities(self):
"""Checks for opportunity to make profit - every bot has different strategy
pump - when one crypto is above x %
opportunity - when pump is found and found is also crypto that isn't pumped yet - that is opportunity to buy in"""
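        # Worked example (illustrative numbers only): with min_pump=4 and min_oppor=3,
        # if BTCUSDT's current 1-minute candle shows change = +4.5% (a pump) while
        # LTCUSDT shows change = +0.8%, the gap is 4.5 - 0.8 = 3.7 > 3, so LTCUSDT
        # is treated as an opportunity and a buy order is placed for it.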
# record candles for each crypto
candles = {}
for crypto in self.__list_of_cryptos:
candles[crypto] = self.exchange_client.get_current_minute_candle(crypto)
# actual checking for pump and opportunities
for crypto, candle in candles.items(): # looking for pump
if candle["change"] > self.min_pump:
print(f"Pump found! in {crypto}, change - {candle['change']}")
pump = candle["change"]
                for other_crypto, other_candle in candles.items():  # looking for opportunity
                    if (pump - other_candle["change"]) > self.min_opportunity:
                        print(f"Opportunity FOUND! {other_crypto}")
                        self.__opportunity_found(other_crypto, other_candle["close"])
break
break
else:
print(f"Nothing special found in {crypto} , change only {candle['change']}")
print(f"Not opportunities found")
# TODO - do it event based, if more functions will be added to start
def start(self):
try: # Because Binance API has troubles to request history at 58/59/60th second of minute.
self.check_for_opportunities()
        except Exception:
            print("Something went wrong with the Binance API")
time.sleep(self.check_interval) # TODO - test if it's worth to import asyncio and do it with async
self.start()
def __opportunity_found(self, crypto, crypto_price):
"""Call when one crypto didn't pump yet - so we can buy before it pumps and sell it with profit"""
buy_order = self.exchange_client.create_buy_order(int(self.buy_in_sum / crypto_price), crypto)
        stop_loss_order = self.exchange_client.set_stop_loss(crypto=buy_order["symbol"],
                                                             amount=buy_order["amount"],
                                                             stop_loss_price=get_x_percent_of_y(x=97,
                                                                                                y=buy_order["price"]))
        stop_profit_order = self.exchange_client.set_stop_profit(crypto=buy_order["symbol"],
                                                                 amount=buy_order["amount"],
                                                                 profit_price=get_x_percent_of_y(x=103,
                                                                                                 y=buy_order["price"]))
filled_order = self.exchange_client.wait_till_order_is_filled(stop_loss_order["orderId"],
stop_profit_order["orderId"],
timeout=None)
print("ORDER COMPLETED - ", filled_order)
# TODO - send a SMS - via multi threading or multi processing
```
#### File: CryptoBuyInBot/Bot/ExchangeBase.py
```python
from abc import ABC, abstractmethod
class Exchange(ABC):
@abstractmethod
def get_current_minute_candle(self, symbol):
"""Takes keyword of cryptocurrency and returns dict of - Open, High, Low, Close values"""
pass
@abstractmethod
def create_buy_order(self):
pass
@abstractmethod
def set_stop_loss(self):
pass
@abstractmethod
def set_stop_profit(self):
pass
```
#### File: Bot/menu/MenuInterface.py
```python
from Bot.BotOne import BotOne
from Bot.BinanceExchange import Binance
from Bot.menu.ExchangesMenu import get_binance_params
from Bot.menu.ASCI import ASCI
"""Start menu
The start menu guides the user through 'configuring' the bot - choosing which bot to use, which exchange and
API keys, and how much to invest."""
def insert_fancy_lines(func):
def insert():
print(f"\n \n{ASCI.random_fancy_line}")
return func()
return insert
@insert_fancy_lines
def choose_exchange():
print("Select exchange - \n"
"1. Binance\n"
"2. Comming soon\n"
"\n")
def ask_for_input():
users_input = input("Your choice: ")
if users_input == "1" or users_input == "":
api_params = get_binance_params()
return Binance(api_params["api_key"], api_params["api_key_secret"])
else:
print("Sorry try again")
return ask_for_input()
return ask_for_input()
@insert_fancy_lines
def choose_invest_amount():
print("Invest amount\n"
"Type how much you would like to invest\n"
"\n")
def ask_for_input():
users_input = input("Enter amount: ")
if users_input.isdigit():
return int(users_input)
elif users_input == "":
return 40
else:
print("Sorry try again")
return ask_for_input()
return ask_for_input()
@insert_fancy_lines
def choose_bot():
print("Select bot - \n"
"1. Bot one (default) Bot one detect pump in one crypto and will try to buy other cryptos as long they are still in low price\n"
"2. Comming soon\n"
"\n")
def ask_for_input():
users_input = input("Your choice: ")
if users_input == '1' or users_input == '':
return BotOne(exchange_client=choose_exchange(), buy_in_sum=choose_invest_amount())
else:
print("Sorry try again")
return ask_for_input()
return ask_for_input()
def start_menu():
print(ASCI.random_logo)
bot = choose_bot()
bot.start()
``` |
{
"source": "jhk16/stackoverflow-encourages-cheating",
"score": 2
} |
#### File: src/dataset/base_dataset.py
```python
import datasets
from datasets import Value, Features
from datasets.features import Sequence
from datasets.splits import SplitGenerator
from pathlib import Path
import json
from dataclasses import dataclass
__all__ = [
"BaseDataset",
"BaseDatasetConfig"
]
# import logging
# import os
_URL = "https://www.dropbox.com/s/xv3zcutli07w37w/base_dataset.zip?dl=1"
_DESCRIPTION = """\
Implementation of the CoNaLa dataset in hugging face for my research.
"""
_HOMEPAGE = "https://conala-corpus.github.io/"
_LICENSE = ""
_CITATION = """\
@inproceedings{yin2018mining,
author = {<NAME> <NAME> <NAME>},
title = {Learning to Mine Aligned Code and Natural Language Pairs from Stack Overflow},
booktitle = {International Conference on Mining Software Repositories},
series = {MSR},
pages = {476--486},
year = {2018},
publisher = {ACM},
doi = {https://doi.org/10.1145/3196398.3196408},
}
"""
# TODO: Better Documentation
@dataclass
class BaseDatasetConfig(datasets.BuilderConfig):
objective: str = 'CodeGen'
use_canonical_snippet: bool = True
use_canonical_intent: bool = True
skip_pretrain: bool = False
skip_api: bool = False
testing_path: Path = None
class BaseDataset(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("1.1.0")
BUILDER_CONFIG_CLASS = BaseDatasetConfig
BUILDER_CONFIGS = [
BaseDatasetConfig(name='default',
version=VERSION,
description='Load the default data.',
objective='CodeGen'),
]
DEFAULT_CONFIG_NAME = 'default'
def getNameForConfig(self):
return f"{'c' if self.config.use_canonical_intent else ''}Intent" \
f"_{'c' if self.config.use_canonical_snippet else ''}Snippet"
def _info(self):
        # Features are set based on the objective of the config. These are the
        # default/base features, which does not mean every example will have a
        # value for each of them; that depends on the config.
'question_id': Value('string'),
'score' : Value('string'),
'intent' : Value('string'),
'title' : Value('string'),
'body' : Value('string'),
'tags' : Sequence(Value('string')),
'slot_map' : Sequence({
'key' : Value('string'),
'value': Value('string'),
'quote': Value('string'),
'type' : Value('string')
}),
}
if self.config.objective == 'CodeGen':
features = Features({
'snippet' : Value('string'),
'answer_body' : Value('string'),
'answer_score': Value('string'),
**default_features
})
elif self.config.objective == 'QuestionGen':
features = Features({
'snippet' : Sequence(Value('string')),
'answer_body' : Sequence(Value('string')),
'answer_score': Sequence(Value('string')),
**default_features
})
else:
raise ValueError(f"'{self.config.objective}' is not a supported objective")
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# This defines the different columns of the dataset and their types
features=features,
# Here we define them above because they are different between the two configurations
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage=_HOMEPAGE,
# License for the dataset if available
license=_LICENSE,
# Citation for the dataset
citation=_CITATION,
)
def _split_generators(self, dl_manager):
data_dir = self.config.testing_path
# If `data_dir` is None, we are in testing mode
if data_dir is None:
data_dir = Path(dl_manager.download_and_extract(_URL)).joinpath('base_dataset')
splits = [
SplitGenerator(
name=datasets.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": data_dir.joinpath('train.jsonl'),
},
),
SplitGenerator(
name=datasets.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": data_dir.joinpath('test.jsonl')
},
)]
# Some of these take some time to complete, so add options in the config
# to skip them.
if not self.config.skip_api:
splits.extend([
SplitGenerator(
name='direct_api',
gen_kwargs={
'filepath': data_dir.joinpath('direct_api.jsonl')
}
),
SplitGenerator(
name='sampled_api',
gen_kwargs={
'filepath': data_dir.joinpath('sampled_api.jsonl')
}
)
])
if not self.config.skip_pretrain:
splits.append(
SplitGenerator(
name='mined',
gen_kwargs={
'filepath': data_dir.joinpath('pretrain.jsonl')
}
)
)
return splits
def _getDataFromExample(self, example):
# Get the correct intent & snippet based on if we want the canonical
# version.
if self.config.use_canonical_intent:
intent = example['canonical_intent']
else:
intent = example['normal_intent']
        if self.config.use_canonical_snippet:
snippet = example['canonical_snippet']
else:
snippet = example['snippet']
# If we are currently parsing api data, there will be no tags,
# title, etc because that is from StackOverflow. Therefore, add
# empty values for them as the default for get.
title = example.get('title', None)
body = example.get('body', None)
tags = example.get('tags', [])
answer_body = example.get('answer_body', snippet)
question_id = example.get('question_id', None)
slot_map = example.get('slot_map', None)
if slot_map is None:
slot_map = {}
slot_map = [{'key': k, **v} for k, v in slot_map.items()]
# You cannot have an none int w/ HuggingFace.
answer_score = example.get('answer_score', None)
score = example.get('score', None)
if score is not None:
score = str(score)
if answer_score is not None:
answer_score = str(answer_score)
return question_id, {
'question_id' : str(question_id),
'intent' : intent,
'snippet' : snippet,
'title' : title,
'body' : body,
'answer_body' : answer_body,
'answer_score': answer_score,
'tags' : tags,
'score' : score,
'slot_map' : slot_map
}
def _generate_examples(self, filepath: Path):
# Store question data for the questionGen objective
question_data = {}
sequential_keys = ['answer_body', 'answer_score', 'snippet']
# Read through the data file and yield the data
for idx, line in enumerate(filepath.read_text('utf-8').splitlines(False)):
qid, example = self._getDataFromExample(json.loads(line))
if self.config.objective != 'QuestionGen':
# example['index'] = idx
yield idx, example
else:
if qid is None:
raise ValueError(f'Objective is QuestionGen but question {idx} has no id!')
# Multiple answers for one question, so we group them here.
if qid not in question_data:
# Get data from the keys that would not change based on the
# answer.
question_dict = {k: v for k, v in example.items() if
k not in sequential_keys}
for k in sequential_keys:
# We need to make the list because we expect many
# answers for each question.
question_dict[k] = [example[k]]
question_data[qid] = question_dict
else:
for k in sequential_keys:
question_data[qid][k].append(example[k])
if self.config.objective == 'QuestionGen':
for idx, example in enumerate(question_data.values()):
# example['index'] = idx
yield idx, example
```
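A minimal sketch of how the `BaseDataset` builder above could be loaded through the `datasets` library, assuming a `datasets` version that still supports loading local builder scripts; the script path and the config overrides are placeholder assumptions, not values taken from the repository.
```python
# Sketch: loading the builder script above with Hugging Face `datasets`.
# The local path and the config overrides below are assumptions for illustration.
from datasets import load_dataset

dataset = load_dataset(
    'src/dataset/base_dataset.py',  # path to the builder script above
    objective='CodeGen',            # BaseDatasetConfig field
    skip_api=True,                  # skip the direct_api/sampled_api splits
    skip_pretrain=True,             # skip the mined split
)
print(dataset['train'][0]['intent'])
print(dataset['train'][0]['snippet'])
```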
#### File: stackoverflow-encourages-cheating/src/experiment_functions.py
```python
import logging
import os
import random
from pathlib import Path
import ml_collections
import numpy as np
import plac
import torch
from numpy.random import default_rng
from transformers import (
AutoTokenizer, AutoModelForSeq2SeqLM, AutoConfig,
Seq2SeqTrainer, Seq2SeqTrainingArguments, DataCollatorForSeq2Seq, PreTrainedModel
)
from datasets import Dataset
from typing import List
from src.common.file_util import setupLoggers, strToPath
from src.common.training_util import loadDatasets
from src.evaluation import *
from src.processor import *
from src.processor.code_generation import CodeGenerationProcessor
__all__ = [
"createModel",
"trainingArgs"
]
def createModel(model_name: str,
max_target_len: int,
vocab_size: int,
device,
keys_to_ignore: List[str] = None):
config = AutoConfig.from_pretrained(model_name)
config.max_length = max_target_len
model = AutoModelForSeq2SeqLM.from_pretrained(
model_name,
config=config
)
model.resize_token_embeddings(vocab_size).to(device)
if not hasattr(model, 'keys_to_ignore_at_inference'):
model.keys_to_ignore_at_inference = []
model.keys_to_ignore_at_inference.extend(keys_to_ignore or [])
return model
def trainingArgs(
logging_dir,
label_names=None,
no_cuda=False,
seed=1995,
batch_size=16,
epochs=10,
save_limit=2):
if label_names is None:
label_names = ['labels']
config = ml_collections.ConfigDict()
config.output_dir = logging_dir
config.evaluation_strategy = "epoch"
config.per_device_train_batch_size = batch_size
config.per_device_eval_batch_size = batch_size
config.logging_dir = logging_dir
config.save_total_limit = save_limit
config.learning_rate = 5e-5
config.seed = seed
config.num_train_epochs = epochs
config.dataloader_num_workers = 0
config.label_names = label_names
config.no_cuda = no_cuda
config.load_best_model_at_end = True
config.group_by_length = True
config.warmup_ratio = 0.05
config.logging_strategy = 'epoch'
return config
``` |
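A minimal sketch of wiring `createModel` and `trainingArgs` into a `Seq2SeqTrainer` setup; the model name, target length, batch size, and logging directory are placeholder assumptions.
```python
# Sketch: using the helpers above to build a model and training arguments.
# 't5-small', max_target_len=128, and './runs' are placeholder assumptions.
import torch
from transformers import AutoTokenizer, Seq2SeqTrainingArguments

from src.experiment_functions import createModel, trainingArgs

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
tokenizer = AutoTokenizer.from_pretrained('t5-small')

model = createModel(
    model_name='t5-small',
    max_target_len=128,
    vocab_size=len(tokenizer),
    device=device,
)

config = trainingArgs(logging_dir='./runs', batch_size=8, epochs=3)
# Newer transformers versions require the save and eval strategies to match
# when load_best_model_at_end is set, so save_strategy is pinned to 'epoch'.
training_args = Seq2SeqTrainingArguments(save_strategy='epoch', **config.to_dict())
# `model` and `training_args` can now be handed to a Seq2SeqTrainer together
# with tokenized datasets and a DataCollatorForSeq2Seq.
```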
{
"source": "jhk523/news-trend",
"score": 3
} |
#### File: python/scripts/azure_test.py
```python
import numpy as np
from newstrends import azure
from newstrends.data import mysql
def print_scores(documents, scores):
sentiments = ['positive', 'neutral', 'negative']
for t, s in zip(documents, scores):
scores_str = ', '.join(f'{e:.3f}' for e in s)
print(f'Document: {t}')
print(f'Sentiment: {sentiments[np.argmax(s)]}')
print(f'Scores: ({scores_str})')
print()
def main():
titles = mysql.select_all_titles(preprocess=True)
titles = titles[:1000]
scores = azure.compute_scores(titles)
print_scores(titles, scores)
if __name__ == '__main__':
main()
```
#### File: python/scripts/classify_publisher.py
```python
import os
import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset
from newstrends import utils, models
from newstrends.data import mysql
def save_strings(path, name, data):
if isinstance(data, list):
data = np.array(data, dtype=str)
np.savetxt(os.path.join(path, name), data, fmt='%s')
def save_as_pieces(model, path, publishers):
entries = mysql.select_articles(
field=['title', 'publisher'], publishers=publishers)
titles = [e[0] for e in entries]
titles = utils.preprocess(titles)
publishers = [e[1] for e in entries]
os.makedirs(path, exist_ok=True)
save_strings(path, 'titles.tsv', titles)
save_strings(path, 'labels.tsv', publishers)
piece_list = []
piece_path = os.path.join(path, 'pieces.tsv')
with open(piece_path, 'w') as f1:
for title in titles:
pieces = model.EncodeAsPieces(title)
piece_list.append(pieces)
f1.write('\t'.join(pieces) + '\n')
def read_pieces(path):
pieces = []
with open(path) as f:
for line in f:
pieces.append(line.strip().split('\t'))
return pieces
def read_labels_as_tensor(path, label_map):
labels = []
with open(path) as f:
for line in f:
labels.append(label_map[line.strip()])
return torch.tensor(labels)
def print_predictions(model, loader, titles, path):
model.eval()
device = utils.to_device()
count = 0
f = open(path, 'w')
for x, y in loader:
x = x.to(device)
y = y.to(device)
y_pred = torch.softmax(model(x), dim=1)
for i in range(x.size(0)):
pred_str = ', '.join(f'{e * 100:.1f}' for e in y_pred[i])
label = '조선일보' if y[i] == 0 else '한겨레경향'
f.write(f'Title: {titles[count]}\n')
f.write(f'Prediction: ({pred_str})\n')
f.write(f'True label: ({label})\n')
f.write('\n')
count += 1
f.close()
def start_interactive_session(model, spm_model, vocabulary):
model.eval()
device = utils.to_device()
while True:
print('Sentence:', end=' ')
sentence = input()
if len(sentence.strip()) == 0:
continue
pieces = spm_model.encode_as_pieces(sentence)
matrix = utils.to_integer_matrix([pieces], vocabulary).to(device)
y_pred = torch.softmax(model(matrix), dim=1)
pred_str = ', '.join(f'{e * 100:.1f}' for e in y_pred[0])
print(f'Prediction: ({pred_str})')
print()
def main():
publishers = ['조선일보', '경향신문', '한겨례']
label_map = dict(조선일보=0, 경향신문=1, 한겨례=1)
spm_path = '../../data/sentencepiece'
pub_path = '../../data/publishers'
num_classes = 2
embedding_dim = 12
batch_size = 512
dropout = 0.5
spm_model = utils.load_sentencepiece(spm_path)
spm_vocab = utils.read_vocabulary(spm_path)
vocab_size = len(spm_vocab)
device = utils.to_device()
cls_model = models.RNNClassifier(
vocab_size, num_classes, embedding_dim, dropout=dropout).to(device)
cls_path = os.path.join(pub_path, 'model.pth')
if not os.path.exists(pub_path):
train_path = os.path.join(pub_path, 'train')
save_as_pieces(spm_model, train_path, publishers)
pieces = read_pieces(os.path.join(train_path, 'pieces.tsv'))
features = utils.to_integer_matrix(pieces, spm_vocab)
labels = read_labels_as_tensor(
os.path.join(train_path, 'labels.tsv'), label_map)
loader = DataLoader(
TensorDataset(features, labels), batch_size, shuffle=True)
os.makedirs(os.path.dirname(cls_path), exist_ok=True)
utils.train_model(
cls_model, loader, lr=1e-4, num_epochs=1500, print_every=100, patience=100)
torch.save(cls_model.state_dict(), cls_path)
titles = open(os.path.join(train_path, 'titles.tsv')).readlines()
titles = [e.strip() for e in titles]
print_predictions(cls_model, loader, titles, f'{pub_path}/predictions.txt')
else:
cls_model.load_state_dict(torch.load(cls_path, map_location=device))
start_interactive_session(cls_model, spm_model, spm_vocab)
if __name__ == '__main__':
main()
```
#### File: python/scripts/find_keyword_sentiment.py
```python
import numpy as np
from newstrends import utils
def main():
keyword = '<NAME>'
df = utils.search_keyword_sentiment(keyword)
sentiments = ['긍정적', '중립적', '부정적']
if df is None:
return
for pub, df_ in df.groupby(by='publisher'):
print(f'[Publisher] {pub}')
df_ = df_.sort_values(by='date', ascending=False)
for date, df__ in df_.groupby(by='date', sort=False):
avg_score = df__[['pos_score', 'neu_score', 'neg_score']].values.mean(axis=0)
print('[{}] {:2d}개 기사 - {} ({})'.format(
date.strftime('%Y/%m/%d'),
df__.shape[0],
sentiments[np.argmax(avg_score)],
', '.join(f'{e:.3f}' for e in avg_score)))
print()
if __name__ == '__main__':
main()
```
#### File: website/search/views.py
```python
from django.shortcuts import render
# Create your views here.
from django.views.generic import TemplateView
from django.views.generic.edit import FormView
from search.models import NewsArticle
from search.forms import SearchValue
SEARCH_WORD = ""
class Search(TemplateView):
template_name = 'search/index.html'
def get_context_data(self, *args, **kwargs):
from newstrends.utils import find_popular_keywords
context = super().get_context_data(*args, **kwargs)
df = find_popular_keywords()
dates_list = df.date.unique()
date1_df = df[df['date'] == dates_list[0]]
date2_df = df[df['date'] == dates_list[1]]
date1_list = date1_df.to_dict('records')
date2_list = date2_df.to_dict('records')
context['date1'] = dates_list[0]
context['date2'] = dates_list[1]
context['date1_list'] = date1_list
context['date2_list'] = date2_list
return context
class Result(FormView):
template_name = 'search/result.html'
form_class = SearchValue
success_url = '/result/'
def __init__(self, **kwargs):
super().__init__(**kwargs)
def form_valid(self, form):
global SEARCH_WORD
SEARCH_WORD = form.cleaned_data['search_value']
return super(Result, self).form_valid(form)
def get_context_data(self, *args, **kwargs):
context = super(Result, self).get_context_data(*args, **kwargs)
search_word_len = len(SEARCH_WORD.split(' '))
if search_word_len > 2:
from newstrends.utils import compute_sentence_polarity
polarity = compute_sentence_polarity(SEARCH_WORD)
context['keyword'] = SEARCH_WORD
context['type'] = 'polarity'
context['left'] = polarity['진보']
context['right'] = polarity['보수']
return context
else:
from newstrends.utils import search_keyword_sentiment
df = search_keyword_sentiment(SEARCH_WORD)
df.sort_values(by='date', ascending=False, inplace=True)
df['pos_score'] = df['pos_score'].apply(lambda x: '{:.2f}'.format(x))
df['neg_score'] = df['neg_score'].apply(lambda x: '{:.2f}'.format(x))
df['neu_score'] = df['neu_score'].apply(lambda x: '{:.2f}'.format(x))
df_list = df.to_dict('records')
publishers_list = df.publisher.unique()
context['keyword'] = SEARCH_WORD
context['type'] = 'sentiment'
context['df'] = df_list
context['publishers'] = publishers_list
return context
``` |
{
"source": "jhkennedy/asf-tools",
"score": 3
} |
#### File: asf-tools/asf_tools/composite.py
```python
import argparse
import logging
import os
import sys
from pathlib import Path
from statistics import multimode
from tempfile import NamedTemporaryFile, TemporaryDirectory
from typing import List, Union
import numpy as np
from osgeo import gdal, osr
gdal.UseExceptions()
log = logging.getLogger(__name__)
def get_epsg_code(info: dict) -> int:
"""Get the EPSG code from a GDAL Info dictionary
Args:
info: The dictionary returned by a gdal.Info call
Returns:
epsg_code: The integer EPSG code
"""
proj = osr.SpatialReference(info['coordinateSystem']['wkt'])
epsg_code = int(proj.GetAttrValue('AUTHORITY', 1))
return epsg_code
def epsg_to_wkt(epsg_code: int) -> str:
"""Get the WKT representation of a projection from its EPSG code
Args:
epsg_code: The integer EPSG code
Returns:
wkt: The WKT representation of the projection
"""
srs = osr.SpatialReference()
srs.ImportFromEPSG(epsg_code)
return srs.ExportToWkt()
def get_target_epsg_code(codes: List[int]) -> int:
"""Determine the target UTM EPSG projection for the output composite
Args:
codes: List of UTM EPSG codes
Returns:
target: UTM EPSG code
"""
# use median east/west UTM zone of all files, regardless of hemisphere
# UTM EPSG codes for each hemisphere will look like:
# North: 326XX
# South: 327XX
valid_codes = list(range(32601, 32661)) + list(range(32701, 32761))
if bad_codes := set(codes) - set(valid_codes):
raise ValueError(f'Non UTM EPSG code encountered: {bad_codes}')
hemispheres = [c // 100 * 100 for c in codes]
# if even modes, choose lowest (North)
target_hemisphere = min(multimode(hemispheres))
zones = sorted([c % 100 for c in codes])
    # if even length, choose first of median two
target_zone = zones[(len(zones) - 1) // 2]
return target_hemisphere + target_zone
def get_area_raster(raster: str) -> str:
"""Determine the path of the area raster for a given backscatter raster based on naming conventions for HyP3 RTC
products
Args:
raster: path of the backscatter raster, e.g. S1A_IW_20181102T155531_DVP_RTC30_G_gpuned_5685_VV.tif
Returns:
area_raster: path of the area raster, e.g. S1A_IW_20181102T155531_DVP_RTC30_G_gpuned_5685_area.tif
"""
return '_'.join(raster.split('_')[:-1] + ['area.tif'])
def get_full_extent(raster_info: dict):
"""Determine the corner coordinates and geotransform for the full extent of a set of rasters
Args:
raster_info: A dictionary of gdal.Info results for the set of rasters
Returns:
upper_left: The upper left corner of the extent as a tuple
upper_right: The lower right corner of the extent as a tuple
geotransform: The geotransform of the extent as a list
"""
upper_left_corners = [info['cornerCoordinates']['upperLeft'] for info in raster_info.values()]
lower_right_corners = [info['cornerCoordinates']['lowerRight'] for info in raster_info.values()]
ulx = min([ul[0] for ul in upper_left_corners])
uly = max([ul[1] for ul in upper_left_corners])
lrx = max([lr[0] for lr in lower_right_corners])
lry = min([lr[1] for lr in lower_right_corners])
log.debug(f'Full extent raster upper left: ({ulx, uly}); lower right: ({lrx, lry})')
trans = []
for info in raster_info.values():
# Only need info from any one raster
trans = info['geoTransform']
break
trans[0] = ulx
trans[3] = uly
return (ulx, uly), (lrx, lry), trans
def reproject_to_target(raster_info: dict, target_epsg_code: int, target_resolution: float, directory: str) -> dict:
"""Reprojects a set of raster images to a common projection and resolution
Args:
raster_info: A dictionary of gdal.Info results for the set of rasters
target_epsg_code: The integer EPSG code for the target projection
target_resolution: The target resolution
directory: The directory in which to create the reprojected files
Returns:
target_raster_info: An updated dictionary of gdal.Info results for the reprojected files
"""
target_raster_info = {}
for raster, info in raster_info.items():
epsg_code = get_epsg_code(info)
resolution = info['geoTransform'][1]
if epsg_code != target_epsg_code or resolution != target_resolution:
log.info(f'Reprojecting {raster}')
reprojected_raster = os.path.join(directory, os.path.basename(raster))
gdal.Warp(
reprojected_raster, raster, dstSRS=f'EPSG:{target_epsg_code}',
xRes=target_resolution, yRes=target_resolution, targetAlignedPixels=True
)
area_raster = get_area_raster(raster)
log.info(f'Reprojecting {area_raster}')
reprojected_area_raster = os.path.join(directory, os.path.basename(area_raster))
gdal.Warp(
reprojected_area_raster, area_raster, dstSRS=f'EPSG:{target_epsg_code}',
xRes=target_resolution, yRes=target_resolution, targetAlignedPixels=True
)
target_raster_info[reprojected_raster] = gdal.Info(reprojected_raster, format='json')
else:
log.info(f'No need to reproject {raster}')
target_raster_info[raster] = info
return target_raster_info
def read_as_array(raster: str, band: int = 1) -> np.array:
"""Reads data from a raster image into memory
Args:
raster: The file path to a raster image
band: The raster band to read
Returns:
data: The raster pixel data as a numpy array
"""
log.debug(f'Reading raster values from {raster}')
ds = gdal.Open(raster)
data = ds.GetRasterBand(band).ReadAsArray()
    del ds  # dereference to close the GDAL dataset
return data
def write_cog(file_name: Union[str, Path], data: np.ndarray, transform: List[float], epsg_code: int,
dtype=gdal.GDT_Float32, nodata_value=None):
"""Creates a Cloud Optimized GeoTIFF
Args:
file_name: The output file name
data: The raster data
transform: The geotransform for the output GeoTIFF
epsg_code: The integer EPSG code for the output GeoTIFF projection
dtype: The pixel data type for the output GeoTIFF
nodata_value: The NODATA value for the output Geotiff
Returns:
file_name: The output file name
"""
log.info(f'Creating {file_name}')
with NamedTemporaryFile() as temp_file:
driver = gdal.GetDriverByName('GTiff')
temp_geotiff = driver.Create(temp_file.name, data.shape[1], data.shape[0], 1, dtype)
temp_geotiff.GetRasterBand(1).WriteArray(data)
if nodata_value is not None:
temp_geotiff.GetRasterBand(1).SetNoDataValue(nodata_value)
temp_geotiff.SetGeoTransform(transform)
temp_geotiff.SetProjection(epsg_to_wkt(epsg_code))
driver = gdal.GetDriverByName('COG')
options = ['COMPRESS=LZW', 'OVERVIEW_RESAMPLING=AVERAGE', 'NUM_THREADS=ALL_CPUS', 'BIGTIFF=YES']
driver.CreateCopy(str(file_name), temp_geotiff, options=options)
        del temp_geotiff  # dereference to close the GDAL dataset
return file_name
def make_composite(out_name: str, rasters: List[str], resolution: float = None):
"""Creates a local-resolution-weighted composite from Sentinel-1 RTC products
Args:
out_name: The base name of the output GeoTIFFs
rasters: A list of file paths of the images to composite
resolution: The pixel size for the output GeoTIFFs
Returns:
out_raster: Path to the created composite backscatter GeoTIFF
out_counts_raster: Path to the created GeoTIFF with counts of scenes contributing to each pixel
"""
if not rasters:
raise ValueError('Must specify at least one raster to composite')
raster_info = {}
for raster in rasters:
raster_info[raster] = gdal.Info(raster, format='json')
# make sure gdal can read the area raster
gdal.Info(get_area_raster(raster))
target_epsg_code = get_target_epsg_code([get_epsg_code(info) for info in raster_info.values()])
log.debug(f'Composite projection is EPSG:{target_epsg_code}')
if resolution is None:
resolution = max([info['geoTransform'][1] for info in raster_info.values()])
log.debug(f'Composite resolution is {resolution} meters')
# resample rasters to maximum resolution & common UTM zone
with TemporaryDirectory(prefix='reprojected_') as temp_dir:
raster_info = reproject_to_target(raster_info, target_epsg_code=target_epsg_code, target_resolution=resolution,
directory=temp_dir)
# Get extent of union of all images
full_ul, full_lr, full_trans = get_full_extent(raster_info)
nx = int(abs(full_ul[0] - full_lr[0]) // resolution)
ny = int(abs(full_ul[1] - full_lr[1]) // resolution)
outputs = np.zeros((ny, nx))
weights = np.zeros(outputs.shape)
counts = np.zeros(outputs.shape, dtype=np.int8)
for raster, info in raster_info.items():
log.info(f'Processing raster {raster}')
log.debug(f"Raster upper left: {info['cornerCoordinates']['upperLeft']}; "
f"lower right: {info['cornerCoordinates']['lowerRight']}")
values = read_as_array(raster)
area_raster = get_area_raster(raster)
areas = read_as_array(area_raster)
ulx, uly = info['cornerCoordinates']['upperLeft']
y_index_start = int((full_ul[1] - uly) // resolution)
y_index_end = y_index_start + values.shape[0]
x_index_start = int((ulx - full_ul[0]) // resolution)
x_index_end = x_index_start + values.shape[1]
log.debug(
f'Placing values in output grid at {y_index_start}:{y_index_end} and {x_index_start}:{x_index_end}'
)
mask = values == 0
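            # Local-resolution weighting (rationale assumed from the docstring above):
            # a smaller scattering area corresponds to a finer effective resolution,
            # so each pixel's contribution is weighted by 1 / area.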
raster_weights = 1.0 / areas
raster_weights[mask] = 0
outputs[y_index_start:y_index_end, x_index_start:x_index_end] += values * raster_weights
weights[y_index_start:y_index_end, x_index_start:x_index_end] += raster_weights
counts[y_index_start:y_index_end, x_index_start:x_index_end] += ~mask
del values, areas, mask, raster_weights
# Divide by the total weight applied
outputs /= weights
del weights
out_raster = write_cog(f'{out_name}.tif', outputs, full_trans, target_epsg_code, nodata_value=0)
del outputs
out_counts_raster = write_cog(f'{out_name}_counts.tif', counts, full_trans, target_epsg_code, dtype=gdal.GDT_Int16)
del counts
return out_raster, out_counts_raster
def main():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument('out_name', help='Base name of output composite GeoTIFF (without extension)')
parser.add_argument('rasters', nargs='+', help='Sentinel-1 GeoTIFF rasters to composite')
parser.add_argument('-r', '--resolution', type=float,
help='Desired output resolution in meters '
'(default is the max resolution of all the input files)')
parser.add_argument('-v', '--verbose', action='store_true', help='Turn on verbose logging')
args = parser.parse_args()
level = logging.DEBUG if args.verbose else logging.INFO
logging.basicConfig(stream=sys.stdout, format='%(asctime)s - %(levelname)s - %(message)s', level=level)
log.debug(' '.join(sys.argv))
log.info(f'Creating a composite of {len(args.rasters)} rasters')
raster, counts = make_composite(args.out_name, args.rasters, args.resolution)
log.info(f'Composite created successfully: {raster}')
log.info(f'Number of rasters contributing to each pixel: {counts}')
```
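Beyond the CLI entry point, `make_composite` can be called directly; a minimal sketch, assuming placeholder RTC file names and that each backscatter GeoTIFF has a matching `*_area.tif` beside it.
```python
# Sketch: calling make_composite directly instead of through the CLI.
# The RTC file names are placeholders; get_area_raster() expects a matching
# *_area.tif next to every backscatter raster.
from asf_tools.composite import make_composite

rasters = [
    'S1A_IW_20181102T155531_DVP_RTC30_G_gpuned_5685_VV.tif',
    'S1B_IW_20181104T030247_DVP_RTC30_G_gpuned_9F91_VV.tif',
]
out_raster, out_counts_raster = make_composite('november_composite', rasters, resolution=30.0)
print(f'Backscatter composite: {out_raster}')
print(f'Scene counts raster:   {out_counts_raster}')
```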
#### File: asf_tools/hand/calculate.py
```python
import argparse
import logging
import sys
import warnings
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import Optional, Union
import astropy.convolution
import fiona
import numpy as np
import rasterio.crs
import rasterio.mask
from pysheds.grid import Grid
from shapely.geometry import GeometryCollection, shape
from asf_tools.composite import write_cog
from asf_tools.dem import prepare_dem_vrt
log = logging.getLogger(__name__)
def fill_nan(array: np.ndarray) -> np.ndarray:
"""Replace NaNs with values interpolated from their neighbors
Replace NaNs with values interpolated from their neighbors using a 2D Gaussian
kernel, see: https://docs.astropy.org/en/stable/convolution/#using-astropy-s-convolution-to-replace-bad-data
"""
kernel = astropy.convolution.Gaussian2DKernel(x_stddev=3) # kernel x_size=8*stddev
with warnings.catch_warnings():
warnings.simplefilter("ignore")
array = astropy.convolution.interpolate_replace_nans(
array, kernel, convolve=astropy.convolution.convolve
)
return array
def calculate_hand(dem_array, dem_affine: rasterio.Affine, dem_crs: rasterio.crs.CRS, basin_mask,
acc_thresh: Optional[int] = 100):
"""Calculate the Height Above Nearest Drainage (HAND)
Calculate the Height Above Nearest Drainage (HAND) using pySHEDS library. Because HAND
is tied to watershed boundaries (hydrobasins), clipped/cut basins will produce weird edge
effects, and incomplete basins should be masked out. For watershed boundaries,
see: https://www.hydrosheds.org/page/hydrobasins
This involves:
* Filling depressions (regions of cells lower than their surrounding neighbors)
in the Digital Elevation Model (DEM)
* Resolving un-drainable flats
* Determine the flow direction using the ESRI D8 routing scheme
* Determine flow accumulation (number of upstream cells)
* Create a drainage mask using the accumulation threshold `acc_thresh`
* Calculating HAND
In the HAND calculation, NaNs inside the basin filled using `fill_nan`
Args:
dem_array: DEM to calculate HAND for
dem_crs: DEM Coordinate Reference System (CRS)
dem_affine: DEM Affine geotransform
        basin_mask: Array of booleans indicating whether an element should be masked out (à la Numpy Masked Arrays:
https://numpy.org/doc/stable/reference/maskedarray.generic.html#what-is-a-masked-array)
acc_thresh: Accumulation threshold for determining the drainage mask.
If `None`, the mean accumulation value is used
"""
grid = Grid()
grid.add_gridded_data(dem_array, data_name='dem', affine=dem_affine, crs=dem_crs.to_dict(), mask=~basin_mask)
log.info('Filling depressions')
grid.fill_depressions('dem', out_name='flooded_dem')
if np.isnan(grid.flooded_dem).any():
log.debug('NaNs encountered in flooded DEM; filling.')
grid.flooded_dem = fill_nan(grid.flooded_dem)
log.info('Resolving flats')
grid.resolve_flats('flooded_dem', out_name='inflated_dem')
if np.isnan(grid.inflated_dem).any():
log.debug('NaNs encountered in inflated DEM; replacing NaNs with original DEM values')
grid.inflated_dem[np.isnan(grid.inflated_dem)] = dem_array[np.isnan(grid.inflated_dem)]
log.info('Obtaining flow direction')
grid.flowdir(data='inflated_dem', out_name='dir', apply_mask=True)
if np.isnan(grid.dir).any():
log.debug('NaNs encountered in flow direction; filling.')
grid.dir = fill_nan(grid.dir)
log.info('Calculating flow accumulation')
grid.accumulation(data='dir', out_name='acc')
if np.isnan(grid.acc).any():
log.debug('NaNs encountered in accumulation; filling.')
grid.acc = fill_nan(grid.acc)
if acc_thresh is None:
acc_thresh = grid.acc.mean()
log.info(f'Calculating HAND using accumulation threshold of {acc_thresh}')
hand = grid.compute_hand('dir', 'inflated_dem', grid.acc > acc_thresh, inplace=False)
if np.isnan(hand).any():
log.debug('NaNs encountered in HAND; filling.')
hand = fill_nan(hand)
# ensure non-basin is masked after fill_nan
hand[basin_mask] = np.nan
return hand
def calculate_hand_for_basins(out_raster: Union[str, Path], geometries: GeometryCollection,
dem_file: Union[str, Path]):
"""Calculate the Height Above Nearest Drainage (HAND) for watershed boundaries (hydrobasins).
For watershed boundaries, see: https://www.hydrosheds.org/page/hydrobasins
Args:
out_raster: HAND GeoTIFF to create
geometries: watershed boundary (hydrobasin) polygons to calculate HAND over
dem_file: DEM raster covering (containing) `geometries`
"""
with rasterio.open(dem_file) as src:
basin_mask, basin_affine_tf, basin_window = rasterio.mask.raster_geometry_mask(
src, geometries, all_touched=True, crop=True, pad=True, pad_width=1
)
basin_array = src.read(1, window=basin_window)
hand = calculate_hand(basin_array, basin_affine_tf, src.crs, basin_mask)
write_cog(str(out_raster), hand, transform=basin_affine_tf.to_gdal(), epsg_code=src.crs.to_epsg())
def make_copernicus_hand(out_raster: Union[str, Path], vector_file: Union[str, Path]):
"""Copernicus GLO-30 Height Above Nearest Drainage (HAND)
Make a Height Above Nearest Drainage (HAND) GeoTIFF from the Copernicus GLO-30 DEM
covering the watershed boundaries (hydrobasins) defined in a vector file.
For watershed boundaries, see: https://www.hydrosheds.org/page/hydrobasins
Args:
out_raster: HAND GeoTIFF to create
vector_file: Vector file of watershed boundary (hydrobasin) polygons to calculate HAND over
"""
with fiona.open(vector_file) as vds:
geometries = GeometryCollection([shape(feature['geometry']) for feature in vds])
with NamedTemporaryFile(suffix='.vrt', delete=False) as dem_vrt:
prepare_dem_vrt(dem_vrt.name, geometries)
calculate_hand_for_basins(out_raster, geometries, dem_vrt.name)
def main():
parser = argparse.ArgumentParser(
description=__doc__,
epilog='For watershed boundaries, see: https://www.hydrosheds.org/page/hydrobasins',
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument('out_raster', type=Path,
help='HAND GeoTIFF to create')
parser.add_argument('vector_file', type=Path,
help='Vector file of watershed boundary (hydrobasin) polygons to calculate HAND over. '
'Vector file Must be openable by GDAL, see: https://gdal.org/drivers/vector/index.html')
parser.add_argument('-v', '--verbose', action='store_true', help='Turn on verbose logging')
args = parser.parse_args()
level = logging.DEBUG if args.verbose else logging.INFO
logging.basicConfig(stream=sys.stdout, format='%(asctime)s - %(levelname)s - %(message)s', level=level)
log.debug(' '.join(sys.argv))
log.info(f'Calculating HAND for {args.vector_file}')
make_copernicus_hand(args.out_raster, args.vector_file)
log.info(f'HAND GeoTIFF created successfully: {args.out_raster}')
```
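A minimal usage sketch for the module above; the vector and output file names are placeholder assumptions, and the vector file is expected to hold HydroBASINS watershed polygons in EPSG:4326.
```python
# Sketch: generating a Copernicus GLO-30 HAND raster for a set of hydrobasins.
# 'hybas_basins.geojson' and 'hand.tif' are placeholder paths.
import logging
import sys

from asf_tools.hand.calculate import make_copernicus_hand

logging.basicConfig(stream=sys.stdout, level=logging.INFO)
make_copernicus_hand('hand.tif', 'hybas_basins.geojson')
```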
#### File: asf_tools/hand/prepare.py
```python
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import Union
from osgeo import gdal, ogr
from rasterio.enums import Resampling
from shapely.geometry import shape
from shapely.geometry.base import BaseGeometry
from asf_tools import vector
from asf_tools.composite import get_epsg_code
from asf_tools.util import GDALConfigManager
HAND_GEOJSON = '/vsicurl/https://asf-hand-data.s3.amazonaws.com/cop30-hand.geojson'
gdal.UseExceptions()
ogr.UseExceptions()
def prepare_hand_vrt(vrt: Union[str, Path], geometry: Union[ogr.Geometry, BaseGeometry]):
"""Prepare a HAND mosaic VRT covering a given geometry
Prepare a Height Above Nearest Drainage (HAND) virtual raster (VRT) covering a given geometry.
The Height Above Nearest Drainage (HAND) mosaic is assembled from the HAND tiles that intersect
the geometry, using a HAND derived from the Copernicus GLO-30 DEM.
Note: `asf_tools` does not currently support geometries that cross the antimeridian.
Args:
vrt: Path for the output VRT file
geometry: Geometry in EPSG:4326 (lon/lat) projection for which to prepare a HAND mosaic
"""
with GDALConfigManager(GDAL_DISABLE_READDIR_ON_OPEN='EMPTY_DIR'):
if isinstance(geometry, BaseGeometry):
geometry = ogr.CreateGeometryFromWkb(geometry.wkb)
min_lon, max_lon, _, _ = geometry.GetEnvelope()
if min_lon < -160. and max_lon > 160.:
raise ValueError(f'asf_tools does not currently support geometries that cross the antimeridian: {geometry}')
tile_features = vector.get_features(HAND_GEOJSON)
if not vector.get_property_values_for_intersecting_features(geometry, tile_features):
raise ValueError(f'Copernicus GLO-30 HAND does not intersect this geometry: {geometry}')
hand_file_paths = vector.intersecting_feature_properties(geometry, tile_features, 'file_path')
gdal.BuildVRT(str(vrt), hand_file_paths)
def prepare_hand_for_raster(hand_raster: Union[str, Path], source_raster: Union[str, Path],
resampling_method: str = 'lanczos'):
"""Create a HAND raster pixel-aligned to a source raster
Args:
hand_raster: Path for the output HAND raster
source_raster: Path for the source raster
resampling_method: Name of the resampling method to use. For available methods, see:
https://gdal.org/programs/gdalwarp.html#cmdoption-gdalwarp-r
"""
info = gdal.Info(str(source_raster), format='json')
hand_geometry = shape(info['wgs84Extent'])
hand_bounds = [info['cornerCoordinates']['upperLeft'][0],
info['cornerCoordinates']['lowerRight'][1],
info['cornerCoordinates']['lowerRight'][0],
info['cornerCoordinates']['upperLeft'][1]]
with NamedTemporaryFile(suffix='.vrt', delete=False) as hand_vrt:
prepare_hand_vrt(hand_vrt.name, hand_geometry)
gdal.Warp(str(hand_raster), hand_vrt.name, dstSRS=f'EPSG:{get_epsg_code(info)}',
outputBounds=hand_bounds, width=info['size'][0], height=info['size'][1],
resampleAlg=Resampling[resampling_method].value)
```
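A minimal sketch of pairing a HAND raster with an existing scene; both file names are placeholder assumptions.
```python
# Sketch: building a HAND raster pixel-aligned to an existing GeoTIFF.
# Both file names below are placeholders.
from asf_tools.hand.prepare import prepare_hand_for_raster

prepare_hand_for_raster(
    hand_raster='scene_hand.tif',
    source_raster='S1A_IW_20181102T155531_DVP_RTC30_G_gpuned_5685_VV.tif',
    resampling_method='lanczos',
)
```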
#### File: asf-tools/tests/conftest.py
```python
from pathlib import Path
import numpy as np
import pytest
def pytest_addoption(parser):
parser.addoption('--integration', action='store_true', default=False, dest='integration',
help='enable integration tests')
def pytest_collection_modifyitems(config, items):
if not config.getoption('--integration'):
integration_skip = pytest.mark.skip(reason='Integration tests not requested; skipping.')
for item in items:
if 'integration' in item.keywords:
item.add_marker(integration_skip)
@pytest.fixture(scope='session')
def raster_tiles():
tiles_file = Path(__file__).parent / 'data' / 'em_tiles.npz'
tile_data = np.load(tiles_file)
tiles = np.ma.MaskedArray(tile_data['tiles'], mask=tile_data['mask'])
return np.log10(tiles) + 30
@pytest.fixture(scope='session')
def thresholds():
thresholds_file = Path(__file__).parent / 'data' / 'em_thresholds.npz'
thresholds_data = np.load(thresholds_file)
return thresholds_data['thresholds']
@pytest.fixture(scope='session')
def hand_candidates():
hand_file = Path(__file__).parent / 'data' / 'hand_candidates.npz'
hand_data = np.load(hand_file)
return hand_data['hand_candidates']
```
#### File: asf-tools/tests/test_composite.py
```python
import os
import numpy as np
import pytest
from osgeo import gdal
from asf_tools import composite
def test_get_epsg_code():
wkt = 'PROJCS["WGS 84 / UTM zone 54N",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",141],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH],AUTHORITY["EPSG","32654"]]'
info = {'coordinateSystem': {'wkt': wkt}}
assert composite.get_epsg_code(info) == 32654
wkt = 'PROJCS["WGS 84 / UTM zone 22N",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",-51],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH],AUTHORITY["EPSG","32622"]]'
info = {'coordinateSystem': {'wkt': wkt}}
assert composite.get_epsg_code(info) == 32622
wkt = 'PROJCS["WGS 84 / UTM zone 33S",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",15],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",10000000],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH],AUTHORITY["EPSG","32733"]]'
info = {'coordinateSystem': {'wkt': wkt}}
assert composite.get_epsg_code(info) == 32733
wkt = 'PROJCS["NAD83 / Alaska Albers",GEOGCS["NAD83",DATUM["North_American_Datum_1983",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],AUTHORITY["EPSG","6269"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4269"]],PROJECTION["Albers_Conic_Equal_Area"],PARAMETER["latitude_of_center",50],PARAMETER["longitude_of_center",-154],PARAMETER["standard_parallel_1",55],PARAMETER["standard_parallel_2",65],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH],AUTHORITY["EPSG","3338"]]'
info = {'coordinateSystem': {'wkt': wkt}}
assert composite.get_epsg_code(info) == 3338
def test_epsg_to_wkt():
wkt = 'PROJCS["WGS 84 / UTM zone 54N",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",141],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH],AUTHORITY["EPSG","32654"]]'
assert composite.epsg_to_wkt(32654) == wkt
wkt = 'PROJCS["WGS 84 / UTM zone 22N",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",-51],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH],AUTHORITY["EPSG","32622"]]'
assert composite.epsg_to_wkt(32622) == wkt
wkt = 'PROJCS["WGS 84 / UTM zone 33S",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",15],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",10000000],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH],AUTHORITY["EPSG","32733"]]'
assert composite.epsg_to_wkt(32733) == wkt
wkt = 'PROJCS["NAD83 / Alaska Albers",GEOGCS["NAD83",DATUM["North_American_Datum_1983",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],AUTHORITY["EPSG","6269"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4269"]],PROJECTION["Albers_Conic_Equal_Area"],PARAMETER["latitude_of_center",50],PARAMETER["longitude_of_center",-154],PARAMETER["standard_parallel_1",55],PARAMETER["standard_parallel_2",65],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH],AUTHORITY["EPSG","3338"]]'
assert composite.epsg_to_wkt(3338) == wkt
def test_get_target_epsg_code():
# Northern hemisphere
assert composite.get_target_epsg_code([32601]) == 32601
assert composite.get_target_epsg_code([32601, 32601]) == 32601
# both hemispheres
assert composite.get_target_epsg_code([32601, 32702]) == 32601
assert composite.get_target_epsg_code([32702, 32601]) == 32601
# Southern hemisphere
assert composite.get_target_epsg_code([32760]) == 32760
assert composite.get_target_epsg_code([32730, 32732]) == 32730
# antimeridian
assert composite.get_target_epsg_code([32701, 32760]) == 32701
assert composite.get_target_epsg_code([32701, 32760, 32701]) == 32701
assert composite.get_target_epsg_code([32701, 32760, 32760]) == 32760
assert composite.get_target_epsg_code(
[32731, 32631, 32731, 32631, 32732, 32633, 32733, 32633, 32733]
) == 32732
# bounds
with pytest.raises(ValueError):
composite.get_target_epsg_code([32600])
with pytest.raises(ValueError):
composite.get_target_epsg_code([32661])
with pytest.raises(ValueError):
composite.get_target_epsg_code([32700])
with pytest.raises(ValueError):
composite.get_target_epsg_code([32761])
with pytest.raises(ValueError):
composite.get_target_epsg_code([32601, 99, 32760])
def test_get_area_raster():
raster = 'S1A_IW_20181102T155531_DVP_RTC30_G_gpuned_5685_VV.tif'
assert composite.get_area_raster(raster) == 'S1A_IW_20181102T155531_DVP_RTC30_G_gpuned_5685_area.tif'
raster = './foo/S1B_IW_20181104T030247_DVP_RTC30_G_gpuned_9F91_VH.tif'
assert composite.get_area_raster(raster) == './foo/S1B_IW_20181104T030247_DVP_RTC30_G_gpuned_9F91_area.tif'
raster = '/tmp/bar/S1B_IW_20181102T031956_DVP_RTC30_G_gpuned_1259_HH.tif'
assert composite.get_area_raster(raster) == '/tmp/bar/S1B_IW_20181102T031956_DVP_RTC30_G_gpuned_1259_area.tif'
def test_get_full_extents():
data = {}
data['a'] = {
'cornerCoordinates': {
'upperLeft': [10.0, 130.0],
'lowerRight': [110.0, 30.0],
},
'geoTransform': [10.0, 2.0, 0.0, 40.0, 0.0, -2.0],
}
expected_upper_left = (10.0, 130.0)
expected_lower_right = (110.0, 30.0)
expected_geotransform = [10.0, 2.0, 0.0, 130.0, 0.0, -2.0]
assert composite.get_full_extent(data) == (expected_upper_left, expected_lower_right, expected_geotransform)
data['b'] = {
'cornerCoordinates': {
'upperLeft': [20.0, 140.0],
'lowerRight': [120.0, 40.0],
},
'geoTransform': [20.0, 1.0, 12.0, 140.0, 13.0, -1.0],
}
expected_upper_left = (10.0, 140.0)
expected_lower_right = (120.0, 30.0)
expected_geotransform = [10.0, 2.0, 0.0, 140.0, 0.0, -2.0]
assert composite.get_full_extent(data) == (expected_upper_left, expected_lower_right, expected_geotransform)
def test_write_cog(tmp_path):
outfile = tmp_path / 'out.tif'
data = np.ones((1024, 1024))
transform = [10.0, 0.0, 1.0, 20.0, 0.0, -1.0]
epsg_code = 4326
result = composite.write_cog(str(outfile), data, transform, epsg_code)
assert result == str(outfile)
assert outfile.exists()
info = gdal.Info(result, format='json')
assert info['geoTransform'] == transform
assert info['driverShortName'] == 'GTiff'
assert info['size'] == [1024, 1024]
assert 'overviews' in info['bands'][0]
assert info['metadata']['IMAGE_STRUCTURE']['LAYOUT'] == 'COG'
assert info['metadata']['IMAGE_STRUCTURE']['COMPRESSION'] == 'LZW'
def test_make_composite(tmp_path):
os.chdir(tmp_path)
epsg_code = 32601
transform = [0.0, 30.0, 0.0, 60.0, 0.0, -30.0]
data = np.array([
[1, 1, 1, 1],
[1, 1, 1, 1],
])
area = np.array([
[1, 1, 1, 1],
[1, 1, 1, 1],
])
composite.write_cog('first_data.tif', data, transform, epsg_code, nodata_value=0)
composite.write_cog('first_area.tif', area, transform, epsg_code)
transform = [30.0, 30.0, 0.0, 30.0, 0.0, -30.0]
data = np.array([
[3, 0, 3, 3],
[3, 0, 3, 3],
])
area = np.array([
[1, 1, 3, 1],
[1, 1, 2, 1],
])
composite.write_cog('second_data.tif', data, transform, epsg_code)
composite.write_cog('second_area.tif', area, transform, epsg_code)
out_file, count_file = composite.make_composite('out', ['first_data.tif', 'second_data.tif'])
assert out_file == 'out.tif'
assert count_file == 'out_counts.tif'
assert os.path.exists(out_file)
assert os.path.exists(count_file)
data = np.nan_to_num(composite.read_as_array(out_file))
expected = np.array([
[1, 1, 1, 1, 0],
[1, 2, 1, 1.5, 3],
[0, 3, 0, 3, 3],
])
assert np.allclose(data, expected)
counts = composite.read_as_array(count_file)
expected = np.array([
[1, 1, 1, 1, 0],
[1, 2, 1, 2, 1],
[0, 1, 0, 1, 1],
])
assert np.allclose(counts, expected)
```
#### File: asf-tools/tests/test_dem.py
```python
import json
import pytest
from osgeo import gdal, ogr
from asf_tools import dem
gdal.UseExceptions()
def test_prepare_dem_vrt_no_coverage():
geojson = {
'type': 'Point',
'coordinates': [0, 0],
}
geometry = ogr.CreateGeometryFromJson(json.dumps(geojson))
with pytest.raises(ValueError):
dem.prepare_dem_vrt('foo', geometry)
def test_prepare_dem_vrt(tmp_path):
dem_vrt = tmp_path / 'dem.tif'
geojson = {
'type': 'Polygon',
'coordinates': [[
[0.4, 10.16],
[0.4, 10.86],
[0.6, 10.86],
[0.6, 10.16],
[0.4, 10.16],
]],
}
geometry = ogr.CreateGeometryFromJson(json.dumps(geojson))
dem.prepare_dem_vrt(str(dem_vrt), geometry)
assert dem_vrt.exists()
info = gdal.Info(str(dem_vrt), format='json')
assert info['geoTransform'] == \
[-0.0001388888888889, 0.0002777777777778, 0.0, 11.00013888888889, 0.0, -0.0002777777777778]
assert info['size'] == [3600, 3600]
def test_prepare_dem_geotiff_antimeridian(tmp_path):
dem_vrt = tmp_path / 'dem.vrt'
geojson = {
'type': 'MultiPolygon',
'coordinates': [
[[
[179.5, 51.4],
[179.5, 51.6],
[180.0, 51.6],
[180.0, 51.4],
[179.5, 51.4],
]],
[[
[-180.0, 51.4],
[-180.0, 51.6],
[-179.5, 51.6],
[-179.5, 51.4],
[-180.0, 51.4],
]],
],
}
geometry = ogr.CreateGeometryFromJson(json.dumps(geojson))
with pytest.raises(ValueError):
dem.prepare_dem_vrt(str(dem_vrt), geometry)
``` |
{
"source": "jhkennedy/Discovery-asf_search",
"score": 3
} |
#### File: asf_search/search/product_search.py
```python
from typing import Union, Iterable
import asf_search.search
def product_search(
product_list: Iterable[str],
host: str = asf_search.INTERNAL.HOST,
output: str = 'geojson',
cmr_token: str = None,
cmr_provider: str = None
) -> dict:
"""
Performs a product ID search using the ASF SearchAPI
:param product_list: List of specific products. Results guaranteed to be at most one product per product name.
:param host: SearchAPI host, defaults to Production SearchAPI. This option is intended for dev/test purposes.
:param output: SearchAPI output format, can be used to alter what metadata is returned and the structure of the results.
:param cmr_token: EDL Auth Token for authenticated searches, see https://urs.earthdata.nasa.gov/user_tokens
:param cmr_provider: Custom provider name to constrain CMR results to, for more info on how this is used, see https://cmr.earthdata.nasa.gov/search/site/docs/search/api.html#c-provider
:return: Dictionary of search results
"""
kwargs = locals()
data = dict((k,v) for k,v in kwargs.items() if v is not None and v != '')
return asf_search.search(**data)
``` |
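A minimal usage sketch; the product ID below is a placeholder, not a real granule name.
```python
# Sketch: querying the ASF SearchAPI for specific products by name.
# 'PLACEHOLDER_PRODUCT_ID-SLC' stands in for a real product name.
from asf_search.search.product_search import product_search

results = product_search(
    product_list=['PLACEHOLDER_PRODUCT_ID-SLC'],
    output='geojson',
)
print(results)
```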
{
"source": "jhkennedy/earthdata",
"score": 3
} |
#### File: earthdata/tests/test_auth.py
```python
import unittest
from unittest import mock
import responses
from earthdata.auth import Auth
class TestCreateAuth(unittest.TestCase):
@mock.patch("builtins.input")
@mock.patch("getpass.getpass")
def test_create_auth_wrong_credentials(self, user_input, user_password) -> bool:
user_input.return_value = "user"
user_password.return_value = "<PASSWORD>"
auth = Auth().login()
self.assertEqual(auth.authenticated, False)
@responses.activate
@mock.patch("builtins.input")
@mock.patch("getpass.getpass")
def test_auth_gets_proper_credentials(self, user_input, user_password) -> bool:
user_input.return_value = "valid-user"
user_password.return_value = "<PASSWORD>"
json_response = [
{"access_token": "ED<PASSWORD>", "expiration_date": "12/15/2021"},
{"access_token": "EDL-token-2", "expiration_date": "12/16/2021"},
]
responses.add(
responses.GET,
"https://urs.earthdata.nasa.gov/api/users/tokens",
json=json_response,
status=200,
)
# Test
auth = Auth().login()
self.assertEqual(auth.authenticated, True)
self.assertTrue(auth.token in json_response)
@responses.activate
@mock.patch("builtins.input")
@mock.patch("getpass.getpass")
def test_auth_can_create_proper_credentials(
self, user_input, user_password
) -> bool:
user_input.return_value = "valid-user"
user_password.return_value = "<PASSWORD>"
json_response = {"access_token": "ED<PASSWORD>", "expiration_date": "12/15/2021"}
responses.add(
responses.GET,
"https://urs.earthdata.nasa.gov/api/users/tokens",
json=[],
status=200,
)
responses.add(
responses.POST,
"https://urs.earthdata.nasa.gov/api/users/token",
json=json_response,
status=200,
)
# Test
auth = Auth().login()
self.assertEqual(auth.authenticated, True)
self.assertEqual(auth.token, json_response)
``` |
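A minimal usage sketch inferred from the tests above (the `earthdata` library itself is not shown here); `Auth().login()` prompts interactively for Earthdata Login credentials.
```python
# Sketch: logging in with the earthdata Auth class exercised by the tests above.
# This prompts for an Earthdata Login username and password interactively.
from earthdata.auth import Auth

auth = Auth().login()
if auth.authenticated:
    print('Earthdata Login token acquired:', auth.token)
else:
    print('Authentication failed')
```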
{
"source": "jhkennedy/hyp3",
"score": 2
} |
#### File: dynamo/dynamo/subscriptions.py
```python
from datetime import datetime, timedelta, timezone
from os import environ
from uuid import uuid4
import dateutil.parser
from boto3.dynamodb.conditions import Attr, Key
from dynamo.util import DYNAMODB_RESOURCE, format_time
def validate_subscription(subscription):
start = dateutil.parser.parse(subscription['search_parameters']['start'])
end = dateutil.parser.parse(subscription['search_parameters']['end'])
if end <= start:
raise ValueError(f'End date: {format_time(end)} must be after start date: {format_time(start)}')
end_threshold_in_days = 180
max_end = datetime.now(tz=timezone.utc) + timedelta(days=end_threshold_in_days)
if max_end <= end:
raise ValueError(f'End date: {format_time(end)} must be within {end_threshold_in_days} days: '
f'{format_time(max_end)}')
job_type = subscription.get('job_specification', {}).get('job_type')
processing_level = subscription.get('search_parameters', {}).get('processingLevel', 'SLC')
if job_type == 'INSAR_GAMMA' and processing_level != 'SLC':
raise ValueError('processingLevel must be SLC when job_type is INSAR_GAMMA')
def put_subscription(user, subscription, validate_only=False):
validate_subscription(subscription)
defaults = {
'subscription_id': str(uuid4()),
'creation_date': format_time(datetime.now(tz=timezone.utc)),
'user_id': user,
'enabled': True,
}
for key, value in defaults.items():
if key not in subscription:
subscription[key] = value
search_defaults = {
'platform': 'S1',
'processingLevel': 'SLC',
'beamMode': ['IW'],
'polarization': ['VV', 'VV+VH', 'HH', 'HH+HV'],
}
for key, value in search_defaults.items():
if key not in subscription['search_parameters']:
subscription['search_parameters'][key] = value
table = DYNAMODB_RESOURCE.Table(environ['SUBSCRIPTIONS_TABLE_NAME'])
if not validate_only:
table.put_item(Item=subscription)
return subscription
def get_subscriptions_for_user(user, name=None, job_type=None, enabled=None):
table = DYNAMODB_RESOURCE.Table(environ['SUBSCRIPTIONS_TABLE_NAME'])
filter_expression = Attr('subscription_id').exists()
if name is not None:
filter_expression &= Attr('job_specification.name').eq(name)
if job_type is not None:
filter_expression &= Attr('job_specification.job_type').eq(job_type)
if enabled is not None:
filter_expression &= Attr('enabled').eq(enabled)
params = {
'IndexName': 'user_id',
'KeyConditionExpression': Key('user_id').eq(user),
'FilterExpression': filter_expression,
'ScanIndexForward': False
}
response = table.query(**params)
subscriptions = response['Items']
while 'LastEvaluatedKey' in response:
params['ExclusiveStartKey'] = response['LastEvaluatedKey']
response = table.query(**params)
subscriptions.extend(response['Items'])
return subscriptions
def get_subscription_by_id(subscription_id):
table = DYNAMODB_RESOURCE.Table(environ['SUBSCRIPTIONS_TABLE_NAME'])
response = table.get_item(Key={'subscription_id': subscription_id})
return response.get('Item')
def get_all_subscriptions():
table = DYNAMODB_RESOURCE.Table(environ['SUBSCRIPTIONS_TABLE_NAME'])
response = table.scan()
subscriptions = response['Items']
while 'LastEvaluatedKey' in response:
response = table.scan(ExclusiveStartKey=response['LastEvaluatedKey'])
subscriptions.extend(response['Items'])
return subscriptions
```
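`validate_subscription` is pure Python and can be exercised without a DynamoDB table; a minimal sketch, assuming an AWS region is configured because importing `dynamo` creates a boto3 resource at module load. The dates and job specification are made-up values.
```python
# Sketch: validating a subscription payload without writing to DynamoDB.
# AWS_DEFAULT_REGION is set only because importing dynamo creates a boto3
# resource at import time; the subscription values below are placeholders.
import os

os.environ.setdefault('AWS_DEFAULT_REGION', 'us-west-2')

from dynamo.subscriptions import validate_subscription

subscription = {
    'search_parameters': {
        'start': '2022-01-01T00:00:00+00:00',
        'end': '2022-02-01T00:00:00+00:00',
        'processingLevel': 'SLC',
    },
    'job_specification': {
        'job_type': 'INSAR_GAMMA',
        'name': 'my-subscription',
    },
}
validate_subscription(subscription)  # raises ValueError for bad dates or job type
```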
#### File: tests/test_api/test_get_user.py
```python
from datetime import datetime, timezone
from http import HTTPStatus
from test_api.conftest import USER_URI, login, make_db_record
from dynamo.util import format_time
def test_get_user(client, tables, monkeypatch):
monkeypatch.setenv('MONTHLY_JOB_QUOTA_PER_USER', '25')
request_time = format_time(datetime.now(timezone.utc))
user = 'user_with_jobs'
items = [
make_db_record('job1', user_id=user, request_time=request_time, status_code='PENDING', name='job1'),
make_db_record('job2', user_id=user, request_time=request_time, status_code='RUNNING', name='job1'),
make_db_record('job3', user_id=user, request_time=request_time, status_code='FAILED', name='job2'),
make_db_record('job4', user_id=user, request_time=request_time, status_code='SUCCEEDED', name=None)
]
for item in items:
tables.jobs_table.put_item(Item=item)
login(client, 'user_with_jobs')
response = client.get(USER_URI)
assert response.status_code == HTTPStatus.OK
assert response.json == {
'user_id': 'user_with_jobs',
'quota': {
'max_jobs_per_month': 25,
'remaining': 21,
},
'job_names': [
'job1',
'job2',
],
}
def test_user_at_quota(client, tables, monkeypatch):
monkeypatch.setenv('MONTHLY_JOB_QUOTA_PER_USER', '25')
request_time = format_time(datetime.now(timezone.utc))
items = [make_db_record(f'job{ii}', request_time=request_time) for ii in range(0, 24)]
for item in items:
tables.jobs_table.put_item(Item=item)
login(client)
response = client.get(USER_URI)
assert response.status_code == HTTPStatus.OK
assert response.json['quota']['remaining'] == 1
tables.jobs_table.put_item(Item=make_db_record('anotherJob', request_time=request_time))
response = client.get(USER_URI)
assert response.status_code == HTTPStatus.OK
assert response.json['quota']['remaining'] == 0
tables.jobs_table.put_item(Item=make_db_record('yetAnotherJob', request_time=request_time))
response = client.get(USER_URI)
assert response.status_code == HTTPStatus.OK
assert response.json['quota']['remaining'] == 0
def test_get_user_custom_quota(client, tables):
username = 'user_with_custom_quota'
login(client, username)
tables.users_table.put_item(Item={'user_id': username, 'max_jobs_per_month': 50})
response = client.get(USER_URI)
assert response.status_code == HTTPStatus.OK
assert response.json == {
'user_id': username,
'quota': {
'max_jobs_per_month': 50,
'remaining': 50,
},
'job_names': [],
}
```
#### File: tests/test_api/test_list_jobs.py
```python
from http import HTTPStatus
from unittest import mock
from urllib.parse import unquote
from conftest import list_have_same_elements
from test_api.conftest import JOBS_URI, login, make_db_record
def test_list_jobs(client, tables):
files = [
{
'filename': 'foo.txt',
'size': 123,
'url': 'https://mybucket.s3.us-west-2.amazonaws.com/prefix/foo.txt',
's3': {
'bucket': 'mybucket',
'key': 'prefix/foo.txt'
},
},
{
'filename': 'bar.png',
'size': 0,
'url': 'https://mybucket.s3.us-west-2.amazonaws.com/prefix/bar.png',
's3': {
'bucket': 'mybucket',
'key': 'prefix/bar.png'
},
},
]
browse_images = ['https://mybucket.s3.us-west-2.amazonaws.com/prefix/browse/foo.png']
thumbnail_images = []
items = [
make_db_record('0ddaeb98-7636-494d-9496-03ea4a7df266', user_id='user_with_jobs'),
make_db_record(job_id='27836b79-e5b2-4d8f-932f-659724ea02c3', user_id='user_with_jobs', files=files,
browse_images=browse_images, thumbnail_images=thumbnail_images)
]
for item in items:
tables.jobs_table.put_item(Item=item)
login(client, 'user_with_jobs')
response = client.get(JOBS_URI)
assert response.status_code == HTTPStatus.OK
assert 'jobs' in response.json
assert list_have_same_elements(response.json['jobs'], items)
login(client, 'user_without_jobs')
response = client.get(JOBS_URI)
assert response.status_code == HTTPStatus.OK
assert response.json == {'jobs': []}
def test_list_jobs_by_name(client, tables):
items = [
make_db_record('0ddaeb98-7636-494d-9496-03ea4a7df266', name='item1'),
make_db_record('27836b79-e5b2-4d8f-932f-659724ea02c3', name='item2')
]
for item in items:
tables.jobs_table.put_item(Item=item)
login(client)
response = client.get(JOBS_URI, query_string={'name': 'item1'})
assert response.status_code == HTTPStatus.OK
assert response.json == {'jobs': [items[0]]}
response = client.get(JOBS_URI, query_string={'name': 'item does not exist'})
assert response.status_code == HTTPStatus.OK
assert response.json == {'jobs': []}
def test_list_jobs_by_type(client, tables):
items = [
make_db_record('0ddaeb98-7636-494d-9496-03ea4a7df266', job_type='RTC_GAMMA'),
make_db_record('874f7533-807d-4b20-afe1-27b5b6fc9d6c', job_type='RTC_GAMMA'),
make_db_record('27836b79-e5b2-4d8f-932f-659724ea02c3', job_type='INSAR_GAMMA'),
]
for item in items:
tables.jobs_table.put_item(Item=item)
login(client)
response = client.get(JOBS_URI, query_string={'job_type': 'RTC_GAMMA'})
assert response.status_code == HTTPStatus.OK
assert list_have_same_elements(response.json['jobs'], items[:2])
response = client.get(JOBS_URI, query_string={'job_type': 'INSAR_GAMMA'})
assert response.status_code == HTTPStatus.OK
assert response.json == {'jobs': [items[2]]}
response = client.get(JOBS_URI, query_string={'job_type': 'FOOBAR'})
assert response.status_code == HTTPStatus.BAD_REQUEST
def test_list_jobs_by_status(client, tables):
items = [
make_db_record('0ddaeb98-7636-494d-9496-03ea4a7df266', status_code='RUNNING'),
make_db_record('27836b79-e5b2-4d8f-932f-659724ea02c3', status_code='SUCCEEDED')
]
for item in items:
tables.jobs_table.put_item(Item=item)
login(client)
response = client.get(JOBS_URI, query_string={'status_code': 'RUNNING'})
assert response.status_code == HTTPStatus.OK
assert response.json == {'jobs': [items[0]]}
response = client.get(JOBS_URI, query_string={'status_code': 'FAILED'})
assert response.status_code == HTTPStatus.OK
assert response.json == {'jobs': []}
def test_list_jobs_bad_status(client):
login(client)
response = client.get(JOBS_URI, query_string={'status_code': 'BAD'})
assert response.status_code == HTTPStatus.BAD_REQUEST
response = client.get(JOBS_URI, query_string={'status_code': ''})
assert response.status_code == HTTPStatus.BAD_REQUEST
def test_list_jobs_date_start_and_end(client, tables):
items = [
make_db_record('874f7533-807d-4b20-afe1-27b5b6fc9d6c', request_time='2019-12-31T10:00:09+00:00'),
make_db_record('0ddaeb98-7636-494d-9496-03ea4a7df266', request_time='2019-12-31T10:00:10+00:00'),
make_db_record('27836b79-e5b2-4d8f-932f-659724ea02c3', request_time='2019-12-31T10:00:11+00:00'),
]
for item in items:
tables.jobs_table.put_item(Item=item)
dates = [
'2019-12-31T10:00:10Z',
'2019-12-31T10:00:10.000Z',
'2019-12-31T10:00:10.999999Z',
'2019-12-31T11:00:10+01:00',
'2019-12-31T09:00:10-01:00',
'2020-01-01T09:00:10+23:00',
'2019-12-30T11:00:10-23:00',
'2019-12-31T11:30:10+01:30',
]
login(client)
for date in dates:
response = client.get(JOBS_URI, query_string={'start': date})
assert response.status_code == HTTPStatus.OK
assert list_have_same_elements(response.json['jobs'], items[1:])
response = client.get(JOBS_URI, query_string={'end': date})
assert response.status_code == HTTPStatus.OK
assert list_have_same_elements(response.json['jobs'], items[:2])
response = client.get(JOBS_URI, query_string={'start': date, 'end': date})
assert response.status_code == HTTPStatus.OK
assert response.json == {'jobs': [items[1]]}
def test_list_jobs_subscription_id(client, tables):
items = [
make_db_record('874f7533-807d-4b20-afe1-27b5b6fc9d6c', subscription_id='9b02d992-e21e-4e2f-9310-5dd469be2708'),
make_db_record('0ddaeb98-7636-494d-9496-03ea4a7df266', subscription_id='9b02d992-e21e-4e2f-9310-5dd469be2708'),
make_db_record('27836b79-e5b2-4d8f-932f-659724ea02c3', subscription_id='9b02d992-e21e-4e2f-9310-5dd469be2700'),
make_db_record('4277c126-6927-4859-b62f-eb3d2a8815c2'),
]
for item in items:
tables.jobs_table.put_item(Item=item)
login(client)
response = client.get(JOBS_URI, query_string={'subscription_id': '9b02d992-e21e-4e2f-9310-5dd469be2708'})
assert response.status_code == HTTPStatus.OK
assert list_have_same_elements(response.json['jobs'], items[:2])
response = client.get(JOBS_URI, query_string={'subscription_id': '9b02d992-e21e-4e2f-9310-5dd469be2700'})
assert response.status_code == HTTPStatus.OK
assert response.json['jobs'] == [items[2]]
response = client.get(JOBS_URI, query_string={'subscription_id': '55c6981e-c33a-4086-b20b-661ee6f592a9'})
assert response.status_code == HTTPStatus.OK
assert response.json['jobs'] == []
response = client.get(JOBS_URI)
assert response.status_code == HTTPStatus.OK
assert list_have_same_elements(response.json['jobs'], items)
response = client.get(JOBS_URI, query_string={'subscription_id': 'not a uuid'})
assert response.status_code == HTTPStatus.BAD_REQUEST
def test_bad_date_formats(client):
datetime_parameters = ['start', 'end']
bad_dates = [
'',
'foo',
'2020-13-01T00:00:00Z',
'01-JAN-2020',
'01/01/2020',
        '2020-01-01',
'2020-01-01T00:00Z',
'2020-01-01T00:00:00',
'2020-01-01T00:00:00+01',
'2020-01-01T00:00:00+0100',
'2020-01-01T00:00:00-24:00',
]
login(client)
for datetime_parameter in datetime_parameters:
for bad_date in bad_dates:
response = client.get(JOBS_URI, query_string={datetime_parameter: bad_date})
assert response.status_code == HTTPStatus.BAD_REQUEST
def test_list_paging(client):
login(client)
mock_response = ([], {'foo': 1, 'bar': 2})
with mock.patch('dynamo.jobs.query_jobs', return_value=mock_response):
response = client.get(JOBS_URI)
        assert unquote(response.json['next']) == 'http://localhost/jobs?start_token=<KEY>'
```
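`test_list_paging` only checks that the paging key returned by `query_jobs` ends up URL-encoded in a `next` link under `start_token`. One plausible way to build such a link; the base64-of-JSON encoding below is an assumption for illustration, not necessarily what the API does:
```python
import base64
import json
from urllib.parse import urlencode

def build_next_url(base_url, last_evaluated_key):
    # Serialize the DynamoDB paging key and tuck it into the query string.
    token = base64.urlsafe_b64encode(json.dumps(last_evaluated_key).encode()).decode()
    return f'{base_url}?{urlencode({"start_token": token})}'

print(build_next_url('http://localhost/jobs', {'foo': 1, 'bar': 2}))
```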
#### File: tests/test_api/test_validation.py
```python
from pytest import raises
from shapely.geometry import Polygon
from hyp3_api import validation
def rectangle(north, south, east, west):
return Polygon([[west, north], [east, north], [east, south], [west, south]])
def test_has_sufficient_coverage():
# Wyoming
poly = rectangle(45, 41, -104, -111)
assert validation.has_sufficient_coverage(poly)
assert validation.has_sufficient_coverage(poly, legacy=True)
# completely covered Aleutian Islands over antimeridian; should pass with fixed antimeridian
poly = rectangle(51.7, 51.3, 179.7, -179.3)
assert validation.has_sufficient_coverage(poly)
assert validation.has_sufficient_coverage(poly, legacy=True)
# not enough coverage of Aleutian Islands over antimeridian
# NOTE: Passes today but should FAIL legacy with antimeridian feature fix
poly = rectangle(51.7, 41.3, 179.7, -179.3)
assert validation.has_sufficient_coverage(poly)
assert validation.has_sufficient_coverage(poly, legacy=True)
# completely encloses tile over Ascension Island in the Atlantic
poly = rectangle(-6, -9, -15, -14)
assert validation.has_sufficient_coverage(poly)
assert validation.has_sufficient_coverage(poly, legacy=True)
    # minimum sufficient coverage off the coast of Eureka, CA
poly = rectangle(40.1, 40, -126, -125.000138)
assert validation.has_sufficient_coverage(poly)
assert not validation.has_sufficient_coverage(poly, legacy=True)
# almost minimum sufficient coverage off the coast of Eureka, CA
poly = rectangle(40.1, 40, -126, -125.000139)
assert not validation.has_sufficient_coverage(poly)
assert not validation.has_sufficient_coverage(poly, legacy=True)
# minimum sufficient legacy coverage off the coast of Eureka, CA
poly = rectangle(40.1, 40, -126, -124.845)
assert validation.has_sufficient_coverage(poly)
assert validation.has_sufficient_coverage(poly, legacy=True)
# almost minimum sufficient legacy coverage off the coast of Eureka, CA
poly = rectangle(40.1, 40, -126, -124.849)
assert validation.has_sufficient_coverage(poly)
assert not validation.has_sufficient_coverage(poly, legacy=True)
# polygon in missing tile over Gulf of California
poly = rectangle(26.9, 26.1, -110.1, -110.9)
assert not validation.has_sufficient_coverage(poly)
assert not validation.has_sufficient_coverage(poly, legacy=True)
# southern Greenland
poly = rectangle(62, 61, -44, -45)
assert validation.has_sufficient_coverage(poly)
assert not validation.has_sufficient_coverage(poly, legacy=True)
# Antarctica
poly = rectangle(-62, -90, 180, -180)
assert validation.has_sufficient_coverage(poly)
assert not validation.has_sufficient_coverage(poly, legacy=True)
# ocean over antimeridian; no dem coverage and also not enough legacy wraparound land intersection
# should FAIL with legacy with antimeridian feature fix
poly = rectangle(-40, -41, 179.7, -179.3)
assert validation.has_sufficient_coverage(poly)
assert not validation.has_sufficient_coverage(poly, legacy=True)
def test_has_sufficient_coverage_legacy_buffer():
needs_buffer = rectangle(40.1, 40, -126, -124.845)
assert validation.has_sufficient_coverage(needs_buffer, legacy=True)
assert validation.has_sufficient_coverage(needs_buffer, buffer=0.16, legacy=True)
assert not validation.has_sufficient_coverage(needs_buffer, buffer=0.14, legacy=True)
def test_has_sufficient_coverage_legacy_threshold():
poly = rectangle(40.1, 40, -126, -124.845)
assert validation.has_sufficient_coverage(poly, legacy=True)
assert validation.has_sufficient_coverage(poly, threshold=0.19, legacy=True)
assert not validation.has_sufficient_coverage(poly, threshold=0.21, legacy=True)
def test_format_points():
point_string = '-31.43 25.04 -29.76 25.54 -29.56 24.66 -31.23 24.15 -31.43 25.04'
assert validation.format_points(point_string) == [
[25.04, -31.43],
[25.54, -29.76],
[24.66, -29.56],
[24.15, -31.23],
[25.04, -31.43]
]
def test_check_dem_coverage():
both = {'name': 'both', 'polygon': rectangle(45, 41, -104, -111)}
copernicus_only = {'name': 'copernicus_only', 'polygon': rectangle(-62, -90, 180, -180)}
neither = {'name': 'neither', 'polygon': rectangle(-20, -30, 70, 100)}
job = {'job_type': 'RTC_GAMMA', 'job_parameters': {'dem_name': 'copernicus'}}
validation.check_dem_coverage(job, [])
validation.check_dem_coverage(job, [both])
validation.check_dem_coverage(job, [copernicus_only])
with raises(validation.GranuleValidationError) as e:
validation.check_dem_coverage(job, [neither])
assert 'neither' in str(e)
with raises(validation.GranuleValidationError) as e:
validation.check_dem_coverage(job, [copernicus_only, neither])
assert 'neither' in str(e)
assert 'copernicus_only' not in str(e)
job = {'job_type': 'RTC_GAMMA', 'job_parameters': {'dem_name': 'legacy'}}
validation.check_dem_coverage(job, [both])
with raises(validation.GranuleValidationError):
validation.check_dem_coverage(job, [copernicus_only])
with raises(validation.GranuleValidationError):
validation.check_dem_coverage(job, [neither])
job = {'job_type': 'RTC_GAMMA', 'job_parameters': {}}
validation.check_dem_coverage(job, [both])
validation.check_dem_coverage(job, [copernicus_only])
with raises(validation.GranuleValidationError):
validation.check_dem_coverage(job, [neither])
job = {'job_type': 'INSAR_GAMMA', 'job_parameters': {}}
validation.check_dem_coverage(job, [both])
validation.check_dem_coverage(job, [copernicus_only])
with raises(validation.GranuleValidationError):
validation.check_dem_coverage(job, [neither])
def test_check_granules_exist():
granule_metadata = [
{
'name': 'scene1',
},
{
'name': 'scene2',
},
]
validation.check_granules_exist([], granule_metadata)
validation.check_granules_exist(['scene1'], granule_metadata)
validation.check_granules_exist(['scene1', 'scene2'], granule_metadata)
with raises(validation.GranuleValidationError) as e:
validation.check_granules_exist(['scene1', 'scene2', 'scene3', 'scene4', 'S2_foo', 'LC08_bar'],
granule_metadata)
assert 'S2_foo' not in str(e)
assert 'LC08_bar' not in str(e)
assert 'scene1' not in str(e)
assert 'scene2' not in str(e)
assert 'scene3' in str(e)
assert 'scene4' in str(e)
def test_is_third_party_granule():
assert validation.is_third_party_granule('S2A_MSIL1C_20200627T150921_N0209_R025_T22WEB_20200627T170912')
assert validation.is_third_party_granule('S2B_22WEB_20200612_0_L1C')
assert validation.is_third_party_granule('LC08_L1TP_009011_20200820_20200905_02_T1')
assert validation.is_third_party_granule('LO08_L1GT_043001_20201106_20201110_02_T2')
assert validation.is_third_party_granule('LT08_L1GT_041001_20200125_20200925_02_T2')
assert not validation.is_third_party_granule('S1A_IW_SLC__1SSH_20150608T205059_20150608T205126_006287_0083E8_C4F0')
assert not validation.is_third_party_granule('foo')
def test_get_cmr_metadata():
granules = ['S1A_IW_SLC__1SSV_20150621T120220_20150621T120232_006471_008934_72D8', 'not a real granule']
assert validation.get_cmr_metadata(granules) == [
{
'name': 'S1A_IW_SLC__1SSV_20150621T120220_20150621T120232_006471_008934_72D8',
'polygon': Polygon([
[-91.927132, 13.705972],
[-91.773392, 14.452647],
[-94.065727, 14.888498],
[-94.211563, 14.143632],
[-91.927132, 13.705972],
]),
},
]
def test_validate_jobs():
unknown_granule = 'unknown'
granule_with_dem_coverage = 'S1A_IW_SLC__1SSV_20150621T120220_20150621T120232_006471_008934_72D8'
granule_without_legacy_dem_coverage = 'S1A_IW_SLC__1SSH_20190326T081759_20190326T081831_026506_02F822_52F9'
granule_without_dem_coverage = 'S1A_IW_GRDH_1SDV_20201219T222530_20201219T222555_035761_042F72_8378'
jobs = [
{
'job_type': 'RTC_GAMMA',
'job_parameters': {
'granules': [granule_without_legacy_dem_coverage],
}
},
{
'job_type': 'RTC_GAMMA',
'job_parameters': {
'granules': [granule_without_legacy_dem_coverage],
'dem_name': 'copernicus',
}
},
{
'job_type': 'RTC_GAMMA',
'job_parameters': {
'granules': [granule_with_dem_coverage],
'dem_name': 'legacy',
}
},
{
'job_type': 'INSAR_GAMMA',
'job_parameters': {
'granules': [granule_with_dem_coverage, granule_without_legacy_dem_coverage],
}
},
{
'job_type': 'AUTORIFT',
'job_parameters': {
'granules': [granule_with_dem_coverage, granule_without_dem_coverage],
}
},
]
validation.validate_jobs(jobs)
jobs = [
{
'job_type': 'RTC_GAMMA',
'job_parameters': {
'granules': [unknown_granule],
}
}
]
with raises(validation.GranuleValidationError):
validation.validate_jobs(jobs)
jobs = [
{
'job_type': 'RTC_GAMMA',
'job_parameters': {
'granules': [granule_without_dem_coverage],
}
}
]
with raises(validation.GranuleValidationError):
validation.validate_jobs(jobs)
```
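As a small usage note, the pairs returned by `format_points` can be handed straight to shapely, the same way the `rectangle` helper above builds its test polygons:
```python
from shapely.geometry import Polygon
from hyp3_api import validation

point_string = '-31.43 25.04 -29.76 25.54 -29.56 24.66 -31.23 24.15 -31.43 25.04'
granule_polygon = Polygon(validation.format_points(point_string))
print(granule_polygon.bounds)  # shapely's (minx, miny, maxx, maxy)
```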
#### File: tests/test_dynamo/test_subscriptions.py
```python
from datetime import datetime, timedelta, timezone
import pytest
import dynamo
def test_put_subscription(tables):
subscription = {
'job_definition': {
'job_type': 'RTC_GAMMA',
'name': 'sub1',
},
'search_parameters': {
'start': '2020-01-01T00:00:00+00:00',
'end': '2020-01-02T00:00:00+00:00',
}
}
response = dynamo.subscriptions.put_subscription('user1', subscription)
assert [response] == tables.subscriptions_table.scan()['Items']
assert 'subscription_id' in response
assert isinstance(response['subscription_id'], str)
del response['subscription_id']
assert 'creation_date' in response
assert isinstance(response['creation_date'], str)
del response['creation_date']
assert response == {
'user_id': 'user1',
'job_definition': {
'job_type': 'RTC_GAMMA',
'name': 'sub1',
},
'search_parameters': {
'start': '2020-01-01T00:00:00+00:00',
'end': '2020-01-02T00:00:00+00:00',
'beamMode': ['IW'],
'platform': 'S1',
'polarization': ['VV', 'VV+VH', 'HH', 'HH+HV'],
'processingLevel': 'SLC',
},
'enabled': True,
}
def test_validate_subscription():
subscription = {
'search_parameters': {
'start': '2021-01-01T00:00:00+00:00',
}
}
good_end_dates = [
'2021-01-01T00:00:00-00:01',
'2021-01-01T00:01:00+00:00',
dynamo.util.format_time(datetime.now(tz=timezone.utc) + timedelta(days=180)),
]
bad_end_dates = [
'2021-01-01T00:00:00+00:00',
'2021-01-01T00:00:00+00:01',
dynamo.util.format_time(datetime.now(tz=timezone.utc) + timedelta(days=180, seconds=1)),
]
for bad_end_date in bad_end_dates:
subscription['search_parameters']['end'] = bad_end_date
with pytest.raises(ValueError):
dynamo.subscriptions.validate_subscription(subscription)
for good_end_date in good_end_dates:
subscription['search_parameters']['end'] = good_end_date
dynamo.subscriptions.validate_subscription(subscription)
subscription = {
'job_specification': {
'job_type': 'INSAR_GAMMA',
'name': 'foo',
},
'search_parameters': {
'start': '2021-01-01T00:00:00+00:00',
'end': '2021-01-02T00:00:00+00:00',
},
}
dynamo.subscriptions.validate_subscription(subscription)
subscription['search_parameters']['processingLevel'] = 'SLC'
dynamo.subscriptions.validate_subscription(subscription)
subscription['search_parameters']['processingLevel'] = 'GRD_HD'
with pytest.raises(ValueError):
dynamo.subscriptions.validate_subscription(subscription)
def test_get_subscriptions_for_user(tables):
table_items = [
{
'subscription_id': 'sub1',
'creation_date': '2020-01-04T00:00:00+00:00',
'job_type': 'INSAR_GAMMA',
'user_id': 'user1'
},
{
'subscription_id': 'sub2',
'creation_date': '2020-01-03T00:00:00+00:00',
'job_type': 'INSAR_GAMMA',
'user_id': 'user1'
},
{
'subscription_id': 'sub3',
'creation_date': '2020-01-02T00:00:00+00:00',
'job_type': 'INSAR_GAMMA',
'user_id': 'user1'
},
{
'subscription_id': 'sub4',
'creation_date': '2020-01-01T00:00:00+00:00',
'job_type': 'INSAR_GAMMA',
'user_id': 'user2'
},
]
for item in table_items:
tables.subscriptions_table.put_item(Item=item)
response = dynamo.subscriptions.get_subscriptions_for_user('user1')
assert response == table_items[:3]
response = dynamo.subscriptions.get_subscriptions_for_user('user2')
assert response == [table_items[3]]
def test_get_subscription_by_id(tables):
assert dynamo.subscriptions.get_subscription_by_id('sub1') is None
table_items = [
{
'subscription_id': 'sub1',
'creation_date': '2020-01-01T00:00:00+00:00',
'job_type': 'INSAR_GAMMA',
'user_id': 'user1'
},
{
'subscription_id': 'sub2',
'creation_date': '2020-01-01T00:00:00+00:00',
'job_type': 'INSAR_GAMMA',
'user_id': 'user2'
},
]
for item in table_items:
tables.subscriptions_table.put_item(Item=item)
assert dynamo.subscriptions.get_subscription_by_id('sub1') == table_items[0]
assert dynamo.subscriptions.get_subscription_by_id('sub2') == table_items[1]
def test_get_all_subscriptions(tables):
table_items = [
{
'subscription_id': 'sub1',
'creation_date': '2020-01-01T00:00:00+00:00',
'job_type': 'INSAR_GAMMA',
'user_id': 'user1'
},
{
'subscription_id': 'sub2',
'creation_date': '2020-01-01T00:00:00+00:00',
'job_type': 'INSAR_GAMMA',
'user_id': 'user1'
},
{
'subscription_id': 'sub3',
'creation_date': '2020-01-01T00:00:00+00:00',
'job_type': 'INSAR_GAMMA',
'user_id': 'user1'
},
{
'subscription_id': 'sub4',
'creation_date': '2020-01-01T00:00:00+00:00',
'job_type': 'INSAR_GAMMA',
'user_id': 'user2'
},
]
for item in table_items:
tables.subscriptions_table.put_item(Item=item)
response = dynamo.subscriptions.get_all_subscriptions()
assert response == table_items
def test_put_subscription_update(tables):
subscription = {
'user_id': 'user1',
'subscription_id': 'sub1',
'creation_date': '2020-01-01T00:00:00+00:00',
'job_definition': {
'job_type': 'RTC_GAMMA',
'name': 'sub1',
},
'search_parameters': {
'start': '2020-01-01T00:00:00+00:00',
'end': '2020-01-02T00:00:00+00:00',
'beamMode': ['IW'],
'platform': 'S1',
'polarization': ['VV', 'VV+VH', 'HH', 'HH+HV'],
'processingLevel': 'SLC',
}
}
tables.subscriptions_table.put_item(Item=subscription)
updated_subscription = {
'creation_date': '2020-01-01T00:00:00+00:00',
'user_id': 'user1',
'subscription_id': 'sub1',
'job_definition': {
'job_type': 'RTC_GAMMA',
'name': 'sub1',
},
'search_parameters': {
'start': '2020-01-01T00:00:00+00:00',
'end': '2020-06-02T00:00:00+00:00',
'beamMode': ['IW'],
'platform': 'S1',
'polarization': ['VV', 'VV+VH', 'HH', 'HH+HV'],
'processingLevel': 'SLC',
}
}
dynamo.subscriptions.put_subscription('user1', updated_subscription)
response = tables.subscriptions_table.scan()
assert response['Items'] == [updated_subscription]
def test_put_subscription_validate_only(tables):
bad_subscription = {
'job_definition': {
'job_type': 'RTC_GAMMA',
'name': 'sub1',
},
'search_parameters': {
'start': '2020-01-01T00:00:00+00:00',
'end': '2020-01-01T00:00:00+00:00',
}
}
with pytest.raises(ValueError):
dynamo.subscriptions.put_subscription('user1', bad_subscription, validate_only=True)
with pytest.raises(ValueError):
dynamo.subscriptions.put_subscription('user1', bad_subscription, validate_only=False)
good_subscription = {
'job_definition': {
'job_type': 'RTC_GAMMA',
'name': 'sub1',
},
'search_parameters': {
'start': '2020-01-01T00:00:00+00:00',
'end': '2020-01-02T00:00:00+00:00',
}
}
dynamo.subscriptions.put_subscription('user1', good_subscription, validate_only=True)
assert tables.subscriptions_table.scan()['Items'] == []
dynamo.subscriptions.put_subscription('user1', good_subscription, validate_only=False)
assert tables.subscriptions_table.scan()['Items'] == [good_subscription]
def test_query_subscriptions_by_name(tables):
table_items = [
{
'job_specification': {'name': 'name1'},
'creation_date': '2020-01-04T00:00:00+00:00',
'subscription_id': 'sub1',
'job_type': 'INSAR_GAMMA',
'user_id': 'user1'
},
{
'job_specification': {'name': 'name1'},
'creation_date': '2020-01-03T00:00:00+00:00',
'subscription_id': 'sub2',
'job_type': 'INSAR_GAMMA',
'user_id': 'user1'
},
{
'job_specification': {'name': 'name2'},
'creation_date': '2020-01-02T00:00:00+00:00',
'subscription_id': 'sub3',
'job_type': 'INSAR_GAMMA',
'user_id': 'user1'
},
{
'job_specification': {'name': 'name1'},
'creation_date': '2020-01-01T00:00:00+00:00',
'subscription_id': 'sub4',
'job_type': 'INSAR_GAMMA',
'user_id': 'user2'
},
]
for item in table_items:
tables.subscriptions_table.put_item(Item=item)
response = dynamo.subscriptions.get_subscriptions_for_user('user1', name='name1')
assert response == table_items[:2]
def test_query_by_active_status(tables):
table_items = [
{
'enabled': True,
'subscription_id': 'sub1',
'job_type': 'INSAR_GAMMA',
'creation_date': '2020-01-04T00:00:00+00:00',
'user_id': 'user1'
},
{
'enabled': True,
'subscription_id': 'sub2',
'job_type': 'INSAR_GAMMA',
'creation_date': '2020-01-03T00:00:00+00:00',
'user_id': 'user1'
},
{
'enabled': False,
'subscription_id': 'sub3',
'job_type': 'INSAR_GAMMA',
'creation_date': '2020-01-02T00:00:00+00:00',
'user_id': 'user1'
}
]
for item in table_items:
tables.subscriptions_table.put_item(Item=item)
response = dynamo.subscriptions.get_subscriptions_for_user('user1', enabled=True)
assert response == table_items[:2]
response = dynamo.subscriptions.get_subscriptions_for_user('user1', enabled=False)
assert response == [table_items[-1]]
def test_query_subscriptions_by_job_type(tables):
table_items = [
{
'job_specification': {'job_type': 'RTC_GAMMA'},
'subscription_id': 'sub1',
'job_type': 'INSAR_GAMMA',
'creation_date': '2020-01-04T00:00:00+00:00',
'user_id': 'user1'
},
{
'job_specification': {'job_type': 'RTC_GAMMA'},
'subscription_id': 'sub2',
'job_type': 'INSAR_GAMMA',
'creation_date': '2020-01-03T00:00:00+00:00',
'user_id': 'user1'
},
{
'job_specification': {'job_type': 'INSAR_GAMMA'},
'subscription_id': 'sub3',
'job_type': 'INSAR_GAMMA',
'creation_date': '2020-01-02T00:00:00+00:00',
'user_id': 'user1'
},
{
'job_specification': {'job_type': 'AUTORIFT'},
'subscription_id': 'sub4',
'job_type': 'INSAR_GAMMA',
'creation_date': '2020-01-01T00:00:00+00:00',
'user_id': 'user2'
},
]
for item in table_items:
tables.subscriptions_table.put_item(Item=item)
response = dynamo.subscriptions.get_subscriptions_for_user('user1', job_type='RTC_GAMMA')
assert response == table_items[:2]
response = dynamo.subscriptions.get_subscriptions_for_user('user1', job_type='INSAR_GAMMA')
assert response == [table_items[2]]
def test_query_subscriptions_sort_order(tables):
table_items = [
{
'subscription_id': 'sub1',
'creation_date': '2020-01-03T00:00:00+00:00',
'job_type': 'INSAR_GAMMA',
'user_id': 'user1'
},
{
'subscription_id': 'sub2',
'creation_date': '2020-01-02T00:00:00+00:00',
'job_type': 'INSAR_GAMMA',
'user_id': 'user1'
},
{
'subscription_id': 'sub3',
'creation_date': '2020-01-01T00:00:00+00:00',
'job_type': 'INSAR_GAMMA',
'user_id': 'user1'
},
]
for item in [table_items[1], table_items[2], table_items[0]]:
tables.subscriptions_table.put_item(Item=item)
response = dynamo.subscriptions.get_subscriptions_for_user('user1')
assert response == table_items
``` |
{
"source": "jhkennedy/itslive",
"score": 3
} |
#### File: itslive/src/grid.py
```python
import math
import numpy as np
class Bounds:
"""
Class to store min/max pair for a variable.
"""
def __init__(self, values=None, min_value=None, max_value=None):
"""
Initialize object based on the list of values or provided min and max
values.
"""
if values is not None:
            # If a list is provided, use it to determine min and max values
self.min = min(values)
self.max = max(values)
else:
self.min = min_value
self.max = max_value
def __str__(self):
"""
String representation of the object.
"""
return f"min={self.min} max={self.max}"
def extend_to_grid(self, resolution):
"""
Round down minimums and up maximums to the nearest resolution in m.
Returns new Bounds object.
"""
return Bounds(
min_value = math.floor(self.min/resolution)*resolution,
max_value = math.ceil(self.max/resolution)*resolution
)
class Grid:
"""
Grid specific helper functions.
"""
# Supported grid sizes
_SUPPORTED_SIZES = [60, 120, 240, 480, 960, 1920, 3840]
# Original band 8 Landsat pixel size.
# This ensures that the offset grid aligns with the pixel edges used in the
# Landsat gridding convention
L8B8_pix = 15
@staticmethod
def bounding_box(x: Bounds, y: Bounds, grid_spacing: int) -> (Bounds, Bounds):
"""
Define bounding box for provided coordinates.
"""
# Check if requested grid size is allowable
if grid_spacing not in Grid._SUPPORTED_SIZES:
raise RuntimeError(f'Grid spacing should be one of {Grid._SUPPORTED_SIZES} to keep grids of different spacing aligned')
if x.min >= x.max:
raise RuntimeError(f'x.min ({x.min}) must be < x.max ({x.max})')
if y.min >= y.max:
raise RuntimeError(f'y.min ({y.min}) must be < y.max ({y.max})')
# Determine grid edges
x0_min = np.ceil(x.min/grid_spacing)*grid_spacing - Grid.L8B8_pix/2
x0_max = np.ceil(x.max/grid_spacing)*grid_spacing - Grid.L8B8_pix/2
y0_min = np.floor(y.min/grid_spacing)*grid_spacing + Grid.L8B8_pix/2
y0_max = np.floor(y.max/grid_spacing)*grid_spacing + Grid.L8B8_pix/2
# print("bounding_box: x_in: ", x)
# print("bounding_box: y_in: ", y)
#
return Bounds(min_value=x0_min, max_value=x0_max), \
Bounds(min_value=y0_min, max_value=y0_max)
@staticmethod
def create(x: Bounds, y: Bounds, grid_spacing):
"""
Create new grid given the spacing and bounding box for the region.
"""
# Calculate grid bounds
x0, y0 = Grid.bounding_box(x, y, grid_spacing)
# print(f"Grid.create: bounding box: x: {x0} y: {y0}" )
# Generate vectors of grid centers
# Cell center offset
cell_center_offset = grid_spacing/2
x_vals = np.arange(x0.min + cell_center_offset, x0.max, grid_spacing)
y_vals = np.arange(y0.max - cell_center_offset, y0.min, -grid_spacing)
return x_vals, y_vals
```
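A short usage sketch (assuming the module above is importable as `grid`): snap a region to a 240 m grid and generate the cell-center coordinate vectors.
```python
from grid import Bounds, Grid

x = Bounds(values=[100.0, 5125.0])
y = Bounds(values=[200.0, 3300.0])

print(x.extend_to_grid(240))          # bounds rounded outward to multiples of 240
x_cells, y_cells = Grid.create(x, y, 240)
print(len(x_cells), len(y_cells))     # number of cell centers in each direction
```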
#### File: src/tools/make_geojson_features_for_imagepairs_v1p1.py
```python
import argparse
from datetime import datetime
import geojson
import h5py
import json
import numpy as np
import os
import psutil
import pyproj
import s3fs
import sys
import time
from tqdm import tqdm
# Date format as it appears in granules filenames of optical format:
# LC08_L1TP_011002_20150821_20170405_01_T1_X_LC08_L1TP_011002_20150720_20170406_01_T1_G0240V01_P038.nc
DATE_FORMAT = "%Y%m%d"
# Date and time format as it appears in granules filenames or radar format:
# S1A_IW_SLC__1SSH_20170221T204710_20170221T204737_015387_0193F6_AB07_X_S1B_IW_SLC__1SSH_20170227T204628_20170227T204655_004491_007D11_6654_G0240V02_P094.nc
DATE_TIME_FORMAT = "%Y%m%dT%H%M%S"
class memtracker:
def __init__(self, include_time=True):
self.output_time = include_time
if include_time:
self.start_time = time.time()
self.process = psutil.Process()
self.startrss = self.process.memory_info().rss
self.startvms = self.process.memory_info().vms
def meminfo(self, message):
if self.output_time:
time_elapsed_seconds = time.time() - self.start_time
print(f'{message:<30}: time: {time_elapsed_seconds:8.2f} seconds mem_percent {self.process.memory_percent()} ' +
f'delrss={self.process.memory_info().rss - self.startrss:16,} ' +
f'delvms={self.process.memory_info().vms - self.startvms:16,}',
flush=True)
else: # don't output time
print(f'{message:<30}: delrss={self.process.memory_info().rss - self.startrss:16,} mem_percent {self.process.memory_percent()} ' +
f'delvms={self.process.memory_info().vms - self.startvms:16,}',
flush=True)
mt = memtracker()
s3 = s3fs.S3FileSystem(anon=True)
s3_out = s3fs.S3FileSystem()
# returns a string (N78W124) for directory name based on granule centerpoint lat,lon
# !!!! Not implemented in geojson code yet !!! <- remove this line when it is.
def finddirstring(lat,lon):
if lat >= 0.0:
NShemi_str = 'N'
else:
NShemi_str = 'S'
if lon >= 0.0:
EWhemi_str = 'E'
else:
EWhemi_str = 'W'
outlat = int(10*np.trunc(np.abs(lat/10.0)))
if outlat == 90: # if you are exactly at a pole, put in lat = 80 bin
outlat = 80
outlon = int(10*np.trunc(np.abs(lon/10.0)))
if outlon >= 180: # if you are at the dateline, back off to the 170 bin
outlon = 170
dirstring = f'{NShemi_str}{outlat:02d}{EWhemi_str}{outlon:03d}'
return(dirstring)
def image_pair_feature_from_path(infilewithpath, five_points_per_side = False):
# from s3.ls:
# infilewithpath = 'https://s3/its-live-data.jpl.nasa.gov/velocity_image_pair/landsat/v00.0/32609/LC08_L1TP_050024_20180713_20180730_01_T1_X_LE07_L1TP_050024_20180315_20180316_01_RT_G0240V01_P072.nc'
# base URL from S3 directory listing has file path for s3fs access, not what you need for http directly,
# so that is hard coded here. (or not used - don't need it in every feature)
# base_URL = 'http://its-live-data.jpl.nasa.gov.s3.amazonaws.com/velocity_image_pair/landsat/v00.0'
filename_tokens = infilewithpath.split('/')
directory = '/'.join(filename_tokens[1:-1])
filename = filename_tokens[-1]
with s3.open(f"s3://{infilewithpath}", "rb") as ins3:
inh5 = h5py.File(ins3, mode = 'r')
# inh5 = h5py.File(s3.open(f"s3://{infilewithpath}", "rb"), mode = 'r')
# inh5 = h5py.File(ingeoimg.in_dir_path + '/' + ingeoimg.filename,mode='r')
# netCDF4/HDF5 cf 1.6 has x and y vectors of array pixel CENTERS
xvals = np.array(inh5.get('x'))
yvals = np.array(inh5.get('y'))
# Extract projection variable
projection_cf = None
if 'mapping' in inh5:
projection_cf = inh5['mapping']
elif 'UTM_Projection' in inh5:
projection_cf = inh5['UTM_Projection']
elif 'Polar_Stereographic' in inh5:
projection_cf = inh5['Polar_Stereographic']
imginfo_attrs = inh5['img_pair_info'].attrs
# turn hdf5 img_pair_info attrs into a python dict to save below
img_pair_info_dict = {}
for k in imginfo_attrs.keys():
if isinstance(imginfo_attrs[k], str):
img_pair_info_dict[k] = imginfo_attrs[k]
elif imginfo_attrs[k].shape == ():
img_pair_info_dict[k] = imginfo_attrs[k].decode('utf-8') # h5py returns byte values, turn into byte characters
else:
img_pair_info_dict[k] = imginfo_attrs[k][0] # h5py returns lists of numbers - all 1 element lists here, so dereference to number
num_pix_x = len(xvals)
num_pix_y = len(yvals)
minval_x, pix_size_x, rot_x_ignored, maxval_y, rot_y_ignored, pix_size_y = [float(x) for x in projection_cf.attrs['GeoTransform'].split()]
epsgcode = int(projection_cf.attrs['spatial_epsg'][0])
inh5.close()
# NOTE: these are pixel center values, need to modify by half the grid size to get bounding box/geotransform values
projection_cf_minx = xvals[0] - pix_size_x/2.0
projection_cf_maxx = xvals[-1] + pix_size_x/2.0
projection_cf_miny = yvals[-1] + pix_size_y/2.0 # pix_size_y is negative!
projection_cf_maxy = yvals[0] - pix_size_y/2.0 # pix_size_y is negative!
transformer = pyproj.Transformer.from_crs(f"EPSG:{epsgcode}", "EPSG:4326", always_xy=True) # ensure lonlat output order
ll_lonlat = np.round(transformer.transform(projection_cf_minx,projection_cf_miny),decimals = 7).tolist()
lr_lonlat = np.round(transformer.transform(projection_cf_maxx,projection_cf_miny),decimals = 7).tolist()
ur_lonlat = np.round(transformer.transform(projection_cf_maxx,projection_cf_maxy),decimals = 7).tolist()
ul_lonlat = np.round(transformer.transform(projection_cf_minx,projection_cf_maxy),decimals = 7).tolist()
# find center lon lat for inclusion in feature (to determine lon lat grid cell directory)
# projection_cf_centerx = (xvals[0] + xvals[-1])/2.0
# projection_cf_centery = (yvals[0] + yvals[-1])/2.0
center_lonlat = np.round(transformer.transform((xvals[0] + xvals[-1])/2.0,(yvals[0] + yvals[-1])/2.0 ),decimals = 7).tolist()
if five_points_per_side:
fracs = [0.25, 0.5, 0.75]
polylist = [] # ring in counterclockwise order
polylist.append(ll_lonlat)
dx = projection_cf_maxx - projection_cf_minx
dy = projection_cf_miny - projection_cf_miny
for frac in fracs:
polylist.append(np.round(transformer.transform(projection_cf_minx + (frac * dx), projection_cf_miny + (frac * dy)),decimals = 7).tolist())
polylist.append(lr_lonlat)
dx = projection_cf_maxx - projection_cf_maxx
dy = projection_cf_maxy - projection_cf_miny
for frac in fracs:
polylist.append(np.round(transformer.transform(projection_cf_maxx + (frac * dx), projection_cf_miny + (frac * dy)),decimals = 7).tolist())
polylist.append(ur_lonlat)
dx = projection_cf_minx - projection_cf_maxx
dy = projection_cf_maxy - projection_cf_maxy
for frac in fracs:
polylist.append(np.round(transformer.transform(projection_cf_maxx + (frac * dx), projection_cf_maxy + (frac * dy)),decimals = 7).tolist())
polylist.append(ul_lonlat)
dx = projection_cf_minx - projection_cf_minx
dy = projection_cf_miny - projection_cf_maxy
for frac in fracs:
polylist.append(np.round(transformer.transform(projection_cf_minx + (frac * dx), projection_cf_maxy + (frac * dy)),decimals = 7).tolist())
polylist.append(ll_lonlat)
else:
# only the corner points
polylist = [ ll_lonlat, lr_lonlat, ur_lonlat, ul_lonlat, ll_lonlat ]
poly = geojson.Polygon([polylist])
middate = img_pair_info_dict['date_center']
deldays = img_pair_info_dict['date_dt']
percent_valid_pix = img_pair_info_dict['roi_valid_percentage']
feat = geojson.Feature( geometry=poly,
properties={
'filename': filename,
'directory': directory,
'middate':middate,
'deldays':deldays,
'percent_valid_pix': percent_valid_pix,
'center_lonlat':center_lonlat,
'data_epsg':epsgcode,
# date_deldays_strrep is a string version of center date and time interval that will sort by date and then by interval length (shorter intervals first) - relies on "string" comparisons by byte
'date_deldays_strrep': img_pair_info_dict['date_center'] + f"{img_pair_info_dict['date_dt']:07.1f}".replace('.',''),
'img_pair_info_dict': img_pair_info_dict,
}
)
return(feat)
def get_tokens_from_filename(filename):
"""
Extract acquisition/processing dates and path/row for two images from the
optical granule filename, or start/end date/time and product unique ID for
radar granule filename.
"""
# Optical format granules have different file naming convention than radar
# format granules
is_optical = True
url_files = os.path.basename(filename).split('_X_')
# Get tokens for the first image name
url_tokens = url_files[0].split('_')
if len(url_tokens) < 9:
# Optical format granule
# Get acquisition/processing dates and path&row for both images
first_date_1 = datetime.strptime(url_tokens[3], DATE_FORMAT)
second_date_1 = datetime.strptime(url_tokens[4], DATE_FORMAT)
key_1 = url_tokens[2]
url_tokens = url_files[1].split('_')
first_date_2 = datetime.strptime(url_tokens[3], DATE_FORMAT)
second_date_2 = datetime.strptime(url_tokens[4], DATE_FORMAT)
key_2 = url_tokens[2]
else:
# Radar format granule
# Get start/end date/time and product unique ID for both images
is_optical = False
url_tokens = url_files[0].split('_')
# Start date and time
first_date_1 = datetime.strptime(url_tokens[-5], DATE_TIME_FORMAT)
# Stop date and time
second_date_1 = datetime.strptime(url_tokens[-4], DATE_TIME_FORMAT)
# Product unique identifier
key_1 = url_tokens[-1]
# Get tokens for the second image name: there are two extra tokens
# at the end of the filename which are specific to ITS_LIVE filename
url_tokens = url_files[1].split('_')
# Start date and time
first_date_2 = datetime.strptime(url_tokens[-7], DATE_TIME_FORMAT)
# Stop date and time
second_date_2 = datetime.strptime(url_tokens[-6], DATE_TIME_FORMAT)
# Product unique identifier
key_2 = url_tokens[-3]
return is_optical, first_date_1, second_date_1, key_1, first_date_2, second_date_2, key_2
def skip_duplicate_granules(found_urls: list, skipped_granules_filename: str):
"""
Skip duplicate granules (the ones that have earlier processing date(s)).
"""
# Need to remove duplicate granules for the middle date: some granules
# have newer processing date, keep those.
keep_urls = {}
skipped_double_granules = []
for each_url in tqdm(found_urls, ascii=True, desc='Skipping duplicate granules...'):
# Extract acquisition and processing dates for optical granule,
# start/end date/time and product unique ID for radar granule
is_optical, url_acq_1, url_proc_1, key_1, url_acq_2, url_proc_2, key_2 = \
get_tokens_from_filename(each_url)
if is_optical:
# Acquisition time and path/row of images should be identical for
# duplicate granules
granule_id = '_'.join([
url_acq_1.strftime(DATE_FORMAT),
key_1,
url_acq_2.strftime(DATE_FORMAT),
key_2
])
else:
# Start/stop date/time of both images
granule_id = '_'.join([
url_acq_1.strftime(DATE_TIME_FORMAT),
url_proc_1.strftime(DATE_TIME_FORMAT),
url_acq_2.strftime(DATE_TIME_FORMAT),
url_proc_2.strftime(DATE_TIME_FORMAT),
])
# There is a granule for the mid_date already:
# * For radar granule: issue a warning reporting product unique ID for duplicate granules
# * For optical granule: check which processing time is newer,
# keep the one with newer processing date
if granule_id in keep_urls:
if not is_optical:
# Radar format granule, just issue a warning
all_urls = ' '.join(keep_urls[granule_id])
print(f"WARNING: multiple granules are detected for {each_url}: {all_urls}")
keep_urls[granule_id].append(each_url)
continue
# Process optical granule
# Flag if newly found URL should be kept
keep_found_url = False
for found_url in keep_urls[granule_id]:
# Check already found URLs for processing time
_, _, found_proc_1, _, _, found_proc_2, _ = \
get_tokens_from_filename(found_url)
# If both granules have identical processing time,
# keep them both - granules might be in different projections,
# any other than target projection will be handled later
if url_proc_1 == found_proc_1 and \
url_proc_2 == found_proc_2:
keep_urls[granule_id].append(each_url)
keep_found_url = True
break
# There are no "identical" (same acquision and processing times)
# granules to "each_url", check if new granule has newer processing dates
if not keep_found_url:
# Check if any of the found URLs have older processing time
# than newly found URL
remove_urls = []
for found_url in keep_urls[granule_id]:
# Check already found URL for processing time
_, _, found_proc_1, _, _, found_proc_2, _ = \
get_tokens_from_filename(found_url)
if url_proc_1 >= found_proc_1 and \
url_proc_2 >= found_proc_2:
# The granule will need to be replaced with a newer
# processed one
remove_urls.append(found_url)
elif url_proc_1 > found_proc_1:
# There are few cases when proc_1 is newer in
# each_url and proc_2 is newer in found_url, then
# keep the granule with newer proc_1
remove_urls.append(found_url)
if len(remove_urls):
# Some of the URLs need to be removed due to newer
# processed granule
print(f"Skipping {remove_urls} in favor of new {each_url}")
skipped_double_granules.extend(remove_urls)
# Remove older processed granules based on dates for "each_url"
keep_urls[granule_id][:] = [each for each in keep_urls[granule_id] if each not in remove_urls]
# Add new granule with newer processing date
keep_urls[granule_id].append(each_url)
else:
# New granule has older processing date, don't include
print(f"Skipping new {each_url} in favor of {keep_urls[granule_id]}")
skipped_double_granules.append(each_url)
else:
# This is a granule for new ID, append it to URLs to keep
keep_urls.setdefault(granule_id, []).append(each_url)
granules = []
for each in keep_urls.values():
granules.extend(each)
print(f"Keeping {len(granules)} unique granules")
with s3_out.open(skipped_granules_filename, 'w') as outf:
geojson.dump(skipped_double_granules, outf)
# with open(skipped_granules_filename, 'w') as out_fhandle:
# for each_granule in skipped_double_granules:
# out_fhandle.write(each_granule+os.linesep)
#
print(f"Wrote skipped granules to '{skipped_granules_filename}'")
return granules
parser = argparse.ArgumentParser( \
description="""make_geojson_features_for_imagepairs_v1.py
produces output geojson FeatureCollection for each nn image_pairs from a directory.
v1 adds 5 points per side to geom (so 3 interior and the two corners from v0)
and the ability to stop the chunks (in addition to the start allowed in v0)
so that the code can be run on a range of chunks.
""",
epilog="""
There are two steps to create geojson catalogs:
1. Create a list of granules to be used for catalog generation. The file that stores
URLs of such granules is placed in the destination S3 bucket.
2. Create geojson catalogs using a list of granules as generated by step #1. The list of
granules is read from the file stored in the destination S3 bucket.""",
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-base_dir_s3fs',
action='store',
type=str,
default='its-live-data.jpl.nasa.gov/velocity_image_pair/landsat/v02',
help='S3 path to tile catalog directories (not including the grid code for zone of tile) [%(default)s]')
parser.add_argument('-S3_output_directory',
action='store',
type=str,
default='its-live-data.jpl.nasa.gov/catalog_geojson/landsat/v02',
help='output path for featurecollections [%(default)s]')
parser.add_argument('-chunk_by',
action='store',
type=int,
default=20000,
help='chunk feature collections to have chunk_by features each [%(default)d]')
parser.add_argument('-start_chunks_at_file',
action='store',
type=int,
default=0,
help='start run at chunk that begins at file n [%(default)d]')
parser.add_argument('-stop_chunks_at_file',
action='store',
type=int,
default=0,
help='stop run just before chunk that begins at file n [%(default)d]')
parser.add_argument('-skipped_granules_file',
action='store',
type=str,
default='its-live-data.jpl.nasa.gov/catalog_geojson/landsat/v02/skipped_granules_landsat.json',
help='S3 filename to keep track of skipped duplicate granules [%(default)s]')
parser.add_argument('-catalog_granules_file',
action='store',
type=str,
default='its-live-data.jpl.nasa.gov/catalog_geojson/landsat/v02/used_granules_landsat.json',
help='S3 filename to keep track of granules used for the geojson catalog [%(default)s]')
parser.add_argument('-c', '--create_catalog_list',
action='store_true',
help='build a list of granules for catalog generation [%(default)s], otherwise read the list of granules from catalog_granules_file')
parser.add_argument('-glob',
action='store',
type=str,
default='*/*.nc',
help='glob pattern for the granule search under "base_dir_s3fs" [%(default)s]')
args = parser.parse_args()
inzonesdir = args.base_dir_s3fs
if not args.create_catalog_list:
# read in infiles from S3 file
with s3.open(args.catalog_granules_file, 'r') as ins3file:
infiles = json.load(ins3file)
print(f"Loaded granule list from '{args.catalog_granules_file}'")
else:
# use a glob to list directory
infilelist = s3.glob(f'{inzonesdir}/{args.glob}')
# check for '_P' in filename - filters out temp.nc files that can be left by bad transfers
# also skips txt file placeholders for 000 Pct (all invalid) pairs
infiles = [x for x in infilelist if '_P' in x and 'txt' not in x]
# Skip duplicate granules (the same middle date, but different processing date)
infiles = skip_duplicate_granules(infiles, args.skipped_granules_file)
# Write all unique granules to the file (TODO: for future use to be split
# b/w multiple processes)
with s3_out.open(args.catalog_granules_file, 'w') as outf:
geojson.dump(infiles, outf)
print(f"Wrote catalog granules to '{args.catalog_granules_file}'")
sys.exit(0)
totalnumfiles = len(infiles)
mt.meminfo(f'working on {totalnumfiles} total files from {inzonesdir}')
# set up tuples of start,stop indicies in file list for chunk processing
numout_featuresets = np.round(totalnumfiles/args.chunk_by).astype('int')
if numout_featuresets == 0:
if totalnumfiles == 0:
print(f'No files found for {inzonesdir}, exiting...')
sys.exit(0)
else:
chunks_startstop = [(0, totalnumfiles-1)]
else:
if numout_featuresets==1:
chunks_startstop = [(0, totalnumfiles-1)]
else:
chunks_startstop = [((i)*args.chunk_by,((i+1) * args.chunk_by)-1) for i in range(numout_featuresets - 1)]
chunks_startstop.append(((numout_featuresets - 1) * args.chunk_by, totalnumfiles-1))
# find start, and if specified, stop chunks in this list of tuples
if args.start_chunks_at_file != 0:
new_chunks_startstop = [(x,y) for x,y in chunks_startstop if x >= args.start_chunks_at_file]
if new_chunks_startstop[0][0] == args.start_chunks_at_file:
chunks_startstop = new_chunks_startstop
else:
print(f'-start_chunks_at_file {args.start_chunks_at_file} not in {chunks_startstop}, quitting...')
sys.exit(0)
if args.stop_chunks_at_file != 0:
new_chunks_startstop = [(x,y) for x,y in chunks_startstop if x < args.stop_chunks_at_file]
if new_chunks_startstop[-1][0] + args.chunk_by == args.stop_chunks_at_file:
chunks_startstop = new_chunks_startstop
else:
print(f'-stop_chunks_at_file {args.stop_chunks_at_file} not in {chunks_startstop}, quitting...')
sys.exit(0)
# Use sub-directory name of input path as base for output filename
base_dir = os.path.basename(inzonesdir)
for num,(start,stop) in enumerate(chunks_startstop):
print(f'working on chunk {start},{stop}', flush = True)
featurelist = []
count = start
for infilewithpath in infiles[start:stop+1]:
count += 1
if count%100 == 0:
if count%1000 == 0:
mt.meminfo(f'{count:6d}/{stop:6d}')
else:
print(f'{count:6d}/{stop:6d}', end = '\r', flush = True)
feature = image_pair_feature_from_path(infilewithpath, five_points_per_side = True)
featurelist.append(feature)
featureColl = geojson.FeatureCollection(featurelist)
outfilename = f'imgpr_{base_dir}_{start:06d}_{stop:06d}.json'
with s3_out.open(f'{args.S3_output_directory}/{outfilename}','w') as outf:
geojson.dump(featureColl,outf)
mt.meminfo(f'wrote {args.S3_output_directory}/{outfilename}')
featurelist = None
featureColl = None
``` |
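The epilog above describes a two-step workflow. A hedged sketch of driving it (the script parses its arguments at import time, so it is shown here via subprocess; every path falls back to the defaults baked into the argument parser):
```python
import subprocess

# Step 1: build the list of unique granules (duplicates skipped) and store it in S3.
subprocess.run(['python', 'make_geojson_features_for_imagepairs_v1p1.py', '-c'], check=True)

# Step 2: read that list back and write chunked geojson FeatureCollections.
subprocess.run(['python', 'make_geojson_features_for_imagepairs_v1p1.py',
                '-chunk_by', '20000'], check=True)
```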
{
"source": "jhkennedy/LIVVkit",
"score": 3
} |
#### File: LIVVkit/docs/style.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import pymodule
import pymodule2
import livvmodule
class ClassName(object):
"""
Class descriptions
"""
def __init__(self):
""" Constructor """
self.var = "value"
self.auto = "nalue"
self.bagger = "salue"
self.autocrummify = "dalue"
@functionAnnotation
def foo(self, bar, baz):
"""
A description of foo.
Args:
bar: What is this.
baz: What is this.
Returns:
a combo of bar and baz
"""
# Some extra logic
return bar + baz
``` |
{
"source": "jhkennedy/processflow",
"score": 2
} |
#### File: processflow/jobs/e3smdiags.py
```python
import os
import logging
from bs4 import BeautifulSoup
from jobs.diag import Diag
from lib.util import render, print_line
from lib.jobstatus import JobStatus
class E3SMDiags(Diag):
def __init__(self, *args, **kwargs):
super(E3SMDiags, self).__init__(*args, **kwargs)
self._job_type = 'e3sm_diags'
self._requires = 'climo'
self._data_required = ['climo_regrid']
self._host_path = ''
self._host_url = ''
self._short_comp_name = ''
custom_args = kwargs['config']['diags']['e3sm_diags'].get(
'custom_args')
if custom_args:
self.set_custom_args(custom_args)
if self.comparison == 'obs':
self._short_comp_name = 'obs'
else:
self._short_comp_name = kwargs['config']['simulations'][self.comparison]['short_name']
# -----------------------------------------------
def _dep_filter(self, job):
"""
find the climo job we're waiting for, assuming there's only
one climo job in this case with the same start and end years
"""
if job.job_type != self._requires:
return False
if job.start_year != self.start_year:
return False
if job.end_year != self.end_year:
return False
return True
# -----------------------------------------------
def setup_dependencies(self, jobs, *args, **kwargs):
"""
Adds climo jobs from this or the comparison case to the list of dependent jobs
Parameters
----------
jobs (list): a list of the rest of the run managers jobs
optional: comparison_jobs (list): if this job is being compared to
another case, the climos for that other case have to be done already too
"""
if self.comparison != 'obs':
other_jobs = kwargs['comparison_jobs']
try:
self_climo, = filter(lambda job: self._dep_filter(job), jobs)
except ValueError:
msg = 'Unable to find climo for {}, is this case set to generate climos?'.format(
self.msg_prefix())
raise Exception(msg)
try:
comparison_climo, = filter(
lambda job: self._dep_filter(job), other_jobs)
except ValueError:
                msg = 'Unable to find climo for {}, is that case set to generate climos?'.format(
self.comparison)
raise Exception(msg)
self.depends_on.extend((self_climo.id, comparison_climo.id))
else:
try:
self_climo, = filter(lambda job: self._dep_filter(job), jobs)
except ValueError:
msg = 'Unable to find climo for {}, is this case set to generate climos?'.format(
self.msg_prefix())
raise Exception(msg)
self.depends_on.append(self_climo.id)
# -----------------------------------------------
def execute(self, config, event_list, slurm_args=None, dryrun=False):
"""
Generates and submits a run script for e3sm_diags
Parameters
----------
config (dict): the globus processflow config object
dryrun (bool): a flag to denote that all the data should be set,
and the scripts generated, but not actually submitted
"""
self._dryrun = dryrun
        # set up the job's output path, creating it if it doesn't already exist
self._output_path = os.path.join(
config['global']['project_path'],
'output', 'diags', self.short_name, 'e3sm_diags',
'{start:04d}_{end:04d}_vs_{comp}'.format(
start=self.start_year,
end=self.end_year,
comp=self._short_comp_name))
if not os.path.exists(self._output_path):
os.makedirs(self._output_path)
# render the parameter file from the template
param_template_out = os.path.join(
config['global']['run_scripts_path'],
'e3sm_diags_{start:04d}_{end:04d}_{case}_vs_{comp}_params.py'.format(
start=self.start_year,
end=self.end_year,
case=self.short_name,
comp=self._short_comp_name))
variables = dict()
input_path, _ = os.path.split(self._input_file_paths[0])
variables['short_test_name'] = self.short_name
variables['test_data_path'] = input_path
variables['test_name'] = self.case
variables['backend'] = config['diags']['e3sm_diags']['backend']
variables['results_dir'] = self._output_path
if self.comparison == 'obs':
template_input_path = os.path.join(
config['global']['resource_path'],
'e3sm_diags_template_vs_obs.py')
variables['reference_data_path'] = config['diags']['e3sm_diags']['reference_data_path']
else:
template_input_path = os.path.join(
config['global']['resource_path'],
'e3sm_diags_template_vs_model.py')
input_path, _ = os.path.split(self._input_file_paths[0])
variables['reference_data_path'] = input_path
variables['ref_name'] = self.comparison
variables['reference_name'] = config['simulations'][self.comparison]['short_name']
render(
variables=variables,
input_path=template_input_path,
output_path=param_template_out)
cmd = ['acme_diags_driver.py', '-p', param_template_out]
self._has_been_executed = True
return self._submit_cmd_to_manager(config, cmd)
# -----------------------------------------------
def postvalidate(self, config, *args, **kwargs):
"""
Check that all the links created by the diagnostic are correct
Parameters
----------
config (dict): the global config object
Returns
-------
True if all links are found
False otherwise
"""
return self._check_links(config)
# -----------------------------------------------
def handle_completion(self, event_list, config, *args):
"""
Perform setup for webhosting
Parameters
----------
event_list (EventList): an event list to push user notifications into
config (dict): the global config object
"""
if self.status != JobStatus.COMPLETED:
msg = '{prefix}: Job failed'.format(
prefix=self.msg_prefix())
print_line(msg, event_list)
logging.info(msg)
else:
msg = '{prefix}: Job complete'.format(
prefix=self.msg_prefix())
print_line(msg, event_list)
logging.info(msg)
# if hosting is turned off, simply return
if not config['global']['host']:
return
# else setup the web hosting
hostname = config['img_hosting']['img_host_server']
self.host_path = os.path.join(
config['img_hosting']['host_directory'],
self.short_name,
'e3sm_diags',
'{start:04d}_{end:04d}_vs_{comp}'.format(
start=self.start_year,
end=self.end_year,
comp=self._short_comp_name))
self.setup_hosting(config, self._output_path,
self.host_path, event_list)
self._host_url = 'https://{server}/{prefix}/{case}/e3sm_diags/{start:04d}_{end:04d}_vs_{comp}/viewer/index.html'.format(
server=config['img_hosting']['img_host_server'],
prefix=config['img_hosting']['url_prefix'],
case=self.short_name,
start=self.start_year,
end=self.end_year,
comp=self._short_comp_name)
# -----------------------------------------------
def _check_links(self, config):
self._output_path = os.path.join(
config['global']['project_path'],
'output', 'diags', self.short_name, 'e3sm_diags',
'{start:04d}_{end:04d}_vs_{comp}'.format(
start=self.start_year,
end=self.end_year,
comp=self._short_comp_name))
viewer_path = os.path.join(self._output_path, 'viewer', 'index.html')
if not os.path.exists(viewer_path):
msg = '{}: could not find page index at {}'.format(
self.msg_prefix(), viewer_path)
logging.error(msg)
return False
viewer_head = os.path.join(self._output_path, 'viewer')
if not os.path.exists(viewer_head):
msg = '{}: could not find output viewer at {}'.format(
self.msg_prefix(), viewer_head)
logging.error(msg)
return False
missing_links = list()
with open(viewer_path, 'r') as viewer_pointer:
viewer_page = BeautifulSoup(viewer_pointer, 'lxml')
viewer_links = viewer_page.findAll('a')
for link in viewer_links:
link_path = os.path.join(viewer_head, link.attrs['href'])
if not os.path.exists(link_path):
missing_links.append(link_path)
continue
if link_path[-4:] == 'html':
link_tail, _ = os.path.split(link_path)
with open(link_path, 'r') as link_pointer:
link_page = BeautifulSoup(link_pointer, 'lxml')
link_links = link_page.findAll('a')
for sublink in link_links:
try:
sublink_preview = sublink.attrs['data-preview']
except:
continue
else:
sublink_path = os.path.join(
link_tail, sublink_preview)
if not os.path.exists(sublink_path):
missing_links.append(sublink_path)
if missing_links:
msg = '{prefix}: missing the following links'.format(
prefix=self.msg_prefix())
logging.error(msg)
logging.error(missing_links)
return False
else:
msg = '{prefix}: all links found'.format(
prefix=self.msg_prefix())
logging.info(msg)
return True
# -----------------------------------------------
```
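`setup_dependencies` above leans on the `job, = filter(...)` idiom to demand exactly one matching climo job and to turn a miss into a readable error. A stripped-down illustration with toy objects (not processflow classes):
```python
class FakeJob:
    def __init__(self, job_type, start_year, end_year):
        self.job_type, self.start_year, self.end_year = job_type, start_year, end_year

jobs = [FakeJob('climo', 1, 2), FakeJob('timeseries', 1, 2)]

# Exactly one match unpacks cleanly.
climo, = filter(lambda job: job.job_type == 'climo' and job.start_year == 1 and job.end_year == 2, jobs)

# Zero matches raise ValueError, which setup_dependencies converts into its own message.
try:
    missing, = filter(lambda job: job.job_type == 'climo' and job.start_year == 3, jobs)
except ValueError as err:
    print(err)
```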
#### File: processflow/jobs/regrid.py
```python
import json
import os
import re
import logging
from jobs.job import Job
from lib.jobstatus import JobStatus
from lib.util import print_line, get_data_output_files
from lib.filemanager import FileStatus
class Regrid(Job):
"""
    Perform regridding with no climatology or timeseries generation on atm, lnd, and ocn data
"""
def __init__(self, *args, **kwargs):
"""
Initialize a regrid job
Parameters:
data_type (str): what type of data to run on (atm, lnd)
"""
super(Regrid, self).__init__(*args, **kwargs)
self._job_type = 'regrid'
self._data_required = [self._run_type]
custom_args = kwargs['config']['post-processing']['regrid'].get(
'custom_args')
if custom_args:
self.set_custom_args(custom_args)
# -----------------------------------------------
def setup_dependencies(self, *args, **kwargs):
"""
        Regrid doesn't require any other jobs
"""
return True
# -----------------------------------------------
def execute(self, config, event_list, dryrun=False):
"""
Generates and submits a run script for ncremap to regrid model output
Parameters
----------
config (dict): the globus processflow config object
event_list (EventList): an event list to push user notifications into
dryrun (bool): a flag to denote that all the data should be set,
and the scripts generated, but not actually submitted
"""
self._dryrun = dryrun
# setup output paths
self._output_path = os.path.join(
config['global']['project_path'], 'output', 'pp',
config['post-processing']['regrid'][self.run_type]['destination_grid_name'],
self._short_name, self.job_type, self.run_type)
if not os.path.exists(self._output_path):
os.makedirs(self._output_path)
input_path, _ = os.path.split(self._input_file_paths[0])
# setups the ncremap run command
cmd = ['ncks --version\n',
'ncremap --version\n',
'ncremap -I {}'.format(input_path)]
if self.run_type == 'lnd':
cmd.extend([
'-P', 'sgs',
'-a', 'conserve',
'-s', config['post-processing']['regrid']['lnd']['source_grid_path'],
'-g', config['post-processing']['regrid']['lnd']['destination_grid_path']
])
elif self.run_type == 'ocn':
cmd.extend([
'-P', 'mpas',
'-m', config['post-processing']['regrid'][self.run_type]['regrid_map_path']
])
elif self.run_type == 'atm':
cmd.extend([
'-m', config['post-processing']['regrid'][self.run_type]['regrid_map_path']
])
else:
msg = 'Unsupported regrid type'
logging.error(msg)
            self.status = JobStatus.FAILED
return 0
# input_path, _ = os.path.split(self._input_file_paths[0])
# clean up the input directory to make sure there's only nc files
for item in os.listdir(input_path):
if not item[-3:] == '.nc':
os.remove(os.path.join(input_path, item))
cmd.extend([
'-O', self._output_path,
])
self._has_been_executed = True
return self._submit_cmd_to_manager(config, cmd)
# -----------------------------------------------
def postvalidate(self, config, *args, **kwargs):
self._output_path = os.path.join(
config['global']['project_path'],
'output',
'pp',
config['post-processing']['regrid'][self.run_type]['destination_grid_name'],
self._short_name,
self.job_type,
self.run_type)
if not self._output_path or not os.path.exists(self._output_path):
return False
contents = os.listdir(self._output_path)
contents.sort()
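        # postvalidation expects one regridded output file for every month of
        # every year in the run; any missing month fails the check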
for year in range(self.start_year, self.end_year + 1):
for month in range(1, 13):
pattern = r'%04d-%02d' % (year, month)
found = False
for item in contents:
if re.search(pattern, item):
found = True
break
if not found:
if not self._has_been_executed:
msg = '{prefix}: Unable to find regridded output file for {yr}-{mon}'.format(
prefix=self.msg_prefix(),
yr=year,
mon=month)
logging.error(msg)
return False
return True
# -----------------------------------------------
def handle_completion(self, filemanager, event_list, config):
if self.status != JobStatus.COMPLETED:
msg = '{prefix}: Job failed, not running completion handler'.format(
prefix=self.msg_prefix())
print_line(msg, event_list)
logging.info(msg)
return
else:
msg = '{prefix}: Job complete'.format(
prefix=self.msg_prefix())
print_line(msg, event_list)
logging.info(msg)
new_files = list()
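        # register the regridded output with the filemanager so downstream jobs
        # can consume it as a new 'regrid' data type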
regrid_files = get_data_output_files(self._output_path, self.case, self.start_year, self.end_year)
for regrid_file in regrid_files:
new_files.append({
'name': regrid_file,
'local_path': os.path.join(self._output_path, regrid_file),
'case': self.case,
'year': self.start_year,
'local_status': FileStatus.PRESENT.value
})
filemanager.add_files(
data_type='regrid',
file_list=new_files)
if not config['data_types'].get('regrid'):
config['data_types']['regrid'] = {'monthly': True}
# -----------------------------------------------
@property
def run_type(self):
return self._run_type
# -----------------------------------------------
@property
def data_type(self):
return self._data_type
# -----------------------------------------------
```
#### File: processflow/tests/test_e3sm.py
```python
import unittest
import os, sys
import inspect
from configobj import ConfigObj
from threading import Event, Lock
if sys.path[0] != '.':
sys.path.insert(0, os.path.abspath('.'))
from lib.events import EventList
from lib.jobstatus import JobStatus
from lib.util import print_message
from lib.filemanager import FileManager
from lib.runmanager import RunManager
from lib.initialize import initialize
from jobs.e3smdiags import E3SMDiags
from jobs.diag import Diag
class TestE3SM(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestE3SM, self).__init__(*args, **kwargs)
self.event_list = EventList()
self.config_path = 'tests/test_configs/e3sm_diags_complete.cfg'
self.config = ConfigObj(self.config_path)
def test_e3sm_diags_skip_complete(self):
"""
        Checks that the e3sm_diags job successfully marks a job that has
        already been run as complete and won't be executed
"""
print '\n'; print_message('---- Starting Test: {} ----'.format(inspect.stack()[0][3]), 'ok')
e3sm_diags = E3SMDiags(
short_name='piControl_testing',
case='20180129.DECKv1b_piControl.ne30_oEC.edison',
start=1,
end=2,
comparison='obs',
config=self.config)
self.assertTrue(isinstance(e3sm_diags, Diag))
self.assertTrue(
e3sm_diags.postvalidate(
self.config,
self.event_list))
def test_e3sm_diags_prevalidate(self):
"""
test that the e3sm_diags prevalidate and prerun setup works correctly
"""
print '\n'; print_message('---- Starting Test: {} ----'.format(inspect.stack()[0][3]), 'ok')
_args = ['-c', self.config_path, '-r', 'resources/']
config, filemanager, runmanager = initialize(
argv=_args,
version="2.0.0",
branch="testing",
event_list=self.event_list,
kill_event=Event(),
testing=True)
self.assertFalse(config is None)
self.assertFalse(filemanager is None)
self.assertFalse(runmanager is None)
config['global']['dryrun'] = True
runmanager.check_data_ready()
runmanager.start_ready_jobs()
for case in runmanager.cases:
for job in case['jobs']:
if job.job_type == 'e3sm_diags':
job.setup_data(
config=config,
filemanager=filemanager,
case='20180129.DECKv1b_piControl.ne30_oEC.edison')
job.execute(
config=config,
dryrun=True)
self.assertEquals(
job.status,
JobStatus.COMPLETED)
if __name__ == '__main__':
unittest.main()
```
#### File: processflow/tests/test_finalize.py
```python
import os
import sys
import unittest
import threading
import inspect
from configobj import ConfigObj
if sys.path[0] != '.':
sys.path.insert(0, os.path.abspath('.'))
from lib.initialize import initialize
from lib.finalize import finalize
from lib.events import EventList
from lib.util import print_message
class TestFinalize(unittest.TestCase):
"""
    A test class for processflow's finalization methods
These tests should be run from the main project directory
"""
def __init__(self, *args, **kwargs):
super(TestFinalize, self).__init__(*args, **kwargs)
self.event_list = EventList()
self.config_path = 'tests/test_configs/e3sm_diags_complete.cfg'
self.config = ConfigObj(self.config_path)
self.event_list = EventList()
def test_finilize_complete(self):
print '\n'; print_message('---- Starting Test: {} ----'.format(inspect.stack()[0][3]), 'ok')
pargv = ['-c', 'tests/test_configs/test_amwg_complete.cfg']
config, filemanager, runmanager = initialize(
argv=pargv,
version="2.0.0",
branch="master",
event_list=EventList(),
kill_event=threading.Event(),
testing=True)
finalize(
config=config,
event_list=self.event_list,
status=1,
runmanager=runmanager)
if __name__ == '__main__':
unittest.main()
```
#### File: processflow/tests/test_util.py
```python
import os, sys
import shutil
import unittest
import threading
import inspect
if sys.path[0] != '.':
sys.path.insert(0, os.path.abspath('.'))
from lib.util import transfer_directory
from lib.util import path_exists
from lib.util import cmd_exists
from lib.util import render
from lib.util import print_message
from lib.events import EventList
class TestFileManager(unittest.TestCase):
def test_path_exists_invalid(self):
print '\n'; print_message('---- Starting Test: {} ----'.format(inspect.stack()[0][3]), 'ok')
project_path = os.path.abspath(os.path.join('..', 'testproject'))
config = {
'global': {
'project_path': project_path,
'source_path': '/global/homes/r/renata/ACME_simulations/20170926.FCT2.A_WCYCL1850S.ne30_oECv3.anvil',
'simulation_start_year': 51,
'simulation_end_year': 60,
'set_frequency': [5, 10],
'experiment': '20170926.FCT2.A_WCYCL1850S.ne30_oECv3.anvil',
'email': '<EMAIL>',
'short_term_archive': 0,
'img_host_server': 'https://acme-viewer.llnl.gov',
'host_directory': '/var/www/acme/acme-diags/',
'file_types': ['atm', 'ice', 'ocn', 'rest', 'streams.ocean', 'streams.cice'],
'resource_dir': '',
'input_path': os.path.join(project_path, 'input'),
'output_path': os.path.join(project_path, 'output'),
'log_path': os.path.join(project_path, 'output', 'workflow.log'),
'run_scripts_path': os.path.join(project_path, 'output', 'run_scripts'),
'tmp_path': os.path.join(project_path, 'output', 'tmp'),
'error_path': os.path.join(project_path, 'output', 'workflow.error'),
'ui': True,
'no_host': False,
'no_monitor': False,
'print_file_list': True,
'set_jobs': {
'ncclimo': ['5', '10'],
'timeseries': '10',
'amwg': ['5', '10'],
'aprime_diags': '',
'e3sm_diags': '10'}},
'e3sm_diags': {
'host_directory': 'e3sm-diags',
'backend': 'mpl',
# 'seasons': ['DJF', 'MAM', 'JJA', 'SON', 'ANN'],
'reference_data_path': '/p/cscratch/acme/data/obs_for_acme_diags',
'sets': ['3', '4', '5', '7', '13']},
'transfer': {
'destination_endpoint': 'a871c6de-2acd-11e7-bc7c-22000b9a448b',
'source_endpoint': '9d6d994a-6d04-11e5-ba46-22000b92c6ec'},
'amwg': {
'diag_home': '/p/cscratch/acme/amwg/amwg_diag',
'host_directory': 'amwg'},
'ncclimo': {
'regrid_map_path': '/NOT/A/FILE.nc',
'var_list': ['FSNTOA', 'FLUT', 'FSNT', 'FLNT', 'FSNS', 'FLNS', 'SHFLX', 'QFLX', 'PRECC', 'PRECL', 'PRECSC', 'PRECSL', 'TS', 'TREFHT']},
'aprime_diags': {
'host_directory': 'aprime-diags',
'aprime_code_path': '/p/cscratch/acme/data/a-prime',
'test_atm_res': 'ne30',
'test_mpas_mesh_name': 'oEC60to30v3'}}
status = path_exists(config)
self.assertFalse(status)
def test_cmd_exists_valid(self):
print '\n'; print_message('---- Starting Test: {} ----'.format(inspect.stack()[0][3]), 'ok')
self.assertTrue(cmd_exists('ncclimo'))
def test_cmd_exists_invalid(self):
print '\n'; print_message('---- Starting Test: {} ----'.format(inspect.stack()[0][3]), 'ok')
self.assertFalse(cmd_exists('not_a_cmd'))
def test_render(self):
print '\n'; print_message('---- Starting Test: {} ----'.format(inspect.stack()[0][3]), 'ok')
render_target = os.path.join(
os.getcwd(), 'tests', 'test_render_target.txt')
render_reference = os.path.join(
os.getcwd(), 'tests', 'test_render_reference.txt')
render_output = os.path.join(os.getcwd(), 'tests', 'render_output.txt')
reference = ''
with open(render_reference, 'r') as fp:
for line in fp.readlines():
reference += line
vals = {
'a': 'a',
'b': 'b',
'd': 'd',
'e': 'e'
}
self.assertTrue(render(vals, render_target, render_output))
with open(render_output, 'r') as fp:
self.assertTrue(fp.readline() in reference)
def test_render_bad_input_file(self):
print '\n'; print_message('---- Starting Test: {} ----'.format(inspect.stack()[0][3]), 'ok')
render_target = os.path.join(os.getcwd(), 'tests', 'DOES_NOT_EXIST')
render_output = os.path.join(os.getcwd(), 'tests', 'render_output.txt')
self.assertFalse(render({}, render_target, render_output))
def test_render_bad_outout_file(self):
print '\n'; print_message('---- Starting Test: {} ----'.format(inspect.stack()[0][3]), 'ok')
render_target = os.path.join(
os.getcwd(), 'tests', 'test_render_target.txt')
render_output = '/usr/local/NO_PERMISSIONS'
self.assertFalse(render({}, render_target, render_output))
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jhKessler/Tagesschau-Analysis",
"score": 3
} |
#### File: Tagesschau-Analysis/scripts/scraper.py
```python
import time
import datetime
import requests
import pandas as pd
from tqdm import tqdm
from bs4 import BeautifulSoup
DATE_FORMAT = "%d/%m/%Y"
ARCHIVE_URL = "https://www.tagesschau.de/multimedia/video/videoarchiv2~_date-"
SECOND_DELAY = 1
# dates
first_description = datetime.date(year=2013, month=4, day=22)
today = datetime.date.today()
current_date = first_description
# list for storing articles
all_articles = []
def update_progress_bar(pbar: tqdm, current_date: datetime.datetime) -> None:
"""Update Progress bar"""
pbar.update(1)
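    # rough ETA: remaining days x (sleep delay + ~0.3 s assumed request overhead), in minutes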
estimated_time = round(((today - current_date).days * (SECOND_DELAY+0.3)) / 60)
pbar.set_description(f"Scraping articles: Date:{current_date.strftime(DATE_FORMAT)}, Articles: {len(all_articles)}, Estimated time left: {round(estimated_time, 2)} min")
# init progressbar
total_days = (today - first_description).days
print(total_days)
progress_bar = tqdm(total=total_days)
update_progress_bar(progress_bar, current_date)
# loop over days
while current_date <= today:
date_string = current_date.strftime("%Y%m%d")
# format url to right form
url_string = f"{ARCHIVE_URL}{date_string}.html"
# request html and scrape it for the datapoints
response = requests.get(url_string).text
soup = BeautifulSoup(response, 'html.parser')
# save articles
article_teasers = list(soup.findAll(class_="teasertext"))
titles = soup.findAll(class_="headline")
dates_and_times = list(soup.findAll(class_="dachzeile"))
    for title, date_and_time, article in zip(titles, dates_and_times, article_teasers):
all_articles.append([current_date.strftime(DATE_FORMAT), article.text, title.text, date_and_time.text])
# go to next day
current_date = current_date + datetime.timedelta(days=1)
# sleep
time.sleep(SECOND_DELAY)
update_progress_bar(progress_bar, current_date)
# format data
article_df = pd.DataFrame(all_articles, columns=["date", "article", "title", "time_text"])
article_df.to_excel("data/raw.xlsx", index=False)
``` |
{
"source": "jhkim06/grpc",
"score": 2
} |
#### File: python/data_transmission/face_recog.py
```python
import face_recognition
import cv2
import camera
import os
import numpy as np
import argparse
from utils.anchor_generator import generate_anchors
from utils.anchor_decode import decode_bbox
from utils.nms import single_class_non_max_suppression
from PIL import Image, ImageDraw, ImageFont
# anchor configuration
feature_map_sizes = [[33, 33], [17, 17], [9, 9], [5, 5], [3, 3]]
anchor_sizes = [[0.04, 0.056], [0.08, 0.11], [0.16, 0.22], [0.32, 0.45], [0.64, 0.72]]
anchor_ratios = [[1, 0.62, 0.42]] * 5
# generate anchors
anchors = generate_anchors(feature_map_sizes, anchor_sizes, anchor_ratios)
# for inference , the batch size is 1, the model output shape is [1, N, 4],
# so we expand dim for anchors to [1, anchor_num, 4]
anchors_exp = np.expand_dims(anchors, axis=0)
def getOutputsNames(net):
# Get the names of all the layers in the network
layersNames = net.getLayerNames()
# Get the names of the output layers, i.e. the layers with unconnected outputs
return [layersNames[i[0] - 1] for i in net.getUnconnectedOutLayers()]
class FaceRecog():
def __init__(self):
# Using OpenCV to capture from device 0. If you have trouble capturing
# from a webcam, comment the line below out and use a video file
# instead.
self.camera = camera.VideoCamera()
self.known_face_encodings = []
self.known_face_names = []
net = cv2.dnn.readNet('models/face_mask_detection.caffemodel', 'models/face_mask_detection.prototxt')
# Load sample pictures and learn how to recognize it.
dirname = 'knowns'
files = os.listdir(dirname)
for filename in files:
name, ext = os.path.splitext(filename)
if ext == '.jpg':
self.known_face_names.append(name)
pathname = os.path.join(dirname, filename)
                img = face_recognition.load_image_file(pathname)  # face recognition without mask
                # TODO: how to handle a face with a mask on ##############
# 1. Get the boundary of face
# 2.
height, width, _ = img.shape
target_shape = (260, 260)
blob = cv2.dnn.blobFromImage(img, scalefactor=1/255.0, size=target_shape)
net.setInput(blob)
y_bboxes_output, y_cls_output = net.forward(getOutputsNames(net))
# remove the batch dimension, for batch is always 1 for inference.
y_bboxes = decode_bbox(anchors_exp, y_bboxes_output)[0]
y_cls = y_cls_output[0]
# To speed up, do single class NMS, not multiple classes NMS.
bbox_max_scores = np.max(y_cls, axis=1)
bbox_max_score_classes = np.argmax(y_cls, axis=1)
conf_thresh=0.5
iou_thresh=0.4
keep_idxs = single_class_non_max_suppression(y_bboxes, bbox_max_scores, conf_thresh=conf_thresh, iou_thresh=iou_thresh)
for idx in keep_idxs:
conf = float(bbox_max_scores[idx])
class_id = bbox_max_score_classes[idx]
bbox = y_bboxes[idx]
# clip the coordinate, avoid the value exceed the image boundary.
xmin = max(0, int(bbox[0] * width))
ymin = max(0, int(bbox[1] * height))
xmax = min(int(bbox[2] * width), width)
ymax = min(int(bbox[3] * height), height)
face_location = [(ymin, xmax, ymax, xmin)]
break # Only one face per input image
#########################################################
#face_encoding = face_recognition.face_encodings(img)[0] # Face encoding of source image
face_encoding = face_recognition.face_encodings(img, face_location)[0] # Face encoding of source image
self.known_face_encodings.append(face_encoding)
# Initialize some variables
self.face_locations = []
self.face_encodings = []
self.face_names = []
self.process_this_frame = True
def __del__(self):
del self.camera
def recog_face(self, face_encoding) :
# See if the face is a match for the known face(s)
distances = face_recognition.face_distance(self.known_face_encodings, face_encoding[0])
min_value = min(distances)
# tolerance: How much distance between faces to consider it a match. Lower is more strict.
# 0.6 is typical best performance.
name = "Unknown"
if min_value < 0.5:
index = np.argmin(distances)
name = self.known_face_names[index]
return name
def get_frame(self):
# Grab a single frame of video
frame = self.camera.get_frame()
# Resize frame of video to 1/4 size for faster face recognition processing
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
# Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
rgb_small_frame = small_frame[:, :, ::-1]
# Only process every other frame of video to save time
if self.process_this_frame:
# Find all the faces and face encodings in the current frame of video
self.face_locations = face_recognition.face_locations(rgb_small_frame)
self.face_encodings = face_recognition.face_encodings(rgb_small_frame, self.face_locations)
self.face_names = []
for face_encoding in self.face_encodings:
# See if the face is a match for the known face(s)
distances = face_recognition.face_distance(self.known_face_encodings, face_encoding)
min_value = min(distances)
# tolerance: How much distance between faces to consider it a match. Lower is more strict.
# 0.6 is typical best performance.
name = "Unknown"
if min_value < 0.4:
index = np.argmin(distances)
name = self.known_face_names[index]
self.face_names.append(name)
self.process_this_frame = not self.process_this_frame
# Display the results
for (top, right, bottom, left), name in zip(self.face_locations, self.face_names):
# Scale back up face locations since the frame we detected in was scaled to 1/4 size
top *= 4
right *= 4
bottom *= 4
left *= 4
# Draw a box around the face
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
# Draw a label with a name below the face
cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
return frame
def get_jpg_bytes(self):
frame = self.get_frame()
# We are using Motion JPEG, but OpenCV defaults to capture raw images,
# so we must encode it into JPEG in order to correctly display the
# video stream.
ret, jpg = cv2.imencode('.jpg', frame)
return jpg.tobytes()
if __name__ == '__main__':
face_recog = FaceRecog()
print(face_recog.known_face_names)
while True:
frame = face_recog.get_frame()
# show the frame
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
# do a bit of cleanup
cv2.destroyAllWindows()
print('finish')
```
#### File: python/data_transmission/server.py
```python
from concurrent import futures
import grpc
import demo_pb2_grpc
import demo_pb2
import cv2
import argparse
from utils.anchor_generator import generate_anchors
from utils.anchor_decode import decode_bbox
from utils.nms import single_class_non_max_suppression
from PIL import Image, ImageDraw, ImageFont
import face_recog as fr
import numpy as np
import face_recognition
from threading import Thread
__all__ = 'DemoServer'
SERVER_ADDRESS = '172.16.58.3:8080'
SERVER_ID = 1
# anchor configuration
feature_map_sizes = [[33, 33], [17, 17], [9, 9], [5, 5], [3, 3]]
anchor_sizes = [[0.04, 0.056], [0.08, 0.11], [0.16, 0.22], [0.32, 0.45], [0.64, 0.72]]
anchor_ratios = [[1, 0.62, 0.42]] * 5
colors = ((0, 255, 0), (255, 0 , 0))
id2class = {0: 'Mask', 1: 'NoMask'}
# generate anchors
anchors = generate_anchors(feature_map_sizes, anchor_sizes, anchor_ratios)
anchors_exp = np.expand_dims(anchors, axis=0)
class DemoServer(demo_pb2_grpc.GRPCDemoServicer):
def __init__(self):
self.face_recog = fr.FaceRecog()
self.proto = 'models/face_mask_detection.prototxt'
self.model= 'models/face_mask_detection.caffemodel'
self.Net = cv2.dnn.readNet(self.model, self.proto)
def getOutputsNames(self, net):
# Get the names of all the layers in the network
layersNames = net.getLayerNames()
# Get the names of the output layers, i.e. the layers with unconnected outputs
return [layersNames[i[0] - 1] for i in net.getUnconnectedOutLayers()]
def inference(self, net, image, conf_thresh=0.5, iou_thresh=0.4, target_shape=(160, 160), draw_result=True, chinese=False, face_recog=None):
height, width, _ = image.shape
blob = cv2.dnn.blobFromImage(image, scalefactor=1/255.0, size=target_shape)
net.setInput(blob)
y_bboxes_output, y_cls_output = net.forward(self.getOutputsNames(net))
# remove the batch dimension, for batch is always 1 for inference.
y_bboxes = decode_bbox(anchors_exp, y_bboxes_output)[0]
y_cls = y_cls_output[0]
# To speed up, do single class NMS, not multiple classes NMS.
bbox_max_scores = np.max(y_cls, axis=1)
bbox_max_score_classes = np.argmax(y_cls, axis=1)
keep_idxs = single_class_non_max_suppression(y_bboxes, bbox_max_scores, conf_thresh=conf_thresh, iou_thresh=iou_thresh)
# keep_idxs = cv2.dnn.NMSBoxes(y_bboxes.tolist(), bbox_max_scores.tolist(), conf_thresh, iou_thresh)[:,0]
tl = round(0.002 * (height + width) * 0.5) + 1 # line thickness
for idx in keep_idxs:
conf = float(bbox_max_scores[idx])
class_id = bbox_max_score_classes[idx]
bbox = y_bboxes[idx]
# clip the coordinate, avoid the value exceed the image boundary.
xmin = max(0, int(bbox[0] * width))
ymin = max(0, int(bbox[1] * height))
xmax = min(int(bbox[2] * width), width)
ymax = min(int(bbox[3] * height), height)
xmin_ = max(0, int(bbox[0] * width * 0.25))
ymin_ = max(0, int(bbox[1] * height * 0.25))
xmax_ = min(int(bbox[2] * width * 0.25), width * 0.25)
ymax_ = min(int(bbox[3] * height * 0.25), height * 0.25)
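            # the same box rescaled to the quarter-size frame below, which is what
            # face_recognition encodes (the frame is shrunk by 0.25 for speed)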
# TODO Need to resize image ?
#face_location = [(ymax_, xmax_, ymin_, xmin_)]
# top right bottom left
face_location = [(ymin_, xmax_, ymax_, xmin_)]
name = "Unknown"
frame = image
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
rgb_small_frame = small_frame[:, :, ::-1] # BGR to RGB
if face_recog != None :
face_encoding = face_recognition.face_encodings(small_frame, face_location)
name = face_recog.recog_face(face_encoding)
#print("name ", name)
# Try face mosaic
# https://jinho-study.tistory.com/231
if name == "Unknown" :
                face_img = frame[ymin:ymax, xmin:xmax]  # crop the detected face region
                face_img = cv2.resize(face_img, dsize=(0, 0), fx=0.04, fy=0.04)  # shrink
                face_img = cv2.resize(face_img, (xmax-xmin, ymax-ymin), interpolation=cv2.INTER_AREA)  # enlarge back
                frame[ymin:ymax, xmin:xmax] = face_img  # mosaic the detected face region
if draw_result:
cv2.rectangle(image, (xmin, ymin), (xmax, ymax), colors[class_id], thickness=tl)
if chinese:
image = puttext_chinese(image, id2chiclass[class_id], (xmin, ymin), colors[class_id]) ###puttext_chinese
else:
cv2.putText(image, "%s: %.2f" % (id2class[class_id], conf), (xmin + 2, ymin - 2),
cv2.FONT_HERSHEY_SIMPLEX, 0.8, colors[class_id])
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(image, name, (xmin + 6, ymax - 6), font, 1.0, (255, 255, 255), 1)
return image
def SimpleMethod(self, request, context):
print("start simple")
# 'repair' image from byte array
        nparr = np.frombuffer(request.src, np.uint8)
img = cv2.imdecode(nparr, cv2.IMREAD_ANYCOLOR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # BGR to RGB
conf_thresh = 0.5
img_modified = self.inference(self.Net, img, target_shape=(260, 260), conf_thresh=conf_thresh, face_recog=self.face_recog)
src=cv2.imencode(".jpg", img_modified)[1].tobytes()
response = demo_pb2.Response(
server_id=SERVER_ID,
response_data="Python server SimpleMethod Ok!!!!",
#tgt=request.src)
tgt=src)
return response
def ClientStreamingMethod(self, request_iterator, context):
print("ClientStreamingMethod called by client...")
for request in request_iterator:
print("recv from client(%d), message= %s" %
(request.client_id, request.request_data))
response = demo_pb2.Response(
server_id=SERVER_ID,
response_data="Python server ClientStreamingMethod ok")
return response
def ServerStreamingMethod(self, request, context):
print("ServerStreamingMethod called by client(%d), message= %s" %
(request.client_id, request.request_data))
        # create a generator
def response_messages():
for i in range(5):
response = demo_pb2.Response(
server_id=SERVER_ID,
response_data=("send by Python server, message=%d" % i))
yield response
return response_messages()
def BidirectionalStreamingMethod(self, request_iterator, context):
tmp_list = list()
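        # collect incoming frames on a background thread while the response
        # stream is produced from the collected data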
def parse_request():
for request in request_iterator:
print("recv from client(%d), message= %s" %
(request.client_id, request.request_data), request.test_str)
tmp_list.append(request.src)
t = Thread(target=parse_request)
t.start()
for i in range(1):
yield demo_pb2.Response(
server_id=SERVER_ID,
response_data=("send by Python server, message= %d" % i),
response_test_str="test",
tgt=tmp_list[i])
t.join()
def main():
server = grpc.server(futures.ThreadPoolExecutor())
demo_pb2_grpc.add_GRPCDemoServicer_to_server(DemoServer(), server)
server.add_insecure_port(SERVER_ADDRESS)
print("------------------start Python GRPC server")
server.start()
server.wait_for_termination()
# If raise Error:
# AttributeError: '_Server' object has no attribute 'wait_for_termination'
# You can use the following code instead:
# import time
# while 1:
# time.sleep(10)
if __name__ == '__main__':
main()
``` |
{
"source": "jhkim6467/input_distill",
"score": 3
} |
#### File: jhkim6467/input_distill/new_distillation.py
```python
import gensim
#import sklearn
import sys
import os
import pickle
from scipy import spatial
if len(sys.argv) > 1:
model_num = sys.argv[1]
else:
print ("Using : python new_distillation.py [model_number]")
sys.exit()
def cos_sim(list_1, list_2):
return 1 - spatial.distance.cosine(list_1, list_2)
# Generate output list
output_file = open("distill_files/output.txt{}".format(str(model_num)), 'r')
output_list = []
all_list = []
for line in output_file:
all_list.append(line)
if line not in output_list:
output_list.append(line)
output_file.close()
print ("output list length :", len(output_list))
# Find index in dictionary
index_dictionary = {}
index = 0
for line in all_list:
if line not in index_dictionary:
index_dictionary[line] = [index]
else:
index_dictionary[line].append(index)
index += 1
index_dictionary = sorted(index_dictionary.items(), key=lambda x: len(x[1]),reverse=True)
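# after sorting, index_dictionary is a list of (output line, [row indices]) pairs,
# most frequent output first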
print ("index list length :", len(index_dictionary))
# Calculating input similarity
similarity_list = []
input_file = open("distill_files/input_emb_{}.pkl".format(model_num), 'rb')
#input_file = open("enc_embedding.pkl", 'rb')
input_embed = pickle.load(input_file)
input_file.close()
sim_file = open("distill_files/similarity_list_{}".format(model_num), 'w')
i = 0
threshold_num = 20
for line in index_dictionary:
length = len(line[1])
    if length <= threshold_num:
        break
ave_sim = 0
count = 0
for r_index in range(len(line[1])):
if r_index != len(line[1]):
for c_index in range(r_index, len(line[1])):
sim = cos_sim(input_embed[line[1][r_index]], input_embed[line[1][c_index]])
ave_sim += sim
count += 1
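    # the pair loop includes each index paired with itself (cosine 1.0 each), so
    # subtract `length` from both the sum and the pair count to average only
    # over distinct pairs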
ave_sim = float((ave_sim-length)/(count-length))
print ("ave_sim : ", ave_sim)
print ("count : ", count-length)
print ("length : ", length)
similarity_list.append(ave_sim)
sim_file.write(str(ave_sim))
sim_file.write("\t")
sim_file.write(str(line[1]))
sim_file.write("\t")
sim_file.write(str(line[0]).strip())
sim_file.write("\n")
sim_file.close()
```
#### File: jhkim6467/input_distill/rein_train.py
```python
from __future__ import division, unicode_literals
import argparse
import time
import math
import random
import torch.nn as nn, torch
import torch.nn.init as init
import torch.optim as optim
import os
import numpy as np
import pickle
from torch.autograd import Variable
from torch.utils.data import DataLoader
from scipy import spatial
from sklearn.feature_extraction.text import CountVectorizer
from onmt.utils.logging import init_logger
from onmt.translate.translator_new import build_translator
import onmt.inputters
import onmt.translate
import onmt
import onmt.model_builder
import onmt.modules
import onmt.opts
class BaseEncoder(nn.Module):
def __init__(self, vocab_size, emb_size, hid_size):
super(BaseEncoder, self).__init__()
self.hid_size = hid_size
self.num_lyr = 1
self.drop = nn.Dropout(0.4)
        self.direction = 2  # bidirectional
self.embed = nn.Embedding(vocab_size, emb_size, padding_idx=0, sparse=False)
self.rnn = nn.GRU(input_size=emb_size, hidden_size=hid_size, num_layers=self.num_lyr, bidirectional=True, batch_first=True, dropout=0.4)
def forward(self, inp):
x = inp.view(-1, inp.size(2))
x_emb = self.embed(x)
x_emb = self.drop(x_emb)
bt_siz, seq_len = x_emb.size(0), x_emb.size(1)
h_0 = Variable(torch.zeros(self.direction * self.num_lyr, bt_siz, self.hid_size))
if use_cuda:
h_0 = h_0.cuda()
x_o, x_hid = self.rnn(x_emb, h_0)
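        # for the bidirectional GRU, merge the forward/backward hidden states of
        # each layer with an element-wise max and keep the top layer's state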
if self.direction == 2:
x_hids = []
for i in range(self.num_lyr):
x_hid_temp, _ = torch.max(x_hid[2 * i:2 * i + 2, :, :], 0, keepdim=True)
x_hids.append(x_hid_temp)
x_hid = torch.cat(x_hids, 0)
x_hid = x_hid[self.num_lyr - 1, :, :].unsqueeze(0)
x_hid = x_hid.transpose(0, 1)
return x_hid
class Policy_Network(nn.Module):
def __init__(self, vocab_size, embedding_dim, hidden_dim, output_size, batch_size, translator):
super(Policy_Network, self).__init__()
self.embedding_dim = embedding_dim
self.hidden_dim = hidden_dim
self.vocab_size = vocab_size
self.embedding = BaseEncoder(vocab_size, 300, 400).cuda()
self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers=1)
self.hidden2out = nn.Linear(hidden_dim, output_size)
self.softmax = nn.LogSoftmax(dim=1)
self.dropout_layer = nn.Dropout(p=0.2)
self.total_reward = 0
self.num_reward = 0
self.total_batch = 0
self.hidden = self.init_hidden(batch_size)
self.translator = translator
def init_hidden(self, batch_size):
return(torch.randn(1, batch_size, self.hidden_dim).cuda(), torch.randn(1, batch_size, self.hidden_dim).cuda())
def baseline_score(self, reward, num_reward):
return reward / num_reward
def calculate_reward(self, list, pred):
max = 0
for line in list:
            cos = cos_sim(self.translator._translate_batch(line), pred)
if cos > max:
max = cos
return max
def forward(self, input):
input = self.embedding(input)
input = input.transpose(0, 1)
outputs, self.hidden = self.lstm(input, self.hidden)
output = self.dropout_layer(self.hidden[0][-1])
output = self.hidden2out(output)
output = self.softmax(output)
pred_index = (output.max(1)[1])
# Calculate reward & base_score
reward_list = []
base_list = []
for batch_index in range(len(pred_index)):
response_file = open("distill_files/Response.txt{}".format(pred_index[batch_index]), 'r')
predict_output = open("distill_files/train_output_{}.txt".format(pred_index[batch_index]), 'r')
            reward = self.calculate_reward(response_file.readlines(),
                                           predict_output.readlines()[self.total_batch])
            self.total_batch += 1
predict_output.close()
response_file.close()
# addtional line
reward = 1 - reward
reward_list.append(reward)
self.total_reward += reward
self.num_reward += 1
base_list.append(self.baseline_score(self.total_reward, self.num_reward))
reward_list = torch.Tensor(reward_list).cuda()
base_list = torch.Tensor(base_list).cuda()
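        # REINFORCE-style weighting: scale each sample's log-probabilities by its
        # advantage (reward minus the running-average baseline) before the NLL loss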
new_output = output.transpose(0, 1) * (reward_list - base_list)
new_output = new_output.transpose(0, 1)
return new_output, output
def cos_sim(list1, list2):
return nn.functional.cosine_similarity(list1, list2)
use_cuda = torch.cuda.is_available()
torch.manual_seed(123)
np.random.seed(123)
if use_cuda:
torch.cuda.manual_seed(123)
##############################################################################################################################################
def RL_train_model(RL_model, optimizer, dataloader, num_epochs, inv_dict):
criterion = nn.NLLLoss()
if use_cuda:
criterion.cuda()
RL_model.train()
for epoch in range(num_epochs):
total_loss = 0
start_index = 0
temp_loss = 10000
for i_batch, sample_batch in enumerate(dataloader):
temp_list = rele_list[start_index : start_index + len(sample_batch)]
start_index += len(sample_batch)
RL_model.zero_grad()
RL_model.hidden = RL_model.init_hidden(len(sample_batch))
pred_rele, pred_base = RL_model(sample_batch)
pred_base = pred_base.max(1)[1]
loss = criterion(pred_rele, pred_base)
if temp_loss > loss:
loss.backward()
temp_loss = loss.item()
optimizer.step()
total_loss += loss.item()
#print("Epoch : {} / Train loss: {}".format(epoch, total_loss))
if (epoch + 1) % 10 == 0:
model_fname = './save/new_RL_model_epoch{}.pt'.format(epoch)
torch.save(RL_model.state_dict(), model_fname)
##############################################################################################################################################
def main():
with open('./distill_files/w2i', 'rb') as f:
inv_dict = pickle.load(f)
# parameter
N = 4
folder_path = "distill_files/"
f = open(folder_path + "src-train.0", 'rb')
line_list = pickle.load(f)
f.close()
new_list = []
for line in line_list:
new_list.append(Variable(torch.LongTensor([line])).cuda())
dataloader = DataLoader(new_list, 64, shuffle=False)
parser = argparse.ArgumentParser(
description='rein_train.py',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
onmt.opts.add_md_help_argument(parser)
onmt.opts.translate_opts(parser)
opt = parser.parse_args()
translator = build_translator(opt, report_score=True)
RL_model = Policy_Network(len(inv_dict), 400, 128, N, 64, translator).cuda()
optimizer = optim.SGD(RL_model.parameters(), lr=0.1, weight_decay=1e-4)
num_epochs = 300
RL_train_model(RL_model, optimizer, dataloader, num_epochs, inv_dict)
main()
``` |
{
"source": "jhkim-spa/CVNet",
"score": 3
} |
#### File: mmdet3d/cv_utils/resize_utils.py
```python
import torch
import torch.nn as nn
@torch.no_grad()
def pad(cv, pad_size):
m = nn.ZeroPad2d(pad_size)
cv_padded = m(cv)
return cv_padded
@torch.no_grad()
def resize(cv, size, nonzero_idx=0):
device = cv.device
w_des, h_des = size
w_scale = w_des / cv.shape[1]
h_scale = h_des / cv.shape[0]
if (w_scale == 1. and h_scale == 1.):
cv_resized = cv
else:
cv_resized = torch.zeros((h_des, w_des, cv.shape[-1]),
dtype=torch.float32, device=device)
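        # sparse scatter: only cells that are non-zero in the reference channel are
        # remapped, with their scaled coordinates truncated to integer indices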
idx_src = torch.nonzero(cv[..., nonzero_idx], as_tuple=True)
idx_des = list()
idx_des.append((h_scale * idx_src[0]).to(torch.long))
idx_des.append((w_scale * idx_src[1]).to(torch.long))
cv_resized[idx_des[0], idx_des[1], :] = \
cv[idx_src[0], idx_src[1], :]
return cv_resized
```
#### File: models/dense_heads/cvnet_head.py
```python
import copy
import numpy as np
import torch
from mmcv.cnn import ConvModule, build_conv_layer, kaiming_init
from mmcv.runner import force_fp32
from torch import nn
from mmdet3d.core import (circle_nms, draw_heatmap_gaussian, gaussian_radius,
xywhr2xyxyr)
from mmdet3d.models import builder
from mmdet3d.models.builder import HEADS, build_loss
from mmdet3d.models.utils import clip_sigmoid
from mmdet3d.ops.iou3d.iou3d_utils import nms_gpu
from mmdet.core import build_bbox_coder, multi_apply
from mmcv.cnn import bias_init_with_prob, normal_init
from mmdet3d.cv_utils import project, project_to_image, pad, resize
@HEADS.register_module()
class CenterHeadCV(nn.Module):
def __init__(self,
num_classes=1,
feat_channels=64,
train_cfg=None,
test_cfg=None,
loss_cls=dict(type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
                 loss_bbox=dict(type='SmoothL1Loss',
beta=1.0 / 9.0,
loss_weight=2.0)):
super(CenterHeadCV, self).__init__()
self.num_classes = num_classes
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.feat_channels = feat_channels
        self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
self.sampling = loss_cls['type'] not in ['FocalLoss', 'GHMC']
self.loss_cls = build_loss(loss_cls)
self.loss_bbox = build_loss(loss_bbox)
self.reg_target_size = 8
self._init_layers()
def _init_layers(self):
"""Initialize neural network layers of the head."""
self.cls_out_channels = self.num_classes
self.conv_cls = nn.Conv2d(self.feat_channels, self.cls_out_channels, 1)
self.conv_reg = nn.Conv2d(self.feat_channels, self.reg_target_size, 1)
def init_weights(self):
"""Initialize the weights of head."""
bias_cls = bias_init_with_prob(0.01)
normal_init(self.conv_cls, std=0.01, bias=bias_cls)
normal_init(self.conv_reg, std=0.01)
def forward_single(self, x):
"""Forward function on a single-scale feature map.
Args:
x (torch.Tensor): Input features.
Returns:
tuple[torch.Tensor]: Contain score of each class, bbox \
regression and direction classification predictions.
"""
cls_score = self.conv_cls(x)
bbox_pred = self.conv_reg(x)
return cls_score, bbox_pred
def forward(self, feats):
"""Forward pass.
Args:
feats (list[torch.Tensor]): Multi-level features, e.g.,
features produced by FPN.
Returns:
tuple[list[torch.Tensor]]: Multi-level class score, bbox \
and direction predictions.
"""
return multi_apply(self.forward_single, feats)
def loss_single(self, cls_score, bbox_pred, cls_targets, reg_targets):
# classification loss
cls_score = cls_score.permute(0, 2, 3, 1).reshape(-1, self.num_classes)
cls_targets = cls_targets.permute(0, 2, 3, 1).reshape(-1).to(torch.long)
loss_cls = self.loss_cls(cls_score, cls_targets)
# regression loss
bbox_pred = bbox_pred.permute(0, 2, 3,
1).reshape(-1, self.reg_target_size)
reg_targets = reg_targets.permute(0, 2, 3,
1).reshape(-1, self.reg_target_size)
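        # only feature-map cells holding a projected ground-truth box centre are
        # treated as positives for the regression loss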
pos_inds = (cls_targets == 1).reshape(-1)
num_pos = pos_inds.sum()
pos_bbox_pred = bbox_pred[pos_inds]
pos_reg_targets = reg_targets[pos_inds]
if num_pos > 0:
loss_bbox = self.loss_bbox(pos_bbox_pred,
pos_reg_targets,
avg_factor=num_pos)
else:
loss_bbox = pos_bbox_pred.sum()
return loss_cls, loss_bbox
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
def loss(self,
cls_scores,
bbox_preds,
gt_bboxes,
gt_labels,
input_metas,
cv_size,
pad_size,
gt_bboxes_ignore=None):
"""Calculate losses.
Args:
cls_scores (list[torch.Tensor]): Multi-level class scores.
bbox_preds (list[torch.Tensor]): Multi-level bbox predictions.
dir_cls_preds (list[torch.Tensor]): Multi-level direction
class predictions.
gt_bboxes (list[:obj:`BaseInstance3DBoxes`]): Gt bboxes
of each sample.
gt_labels (list[torch.Tensor]): Gt labels of each sample.
input_metas (list[dict]): Contain pcd and img's meta info.
gt_bboxes_ignore (None | list[torch.Tensor]): Specify
which bounding.
Returns:
dict[str, list[torch.Tensor]]: Classification, bbox, and \
direction losses of each level.
- loss_cls (list[torch.Tensor]): Classification losses.
- loss_bbox (list[torch.Tensor]): Box regression losses.
- loss_dir (list[torch.Tensor]): Direction classification \
losses.
"""
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
device = cls_scores[0].device
label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
cls_reg_targets = self.get_targets(
device,
self.num_classes,
gt_bboxes,
input_metas,
cv_size,
pad_size,
featmap_sizes,
gt_bboxes_ignore_list=gt_bboxes_ignore,
gt_labels_list=gt_labels,
label_channels=label_channels)
if cls_reg_targets is None:
return None
(cls_targets_list, reg_targets_list) = cls_reg_targets
losses_cls, losses_bbox = multi_apply(
self.loss_single,
cls_scores,
bbox_preds,
cls_targets_list,
reg_targets_list)
return dict(
loss_cls=losses_cls, loss_bbox=losses_bbox)
def get_targets(self,
device,
num_classes,
gt_bboxes,
input_metas,
cv_size,
pad_size,
featmap_sizes,
gt_bboxes_ignore_list,
gt_labels_list,
label_channels):
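        # normalisation constants for the regression targets: the first three match
        # the x/y/z extents of the detection range, the last three a nominal car
        # size (w, l, h)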
norm = torch.tensor([70.4, 80, 4, 1.6, 3.9, 1.56], device=device)
valid_idxs= [torch.where((res != -1) & (res < num_classes))[0]\
for res in gt_labels_list]
gt_bboxes = [box[idx].to(device) for box, idx in zip(gt_bboxes, valid_idxs)]
gt_labels_list= [label[idx].to(device) for label, idx in zip(gt_labels_list, valid_idxs)]
proj_mats = [torch.tensor(res['lidar2img'][:3]).to(device)\
for res in input_metas]
centers_3d = [res.gravity_center for res in gt_bboxes]
centers_2d = [project_to_image(res.transpose(1, 0), proj_mat).to(torch.long)\
for res, proj_mat in zip(centers_3d, proj_mats)]
# centers_2d_ = [project(res, meta) for res, meta in zip(centers_3d, input_metas)]
# centers_2d_ = [torch.nonzero(res[..., 0])[:, [1, 0]].permute(1, 0) for res in centers_2d_]
## shift x coords (padding)
centers_2d = [res + torch.tensor([pad_size[0], 0], device=device).reshape(-1, 1)\
for res in centers_2d]
for i, centers in enumerate(centers_2d):
if (centers < 0).sum() != 0:
valid_idx = (0 <= centers[0]) &\
(centers[0] <= cv_size[1]) &\
(0 <= centers[1]) &\
(centers[1] <= cv_size[0])
gt_labels_list[i] = gt_labels_list[i][valid_idx]
gt_bboxes[i] = gt_bboxes[i][valid_idx]
centers_2d[i] = centers_2d[i][:, valid_idx]
targets = [torch.cat((center.transpose(1, 0).to(torch.float32),
label.reshape(-1, 1).to(torch.float32),
box.tensor[:, :-1] / norm,
torch.cos(box.tensor[:, -1].reshape(-1, 1)),
torch.sin(box.tensor[:, -1].reshape(-1, 1))), dim=1)\
for label, center, box in zip(gt_labels_list, centers_2d, gt_bboxes)]
target_maps = []
target_map_channel = label_channels + self.reg_target_size
for target in targets:
target_map = torch.zeros((cv_size[0], cv_size[1],
target_map_channel), dtype=torch.float32, device=device)
x_coords = target[:, 0].to(torch.long)
y_coords = target[:, 1].to(torch.long)
target = target[:, 2:]
target_map[y_coords, x_coords, label_channels:] =\
target[:, label_channels:]
target_map[y_coords, x_coords, target[:, 0].to(torch.long)] = 1.
target_maps.append(target_map)
mlvl_target_maps = []
for featmap_size in featmap_sizes:
des_size = (featmap_size[1], featmap_size[0])
target_maps_resized = [resize(res, des_size, nonzero_idx=1)\
for res in target_maps]
mlvl_target_maps.append(target_maps_resized)
cls_targets = [[res[..., :label_channels].permute(2, 0, 1)\
for res in target_maps] for target_maps in mlvl_target_maps]
reg_targets = [[res[..., label_channels:label_channels +\
self.reg_target_size].permute(2, 0, 1)\
for res in target_maps] for target_maps in mlvl_target_maps]
# stack batches
cls_targets = [torch.stack(tuple(res), dim=0)\
for res in cls_targets]
reg_targets = [torch.stack(tuple(res), dim=0)\
for res in reg_targets]
return (cls_targets, reg_targets)
```
#### File: models/detectors/cvnet.py
```python
import torch
from mmdet3d.core import bbox3d2result, merge_aug_bboxes_3d
from mmdet3d.cv_utils import pad, project, resize
from mmdet.models import DETECTORS
from .single_stage import SingleStage3DDetector
@DETECTORS.register_module()
class CVNet(SingleStage3DDetector):
def __init__(self,
cv_size,
pad_size,
backbone,
neck=None,
bbox_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(CVNet, self).__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
)
self.cv_size = cv_size
self.pad_size = pad_size
def extract_feat(self, points, img_metas):
"""Extract features from points."""
cv = [project(res_points, res_img_metas)\
for res_points, res_img_metas in zip(points, img_metas)]
cv = [resize(res_cv, self.cv_size) for res_cv in cv]
cv = torch.stack(cv)
cv = cv.permute(0, 3, 1, 2)
cv = pad(cv, self.pad_size)
cv_size = cv.shape[-2:]
x = self.backbone(cv)
if self.with_neck:
x = self.neck(x)
return x, cv_size
def forward_train(self,
points,
img_metas,
gt_bboxes_3d,
gt_labels_3d,
gt_bboxes_ignore=None):
"""Training forward function.
Args:
points (list[torch.Tensor]): Point cloud of each sample.
img_metas (list[dict]): Meta information of each sample
gt_bboxes_3d (list[:obj:`BaseInstance3DBoxes`]): Ground truth
boxes for each sample.
gt_labels_3d (list[torch.Tensor]): Ground truth labels for
                boxes of each sample.
gt_bboxes_ignore (list[torch.Tensor], optional): Ground truth
boxes to be ignored. Defaults to None.
Returns:
dict: Losses of each branch.
"""
x, cv_size = self.extract_feat(points, img_metas)
outs = self.bbox_head(x)
loss_inputs = outs + (gt_bboxes_3d, gt_labels_3d, img_metas,
cv_size, self.pad_size)
losses = self.bbox_head.loss(
*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
return losses
def simple_test(self, points, img_metas, imgs=None, rescale=False):
"""Test function without augmentaiton."""
        x, _ = self.extract_feat(points, img_metas)
outs = self.bbox_head(x)
bbox_list = self.bbox_head.get_bboxes(
*outs, img_metas, rescale=rescale)
bbox_results = [
bbox3d2result(bboxes, scores, labels)
for bboxes, scores, labels in bbox_list
]
return bbox_results
def aug_test(self, points, img_metas, imgs=None, rescale=False):
"""Test function with augmentaiton."""
feats = self.extract_feats(points, img_metas)
# only support aug_test for one sample
aug_bboxes = []
for x, img_meta in zip(feats, img_metas):
outs = self.bbox_head(x)
bbox_list = self.bbox_head.get_bboxes(
*outs, img_meta, rescale=rescale)
bbox_list = [
dict(boxes_3d=bboxes, scores_3d=scores, labels_3d=labels)
for bboxes, scores, labels in bbox_list
]
aug_bboxes.append(bbox_list[0])
# after merging, bboxes will be rescaled to the original image size
merged_bboxes = merge_aug_bboxes_3d(aug_bboxes, img_metas,
self.bbox_head.test_cfg)
return [merged_bboxes]
``` |
{
"source": "jhkim-spa/FVNet",
"score": 2
} |
#### File: core/anchor/line_anchor_generator.py
```python
import mmcv
import torch
import numpy as np
from mmdet3d.core.bbox import Box3DMode, CameraInstance3DBoxes, get_box_type
from mmdet.core.anchor import ANCHOR_GENERATORS
@ANCHOR_GENERATORS.register_module()
class LineAnchorGenerator(object):
def __init__(self,
dist_list,
num_bins,
ranges,
sizes=[[1.6, 3.9, 1.56]],
scales=[1],
rotations=[0, 1.5707963],
custom_values=(),
reshape_out=True,
size_per_range=True):
assert mmcv.is_list_of(ranges, list)
if size_per_range:
if len(sizes) != len(ranges):
assert len(ranges) == 1
ranges = ranges * len(sizes)
assert len(ranges) == len(sizes)
else:
assert len(ranges) == 1
assert mmcv.is_list_of(sizes, list)
assert isinstance(scales, list)
self.dist_list = dist_list
self.num_bins = num_bins
self.sizes = sizes
self.scales = scales
self.ranges = ranges
self.rotations = rotations
self.custom_values = custom_values
self.cached_anchors = None
self.reshape_out = reshape_out
self.size_per_range = size_per_range
self.multi_level_anchors = None
self.box_type_3d, self.box_mode_3d = get_box_type('LiDAR')
# self.cam2lidar = torch.tensor([[2.3477350e-04, 1.0449406e-02, 9.9994540e-01, 2.7290344e-01],
# [-9.9994421e-01, 1.0565354e-02, 1.2436594e-04, -1.9692658e-03],
# [-1.0563478e-02, -9.9988955e-01, 1.0451305e-02, -7.2285898e-02],
# [ 0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 1.0000000e+00]],
# dtype=torch.float32)
self.cam2lidar = torch.tensor([[ 7.5337449e-03, 1.4802488e-02, 9.9986202e-01, 2.7290344e-01],
[-9.9997145e-01, 7.2807324e-04, 7.5237905e-03, -1.9692658e-03],
[-6.1660202e-04, -9.9989015e-01, 1.4807552e-02, -7.2285898e-02],
[ 0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 1.0000000e+00]],
dtype=torch.float32)
def __repr__(self):
s = self.__class__.__name__ + '('
s += f'anchor_range={self.ranges},\n'
s += f'scales={self.scales},\n'
s += f'sizes={self.sizes},\n'
s += f'rotations={self.rotations},\n'
s += f'reshape_out={self.reshape_out},\n'
s += f'size_per_range={self.size_per_range})'
return s
@property
def num_base_anchors(self):
"""list[int]: Total number of base anchors in a feature grid."""
num_rot = len(self.rotations)
num_size = torch.tensor(self.sizes).reshape(-1, 3).size(0)
return num_rot * num_size
@property
def num_levels(self):
"""int: Number of feature levels that the generator is applied to."""
return len(self.scales)
def grid_anchors(self, featmap_sizes, device='cuda'):
"""Generate grid anchors in multiple feature levels.
Args:
featmap_sizes (list[tuple]): List of feature map sizes in
multiple feature levels.
device (str): Device where the anchors will be put on.
Returns:
list[torch.Tensor]: Anchors in multiple feature levels. \
The sizes of each tensor should be [N, 4], where \
N = width * height * num_base_anchors, width and height \
are the sizes of the corresponding feature lavel, \
num_base_anchors is the number of anchors for that level.
"""
if self.multi_level_anchors is not None:
return self.multi_level_anchors, self.multi_level_val_masks
assert self.num_levels == len(featmap_sizes)
multi_level_anchors = []
multi_level_val_masks = []
dist_list = []
for i in range(self.num_levels):
dist_list.append(self.dist_list[i*self.num_bins:(i+1)*self.num_bins])
dist_list.reverse()
for i in range(self.num_levels):
anchors, val_masks = self.single_level_grid_anchors(
featmap_sizes[i], dist_list[i], self.scales[i], device=device)
if self.reshape_out:
anchors = anchors.reshape(-1, anchors.size(-1))
multi_level_anchors.append(anchors)
multi_level_val_masks.append(val_masks)
self.multi_level_anchors = multi_level_anchors
self.multi_level_val_masks = multi_level_val_masks
return multi_level_anchors, multi_level_val_masks
def single_level_grid_anchors(self, featmap_size, dist_list,
scale, device='cuda'):
"""Generate grid anchors of a single level feature map.
This function is usually called by method ``self.grid_anchors``.
Args:
featmap_size (tuple[int]): Size of the feature map.
scale (float): Scale factor of the anchors in the current level.
device (str, optional): Device the tensor will be put on.
Defaults to 'cuda'.
Returns:
torch.Tensor: Anchors in the overall feature map.
"""
# We reimplement the anchor generator using torch in cuda
# torch: 0.6975 s for 1000 times
# numpy: 4.3345 s for 1000 times
# which is ~5 times faster than the numpy implementation
if not self.size_per_range:
return self.anchors_single_range(
featmap_size,
self.ranges[0],
scale,
self.sizes,
self.rotations,
device=device)
mr_anchors = []
mr_val_masks = []
for anchor_range, anchor_size in zip(self.ranges, self.sizes):
mr_anchor, mr_val_mask = self.anchors_single_range(
featmap_size,
dist_list,
anchor_range,
scale,
anchor_size,
self.rotations,
device=device)
mr_anchors.append(mr_anchor)
mr_val_masks.append(mr_val_mask)
mr_anchors = torch.cat(mr_anchors, dim=-3)
mr_val_masks = torch.cat(mr_val_masks, dim=-2)
return mr_anchors, mr_val_masks
def anchors_single_range(self,
feature_size,
dist_list,
anchor_range,
scale=1,
sizes=[[1.6, 3.9, 1.56]],
rotations=[0, 1.5707963],
device='cuda'):
# TODO
# 1. Convert camera coord to lidar coord
# 2. Multi-class
# 3. filtering with anchor_range
anchors = []
val_masks = []
sizes = torch.tensor(sizes).reshape(-1, 3)
rotations = torch.tensor(rotations).reshape(-1, 1)
for dist in dist_list:
ref_centers = self.get_ref_points(*feature_size).T
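            # ref_centers are rays through each feature-map pixel at unit depth;
            # scaling by k places every anchor centre at Euclidean distance `dist`
            # from the camera along its ray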
k = dist / torch.sqrt((1. + ref_centers[:, 0]**2 + ref_centers[:, 1]**2))
center = k.reshape(-1, 1) * ref_centers
# idx 0: y, 1: z, 2: x
num_centers = center.shape[0]
center = torch.repeat_interleave(center, 2, dim=0)
size = sizes.repeat((num_centers * 2, 1))
rotation = rotations.repeat(num_centers, 1)
anchor = torch.cat((center, size, rotation), dim=1)
anchor = anchor.reshape(-1, 2, 7)
# import matplotlib.pyplot as plt
# anchor = anchor.cpu()
# plt.scatter(anchor[..., 2], -anchor[..., 0], s=0.1)
# plt.savefig('test_.png')
anchor = self.convert_cam2lidar(anchor)
anchor[..., 2] = anchor[..., 2] - anchor[..., 5] / 2
val_mask = (anchor[..., 0] >= anchor_range[0]) &\
(anchor[..., 0] <= anchor_range[3]) &\
(anchor[..., 1] >= anchor_range[1]) &\
(anchor[..., 1] <= anchor_range[4]) &\
(anchor[..., 2] >= anchor_range[2]) &\
(anchor[..., 2] <= anchor_range[5])
# anchor[..., 2] = -1.78
# val_mask = anchor[..., 0] > -100000
anchors.append(anchor)
val_masks.append(val_mask)
anchors = torch.stack(anchors)
anchors = anchors.permute(1, 0, 2, 3)
anchors = anchors.reshape(1, *feature_size, 1, -1, 7)
val_masks = torch.stack(val_masks)
val_masks = val_masks.permute(1, 0, 2)
val_masks = val_masks.reshape(1, *feature_size, 1, -1)
return anchors, val_masks
def convert_cam2lidar(self, boxes):
# boxes = boxes.reshape(-1, 7)
# boxes[:, [3, 4, 5]] = boxes[:, [4, 5, 3]]
# boxes = boxes.contiguous()
# boxes = CameraInstance3DBoxes(boxes).convert_to(self.box_mode_3d,
# self.cam2lidar)
# boxes = boxes.tensor.reshape(-1, 2, 7)
boxes = boxes.reshape(-1, 7)
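        # camera axes (x right, y down, z forward) to lidar axes (x forward,
        # y left, z up): (x, y, z)_cam -> (z, -x, -y)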
boxes[:, [0, 1, 2]] = boxes[:, [2, 0, 1]]
boxes[:, [1, 2]] = -boxes[:, [1, 2]]
# cam2 to cam0
boxes[:, 1] += 0.06
# cam0 to velo
boxes[:, 2] -= 0.08
boxes[:, 0] += 0.27
# boxes = boxes.tensor.reshape(-1, 2, 7)
boxes = boxes.reshape(-1, 2, 7)
return boxes
def get_ref_points(self, height, width):
K = self.intrinsic_from_fov(height, width, 90)
K_inv = torch.tensor(np.linalg.inv(K), dtype=torch.float32)
pixel_coords = torch.tensor(self.pixel_coord_np(width, height),
dtype=torch.float32)
cam_coords = K_inv[:3, :3] @ pixel_coords * 1.0
return cam_coords
def pixel_coord_np(self, width, height):
"""
Pixel in homogenous coordinate
Returns:
Pixel coordinate: [3, width * height]
"""
        x = np.linspace(0, width - 1, width).astype(int)
        y = np.linspace(0, height - 1, height).astype(int)
[x, y] = np.meshgrid(x, y)
return np.vstack((x.flatten(), y.flatten(), np.ones_like(x.flatten())))
def intrinsic_from_fov(self, height, width, fov=90):
"""
Basic Pinhole Camera Model
intrinsic params from fov and sensor width and height in pixels
Returns:
K: [4, 4]
"""
px, py = (width / 2, height / 2)
hfov = fov / 360. * 2. * np.pi
fx = width / (2. * np.tan(hfov / 2.))
vfov = 2. * np.arctan(np.tan(hfov / 2) * height / width)
fy = height / (2. * np.tan(vfov / 2.))
return np.array([[fx, 0, px, 0.],
[0, fy, py, 0.],
[0, 0, 1., 0.],
[0., 0., 0., 1.]])
```
#### File: models/backbones/fpn18.py
```python
import torch
import torch.nn as nn
from torch.nn import functional as F
from collections import OrderedDict
from ..builder import BACKBONES
class PyramidFeatures(nn.Module):
'''
FPN pyramid layer
'''
def __init__(self, C3_size, C4_size, C5_size, feature_size=256):
super(PyramidFeatures, self).__init__()
# upsample C5 to get P5 from the FPN paper
self.P5_1 = nn.Conv2d(C5_size, feature_size, kernel_size=1, stride=1, padding=0)
self.P5_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
self.P5_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)
# add P5 elementwise to C4
self.P4_1 = nn.Conv2d(C4_size, feature_size, kernel_size=1, stride=1, padding=0)
self.P4_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
self.P4_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)
# add P4 elementwise to C3
self.P3_1 = nn.Conv2d(C3_size, feature_size, kernel_size=1, stride=1, padding=0)
self.P3_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)
# "P6 is obtained via a 3x3 stride-2 conv on C5"
self.P6 = nn.Conv2d(C5_size, feature_size, kernel_size=3, stride=2, padding=1)
# "P7 is computed by applying ReLU followed by a 3x3 stride-2 conv on P6"
self.P7_1 = nn.ReLU()
self.P7_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=2, padding=1)
def forward(self, inputs):
C3, C4, C5 = inputs
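        # top-down pathway: start from the coarsest map (C5), upsample, and add the
        # laterally projected finer maps (C4, then C3)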
P5 = self.P5_1(C5)
P5_up = self.P5_upsampled(P5)
P5 = self.P5_2(P5)
P4 = self.P4_1(C4)
P4 = P4 + P5_up
P4_up = self.P4_upsampled(P4)
P4 = self.P4_2(P4)
P3 = self.P3_1(C3)
P3 = P3 + P4_up
P3 = self.P3_2(P3)
P6 = self.P6(C5)
P7 = self.P7_1(P6)
P7 = self.P7_2(P7)
return [P3, P4, P5, P6, P7]
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.downsample = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, \
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes))
self.stride = stride
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.downsample(x)
out = F.relu(out)
return out
@BACKBONES.register_module()
class FPN18(nn.Module):
def __init__(self):
super(FPN18, self).__init__()
num_blocks = [2,2,2,2]
bb_block = BasicBlock
self.f_in_planes_det = 64
# For RGB Feature Network
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer_det(bb_block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer_det(bb_block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer_det(bb_block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer_det(bb_block, 512, num_blocks[3], stride=2)
fpn_sizes = [
self.layer2[1].conv2.out_channels,
self.layer3[1].conv2.out_channels,
self.layer4[1].conv2.out_channels]
self.fpn = PyramidFeatures(fpn_sizes[0], fpn_sizes[1], fpn_sizes[2])
def _make_layer_det(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.f_in_planes_det, planes, stride))
self.f_in_planes_det = planes * block.expansion
return nn.Sequential(*layers)
def init_weights(self, pretrained=None):
pth_path = 'pretrained/FPN18_retinanet_968.pth'
pre_weights = torch.load(pth_path)
new_res_state_dict = OrderedDict()
model_dict = self.state_dict()
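        # copy backbone/FPN weights from the pretrained RetinaNet checkpoint,
        # skipping its box/class heads and stripping the leading 'module.' prefix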
for k,v in pre_weights['state_dict'].items():
if ('regressionModel' not in k) and ('classificationModel' not in k):
# name = k.replace('module', 'rpn')
name = '.'.join(k.split('.')[1:])
new_res_state_dict[name] = v
model_dict.update(new_res_state_dict)
self.load_state_dict(model_dict)
def forward(self, x):
"""Forward function."""
f1 = self.maxpool(F.relu(self.bn1(self.conv1(x))))
f2 = self.layer1(f1)
f3 = self.layer2(f2)
f4 = self.layer3(f3)
f5 = self.layer4(f4)
x = self.fpn([f3, f4, f5])
return x
```
#### File: models/dense_heads/pvg_aux_head.py
```python
import numpy as np
import torch
from mmcv.cnn import bias_init_with_prob, normal_init
from mmcv.runner import force_fp32
from torch import nn as nn
import torch.nn.functional as F
from mmdet3d.core import (PseudoSampler, anchor, box3d_multiclass_nms, limit_period,
xywhr2xyxyr)
from mmdet.core import (build_anchor_generator, build_assigner,
build_bbox_coder, build_sampler, multi_apply)
from mmdet.models import HEADS
from ..builder import build_loss
from mmdet3d.ops.roiaware_pool3d import points_in_boxes_gpu
@HEADS.register_module()
class PVGAuxHead(nn.Module):
def __init__(self,
in_channels,
loss_seg=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=0.1),
loss_depth=dict(
                     type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.1)):
super().__init__()
self.in_channels = in_channels
self.fp16_enabled = False
# build loss function
self.use_sigmoid_cls = loss_seg.get('use_sigmoid', False)
self.sampling = loss_seg['type'] not in ['FocalLoss', 'GHMC']
self.loss_seg = build_loss(loss_seg)
self.loss_depth = build_loss(loss_depth)
self._init_layers()
self._init_assigner_sampler()
def _init_assigner_sampler(self):
"""Initialize the target assigner and sampler of the head."""
self.bbox_sampler = PseudoSampler()
def _init_layers(self):
"""Initialize neural network layers of the head."""
self.upsample_layer = nn.Sequential(
nn.ConvTranspose2d(self.in_channels, 128, kernel_size=2, stride=2),
nn.Conv2d(128, 128, kernel_size=3, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Conv2d(128, 128, kernel_size=3, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2),
nn.Conv2d(64, 64, kernel_size=3, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, kernel_size=3, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(64, 32, kernel_size=2, stride=2),
nn.Conv2d(32, 32, kernel_size=3, padding=1),
nn.BatchNorm2d(32),
nn.ReLU(inplace=True),
nn.Conv2d(32, 32, kernel_size=3, padding=1),
nn.BatchNorm2d(32),
nn.ReLU(inplace=True)
)
self.conv_seg = nn.Conv2d(32, 1, 1)
self.conv_depth = nn.Conv2d(32, 1, 1)
def init_weights(self):
"""Initialize the weights of head."""
bias_cls = bias_init_with_prob(0.01)
normal_init(self.conv_seg, std=0.01, bias=bias_cls)
normal_init(self.conv_depth, std=0.01)
def forward_single(self, x):
x = self.upsample_layer(x)
seg_score = self.conv_seg(x)
depth_pred = self.conv_depth(x)
return seg_score, depth_pred
def forward(self, feats):
return multi_apply(self.forward_single, feats)
@force_fp32(apply_to=('seg_scores', 'depth_preds'))
def loss(self,
seg_scores,
depth_preds,
points,
gt_bboxes,
gt_labels,
input_metas,
gt_bboxes_ignore=None):
device = seg_scores[0].device
cls_reg_targets = self.get_targets(points,
gt_bboxes,
gt_labels,
input_metas,
device)
if cls_reg_targets is None:
return None
seg_targets_list, depth_targets_list = cls_reg_targets
losses_seg, losses_depth = multi_apply(
self.loss_single,
seg_scores,
depth_preds,
seg_targets_list,
depth_targets_list)
return dict(
loss_aux_seg=losses_seg, loss_aux_depth=losses_depth)
def get_targets(self, points, gt_bboxes, gt_labels,
input_metas, device):
batch_size = len(gt_bboxes)
seg_targets_list = []
depth_targets_list = []
for i in range(batch_size):
res_points = points[i][:, :3]
res_lidar2img = input_metas[i]['lidar2img'][:3]
res_lidar2img = torch.from_numpy(res_lidar2img)
# Back transform
res_points = res_points / input_metas[i]['pcd_scale_factor']
if input_metas[i].get('pcd_rotation') is not None:
rotation = input_metas[i]['pcd_rotation'].to(device)
res_points = res_points @ torch.inverse(rotation)
if input_metas[i]['pcd_horizontal_flip']:
res_points[:, 1] *= -1
uv = self.project_to_img(res_points, res_lidar2img)
width = input_metas[i]['img_shape'][1]
height = input_metas[i]['img_shape'][0]
w_scale, h_scale = input_metas[i]['scale_factor'][:2]
uv[:, 0] *= h_scale
uv[:, 1] *= w_scale
uv = uv.to(torch.long)
if input_metas[i]['flip']:
uv[:, 1] = width - uv[:, 1] - 1
valid_inds = torch.where(
(uv[:, 0] < height) & (uv[:, 0] >= 0) &
(uv[:, 1] < width) & (uv[:, 1] >= 0)
)[0]
# filtering invalid points
uv = uv[valid_inds]
res_points = res_points[valid_inds]
# segmentation targets
seg_targets = torch.ones((height, width, 1),
dtype=torch.long,
device=device) * -1
assigned_inds = points_in_boxes_gpu(points[i][:, :3].unsqueeze(dim=0),
gt_bboxes[i].tensor.unsqueeze(dim=0)
.to(device))
assigned_inds = assigned_inds.squeeze(dim=0).to(torch.long)
assigned_inds = assigned_inds[valid_inds]
res_labels = torch.cat([gt_labels[i], torch.tensor([1]).to(device)])
seg_targets[uv[:, 0], uv[:, 1], 0] = res_labels[assigned_inds]
# depth targets
depth_targets = torch.zeros((height, width, 1),
dtype=torch.float32,
device=device)
depth_targets[uv[:, 0], uv[:, 1], 0] = res_points[:, 0] / 69.12
seg_targets_list.append(seg_targets)
depth_targets_list.append(depth_targets)
seg_targets = torch.stack(seg_targets_list, dim=0)
depth_targets = torch.stack(depth_targets_list, dim=0)
return [seg_targets], [depth_targets]
@torch.no_grad()
def project_to_img(self, xyz, lidar2img):
device = xyz.device
lidar2img = lidar2img.to(device)
num_points = xyz.shape[0]
xyz_hom = torch.cat((xyz.T, torch.ones((1, num_points)).to(device)))
uv_hom = lidar2img @ xyz_hom
uv_hom[:2, :] /= uv_hom[2, :]
uv = uv_hom[:2, :].T
uv = uv[:, [1, 0]]
return uv
def loss_single(self, seg_score, depth_pred, seg_targets, depth_targets):
# fg/bg segmentation loss
num_valid_points = (seg_targets != -1).sum()
seg_score = seg_score.permute(0, 2, 3, 1).contiguous().reshape(-1, 1)
seg_targets = seg_targets.reshape(-1)
loss_seg = self.loss_seg(seg_score, seg_targets, avg_factor=num_valid_points)
# depth regression loss
depth_pred = depth_pred.permute(0, 2, 3, 1).contiguous().reshape(-1, 1)
depth_targets = depth_targets.reshape(-1, 1)
valid_inds = depth_targets.nonzero()[:, 0]
loss_depth = self.loss_depth(depth_pred[valid_inds],
depth_targets[valid_inds],
avg_factor=valid_inds.shape[0])
return loss_seg, loss_depth
``` |
{
"source": "jhkim-spa/FVNet_v2",
"score": 2
} |
#### File: tools/data_converter/kitti_converter.py
```python
import mmcv
import numpy as np
from collections import OrderedDict
from nuscenes.utils.geometry_utils import view_points
from pathlib import Path
from mmdet3d.core.bbox import box_np_ops
from .kitti_data_utils import get_kitti_image_info, get_waymo_image_info
from .nuscenes_converter import post_process_coords
kitti_categories = ('Pedestrian', 'Cyclist', 'Car')
def convert_to_kitti_info_version2(info):
"""convert kitti info v1 to v2 if possible.
Args:
info (dict): Info of the input kitti data.
- image (dict): image info
- calib (dict): calibration info
- point_cloud (dict): point cloud info
"""
if 'image' not in info or 'calib' not in info or 'point_cloud' not in info:
info['image'] = {
'image_shape': info['img_shape'],
'image_idx': info['image_idx'],
'image_path': info['img_path'],
}
info['calib'] = {
'R0_rect': info['calib/R0_rect'],
'Tr_velo_to_cam': info['calib/Tr_velo_to_cam'],
'P2': info['calib/P2'],
}
info['point_cloud'] = {
'velodyne_path': info['velodyne_path'],
}
def _read_imageset_file(path):
with open(path, 'r') as f:
lines = f.readlines()
return [int(line) for line in lines]
def _calculate_num_points_in_gt(data_path,
infos,
relative_path,
remove_outside=True,
num_features=4):
for info in mmcv.track_iter_progress(infos):
pc_info = info['point_cloud']
image_info = info['image']
calib = info['calib']
if relative_path:
v_path = str(Path(data_path) / pc_info['velodyne_path'])
else:
v_path = pc_info['velodyne_path']
points_v = np.fromfile(
v_path, dtype=np.float32, count=-1).reshape([-1, num_features])
rect = calib['R0_rect']
Trv2c = calib['Tr_velo_to_cam']
P2 = calib['P2']
if remove_outside:
points_v = box_np_ops.remove_outside_points(
points_v, rect, Trv2c, P2, image_info['image_shape'])
# points_v = points_v[points_v[:, 0] > 0]
annos = info['annos']
num_obj = len([n for n in annos['name'] if n != 'DontCare'])
# annos = kitti.filter_kitti_anno(annos, ['DontCare'])
dims = annos['dimensions'][:num_obj]
loc = annos['location'][:num_obj]
rots = annos['rotation_y'][:num_obj]
gt_boxes_camera = np.concatenate([loc, dims, rots[..., np.newaxis]],
axis=1)
gt_boxes_lidar = box_np_ops.box_camera_to_lidar(
gt_boxes_camera, rect, Trv2c)
indices = box_np_ops.points_in_rbbox(points_v[:, :3], gt_boxes_lidar)
num_points_in_gt = indices.sum(0)
num_ignored = len(annos['dimensions']) - num_obj
num_points_in_gt = np.concatenate(
[num_points_in_gt, -np.ones([num_ignored])])
annos['num_points_in_gt'] = num_points_in_gt.astype(np.int32)
def create_kitti_info_file(data_path,
pkl_prefix='kitti',
save_path=None,
relative_path=True):
"""Create info file of KITTI dataset.
Given the raw data, generate its related info file in pkl format.
Args:
data_path (str): Path of the data root.
pkl_prefix (str): Prefix of the info file to be generated.
save_path (str): Path to save the info file.
relative_path (bool): Whether to use relative path.
"""
imageset_folder = Path(data_path) / 'ImageSets'
train_img_ids = _read_imageset_file(str(imageset_folder / 'train.txt'))
val_img_ids = _read_imageset_file(str(imageset_folder / 'val.txt'))
test_img_ids = _read_imageset_file(str(imageset_folder / 'test.txt'))
debug_img_ids = _read_imageset_file(str(imageset_folder / 'debug.txt'))
print('Generate info. this may take several minutes.')
if save_path is None:
save_path = Path(data_path)
else:
save_path = Path(save_path)
kitti_infos_train = get_kitti_image_info(
data_path,
training=True,
velodyne=True,
calib=True,
image_ids=train_img_ids,
relative_path=relative_path)
_calculate_num_points_in_gt(data_path, kitti_infos_train, relative_path)
filename = save_path / f'{pkl_prefix}_infos_train.pkl'
print(f'Kitti info train file is saved to {filename}')
mmcv.dump(kitti_infos_train, filename)
kitti_infos_val = get_kitti_image_info(
data_path,
training=True,
velodyne=True,
calib=True,
image_ids=val_img_ids,
relative_path=relative_path)
_calculate_num_points_in_gt(data_path, kitti_infos_val, relative_path)
filename = save_path / f'{pkl_prefix}_infos_val.pkl'
print(f'Kitti info val file is saved to {filename}')
mmcv.dump(kitti_infos_val, filename)
filename = save_path / f'{pkl_prefix}_infos_trainval.pkl'
print(f'Kitti info trainval file is saved to {filename}')
mmcv.dump(kitti_infos_train + kitti_infos_val, filename)
kitti_infos_test = get_kitti_image_info(
data_path,
training=False,
label_info=False,
velodyne=True,
calib=True,
image_ids=test_img_ids,
relative_path=relative_path)
filename = save_path / f'{pkl_prefix}_infos_test.pkl'
print(f'Kitti info test file is saved to {filename}')
mmcv.dump(kitti_infos_test, filename)
kitti_infos_debug = get_kitti_image_info(
data_path,
training=True,
velodyne=True,
calib=True,
image_ids=debug_img_ids,
relative_path=relative_path)
_calculate_num_points_in_gt(data_path, kitti_infos_debug, relative_path)
filename = save_path / f'{pkl_prefix}_infos_debug.pkl'
print(f'Kitti info debug file is saved to {filename}')
mmcv.dump(kitti_infos_debug, filename)
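# Usage sketch (hypothetical paths); assumes data_path contains ImageSets/{train,val,test,debug}.txt
# plus the raw KITTI folders expected by get_kitti_image_info:
#   create_kitti_info_file('./data/kitti', pkl_prefix='kitti', relative_path=True)
# This writes kitti_infos_{train,val,trainval,test,debug}.pkl under ./data/kitti.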
def create_waymo_info_file(data_path,
pkl_prefix='waymo',
save_path=None,
relative_path=True,
max_sweeps=5):
"""Create info file of waymo dataset.
Given the raw data, generate its related info file in pkl format.
Args:
data_path (str): Path of the data root.
pkl_prefix (str): Prefix of the info file to be generated.
save_path (str | None): Path to save the info file.
relative_path (bool): Whether to use relative path.
max_sweeps (int): Max sweeps before the detection frame to be used.
"""
imageset_folder = Path(data_path) / 'ImageSets'
train_img_ids = _read_imageset_file(str(imageset_folder / 'train.txt'))
val_img_ids = _read_imageset_file(str(imageset_folder / 'val.txt'))
test_img_ids = _read_imageset_file(str(imageset_folder / 'test.txt'))
print('Generate info. this may take several minutes.')
if save_path is None:
save_path = Path(data_path)
else:
save_path = Path(save_path)
waymo_infos_train = get_waymo_image_info(
data_path,
training=True,
velodyne=True,
calib=True,
pose=True,
image_ids=train_img_ids,
relative_path=relative_path,
max_sweeps=max_sweeps)
_calculate_num_points_in_gt(
data_path,
waymo_infos_train,
relative_path,
num_features=6,
remove_outside=False)
filename = save_path / f'{pkl_prefix}_infos_train.pkl'
print(f'Waymo info train file is saved to {filename}')
mmcv.dump(waymo_infos_train, filename)
waymo_infos_val = get_waymo_image_info(
data_path,
training=True,
velodyne=True,
calib=True,
pose=True,
image_ids=val_img_ids,
relative_path=relative_path,
max_sweeps=max_sweeps)
_calculate_num_points_in_gt(
data_path,
waymo_infos_val,
relative_path,
num_features=6,
remove_outside=False)
filename = save_path / f'{pkl_prefix}_infos_val.pkl'
print(f'Waymo info val file is saved to {filename}')
mmcv.dump(waymo_infos_val, filename)
filename = save_path / f'{pkl_prefix}_infos_trainval.pkl'
print(f'Waymo info trainval file is saved to {filename}')
mmcv.dump(waymo_infos_train + waymo_infos_val, filename)
waymo_infos_test = get_waymo_image_info(
data_path,
training=False,
label_info=False,
velodyne=True,
calib=True,
pose=True,
image_ids=test_img_ids,
relative_path=relative_path,
max_sweeps=max_sweeps)
filename = save_path / f'{pkl_prefix}_infos_test.pkl'
print(f'Waymo info test file is saved to {filename}')
mmcv.dump(waymo_infos_test, filename)
def _create_reduced_point_cloud(data_path,
info_path,
save_path=None,
back=False,
num_features=4,
front_camera_id=2):
"""Create reduced point clouds for given info.
Args:
data_path (str): Path of original data.
info_path (str): Path of data info.
save_path (str | None): Path to save reduced point cloud data.
Default: None.
back (bool): Whether to flip the points to back.
num_features (int): Number of point features. Default: 4.
front_camera_id (int): The referenced/front camera ID. Default: 2.
"""
kitti_infos = mmcv.load(info_path)
for info in mmcv.track_iter_progress(kitti_infos):
pc_info = info['point_cloud']
image_info = info['image']
calib = info['calib']
v_path = pc_info['velodyne_path']
v_path = Path(data_path) / v_path
points_v = np.fromfile(
str(v_path), dtype=np.float32,
count=-1).reshape([-1, num_features])
rect = calib['R0_rect']
if front_camera_id == 2:
P2 = calib['P2']
else:
P2 = calib[f'P{str(front_camera_id)}']
Trv2c = calib['Tr_velo_to_cam']
# first remove z < 0 points
# keep = points_v[:, -1] > 0
# points_v = points_v[keep]
# then remove outside.
if back:
points_v[:, 0] = -points_v[:, 0]
points_v = box_np_ops.remove_outside_points(points_v, rect, Trv2c, P2,
image_info['image_shape'])
if save_path is None:
save_dir = v_path.parent.parent / (v_path.parent.stem + '_reduced')
if not save_dir.exists():
save_dir.mkdir()
save_filename = save_dir / v_path.name
# save_filename = str(v_path) + '_reduced'
if back:
save_filename += '_back'
else:
save_filename = str(Path(save_path) / v_path.name)
if back:
save_filename += '_back'
with open(save_filename, 'w') as f:
points_v.tofile(f)
def create_reduced_point_cloud(data_path,
pkl_prefix,
train_info_path=None,
val_info_path=None,
test_info_path=None,
save_path=None,
with_back=False):
"""Create reduced point clouds for training/validation/testing.
Args:
data_path (str): Path of original data.
pkl_prefix (str): Prefix of info files.
train_info_path (str | None): Path of training set info.
Default: None.
val_info_path (str | None): Path of validation set info.
Default: None.
test_info_path (str | None): Path of test set info.
Default: None.
save_path (str | None): Path to save reduced point cloud data.
with_back (bool): Whether to flip the points to back.
"""
if train_info_path is None:
train_info_path = Path(data_path) / f'{pkl_prefix}_infos_train.pkl'
if val_info_path is None:
val_info_path = Path(data_path) / f'{pkl_prefix}_infos_val.pkl'
if test_info_path is None:
test_info_path = Path(data_path) / f'{pkl_prefix}_infos_test.pkl'
print('create reduced point cloud for training set')
_create_reduced_point_cloud(data_path, train_info_path, save_path)
print('create reduced point cloud for validation set')
_create_reduced_point_cloud(data_path, val_info_path, save_path)
print('create reduced point cloud for testing set')
_create_reduced_point_cloud(data_path, test_info_path, save_path)
if with_back:
_create_reduced_point_cloud(
data_path, train_info_path, save_path, back=True)
_create_reduced_point_cloud(
data_path, val_info_path, save_path, back=True)
_create_reduced_point_cloud(
data_path, test_info_path, save_path, back=True)
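# Usage sketch (hypothetical paths), reusing the info files produced above:
#   create_reduced_point_cloud('./data/kitti', pkl_prefix='kitti')
# With save_path=None the reduced .bin files are written next to the originals in a
# sibling '<velodyne_dir>_reduced' directory (see _create_reduced_point_cloud).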
def export_2d_annotation(root_path, info_path, mono3d=True):
"""Export 2d annotation from the info file and raw data.
Args:
root_path (str): Root path of the raw data.
info_path (str): Path of the info file.
mono3d (bool): Whether to export mono3d annotation. Default: True.
"""
# get bbox annotations for camera
kitti_infos = mmcv.load(info_path)
cat2Ids = [
dict(id=kitti_categories.index(cat_name), name=cat_name)
for cat_name in kitti_categories
]
coco_ann_id = 0
coco_2d_dict = dict(annotations=[], images=[], categories=cat2Ids)
from os import path as osp
for info in mmcv.track_iter_progress(kitti_infos):
coco_infos = get_2d_boxes(info, occluded=[0, 1, 2, 3], mono3d=mono3d)
(height, width,
_) = mmcv.imread(osp.join(root_path,
info['image']['image_path'])).shape
coco_2d_dict['images'].append(
dict(
file_name=info['image']['image_path'],
id=info['image']['image_idx'],
Tri2v=info['calib']['Tr_imu_to_velo'],
Trv2c=info['calib']['Tr_velo_to_cam'],
rect=info['calib']['R0_rect'],
cam_intrinsic=info['calib']['P2'],
width=width,
height=height))
for coco_info in coco_infos:
if coco_info is None:
continue
# add an empty key for coco format
coco_info['segmentation'] = []
coco_info['id'] = coco_ann_id
coco_2d_dict['annotations'].append(coco_info)
coco_ann_id += 1
if mono3d:
json_prefix = f'{info_path[:-4]}_mono3d'
else:
json_prefix = f'{info_path[:-4]}'
mmcv.dump(coco_2d_dict, f'{json_prefix}.coco.json')
def get_2d_boxes(info, occluded, mono3d=True):
"""Get the 2D annotation records for a given info.
Args:
info: Information of the given sample data.
occluded: Integer (0, 1, 2, 3) indicating occlusion state: \
0 = fully visible, 1 = partly occluded, 2 = largely occluded, \
3 = unknown, -1 = DontCare
mono3d (bool): Whether to get boxes with mono3d annotation.
Return:
list[dict]: List of 2D annotation record that belongs to the input
`sample_data_token`.
"""
# Get calibration information
P2 = info['calib']['P2']
repro_recs = []
# if no annotations in info (test dataset), then return
if 'annos' not in info:
return repro_recs
    # Get all the annotations with the specified visibilities.
ann_dicts = info['annos']
mask = [(ocld in occluded) for ocld in ann_dicts['occluded']]
for k in ann_dicts.keys():
ann_dicts[k] = ann_dicts[k][mask]
# convert dict of list to list of dict
ann_recs = []
for i in range(len(ann_dicts['occluded'])):
ann_rec = {}
for k in ann_dicts.keys():
ann_rec[k] = ann_dicts[k][i]
ann_recs.append(ann_rec)
for ann_idx, ann_rec in enumerate(ann_recs):
# Augment sample_annotation with token information.
ann_rec['sample_annotation_token'] = \
f"{info['image']['image_idx']}.{ann_idx}"
ann_rec['sample_data_token'] = info['image']['image_idx']
sample_data_token = info['image']['image_idx']
loc = ann_rec['location'][np.newaxis, :]
dim = ann_rec['dimensions'][np.newaxis, :]
rot = ann_rec['rotation_y'][np.newaxis, np.newaxis]
# transform the center from [0.5, 1.0, 0.5] to [0.5, 0.5, 0.5]
dst = np.array([0.5, 0.5, 0.5])
src = np.array([0.5, 1.0, 0.5])
loc = loc + dim * (dst - src)
offset = (info['calib']['P2'][0, 3] - info['calib']['P0'][0, 3]) \
/ info['calib']['P2'][0, 0]
loc_3d = np.copy(loc)
loc_3d[0, 0] += offset
gt_bbox_3d = np.concatenate([loc, dim, rot], axis=1).astype(np.float32)
# Filter out the corners that are not in front of the calibrated
# sensor.
corners_3d = box_np_ops.center_to_corner_box3d(
gt_bbox_3d[:, :3],
gt_bbox_3d[:, 3:6],
gt_bbox_3d[:, 6], [0.5, 0.5, 0.5],
axis=1)
corners_3d = corners_3d[0].T # (1, 8, 3) -> (3, 8)
in_front = np.argwhere(corners_3d[2, :] > 0).flatten()
corners_3d = corners_3d[:, in_front]
# Project 3d box to 2d.
camera_intrinsic = P2
corner_coords = view_points(corners_3d, camera_intrinsic,
True).T[:, :2].tolist()
# Keep only corners that fall within the image.
final_coords = post_process_coords(corner_coords)
# Skip if the convex hull of the re-projected corners
# does not intersect the image canvas.
if final_coords is None:
continue
else:
min_x, min_y, max_x, max_y = final_coords
# Generate dictionary record to be included in the .json file.
repro_rec = generate_record(ann_rec, min_x, min_y, max_x, max_y,
sample_data_token,
info['image']['image_path'])
# If mono3d=True, add 3D annotations in camera coordinates
if mono3d and (repro_rec is not None):
repro_rec['bbox_cam3d'] = np.concatenate(
[loc_3d, dim, rot],
axis=1).astype(np.float32).squeeze().tolist()
repro_rec['velo_cam3d'] = -1 # no velocity in KITTI
center3d = np.array(loc).reshape([1, 3])
center2d = box_np_ops.points_cam2img(
center3d, camera_intrinsic, with_depth=True)
repro_rec['center2d'] = center2d.squeeze().tolist()
# normalized center2D + depth
# samples with depth < 0 will be removed
if repro_rec['center2d'][2] <= 0:
continue
repro_rec['attribute_name'] = -1 # no attribute in KITTI
repro_rec['attribute_id'] = -1
repro_recs.append(repro_rec)
return repro_recs
def generate_record(ann_rec, x1, y1, x2, y2, sample_data_token, filename):
"""Generate one 2D annotation record given various informations on top of
the 2D bounding box coordinates.
Args:
ann_rec (dict): Original 3d annotation record.
x1 (float): Minimum value of the x coordinate.
y1 (float): Minimum value of the y coordinate.
x2 (float): Maximum value of the x coordinate.
y2 (float): Maximum value of the y coordinate.
sample_data_token (str): Sample data token.
filename (str):The corresponding image file where the annotation
is present.
Returns:
dict: A sample 2D annotation record.
            - file_name (str): file name
- image_id (str): sample data token
- area (float): 2d box area
- category_name (str): category name
- category_id (int): category id
- bbox (list[float]): left x, top y, dx, dy of 2d box
- iscrowd (int): whether the area is crowd
"""
repro_rec = OrderedDict()
repro_rec['sample_data_token'] = sample_data_token
coco_rec = dict()
key_mapping = {
'name': 'category_name',
'num_points_in_gt': 'num_lidar_pts',
'sample_annotation_token': 'sample_annotation_token',
'sample_data_token': 'sample_data_token',
}
for key, value in ann_rec.items():
if key in key_mapping.keys():
repro_rec[key_mapping[key]] = value
repro_rec['bbox_corners'] = [x1, y1, x2, y2]
repro_rec['filename'] = filename
coco_rec['file_name'] = filename
coco_rec['image_id'] = sample_data_token
coco_rec['area'] = (y2 - y1) * (x2 - x1)
if repro_rec['category_name'] not in kitti_categories:
return None
cat_name = repro_rec['category_name']
coco_rec['category_name'] = cat_name
coco_rec['category_id'] = kitti_categories.index(cat_name)
coco_rec['bbox'] = [x1, y1, x2 - x1, y2 - y1]
coco_rec['iscrowd'] = 0
return coco_rec
``` |
{
"source": "jhkloss/libg3n_frontend",
"score": 3
} |
#### File: libg3n_frontend/logic/variation_point.py
```python
from enum import Enum, auto
class VariationPoint:
class TYPE(Enum):
FUNCTION = auto()
CLASS = auto()
class FORM_TYPE(Enum):
INPUT = auto()
CHECKBOX = auto()
SLIDER = auto()
CODE = auto()
id: str
name: str
description: str
# Variation Point Type
type: TYPE
# Form Element Type
form: FORM_TYPE
# Properties dict for Class Variation points
properties = {}
def __init__(self, element: dict):
if 'id' in element:
self.id = element['id']
if 'type' in element:
self.type = self._parse_type(element['type'])
if 'name' in element:
self.name = element['name']
if 'form' in element:
self.form = element['form']
if 'desc' in element:
self.description = element['desc']
if 'properties' in element:
self.properties = element['properties']
def _parse_type(self, type_str: str) -> TYPE:
if type_str == 'function':
return self.TYPE.FUNCTION
elif type_str == 'class':
return self.TYPE.CLASS
def _parse_form_type(self, form_type_str: str) -> FORM_TYPE:
pass
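# Minimal usage sketch (hypothetical config element); only the keys present in the dict are set,
# and note that 'form' is currently stored as the raw string rather than a FORM_TYPE member:
#   vp = VariationPoint({'id': 'vp1', 'type': 'function', 'name': 'Greeting', 'form': 'input'})
#   assert vp.type is VariationPoint.TYPE.FUNCTION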
``` |
{
"source": "jhkloss/libg3n",
"score": 3
} |
#### File: libg3n/exception/EmptyFileException.py
```python
class EmptyFileException(Exception):
def __init__(self, path: str):
self.path = path
def __str__(self):
return 'File {path} is empty.'.format(path=self.path)
```
#### File: libg3n/exception/InvalidTokenException.py
```python
class InvalidTokenException(Exception):
def __init__(self, token: list, path="", line=0):
self.token = token
self.path = path
self.line = line
super().__init__()
def __str__(self):
result = "Encountered invalid token: " + self.token[0]
if self.path:
result += " in file: " + self.path
if self.line:
result += " in line:" + str(self.line)
return result
```
#### File: libg3n/model/libg3n_class.py
```python
import libg3n
from abc import ABC, abstractmethod
from .libg3n_property import Libg3nProperty
class Libg3nClass(ABC):
"""
    Abstract class describing the Libg3n class template. Implementing classes should implement the to_code function
    according to the language specification of the corresponding language module.
"""
# The class name
_name: str = ""
# The name of the metaclass which this class should implement
_meta_class: str = ""
# List of class properties
_properties = {}
@property
def name(self):
return self._name
@name.setter
def name(self, name: str):
self._name = name
@property
def meta_class(self):
return self._meta_class
@meta_class.setter
def meta_class(self, meta_class: str):
self._meta_class = meta_class
@property
def properties(self):
return self._properties
@property
@abstractmethod
def file_extension(self) -> str:
"""
        Abstract property. File extension used for source files generated from this class.
"""
pass
def add_property(self, property: Libg3nProperty):
"""
Adds the given property to the properties dict. The property name is used as a dict key.
"""
self._properties[property.name] = property
def get_property(self, name: str):
"""
Returns a property which matches the given property name.
"""
result = None
if name in self._properties:
result = self._properties[name]
return result
@abstractmethod
def to_code(self) -> str:
"""
Abstract function which turns the class metadata into an actual class specification.
"""
pass
def write(self, class_path: str = '', class_name: str = '', class_prefix: str = '', encoding: str = 'utf-8'):
"""
Writes the class to a file.
"""
# If there is no class name specified, use the Ident instead
if not class_name:
class_name = self._name
# Construct the resulting path
result_path = class_path + class_prefix + class_name + '.' + self.file_extension
libg3n.logger.debug('Writing class: ' + result_path)
# Generate the class code
code = self.to_code()
# Make sure the sourcecode is valid before it is written
if code:
# Write the sourcecode to a new file.
with open(result_path, 'w', encoding=encoding) as f:
f.write(code)
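    # Usage sketch (assumes a concrete subclass such as JavaClass from the java module and an
    # already configured Libg3nProperty instance `prop`; the file extension comes from the subclass):
    #   cls = JavaClass(); cls.name = 'Settings'; cls.add_property(prop)
    #   cls.write(class_path='out/', class_prefix='Gen')  # -> out/GenSettings.<file_extension>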
```
#### File: libg3n/model/libg3n_config_parser_g3n.py
```python
from abc import abstractmethod
from libg3n.model.libg3n_config_parser import Libg3nConfigParser
from libg3n.model.libg3n_function import FunctionType
from libg3n.model.libg3n_function import Libg3nFunction
from libg3n.model.libg3n_class import Libg3nClass
from libg3n.exception.ConfigSyntaxException import ConfigSyntaxException
from libg3n.exception.InvalidTokenException import InvalidTokenException
from libg3n.exception.FailedTokenizationException import FailedTokenizationexception
from libg3n.exception.EmptyFileException import EmptyFileException
class Libg3nConfigParserG3n(Libg3nConfigParser):
"""
Abstract class which inherits the Libg3nConfigParser class. This class is designed to parse libg3n configs with the
.gen syntax.
"""
# Literals
FUNCTION_KEYWORD = 'function'
CLASS_KEYWORD = 'class'
PROPERTY_KEYWORD = 'property'
KEYWORDS_LEVEL1 = [FUNCTION_KEYWORD, CLASS_KEYWORD]
KEYWORDS_LEVEL2 = [PROPERTY_KEYWORD]
# Symbols
SYMBOLS = [':']
NULLWORDS = [' ', '\n', '\t']
STOPWORDS = SYMBOLS + NULLWORDS
# String Delimiters
STRING_DELIMITERS = ['"', '\'']
# Join Keywords
KEYWORDS = KEYWORDS_LEVEL1 + KEYWORDS_LEVEL2
# Config token
_tokenized_config = None
# Config token array
_token_array = []
# Current processed line
_current_line = 0
# Parsed functions
_functions = {}
# Parsed classes
_classes = {}
def parse(self, path) -> dict:
"""
Parses the config file and produces a dictionary of Libg3n functions and classes.
"""
# Read the config file
self._load_file(path)
# If config was correctly loaded
if self._config:
# Tokenize the config contents
self._tokenized_config = self.tokenize(self._config)
# If tokenization was successful
if self._tokenized_config:
# Split Token into array
self._token_array = self.split_token_array(self._tokenized_config)
# If token array was created
if self._token_array:
# Iterate over all token
for token in self._token_array:
# Parse the token
self.parse_token(token)
return {self.FUNCTION_DICT_KEY: self._functions, self.CLASS_DICT_KEY: self._classes}
else:
raise FailedTokenizationexception()
else:
raise EmptyFileException(path)
def tokenize(self, content: str) -> list:
"""
Splits a string into a list of token.
"""
# Current lex token
lex = ''
# Result list
result = []
        # Determine the maximum index so we don't overstep the content bounds
max_length = len(content) - 2
# Determine if we currently lexing a String
in_string = False
# Iterate over all chars in the config
for i, char in enumerate(content):
if i != max_length and char not in self.STOPWORDS:
# Continue Lexing -----
# Append char in case it's not a nullword
if char not in self.NULLWORDS and char not in self.STRING_DELIMITERS:
lex += char
# String Handling -----
if char in self.STRING_DELIMITERS:
if in_string:
in_string = False
result.append(lex)
lex = ''
continue
else:
in_string = True
lex = ''
continue
if in_string:
continue
# Keyword Handling -----
# In case we found a valid keyword with the lex
if self._is_valid_keyword(lex, content[i + 1]):
result.append(lex)
lex = ''
else:
if lex:
result.append(lex)
lex = ''
if char in self.SYMBOLS:
result.append(char)
return result
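    # Tokenization sketch (hypothetical .gen snippet; 'constant' stands in for whichever keys the
    # concrete FUNCTION_TYPES mapping defines):
    #   tokenize('function myfunc : constant "42"\n')
    #   -> ['function', 'myfunc', ':', 'constant', '42']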
def _is_valid_keyword(self, string: str, successor: str) -> bool:
"""
Returns true in case the given String is a valid keyword.
"""
return (string in self.KEYWORDS and successor in self.STOPWORDS) or (string in self.SYMBOLS)
def split_token_array(self, token_array: list) -> list:
"""
Splits a token list by applying the function and class syntax.
"""
# Result list
result = []
# Current syntax part
part = []
# Determine maximum index so we don't overstep array bounds
length = len(token_array) - 1
# Iterate over all token
for i, token in enumerate(token_array):
# In case the token matches with a level 1 keyword
if token in self.KEYWORDS_LEVEL1:
# In case the part has an actual value
if part:
result.append(part)
part = []
# Append the token to the current part
part.append(token)
# In case we reach the end, save the last token to the current part
if i == length:
result.append(part)
return result
def parse_token(self, token: list):
"""
Parses a single token into a function / class.
"""
# Determine if the token is a (valid) function / class
if self.is_function_token(token) and self.validate_function_token(token):
result = self.process_function(token)
self._functions[result.ident] = result
elif self.is_class_token(token) and self.validate_class_token(token):
result = self.process_class(token)
self._classes[result.name] = result
else:
# TODO: Find error position
raise InvalidTokenException(token, self._path)
return result
def is_function_token(self, token) -> bool:
"""
Determines if the given token is a function token.
"""
result = False
if token[0] == self.FUNCTION_KEYWORD:
result = True
return result
def is_class_token(self, token) -> bool:
"""
Determines if the given token is a class token.
"""
result = False
if token[0] == self.CLASS_KEYWORD:
result = True
return result
def validate_function_token(self, token) -> bool:
"""
Determines if the given function token is valid.
"""
valid = True
if token[0] != self.FUNCTION_KEYWORD or \
token[2] not in self.SYMBOLS or \
token[3] not in self.FUNCTION_TYPES.keys():
valid = False
return valid
def validate_class_token(self, token) -> bool:
"""
Determines if the given class token is valid.
"""
valid = True
# TODO: Rework
if token[0] != self.CLASS_KEYWORD or \
(token[2] in self.SYMBOLS and token[4] != self.PROPERTY_KEYWORD):
valid = False
return valid
@abstractmethod
def process_function(self, function_element) -> Libg3nFunction:
"""
        Abstract function which should be implemented to map a config function element to a libg3n function.
"""
pass
@abstractmethod
def process_class(self, class_element) -> Libg3nClass:
"""
        Abstract function which should be implemented to map a config class element to a libg3n class.
"""
pass
```
#### File: libg3n/model/libg3n_config.py
```python
from abc import ABC, abstractmethod
from libg3n.model.libg3n_config_parser import Libg3nConfigParser
class Libg3nConfig(ABC):
"""
Represents a Libg3n config, providing the original config file path and the parsed functions and classes.
"""
# Path to the config file
_path: str
# Parsed functions
_functions = {}
# Parsed classes
_classes = {}
def __init__(self, path):
# Preserve Path
self._path = path
# Use the parser to parse the file and extract functions and classes from it
token_dict = self.parser.parse(path)
        # Save the parsed result, separated into functions and classes
self._functions = token_dict[self.parser.FUNCTION_DICT_KEY]
self._classes = token_dict[self.parser.CLASS_DICT_KEY]
@property
def functions(self):
"""
Returns all configured functions representing variation points.
"""
return self._functions
@property
def classes(self):
"""
Returns all configured classes.
"""
return self._classes
@property
@abstractmethod
def parser(self) -> Libg3nConfigParser:
"""
Abstract class defining the module / language specific config parser.
"""
pass
```
#### File: libg3n/model/libg3n_regex_parser.py
```python
import os.path
import re
import libg3n
from abc import ABC, abstractmethod
from strenum import StrEnum
from libg3n.exception.InvalidFileException import InvalidFileException
from libg3n.exception.InvalidParsingSyntaxException import InvalidParsingSyntaxException
class Libg3nRegexParser(ABC):
"""
Abstract Parser for parsing various programming languages ot extract functions and classes.
"""
class GroupNames(StrEnum):
"""
Enum defining the RegEx group names for the important bits.
"""
NAME = 'name'
IDENT = 'ident'
PARAMETER = 'pram'
MODIFICATOR = 'mod'
TYPE = 'type'
SIGNATURE = 'sig'
LINE_START = 'line_start'
LINE_END = 'line_end'
@property
def regex_spacer(self) -> str:
return r'\n?(\s*|\t*)'
@property
def regex_annotation_symbol(self) -> str:
return '@'
@property
def regex_annotation_name(self) -> str:
return 'Generate'
@property
def regex_annotation_param_name(self) -> str:
return 'ident'
@property
def regex_annotation_param(self) -> str:
return r'.+'
@property
def regex_annotation(self) -> str:
"""
Prebuilds the annotation regex with use of the predefined parts.
"""
return self.regex_annotation_symbol + self.regex_annotation_name + '\(' + self.regex_annotation_param_name + self.regex_spacer + '=' \
+ self.regex_spacer + '("|\')' \
+ self._add_regex_group(self.regex_annotation_param, self.GroupNames.IDENT) + '("|\')' \
+ self.regex_spacer + '\)'
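    # With the default parts above, this property yields a pattern that matches annotations such as
    #   @Generate(ident = "my_function")
    # capturing the quoted identifier in the 'ident' regex group.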
@property
@abstractmethod
def regex_modificator(self) -> str:
"""
Abstract property defining the RegEx string to match function access modificators.
"""
pass
@property
@abstractmethod
def regex_type(self) -> str:
"""
Abstract property defining the RegEx string to match function return types.
"""
pass
@property
@abstractmethod
def regex_sig(self) -> str:
"""
Abstract property defining the RegEx string to match function signatures.
"""
pass
@property
def regex_body(self) -> str:
"""
        Property defining the RegEx string to match function bodies. Should be overridden in case the specific
programming language uses another syntax.
"""
return r'{(.*\n)*}'
@property
@abstractmethod
def regex_string(self) -> str:
"""
Builds the complete regex string by using the class functions for single regex parts.
"""
pass
@staticmethod
def syntax_check(code: str) -> bool:
"""
Checks the syntax of the given code. This function defaults to True and needs to be implemented and adjusted to
the given programming language. It is used in the parsing function of this class, which means it should either
be properly implemented or should default to True, otherwise the parsing may fail.
"""
return True
@staticmethod
def _add_regex_group(regex: str, group_name: str):
"""
Adds the group syntax to any regex string. The group name is defined by the group_name parameter.
"""
return f'(?P<{group_name}>' + regex + ')'
@staticmethod
def glue_regex_token_list(token_list: list) -> str:
"""
Glues multiple token to a regex list chained with logic or's.
"""
result = ''
# Get maximum length
length = len(token_list) - 1
for i, token in enumerate(token_list):
result += token
if i < length:
result += '|'
return '({result})'.format(result=result)
def parse_file(self, file_path: str) -> dict:
"""
Parses a specific file and produces a dict with all annotation matches.
"""
libg3n.logger.debug('Regex Parsing file: ' + file_path)
# If file exists
if os.path.exists(file_path):
# Open and read file contents
with open(file_path) as f:
content = f.read()
return self.parse_code(content)
else:
raise InvalidFileException(file_path, "Source")
def parse_code(self, code: str) -> dict:
"""
Parses the given code and matches functions and classes.
"""
result = {}
if self.syntax_check(code):
for match in re.finditer(self.regex_string, code):
                libg3n.logger.debug('Found regex match at position: ' + str(match.start()))
# Get regex group_dict for the match
match_groups = match.groupdict()
# Add positional metadata to the match
match_groups[self.GroupNames.LINE_START] = match.start()
match_groups[self.GroupNames.LINE_END] = match.end()
# Save the result
result[match_groups[self.GroupNames.IDENT]] = match_groups
# Return the result
return result
else:
raise InvalidParsingSyntaxException()
```
#### File: modules/java/java_config_parser.py
```python
import libg3n
from libg3n.model.libg3n_class import Libg3nClass
from libg3n.model.libg3n_config_parser_g3n import Libg3nConfigParserG3n
from libg3n.model.libg3n_function import Libg3nFunction
from libg3n.modules.java.java_function import JavaFunction
from libg3n.modules.java.java_class import JavaClass
from libg3n.modules.java.java_property import JavaProperty
class JavaConfigParser(Libg3nConfigParserG3n):
PROPERTY_TYPE_CONSTANTS = {
'String': '',
'char': '',
'int': 0,
'short': 0,
'long': 0,
'byte': 0,
'float': 0.0,
'double': 0.0,
'boolean': True,
'void': None
}
def process_function(self, function_element) -> Libg3nFunction:
libg3n.logger.debug('Parse Java function from token: ' + str(function_element))
id = function_element[1]
type = function_element[3]
value = function_element[4]
function_type = self._parse_function_type(type)
return JavaFunction(id, function_type, value)
def process_class(self, class_element) -> Libg3nClass:
libg3n.logger.debug('Parse Java class from token: ' + str(class_element))
new_class = JavaClass()
new_class.name = class_element[1]
if class_element[2] in self.SYMBOLS:
new_class.meta_class = class_element[3]
for i, token in enumerate(class_element):
if token == self.PROPERTY_KEYWORD:
new_property = JavaProperty()
new_property.name = class_element[i + 1]
new_property.type = class_element[i + 3]
new_property.value = self.PROPERTY_TYPE_CONSTANTS[new_property.type]
new_class.add_property(new_property)
return new_class
```
#### File: modules/python/python_class.py
```python
import ast
from libg3n.model.libg3n_class import Libg3nClass
from libg3n.modules.python.python_codegen import to_source
class PythonClass(Libg3nClass):
@property
def file_extension(self) -> str:
return 'py'
def glue_properties(self):
result = []
for property in self._properties.values():
result.append(property.to_ast())
return result
def to_ast(self):
class_definition = ast.ClassDef(self._name, decorator_list=[], bases=[])
# Add Meta class
if self._meta_class:
class_definition.bases.append(ast.Name(self._meta_class, ctx=ast.Load()))
#TODO: meta_class Import
class_definition.body = self.glue_properties()
return ast.Module(body=[class_definition])
def to_code(self) -> str:
class_ast = self.to_ast()
return to_source(class_ast)
```
#### File: modules/python/python_function_visitor.py
```python
import ast
import libg3n
import libg3n.modules.python.python_decorators as python_decorators
class PythonFunctionVisitor(ast.NodeVisitor):
__loaded_functions: dict
__current_file : None
def load_functions(self, functions: dict):
self.__loaded_functions = functions
def set_current_file(self, file):
self.__current_file = file
def __get_function_by_ident(self, ident: str):
func = None
# Match ident with config function dict
if ident in self.__loaded_functions:
libg3n.logger.debug('Found matching config value for decorator id ' + ident)
# Get the right function
func = self.__loaded_functions[ident]
else:
libg3n.logger.debug('No matching function with ident ' + ident + ' found, skipping this decorator')
return func
def __refactor_function(self, ident, node) -> bool:
success = False
# Find and validate Function
func = self.__get_function_by_ident(ident)
if func:
function_body = func.generate_body()
# Perform the refactoring
# TODO: AST Validation
if function_body:
node.body = function_body
# Touch the file, so we know it was altered and needs to be recompiled
self.__current_file.touch()
success = True
return success
def visit_FunctionDef(self, node):
for decorator in node.decorator_list:
# Confirm decorator structure
if isinstance(decorator, ast.Call):
# Get decorator name
decorator_name: str = self._get_decorator_id(decorator)
# Check if decorator is used by libg3n
if decorator_name and self._is_libg3n_decorator(decorator_name):
libg3n.logger.debug(
'Found libg3n decorator in ' + self.__current_file.path + ' in line ' + str(node.lineno - 1))
# Get and validate the ident arg
if isinstance(decorator.args[0], ast.Constant):
ident = decorator.args[0].__getattribute__('value')
self.__refactor_function(ident, node)
@staticmethod
def _get_decorator_id(decorator: ast.Call):
decorator_id: str = ''
# Get decorator name
decorator_function: ast.Name = decorator.__getattribute__('func')
if isinstance(decorator_function, ast.Name):
decorator_id = decorator_function.__getattribute__('id')
assert decorator_id != '', 'Decorator id could not be extracted. Wrong structure?'
return decorator_id
# Check if decorator is used by libg3n
@staticmethod
def _is_libg3n_decorator(identifier: str):
found = False
if identifier == python_decorators.generate.__name__:
found = True
return found
``` |
{
"source": "jhkloss/libg3n_parsing_notebook",
"score": 3
} |
#### File: libg3n_parsing_notebook/parse_xml/parser.py
```python
from os.path import exists
import xml.etree.ElementTree as et
from parse_manual.parsed_function import ParsedFunction
from parse_manual.parsed_class import ParsedClass
from parse_manual.parsed_property import ParsedProperty
def parse(file: str) -> dict:
result = {}
element_tree = load_file(file)
result['functions'] = get_functions(element_tree)
result['classes'] = get_classes(element_tree)
return result
def load_file(path: str):
if exists(path):
return et.ElementTree(file=path)
def get_functions(element_tree: et.ElementTree):
# We use the python hashtable (dict) to quickly access the right functions later
function_dict = {}
functions = element_tree.findall('func')
    for function in functions:
        new_function = process_function(function)
        assert new_function.name not in function_dict, 'Encountered duplicate function id!'
        function_dict[new_function.name] = new_function
return function_dict
def get_classes(element_tree: et.ElementTree):
classes_dict = {}
classes = element_tree.findall('class')
for current_class in classes:
new_class = process_class(current_class)
classes_dict[new_class.name] = new_class
return classes_dict
def process_function(function_tree) -> ParsedFunction:
result = ParsedFunction()
result.name = function_tree.find('id').text
result.type = function_tree.find('type').text
result.value = function_tree.find('value').text
return result
def process_class(cls_tree) -> ParsedClass:
result = ParsedClass()
result.name = cls_tree.find('name').text
meta_class_tree = cls_tree.find('metaclass')
if meta_class_tree is not None:
result.meta_class = meta_class_tree.text
properties = cls_tree.findall('property')
for current_property in properties:
new_property = ParsedProperty()
new_property.name = current_property.find('name').text
new_property.value = current_property.find('type').text
result.properties.append(new_property)
return result
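# Expected input sketch (hypothetical XML content passed to parse('config.xml')):
#   <config>
#     <func><id>f1</id><type>constant</type><value>42</value></func>
#     <class>
#       <name>Point</name>
#       <property><name>x</name><type>int</type></property>
#     </class>
#   </config>
# parse() then returns {'functions': {'f1': ...}, 'classes': {'Point': ...}}.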
```
#### File: libg3n_parsing_notebook/timer/PerformanceTimer.py
```python
import time
class PerformanceTimer:
timers = {}
def __init__(self, name: str = "", iterations: int = 5):
self.running = False
self.start = None
self.name = name
self.elapsed = 0.0
self.measurements = []
self.iterations = iterations
PerformanceTimer.timers[self.name] = self
def measure_function(self, func, *kwargs):
for i in range(self.iterations):
self.start_timer()
func(*kwargs)
self.stop_timer()
self.measurements.append(self.elapsed)
self.reset()
def start_timer(self):
if self.running is False:
self.start = time.perf_counter()
self.running = True
else:
raise Exception('Timer already started.')
def stop_timer(self):
if self.running is True:
self.elapsed = time.perf_counter() - self.start
self.running = False
else:
raise Exception('Timer is not running.')
def reset(self):
self.start = None
self.elapsed = 0.0
self.running = False
def average_time(self):
return sum(self.measurements) / len(self.measurements)
def print(self):
print(('Timer: ' + self.name).center(50, '-'))
print('Running: ' + str(self.running))
if self.measurements:
print('Measured Times: ' + str(self.measurements))
print('Average: ' + str(self.average_time()))
else:
print('Elapsed Time: ' + str(self.elapsed))
print('\n')
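    # Usage sketch:
    #   timer = PerformanceTimer('sleep-test', iterations=3)
    #   timer.measure_function(time.sleep, 0.001)  # extra positional args are forwarded to func
    #   timer.print()                              # prints the measured times and their average
    #   PerformanceTimer.timers['sleep-test']      # every timer also registers itself here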
``` |
{
"source": "Jhko725/Contact-Point-Detection",
"score": 3
} |
#### File: Contact-Point-Detection/cp_detection/DataPreparation.py
```python
from .ApproachCurve import ApproachCurve
from .FileParse import Json2App
from torch.utils.data import Dataset
import matplotlib.pyplot as plt
import numpy as np
class AppCurveDataset(Dataset):
"""
A dataset class that holds the processed approach curve data. Inherits from torch.utils.data.Dataset.
    To change how an ApproachCurve instance is turned into a dataset element, modify the _PrepOne function.
...
Attributes
----------
appcurve_list : a list of ApproachCurve instances used to create the dataset
Methods
-------
__len__()
Returns the number of elements in the dataset.
    __getitem__(idx)
Returns the idx-th element of the dataset.
_PrepOne(appcurve)
A staticmethod that changes a given ApproachCurve instance into a dataset element.
PlotParamDist()
Returns the histograms of the approach curve parameters (f0, f/f0, Q, A0) in the dataset.
"""
def __init__(self, json_list, type_ = "both"):
# Create a list of z-increasingly sorted approach-only curves
self.appcurve_list = [Json2App(jsonfile)(type_).app() for jsonfile in json_list]
for app in self.appcurve_list:
app.SortData()
def __len__(self):
return len(self.appcurve_list)
def __getitem__(self, idx):
return None
@staticmethod
def _PrepOne(appcurve):
pass
def PlotParamDist(self, figsize = (16, 12), fontsize = 14):
fig, axes = plt.subplots(2, 2, figsize = figsize)
f_arr = np.array([app.f for app in self.appcurve_list])
f0_arr = np.array([app.f0 for app in self.appcurve_list])
Q_arr = np.array([app.Q for app in self.appcurve_list])
A0_arr = np.array([app.A0*1e9 for app in self.appcurve_list])
axes[0][0].hist(f0_arr)
axes[0][0].set_title('Histogram of resonance frequency: $f_0$ [Hz]', fontsize = fontsize)
axes[0][1].hist(f_arr/f0_arr)
        axes[0][1].set_title('Histogram of relative driving frequency: $f/f_0$', fontsize = fontsize)
axes[1][0].hist(Q_arr)
axes[1][0].set_title('Histogram of Q factor $Q$', fontsize = fontsize)
axes[1][1].hist(A0_arr)
axes[1][1].set_title('Histogram of free amplitude $A_0$ [nm]', fontsize = fontsize)
for ax in axes.flatten():
ax.grid(ls = '--')
return fig, axes
```
#### File: Contact-Point-Detection/cp_detection/ForceSimulation.py
```python
import numpy as np
from scipy.integrate import solve_ivp
import sys, abc
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from .InteractionForce import TipSampleInteraction
class EquationOfMotion(abc.ABC):
@abc.abstractmethod
def _get_eom(self, d):
return lambda t, x: None
@abc.abstractmethod
def _get_default_x0(self, d):
"""
Returns the default initial value for the ode problem.
Returns
-------
x0 : Numpy array with shape (m, 1)
Default initial value for the state vector. m corresponds to the state dimensionality.
"""
return None
@abc.abstractmethod
def _get_default_drive(self):
return lambda t: None
@abc.abstractproperty
def tau(self):
"""
A read-only property corresponding to the time constant of the given eom model.
This is used in cases where steady state dynamics are of the interest
"""
return None
def solve(self, d, t, x0 = None, **kwargs):
"""
Solves the ode and returns the solution.
Parameters
----------
d : float [nm]
Average tip-sample distance.
t : 1D numpy array
Time to evaluate the ode solutions. Must be sorted in increasing order.
x0 : Numpy array with shape (m, 1)
Default initial value for the state vector. m corresponds to the state dimensionality.
If none is given, x0 falls back to the result of self._get_x0().
kwargs : dict
Keyword arguments for scipy.integrate.solve_ivp.
"""
# If no explicit initial conditions are given, fall back to default initial conditions
if x0 == None:
x0 = self._get_default_x0(d)
sol = solve_ivp(self._get_eom(d), (t[0], t[-1]), x0, t_eval = t, vectorized = True, **kwargs)
return sol
class ForcedHarmonicOscillator(EquationOfMotion):
"""
A class to model the AFM QTF/cantilever - sample system as a forced harmonic oscillator subject to a sinusodial driving force and a given tip-sample force F_int.
Note that in this formulation, rescaled time t_rescaled = omega_0*t is used, and the quantity of interest is the instantaneous tip-sample distance z(t).
The exact functional form of the tip-sample force must be given during initialization.
All units used are rescaled so that 1nm = 1
...
Attributes
----------
Q : float [dimensionless]
Q-factor of the cantilever/QTF.
k : float [N/m]
Force constant of the cantilever/QTF
Om : float [dimensionless]
        Relative driving frequency of the oscillator - Om = f/f0, where f is the driving frequency and f0 is the resonance frequency
A0 : float [nm]
Oscillator amplitude at resonance frequency and without tip-sample force F_int applied to the system.
F_int : function
Tip-sample interaction force. Must accept z and dz/dt as input and return a single float as return value.
The returned force has dimension of [1e-9N].
T : float [dimensionless]
Rescaled relaxation time of the cantilever/QTF.
T = 2Q, where 2Q/omega_0 is the true relaxation time.
"""
def __init__(self, Q, k, Om, A0, force_model):
"""
Parameters
----------
Q : float [dimensionless]
Q-factor of the cantilever/QTF.
k : float [N/m]
Force constant of the cantilever/QTF
Om : float [dimensionless]
            Relative driving frequency of the oscillator - Om = f/f0, where f is the driving frequency and f0 is the resonance frequency
A0 : float [nm]
Oscillator amplitude at resonance frequency and without tip-sample force F_int applied to the system.
F_int : function
Tip-sample interaction force. Must accept z and dz/dt as input and return the corresponding tip-sample force.
The returned force has dimension of [1e-9N].
"""
self.Q = Q
self.k = k
self.Om = Om
self.A0 = A0
assert issubclass(type(force_model), TipSampleInteraction), "F_int must be a TipSampleInteraction!"
self.Fint = force_model
def _get_eom(self, d, F_drive = None):
"""
Returns the corresponding ode function of the model.
x is a state vector, where each column corresponds to the form x = [y, z]', where y = dz/dt.
t is the rescaled time of the form t_rescaled = t_true * omega_0.
Parameters
----------
t : float [dimensionless]
Rescaled time, given by t_rescaled = t_true * omega_0, where omega_0 is the angular resonance frequency.
x : Numpy array with shape (2, k)
State vector, where each column corresponds to the form x = [y, z]', where y = dz/dt.
k is the number of different x vectors in a single batch.
d : float [nm]
Average tip-sample distance.
Returns
-------
dxdt : Numpy array with shape (2, k)
State vector, where each column corresponds to the form dxdt = [dydt, dzdt]'
"""
# Assignments for better readability
Q = self.Q
k = self.k
# Coefficient matrices
C1 = np.array([[-1/Q, -1], [1, 0]], dtype = float)
C2 = np.array([1, 0], dtype = float).reshape(2, 1)
# Check forces, assign default to F_drive if passed None
        if F_drive is None:
F_drive = self._get_default_drive()
def eom(t, x):
Fd = F_drive(t)
# Force Fts to be two-dimensional, with the second dimension being the batch size
Fts = self.Fint(x).reshape(1, -1)
dxdt = np.matmul(C1, x) + np.matmul(C2, (d + Fd/k + Fts/k))
return dxdt
return eom
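        # In rescaled time t' = omega_0 * t, the system integrated above is equivalent to
        #   z'' + z'/Q + z = d + (F_drive(t') + F_ts(z, z'))/k
        # with state x = [z', z], which is exactly what the coefficient matrices C1 and C2 encode.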
def _get_default_x0(self, d):
return np.array([0., d])
def _get_default_drive(self):
return lambda t: (self.k*self.A0/self.Q)*np.cos(t)
@property
def tau(self):
return 2*self.Q
# Create function for plotting normalized tip-sample force
class BimodalFHO(EquationOfMotion):
def __init__(self, Q0, Q1, k1, k2, Om, A00, A01, force_model):
self.Q0 = Q0
self.Q1 = Q1
self.k1 = k1
self.k2 = k2
self.Om = Om
self.A00 = A00
self.A01 = A01
self.Fint = force_model
def _get_eom(self, d):
# Look into the equation. Is Q1 correct?
C1 = np.array([[-1/self.Q1, -1, 0, 0], [1, 0, 0, 0], [0, 0, -self.Om/self.Q1, -self.Om**2], [0, 0, 1, 0]], dtype = float)
C2 = np.array([[1], [0], [0], [0]], dtype = float)
C3 = np.array([[0], [0], [1], [0]], dtype = float)
def eom(t, x):
d_state = np.zeros(x[0:2, :].shape)
d_state[1, :] = d_state[1, :] + d
z_state = x[0:2, :] + x[2:, :] + d_state
F = self.Fint(z_state)
dxdt = np.matmul(C1, x) + np.matmul(C2, ((self.A00/self.Q0)*np.cos(t) + (self.A01/self.Q1)*np.cos(self.Om*t) + F/self.k1)) + np.matmul(C3, ((self.A00/self.Q0)*np.cos(t) + (self.A01/self.Q1)*np.cos(self.Om*t) + F/self.k2))
return dxdt
return eom
def _get_default_x0(self, d = None):
return np.array([0., self.A00, 0., self.A01])
@property
def tau(self):
        return 2*max(self.Q0, self.Q1)
def SimulateGeneralMode(AFM, d_array, dt, N_data, relaxation = 7, x0 = None, **kwargs):
"""
Creates the general mode AFM approach curve according to the given AFM model.
For each average tip-sample distance d in d_array, the steady state trajectory of the tip is calculated.
Parameters
----------
AFM : an instance of a class modeling the AFM
The AFM model to be used in simulating the tip dynamics.
d_array : 1D numpy array
An array of average tip-sample distances for the approach curve.
dt : float
Time increment for the cantilever trajectory z(t).
N_data : int
Number of steady state trajectory data to be generated per average tip-sample distance d.
relaxation : int
How many multiples of the time constant to be discarded prior to sampling the steady state dynamics.
kwargs : dict
Keyword arguments for scipy.integrate.solve_ivp.
Returns
-------
t : numpy 1D array
Time array used to solve the ode
x_array : numpy 3D array with dimensions (len(d_array), 2, N_data)
Simulated general mode approach curve data.
The first dimension corrresponds to a given average tip-sample distance d.
The second dimension corresponds to the dimension of the state vector x in the form (z_dot, z)
The last dimension is the time series data dimension
"""
# Number of data points needed for relaxation
    N_relax = int(np.ceil(AFM.tau*relaxation/dt))
t = np.arange(N_relax+N_data)*dt
d_array = np.array(d_array)
x_array = np.zeros((d_array.size, 2, N_data))
sys.stdout.write('Data generation started\n')
for i in range(d_array.size):
sol = AFM.solve(d_array[i], t, x0 = x0, **kwargs)
x_array[i, :, :] = sol.y[:, -N_data:]
sys.stdout.write('\r')
sys.stdout.write('{:d}/{:d} generated'.format(i+1, d_array.size))
sys.stdout.flush()
return t, x_array
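# Minimal end-to-end sketch with assumed, small parameter values (run as
# `python -m cp_detection.ForceSimulation` so the relative import above resolves):
if __name__ == "__main__":
    from cp_detection.InteractionForce import NullForce
    afm = ForcedHarmonicOscillator(Q=10., k=40., Om=1.0, A0=1.0, force_model=NullForce())
    t, x = SimulateGeneralMode(afm, d_array=[5.0, 3.0, 2.0], dt=0.1, N_data=500)
    print(t.shape, x.shape)  # x has shape (len(d_array), 2, N_data) = (3, 2, 500)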
```
#### File: Contact-Point-Detection/cp_detection/InteractionForce.py
```python
import numpy as np
import matplotlib.pyplot as plt
import abc
from numba import vectorize, float64
import functools
class TipSampleInteraction(abc.ABC):
def __init__(self):
self._F = self._get_F()
def __neg__(self):
return NegateForce(self)
def __sum__(self, other):
return SumForce([self, other])
@abc.abstractmethod
def _get_F(self):
return lambda x, y: None
def __call__(self, x):
return self._F(x[1,:], x[0,:])
def PlotForce(self, z_range, zdot_range, n_steps = 1000, figsize = (7, 5), fontsize = 14, **kwargs):
"""
Plots the tip-sample interaction force as a function of either z, dz/dt, or both.
"""
assert len(z_range) == 2 and len(zdot_range) == 2, 'z_range and zdot_range must be of the form (start, stop)'
z = np.linspace(*z_range, n_steps)
zdot = np.linspace(*zdot_range, n_steps)
x = np.vstack([zdot, z])
f = self(x).flatten()
if z_range[0] == z_range[1]:
fig, ax = plt.subplots(1, 1, figsize = figsize)
ax.plot(zdot, f, **kwargs)
ax.set_xlabel(r'Scaled tip velocity $\omega_0\dot{z} (nm/s)$', fontsize = fontsize)
ax.set_ylabel('Tip-sample interaction force $F_{int}$(nN)', fontsize = fontsize)
elif zdot_range[0] == zdot_range[1]:
fig, ax = plt.subplots(1, 1, figsize = figsize)
ax.plot(z, f, **kwargs)
ax.set_xlabel('Tip displacement z (nm)', fontsize = fontsize)
ax.set_ylabel('Tip-sample interaction force $F_{int}$(nN)', fontsize = fontsize)
else:
fig = plt.figure()
ax = fig.add_subplot(111, projection = '3d')
ax.scatter(z, zdot, f, **kwargs)
ax.set_xlabel('Tip displacement z(nm)', fontsize = fontsize)
ax.set_ylabel(r'Scaled tip velocity $\omega_0\dot{z} (nm/s)$', fontsize = fontsize)
ax.set_zlabel('Tip-sample interaction force $F_{int}$(nN)', fontsize = fontsize)
ax.grid(ls = '--')
return fig, ax
class NullForce(TipSampleInteraction):
def __init__(self):
self._F = self._get_F()
def _get_F(self):
@vectorize([float64(float64, float64)])
def _F(z = None, zdot = None):
return 0
return _F
class ConstantForce(TipSampleInteraction):
def __init__(self, F0):
self.F0 = F0
self._F = self._get_F()
def _get_F(self):
# Capture the attribute as a local so the numba-vectorized closure does not
# reference self inside nopython code.
F0 = self.F0
@vectorize([float64(float64, float64)])
def _F(z = None, zdot = None):
return F0
return _F
class NegateForce(TipSampleInteraction):
def __init__(self, force):
assert issubclass(type(force), TipSampleInteraction), "Input force must be a TipSampleInteraction!"
self.original_force = force
self._F = self._get_F()
def _get_F(self):
return lambda z, zdot: -self.original_force._F(z, zdot)
class SumForce(TipSampleInteraction):
def __init__(self, force_list):
for force in force_list:
assert issubclass(type(force), TipSampleInteraction), "Input force must be a TipSampleInteraction!"
self.force_list = force_list
self._F = self._get_F()
def _get_F(self):
def _F(z, zdot):
F_list = [force._F(z, zdot) for force in self.force_list]
return sum(F_list)
return _F
class DMT_Maugis(TipSampleInteraction):
"""
Models the tip-sample interaction according to Maugis' approximation to the Derjaguin-Muller-Toporov (a.k.a. Hertz-plus-offset model).
...
Attributes
----------
H : float [1e-18 J]
Hamaker constant of the tip-sample Van-der-Waals interaction.
R : float [nm]
Radius of the tip, which is assumed to be spherical.
z0 : float [nm]
Distance at which contact is established.
E : float [GPa]
Effective Young's modulus between the tip and the sample.
"""
def __init__(self, H, R, z0, Et, Es, vt, vs):
"""
Parameters
----------
H : float [1e-18 J]
Hamaker constant of the tip-sample Van-der-Waals interaction.
R : float [nm]
Radius of the tip, which is assumed to be spherical.
z0 : float [nm]
Distance at which contact is established.
Et : float [GPa]
Young's modulus of the tip.
Es : float [GPa]
Young's modulus of the sample.
vt : float [dimensionless]
Poisson ratio of the tip.
vs : float [dimensionless]
Poisson ratio of the sample.
"""
self.H = H
self.R = R
self.z0 = z0
self.E = 1/((1-vt**2)/Et + (1-vs**2)/Es)
self._F = self._get_F()
def _get_F(self):
z0 = self.z0
H = self.H
R = self.R
E = self.E
@vectorize([float64(float64, float64)])
def _F(z, zdot = None):
if z > z0:
return -H*R/(6*z**2)
else:
return (4/3)*E*np.sqrt(R)*(z0 - z)**1.5 - H*R/(6*z0**2)
return _F
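# Illustrative check (added commentary; the parameter values below are assumed,
# not taken from the original work): plot the force-distance curve at zero tip
# velocity using the PlotForce helper of the base class.
#
#   dmt = DMT_Maugis(H=0.2, R=10.0, z0=0.3, Et=130.0, Es=1.0, vt=0.3, vs=0.5)
#   fig, ax = dmt.PlotForce(z_range=(0.1, 5.0), zdot_range=(0.0, 0.0))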
class Capillary(TipSampleInteraction):
"""
Models the capillary force due to the formation of a water nano-meniscus between the tip and the sample.
The derivations are found in <NAME>, <NAME>, and <NAME>, Phys. Rev. B, 66, 155436 (2002).
"""
def __init__(self, H, R, z0, Et, Es, vt, vs, h, gamma_lv, app):
"""
Parameters
----------
H : float [1e-18 J]
Hamaker constant of the tip-sample Van-der-Waals interaction.
R : float [nm]
Radius of the tip, which is assumed to be spherical.
z0 : float [nm]
Distance at which contact is established.
Et : float [GPa]
Young's modulus of the tip.
Es : float [GPa]
Young's modulus of the sample.
vt : float [dimensionless]
Poisson ratio of the tip.
vs : float [dimensionless]
Poisson ratio of the sample.
h : float [nm]
Thickness of the hydration layer. Note that for the model to hold, h > z0 should be satisfied.
gamma_lv : float [J/m^2]
Surface tension (or liquid-vapor surface energy) of the liquid forming the capillary bridge.
app : bool
True if the tip is approaching the surface, and False if retracting.
"""
self.H = H
self.R = R
self.z0 = z0
self.h = h
self.gamma_lv = gamma_lv
self.app = app
self.E = 1/((1-vt**2)/Et + (1-vs**2)/Es)
# Build the vectorized force function, as the other concrete
# TipSampleInteraction subclasses do; __call__ relies on self._F being set.
self._F = self._get_F()
def _z_off(self):
gamma_sv = self.H/(24*np.pi*self.z0**2)
r = (3*np.pi*gamma_sv*self.R**2/self.E)**(1/3)
V = 4*np.pi*self.R*self.h + (4/3)*np.pi*self.h**3 + 2*np.pi*r**2*self.h
z_off = V**(1/3) - V**(2/3)/(5*self.R)
return z_off
def _get_F(self):
R = self.R
h = self.h
gamma_lv = self.gamma_lv
app = self.app
z_on = 2*self.h
z_off = self._z_off()
@vectorize([float64(float64, float64)])
def _F(z, zdot = None):
if app:
return -4*np.pi*gamma_lv*R/(1 + z/h) if z<z_on else 0
else:
return -4*np.pi*gamma_lv*R/(1 + z/h) if z<z_off else 0
return _F
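# --- Illustrative composition sketch (added commentary; values are assumed) ---
# With __add__ defined on TipSampleInteraction, individual force models can be
# combined into a single SumForce and used wherever a force model is expected:
#
#   contact = DMT_Maugis(H=0.2, R=10.0, z0=0.3, Et=130.0, Es=1.0, vt=0.3, vs=0.5)
#   meniscus = Capillary(H=0.2, R=10.0, z0=0.3, Et=130.0, Es=1.0, vt=0.3, vs=0.5,
#                        h=0.5, gamma_lv=0.072, app=True)
#   total = contact + meniscus
#   F = total(np.vstack([zdot_samples, z_samples]))   # F_int for a batch of states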
``` |
{
"source": "Jhko725/ProteinStructureReconstruction.jl",
"score": 3
} |
#### File: Jhko725/ProteinStructureReconstruction.jl/visualizeSIM.py
```python
from typing import Optional
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.axes import Axes
from matplotlib.patches import Rectangle
from superresolution import SIM_3D_Data
def make_axis_if_none(ax: Optional[Axes]) -> Axes:
if ax is None:
_, ax = plt.subplots(1, 1, figsize = (10, 10))
return ax
def plot_overlay(SIM_image: SIM_3D_Data, plt_axis: Optional[Axes] = None, projection_dim: int = 0, **imshow_kwargs) -> Axes:
ax = make_axis_if_none(plt_axis)
overlay = np.mean(SIM_image.data, axis = projection_dim)
ax.imshow(overlay, **imshow_kwargs)
ax.set_xlabel('x (pixels)')
ax.set_ylabel('y (pixels)')
fig = ax.get_figure()
fig.tight_layout()
return ax
def plot_selection_box(x_range, y_range, ax, **rectangle_kwargs):
origin = (x_range[0], y_range[0])
widths = (x_range[1]-x_range[0], y_range[1]-y_range[0])
rect = Rectangle(origin, *widths, **rectangle_kwargs)
ax.add_patch(rect)
return ax
# TODO: add support for plotting slices along x and y axes as well.
# Will need to use transpose to swap that dimension with zero and proceed with the rest of the logic
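# A minimal sketch of that idea (added commentary; an assumed approach, not part
# of the original code): move the requested axis to the front so the existing
# indexing keeps working, e.g.
#   data = np.moveaxis(SIM_image.data, slice_dim, 0)
#   ... then index slices as data[i] regardless of which axis was requested.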
def plot_slices(SIM_image: SIM_3D_Data, ncols: int, nrows: int, slice_dim: int = 0, **imshow_kwargs):
fig, axes = plt.subplots(nrows, ncols, figsize = (10, 10))
num_slices = SIM_image.shape[slice_dim]
plot_inds = np.linspace(0, num_slices, ncols*nrows, endpoint = False)
plot_inds = np.int_(np.floor(plot_inds))
for i, ax in zip(plot_inds, axes.flat):
ax.imshow(SIM_image.data[i], **imshow_kwargs)
ax.set_title(f'Slice #{i}/{num_slices}')
return fig, ax
``` |
{
"source": "jhkuang11/UniTrade",
"score": 2
} |
#### File: databases/extensions/__init__.py
```python
import simplejson as json
from functools import wraps
import pgadmin.browser.server_groups.servers.databases as databases
from flask import render_template, request, jsonify
from flask_babel import gettext
from pgadmin.browser.collection import CollectionNodeModule
from pgadmin.browser.utils import PGChildNodeView
from pgadmin.utils.ajax import make_json_response, \
make_response as ajax_response, internal_server_error, gone
from pgadmin.utils.driver import get_driver
from config import PG_DEFAULT_DRIVER
# The 'unicode' type is not available in Python 3, so a check such as
# isinstance(variable, unicode) would break there, while on Python 2 string
# data may be of type 'unicode' rather than 'str'.
# We therefore alias str/unicode/bytes/basestring below so that the same
# isinstance checks work on both Python 2 and Python 3.
try:
unicode = unicode
except NameError:
# 'unicode' is undefined, must be Python 3
str = str
unicode = str
bytes = bytes
basestring = (str, bytes)
else:
# 'unicode' exists, must be Python 2
str = str
unicode = unicode
bytes = str
basestring = basestring
class ExtensionModule(CollectionNodeModule):
"""
class ExtensionModule(Object):
A collection Node which inherits CollectionNodeModule
class and define methods to get child nodes, to load its own
javascript file.
"""
NODE_TYPE = "extension"
COLLECTION_LABEL = gettext("Extensions")
def __init__(self, *args, **kwargs):
"""
Initialising the base class
"""
super(ExtensionModule, self).__init__(*args, **kwargs)
self.min_gpdbver = 1000000000
def get_nodes(self, gid, sid, did):
"""
Generate the collection node
"""
yield self.generate_browser_collection_node(did)
@property
def node_inode(self):
"""
If a node have child return True otherwise False
"""
return False
@property
def script_load(self):
"""
Load the module script for extension, when any of the database nodes are
initialized.
"""
return databases.DatabaseModule.NODE_TYPE
@property
def module_use_template_javascript(self):
"""
Returns whether Jinja2 template is used for generating the javascript
module.
"""
return False
# Create blueprint of extension module
blueprint = ExtensionModule(__name__)
class ExtensionView(PGChildNodeView):
"""
This is a class for extension nodes which inherits the
properties and methods from the NodeView class and defines
various methods to list, create, update and delete extensions.
Variables:
---------
* node_type - tells which type of node it is
* parent_ids - id with its type and name of parent nodes
* ids - id with type and name of extension module being used.
* operations - function routes mappings defined.
"""
node_type = blueprint.node_type
parent_ids = [
{'type': 'int', 'id': 'gid'},
{'type': 'int', 'id': 'sid'},
{'type': 'int', 'id': 'did'}
]
ids = [
{'type': 'int', 'id': 'eid'}
]
operations = dict({
'obj': [
{'get': 'properties', 'delete': 'delete', 'put': 'update'},
{'get': 'list', 'post': 'create'}
],
'delete': [{'delete': 'delete'}],
'nodes': [{'get': 'node'}, {'get': 'nodes'}],
'sql': [{'get': 'sql'}],
'msql': [{'get': 'msql'}, {'get': 'msql'}],
'stats': [{'get': 'statistics'}],
'dependency': [{'get': 'dependencies'}],
'dependent': [{'get': 'dependents'}],
'avails': [{}, {'get': 'avails'}],
'schemas': [{}, {'get': 'schemas'}],
'children': [{'get': 'children'}]
})
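# Illustrative reading of the mapping above (added commentary): for the 'obj'
# endpoint the first dict applies when an extension id (eid) is supplied, so a
# GET dispatches to properties(); the second dict applies without an id, so a
# GET dispatches to list() and a POST to create().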
def check_precondition(f):
"""
This function behaves as a decorator which checks the database
connection before running the view. It also attaches the manager,
conn & template_path properties to self
"""
@wraps(f)
def wrap(*args, **kwargs):
# Here args[0] will hold self & kwargs will hold gid,sid,did
self = args[0]
self.manager = get_driver(
PG_DEFAULT_DRIVER
).connection_manager(kwargs['sid'])
self.conn = self.manager.connection(did=kwargs['did'])
self.template_path = 'extensions/sql'
return f(*args, **kwargs)
return wrap
@check_precondition
def list(self, gid, sid, did):
"""
Fetches all extensions properties and render into properties tab
"""
SQL = render_template("/".join([self.template_path, 'properties.sql']))
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
return ajax_response(
response=res['rows'],
status=200
)
@check_precondition
def nodes(self, gid, sid, did):
"""
Lists all extensions under the Extensions Collection node
"""
res = []
SQL = render_template("/".join([self.template_path, 'properties.sql']))
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
for row in rset['rows']:
res.append(
self.blueprint.generate_browser_node(
row['eid'],
did,
row['name'],
'icon-extension'
))
return make_json_response(
data=res,
status=200
)
@check_precondition
def node(self, gid, sid, did, eid):
"""
This function will fetch the properties of extension
"""
SQL = render_template("/".join([self.template_path, 'properties.sql']),
eid=eid)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
for row in rset['rows']:
return make_json_response(
data=self.blueprint.generate_browser_node(
row['eid'],
did,
row['name'],
'icon-extension'
),
status=200
)
return gone(gettext("Could not find the specified extension."))
@check_precondition
def properties(self, gid, sid, did, eid):
"""
Fetch the properties of a single extension and render in properties tab
"""
SQL = render_template("/".join(
[self.template_path, 'properties.sql']), eid=eid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
gettext("Could not find the extension information.")
)
return ajax_response(
response=res['rows'][0],
status=200
)
@check_precondition
def create(self, gid, sid, did):
"""
Create a new extension object
"""
required_args = [
'name'
]
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
for arg in required_args:
if arg not in data:
return make_json_response(
status=410,
success=0,
errormsg=gettext(
"Could not find the required parameter (%s)." % arg
)
)
status, res = self.conn.execute_dict(
render_template(
"/".join([self.template_path, 'create.sql']),
data=data
)
)
if not status:
return internal_server_error(errormsg=res)
status, rset = self.conn.execute_dict(
render_template(
"/".join([self.template_path, 'properties.sql']),
ename=data['name']
)
)
if not status:
return internal_server_error(errormsg=rset)
for row in rset['rows']:
return jsonify(
node=self.blueprint.generate_browser_node(
row['eid'],
did,
row['name'],
'icon-extension'
)
)
@check_precondition
def update(self, gid, sid, did, eid):
"""
This function will update an extension object
"""
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
try:
SQL, name = self.getSQL(gid, sid, data, did, eid)
# Most probably this is due to an error
if not isinstance(SQL, (str, unicode)):
return SQL
SQL = SQL.strip('\n').strip(' ')
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
return jsonify(
node=self.blueprint.generate_browser_node(
eid,
did,
name,
icon="icon-%s" % self.node_type
)
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def delete(self, gid, sid, did, eid):
"""
This function will drop/drop cascade an extension object
"""
cascade = True if self.cmd == 'delete' else False
try:
# check if extension with eid exists
SQL = render_template("/".join(
[self.template_path, 'delete.sql']), eid=eid)
status, name = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=name)
if name is None:
return make_json_response(
status=410,
success=0,
errormsg=gettext(
'Error: Object not found.'
),
info=gettext(
'The specified extension could not be found.\n'
)
)
# drop extension
SQL = render_template("/".join(
[self.template_path, 'delete.sql']
), name=name, cascade=cascade)
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
return make_json_response(
success=1,
info=gettext("Extension dropped"),
data={
'id': did,
'sid': sid,
'gid': gid,
}
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def msql(self, gid, sid, did, eid=None):
"""
This function returns modified SQL
"""
data = request.args.copy()
try:
SQL, name = self.getSQL(gid, sid, data, did, eid)
# Most probably this is due to an error
if not isinstance(SQL, (str, unicode)):
return SQL
SQL = SQL.strip('\n').strip(' ')
if SQL == '':
SQL = "--modified SQL"
return make_json_response(
data=SQL,
status=200
)
except Exception as e:
return internal_server_error(errormsg=str(e))
def getSQL(self, gid, sid, data, did, eid=None):
"""
This function will generate sql from model data
"""
required_args = [
'name'
]
if eid is not None:
SQL = render_template("/".join(
[self.template_path, 'properties.sql']
), eid=eid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
gettext("Could not find the extension information.")
)
old_data = res['rows'][0]
for arg in required_args:
if arg not in data:
data[arg] = old_data[arg]
SQL = render_template("/".join(
[self.template_path, 'update.sql']
), data=data, o_data=old_data)
return SQL, data['name'] if 'name' in data else old_data['name']
else:
SQL = render_template("/".join(
[self.template_path, 'create.sql']
), data=data)
return SQL, data['name']
@check_precondition
def avails(self, gid, sid, did):
"""
This function will fetch all the available extensions
"""
SQL = render_template("/".join([self.template_path, 'extensions.sql']))
status, rset = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=rset)
return make_json_response(
data=rset['rows'],
status=200
)
@check_precondition
def schemas(self, gid, sid, did):
"""
This function will fetch all the schemas
"""
SQL = render_template("/".join([self.template_path, 'schemas.sql']))
status, rset = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=rset)
return make_json_response(
data=rset['rows'],
status=200
)
@check_precondition
def sql(self, gid, sid, did, eid):
"""
This function will generate sql for the sql panel
"""
SQL = render_template("/".join(
[self.template_path, 'properties.sql']
), eid=eid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
_("Could not find the extension on the server.")
)
result = res['rows'][0]
SQL = render_template("/".join(
[self.template_path, 'create.sql']
),
data=result,
conn=self.conn,
display_comments=True
)
return ajax_response(response=SQL)
@check_precondition
def dependents(self, gid, sid, did, eid):
"""
This function gets the dependents and returns an ajax response
for the extension node.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
eid: Extension ID
"""
dependents_result = self.get_dependents(self.conn, eid)
return ajax_response(
response=dependents_result,
status=200
)
@check_precondition
def dependencies(self, gid, sid, did, eid):
"""
This function gets the dependencies and returns an ajax response
for the extension node.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
eid: Extension ID
"""
dependencies_result = self.get_dependencies(self.conn, eid)
return ajax_response(
response=dependencies_result,
status=200
)
# Register and add ExtensionView as blueprint
ExtensionView.register_node_view(blueprint)
```
#### File: index_constraint/tests/test_index_constraint_get.py
```python
import uuid
from pgadmin.browser.server_groups.servers.databases.schemas.tables.tests \
import utils as tables_utils
from pgadmin.browser.server_groups.servers.databases.schemas.tests import \
utils as schema_utils
from pgadmin.browser.server_groups.servers.databases.tests import utils as \
database_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
from . import utils as index_constraint_utils
class IndexConstraintGetTestCase(BaseTestGenerator):
"""This class will fetch the index constraint(primary key or unique key) of
table column"""
primary_key_name = "test_primarykey_delete_%s" % \
(str(uuid.uuid4())[1:6])
unique_key_name = "test_uniquekey_delete_%s" % \
(str(uuid.uuid4())[1:6])
scenarios = [
('Fetch primary Key constraint of table',
dict(url='/browser/primary_key/obj/', name=primary_key_name,
type="PRIMARY KEY")),
('Fetch unique Key constraint of table',
dict(url='/browser/unique_constraint/obj/', name=unique_key_name,
type="UNIQUE"))
]
@classmethod
def setUpClass(cls):
cls.db_name = parent_node_dict["database"][-1]["db_name"]
schema_info = parent_node_dict["schema"][-1]
cls.server_id = schema_info["server_id"]
cls.db_id = schema_info["db_id"]
db_con = database_utils.connect_database(cls, utils.SERVER_GROUP,
cls.server_id, cls.db_id)
if not db_con['data']["connected"]:
raise Exception("Could not connect to database to add a "
"index constraint(primary key or unique key).")
cls.schema_id = schema_info["schema_id"]
cls.schema_name = schema_info["schema_name"]
schema_response = schema_utils.verify_schemas(cls.server,
cls.db_name,
cls.schema_name)
if not schema_response:
raise Exception("Could not find the schema to add a index "
"constraint(primary key or unique key).")
cls.table_name = "table_indexconstraint_%s" % \
(str(uuid.uuid4())[1:6])
cls.table_id = tables_utils.create_table(cls.server,
cls.db_name,
cls.schema_name,
cls.table_name)
def runTest(self):
"""This function will fetch the index constraint(primary key or
unique key) of table column."""
index_constraint_id = \
index_constraint_utils.create_index_constraint(
self.server, self.db_name, self.schema_name, self.table_name,
self.name, self.type)
response = self.tester.get(
"{0}{1}/{2}/{3}/{4}/{5}/{6}".format(self.url, utils.SERVER_GROUP,
self.server_id, self.db_id,
self.schema_id,
self.table_id,
index_constraint_id),
follow_redirects=True
)
self.assertEquals(response.status_code, 200)
@classmethod
def tearDownClass(cls):
# Disconnect the database
database_utils.disconnect_database(cls, cls.server_id, cls.db_id)
```
#### File: tables/triggers/__init__.py
```python
import simplejson as json
from functools import wraps
import pgadmin.browser.server_groups.servers.databases as database
from flask import render_template, request, jsonify
from flask_babel import gettext
from pgadmin.browser.collection import CollectionNodeModule
from pgadmin.browser.utils import PGChildNodeView
from pgadmin.utils.ajax import make_json_response, internal_server_error, \
make_response as ajax_response, gone
from pgadmin.utils.driver import get_driver
from config import PG_DEFAULT_DRIVER
from pgadmin.utils import IS_PY2
# If we are in Python3
if not IS_PY2:
unicode = str
class TriggerModule(CollectionNodeModule):
"""
class TriggerModule(CollectionNodeModule)
A module class for Trigger node derived from CollectionNodeModule.
Methods:
-------
* __init__(*args, **kwargs)
- Method is used to initialize the Trigger and its base module.
* get_nodes(gid, sid, did, scid, tid)
- Method is used to generate the browser collection node.
* node_inode()
- Method is overridden from its base class to make the node a leaf node.
* script_load()
- Load the module script for trigger, when any of the server node is
initialized.
"""
NODE_TYPE = 'trigger'
COLLECTION_LABEL = gettext("Triggers")
def __init__(self, *args, **kwargs):
"""
Method is used to initialize the TriggerModule and its base module.
Args:
*args:
**kwargs:
"""
self.min_ver = None
self.max_ver = None
self.min_gpdbver = 1000000000
super(TriggerModule, self).__init__(*args, **kwargs)
def BackendSupported(self, manager, **kwargs):
"""
Load this module if vid is a view; we will not load it under a
materialized view
"""
if manager.server_type == 'gpdb':
return False
if super(TriggerModule, self).BackendSupported(manager, **kwargs):
conn = manager.connection(did=kwargs['did'])
if 'vid' not in kwargs:
return True
template_path = 'trigger/sql/#{0}#'.format(manager.version)
SQL = render_template("/".join(
[template_path, 'backend_support.sql']), vid=kwargs['vid']
)
status, res = conn.execute_scalar(SQL)
# check if any errors
if not status:
return internal_server_error(errormsg=res)
# Check whether vid is a view and not a materialized view:
# then true, otherwise false
return res
def get_nodes(self, gid, sid, did, scid, **kwargs):
"""
Generate the collection node
"""
assert ('tid' in kwargs or 'vid' in kwargs)
yield self.generate_browser_collection_node(
kwargs['tid'] if 'tid' in kwargs else kwargs['vid']
)
@property
def script_load(self):
"""
Load the module script for server, when any of the server-group node is
initialized.
"""
return database.DatabaseModule.NODE_TYPE
@property
def node_inode(self):
"""
Load the module node as a leaf node
"""
return False
@property
def module_use_template_javascript(self):
"""
Returns whether Jinja2 template is used for generating the javascript
module.
"""
return False
@property
def csssnippets(self):
"""
Returns a snippet of css to include in the page
"""
snippets = [
render_template(
"trigger/css/trigger.css",
node_type=self.node_type
)
]
for submodule in self.submodules:
snippets.extend(submodule.csssnippets)
return snippets
blueprint = TriggerModule(__name__)
class TriggerView(PGChildNodeView):
"""
This class is responsible for generating routes for Trigger node
Methods:
-------
* __init__(**kwargs)
- Method is used to initialize the TriggerView and it's base view.
* module_js()
- This property defines (if javascript) exists for this node.
Override this property for your own logic
* check_precondition()
- This function will behave as a decorator which will checks
database connection before running view, it will also attaches
manager,conn & template_path properties to self
* list()
- This function is used to list all the Trigger nodes within that
collection.
* nodes()
- This function will be used to create all the child nodes within that
collection. Here it will create all the Trigger nodes.
* node()
- This function will be used to create a child node within that
collection. Here it will create the specific Trigger node.
* properties(gid, sid, did, scid, tid, trid)
- This function will show the properties of the selected Trigger node
* create(gid, sid, did, scid, tid)
- This function will create the new Trigger object
* update(gid, sid, did, scid, tid, trid)
- This function will update the data for the selected Trigger node
* delete(self, gid, sid, scid, tid, trid):
- This function will drop the Trigger object
* enable(self, gid, sid, scid, tid, trid):
- This function will enable/disable Trigger object
* msql(gid, sid, did, scid, tid, trid)
- This function is used to return modified SQL for the selected
Trigger node
* get_sql(data, scid, tid, trid)
- This function will generate sql from model data
* sql(gid, sid, did, scid, tid, trid):
- This function will generate sql to show it in sql pane for the
selected Trigger node.
* dependency(gid, sid, did, scid, tid, trid):
- This function will generate dependency list show it in dependency
pane for the selected Trigger node.
* dependent(gid, sid, did, scid, tid, trid):
- This function will generate dependent list to show it in dependent
pane for the selected Trigger node.
* get_trigger_functions(gid, sid, did, scid, tid, trid):
- This function will return list of trigger functions available
via AJAX response
* _column_details(tid, clist)::
- This function will fetch the columns for trigger
* _trigger_definition(data):
- This function will set additional trigger definitions in
AJAX response
"""
node_type = blueprint.node_type
parent_ids = [
{'type': 'int', 'id': 'gid'},
{'type': 'int', 'id': 'sid'},
{'type': 'int', 'id': 'did'},
{'type': 'int', 'id': 'scid'},
{'type': 'int', 'id': 'tid'}
]
ids = [
{'type': 'int', 'id': 'trid'}
]
operations = dict({
'obj': [
{'get': 'properties', 'delete': 'delete', 'put': 'update'},
{'get': 'list', 'post': 'create'}
],
'delete': [{'delete': 'delete'}],
'children': [{'get': 'children'}],
'nodes': [{'get': 'node'}, {'get': 'nodes'}],
'sql': [{'get': 'sql'}],
'msql': [{'get': 'msql'}, {'get': 'msql'}],
'stats': [{'get': 'statistics'}],
'dependency': [{'get': 'dependencies'}],
'dependent': [{'get': 'dependents'}],
'module.js': [{}, {}, {'get': 'module_js'}],
'get_triggerfunctions': [{'get': 'get_trigger_functions'},
{'get': 'get_trigger_functions'}],
'enable': [{'put': 'enable_disable_trigger'}]
})
def check_precondition(f):
"""
This function behaves as a decorator which checks the database
connection before running the view. It also attaches the manager,
conn & template_path properties to self
"""
@wraps(f)
def wrap(*args, **kwargs):
# Here args[0] will hold self & kwargs will hold gid,sid,did
self = args[0]
self.manager = get_driver(PG_DEFAULT_DRIVER).connection_manager(
kwargs['sid']
)
self.conn = self.manager.connection(did=kwargs['did'])
# We need datlastsysoid to check if the current trigger is a system trigger
self.datlastsysoid = self.manager.db_info[
kwargs['did']
]['datlastsysoid'] if self.manager.db_info is not None and \
kwargs['did'] in self.manager.db_info else 0
# we will set template path for sql scripts
self.template_path = 'trigger/sql/#{0}#'.format(self.manager.version)
# Store server type
self.server_type = self.manager.server_type
# We need the parent's names, e.g. the table name and schema name;
# when we create a new trigger or update one, we can fetch them using
# the properties sql
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=kwargs['tid'])
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
for row in rset['rows']:
self.schema = row['schema']
self.table = row['table']
# Here we are storing trigger definition
# We will use it to check trigger type definition
self.trigger_definition = {
'TRIGGER_TYPE_ROW': (1 << 0),
'TRIGGER_TYPE_BEFORE': (1 << 1),
'TRIGGER_TYPE_INSERT': (1 << 2),
'TRIGGER_TYPE_DELETE': (1 << 3),
'TRIGGER_TYPE_UPDATE': (1 << 4),
'TRIGGER_TYPE_TRUNCATE': (1 << 5),
'TRIGGER_TYPE_INSTEAD': (1 << 6)
}
return f(*args, **kwargs)
return wrap
@check_precondition
def get_trigger_functions(self, gid, sid, did, scid, tid, trid=None):
"""
This function will return list of trigger functions available
via AJAX response
"""
res = [{'label': '', 'value': ''}]
# TODO: REMOVE True Condition , it's just for testing
# If server type is EDB-PPAS then we also need to add
# inline edb-spl along with options fetched by below sql
if self.server_type == 'ppas':
res.append({
'label': 'Inline EDB-SPL',
'value': 'Inline EDB-SPL'
})
try:
SQL = render_template("/".join([self.template_path,
'get_triggerfunctions.sql']),
show_system_objects=self.blueprint.show_system_objects
)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
for row in rset['rows']:
res.append(
{'label': row['tfunctions'],
'value': row['tfunctions']}
)
return make_json_response(
data=res,
status=200
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def list(self, gid, sid, did, scid, tid):
"""
This function is used to list all the trigger nodes within that collection.
Args:
gid: Server group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
Returns:
JSON of available trigger nodes
"""
SQL = render_template("/".join([self.template_path,
'properties.sql']), tid=tid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
return ajax_response(
response=res['rows'],
status=200
)
@check_precondition
def node(self, gid, sid, did, scid, tid, trid):
"""
This function will be used to create the child node within that collection.
Here it will create the specific trigger node.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
trid: Trigger ID
Returns:
JSON of available trigger child nodes
"""
res = []
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
tid=tid,
trid=trid)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
if len(rset['rows']) == 0:
return gone(gettext("""Could not find the trigger in the table."""))
res = self.blueprint.generate_browser_node(
rset['rows'][0]['oid'],
tid,
rset['rows'][0]['name'],
icon="icon-trigger" if rset['rows'][0]['is_enable_trigger']
else "icon-trigger-bad"
)
return make_json_response(
data=res,
status=200
)
@check_precondition
def nodes(self, gid, sid, did, scid, tid):
"""
This function will be used to create all the child nodes within that collection.
Here it will create all the trigger nodes.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
Returns:
JSON of available trigger child nodes
"""
res = []
SQL = render_template("/".join([self.template_path,
'nodes.sql']), tid=tid)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
for row in rset['rows']:
res.append(
self.blueprint.generate_browser_node(
row['oid'],
tid,
row['name'],
icon="icon-trigger" if row['is_enable_trigger']
else "icon-trigger-bad"
))
return make_json_response(
data=res,
status=200
)
def _column_details(self, tid, clist):
"""
This function will fetch the list of columns for the trigger
Args:
tid: Table OID
clist: List of columns
Returns:
Updated properties data with column
"""
SQL = render_template("/".join([self.template_path,
'get_columns.sql']),
tid=tid, clist=clist)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
# 'tgattr' contains list of columns from table used in trigger
columns = []
for row in rset['rows']:
columns.append(row['name'])
return columns
def _trigger_definition(self, data):
"""
This function will set the trigger definition
Args:
data: Properties data
Returns:
Updated properties data with trigger definition
"""
# Fires event definition
if data['tgtype'] & self.trigger_definition['TRIGGER_TYPE_BEFORE']:
data['fires'] = 'BEFORE'
elif data['tgtype'] & self.trigger_definition['TRIGGER_TYPE_INSTEAD']:
data['fires'] = 'INSTEAD OF'
else:
data['fires'] = 'AFTER'
# Trigger of type definition
if data['tgtype'] & self.trigger_definition['TRIGGER_TYPE_ROW']:
data['is_row_trigger'] = True
else:
data['is_row_trigger'] = False
# Event definition
if data['tgtype'] & self.trigger_definition['TRIGGER_TYPE_INSERT']:
data['evnt_insert'] = True
else:
data['evnt_insert'] = False
if data['tgtype'] & self.trigger_definition['TRIGGER_TYPE_DELETE']:
data['evnt_delete'] = True
else:
data['evnt_delete'] = False
if data['tgtype'] & self.trigger_definition['TRIGGER_TYPE_UPDATE']:
data['evnt_update'] = True
else:
data['evnt_update'] = False
if data['tgtype'] & self.trigger_definition['TRIGGER_TYPE_TRUNCATE']:
data['evnt_truncate'] = True
else:
data['evnt_truncate'] = False
return data
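# Worked example (added commentary): tgtype = 23 = 1 + 2 + 4 + 16 sets the
# ROW, BEFORE, INSERT and UPDATE bits, so the method above reports a
# row-level BEFORE trigger that fires on INSERT and UPDATE.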
def _format_args(self, args):
"""
This function will format arguments.
Args:
args: Arguments
Returns:
Formatted arguments for the function
"""
formatted_args = ["'{0}'".format(arg) for arg in args]
return ', '.join(formatted_args)
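# For example (added commentary): _format_args(['abc', '123']) returns "'abc', '123'".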
@check_precondition
def properties(self, gid, sid, did, scid, tid, trid):
"""
This function will show the properties of the selected trigger node.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
trid: Trigger ID
Returns:
JSON of selected trigger node
"""
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, trid=trid,
datlastsysoid=self.datlastsysoid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(gettext("""Could not find the trigger in the table."""))
# Making copy of output for future use
data = dict(res['rows'][0])
data = self.get_trigger_function_schema(data)
if len(data['custom_tgargs']) > 1:
# We know that trigger has more than 1 argument, let's join them
# and convert it to string
data['tgargs'] = self._format_args(data['custom_tgargs'])
if len(data['tgattr']) >= 1:
columns = ', '.join(data['tgattr'].split(' '))
data['columns'] = self._column_details(tid, columns)
data = self._trigger_definition(data)
return ajax_response(
response=data,
status=200
)
@check_precondition
def create(self, gid, sid, did, scid, tid):
"""
This function will create a new trigger object
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
"""
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
for k, v in data.items():
try:
data[k] = json.loads(v, encoding='utf-8')
except (ValueError, TypeError, KeyError):
data[k] = v
required_args = {
'name': 'Name',
'tfunction': 'Trigger function'
}
for arg in required_args:
if arg not in data:
return make_json_response(
status=410,
success=0,
errormsg=gettext("Could not find the required parameter (%s)." % \
required_args[arg])
)
# Adding parent into data dict, will be using it while creating sql
data['schema'] = self.schema
data['table'] = self.table
try:
SQL = render_template("/".join([self.template_path,
'create.sql']),
data=data, conn=self.conn)
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
# we need the oid to add the object to the browser tree
SQL = render_template("/".join([self.template_path,
'get_oid.sql']),
tid=tid, data=data)
status, trid = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=tid)
return jsonify(
node=self.blueprint.generate_browser_node(
trid,
tid,
data['name'],
icon="icon-trigger"
)
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def delete(self, gid, sid, did, scid, tid, trid):
"""
This function will drop the trigger object
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
trid: Trigger ID
"""
# Below will decide if it's simple drop or drop with cascade call
if self.cmd == 'delete':
# This is a cascade operation
cascade = True
else:
cascade = False
try:
# We will first fetch the trigger name for current request
# so that we create template for dropping trigger
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, trid=trid,
datlastsysoid=self.datlastsysoid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if not res['rows']:
return make_json_response(
success=0,
errormsg=gettext(
'Error: Object not found.'
),
info=gettext(
'The specified trigger could not be found.\n'
)
)
data = dict(res['rows'][0])
SQL = render_template("/".join([self.template_path,
'delete.sql']),
data=data, conn=self.conn, cascade=cascade)
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
return make_json_response(
success=1,
info=gettext("Trigger is dropped"),
data={
'id': trid,
'tid': tid
}
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def update(self, gid, sid, did, scid, tid, trid):
"""
This function will update the existing trigger object
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
trid: Trigger ID
"""
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
try:
data['schema'] = self.schema
data['table'] = self.table
SQL, name = self.get_sql(scid, tid, trid, data)
if not isinstance(SQL, (str, unicode)):
return SQL
SQL = SQL.strip('\n').strip(' ')
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
# We need the oid to add the object to the browser tree. If the user
# updates the trigger, a new OID is generated,
# so we need to return the new OID of the trigger.
SQL = render_template("/".join([self.template_path, 'get_oid.sql']),
tid=tid, data=data)
status, new_trid = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=new_trid)
# Fetch updated properties
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, trid=new_trid,
datlastsysoid=self.datlastsysoid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
gettext("""Could not find the trigger in the table."""))
# Making copy of output for future use
data = dict(res['rows'][0])
return jsonify(
node=self.blueprint.generate_browser_node(
new_trid,
tid,
name,
icon="icon-%s" % self.node_type if
data['is_enable_trigger'] else
"icon-%s-bad" % self.node_type
)
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def msql(self, gid, sid, did, scid, tid, trid=None):
"""
This function will generate the modified sql for the trigger object
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
trid: Trigger ID (When working with existing trigger)
"""
data = dict()
for k, v in request.args.items():
try:
data[k] = json.loads(v, encoding='utf-8')
except ValueError:
data[k] = v
# Adding parent into data dict, will be using it while creating sql
data['schema'] = self.schema
data['table'] = self.table
try:
sql, name = self.get_sql(scid, tid, trid, data)
if not isinstance(sql, (str, unicode)):
return sql
sql = sql.strip('\n').strip(' ')
if sql == '':
sql = "--modified SQL"
return make_json_response(
data=sql,
status=200
)
except Exception as e:
return internal_server_error(errormsg=str(e))
def get_trigger_function_schema(self, data):
"""
This function will return trigger function with schema name
"""
# If language is 'edbspl' then trigger function should be 'Inline EDB-SPL'
# else we will find the trigger function with schema name.
if data['lanname'] == 'edbspl':
data['tfunction'] = 'Inline EDB-SPL'
else:
SQL = render_template("/".join([self.template_path,
'get_triggerfunctions.sql']),
tgfoid=data['tgfoid'],
show_system_objects=self.blueprint.show_system_objects)
status, result = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=result)
# Update the trigger function which we have fetched with schema name
if 'rows' in result and len(result['rows']) > 0 and \
'tfunctions' in result['rows'][0]:
data['tfunction'] = result['rows'][0]['tfunctions']
return data
def get_sql(self, scid, tid, trid, data):
"""
This function will generate sql from model data
"""
if trid is not None:
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, trid=trid,
datlastsysoid=self.datlastsysoid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
gettext("""Could not find the trigger in the table.""")
)
old_data = dict(res['rows'][0])
# If name is not present in data then
# we will fetch it from old data, we also need schema & table name
if 'name' not in data:
data['name'] = old_data['name']
self.trigger_name = data['name']
self.lanname = old_data['lanname']
self.is_trigger_enabled = old_data['is_enable_trigger']
old_data = self.get_trigger_function_schema(old_data)
if len(old_data['custom_tgargs']) > 1:
# We know that trigger has more than 1 argument, let's join them
old_data['tgargs'] = self._format_args(old_data['custom_tgargs'])
if len(old_data['tgattr']) > 1:
columns = ', '.join(old_data['tgattr'].split(' '))
old_data['columns'] = self._column_details(tid, columns)
old_data = self._trigger_definition(old_data)
SQL = render_template(
"/".join([self.template_path, 'update.sql']),
data=data, o_data=old_data, conn=self.conn
)
else:
required_args = {
'name': 'Name',
'tfunction': 'Trigger function'
}
for arg in required_args:
if arg not in data:
return gettext('-- definition incomplete')
# The request is for a new object which does not have a did
SQL = render_template("/".join([self.template_path, 'create.sql']),
data=data, conn=self.conn)
return SQL, data['name'] if 'name' in data else old_data['name']
@check_precondition
def sql(self, gid, sid, did, scid, tid, trid):
"""
This function will generate the reverse engineered sql for the trigger object
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
trid: Trigger ID
"""
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, trid=trid,
datlastsysoid=self.datlastsysoid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(gettext("""Could not find the trigger in the table."""))
data = dict(res['rows'][0])
# Adding parent into data dict, will be using it while creating sql
data['schema'] = self.schema
data['table'] = self.table
data = self.get_trigger_function_schema(data)
if len(data['custom_tgargs']) > 1:
# We know that trigger has more than 1 argument, let's join them
data['tgargs'] = self._format_args(data['custom_tgargs'])
if len(data['tgattr']) >= 1:
columns = ', '.join(data['tgattr'].split(' '))
data['columns'] = self._column_details(tid, columns)
data = self._trigger_definition(data)
SQL, name = self.get_sql(scid, tid, None, data)
sql_header = u"-- Trigger: {0}\n\n-- ".format(data['name'])
sql_header += render_template("/".join([self.template_path,
'delete.sql']),
data=data, conn=self.conn)
SQL = sql_header + '\n\n' + SQL.strip('\n')
# If the trigger is disabled then add the sql code for the same
if not data['is_enable_trigger']:
SQL += '\n\n'
SQL += render_template("/".join([self.template_path,
'enable_disable_trigger.sql']),
data=data, conn=self.conn)
return ajax_response(response=SQL)
@check_precondition
def enable_disable_trigger(self, gid, sid, did, scid, tid, trid):
"""
This function will enable OR disable the current trigger object
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
trid: Trigger ID
"""
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
# Convert str 'true' to boolean type
is_enable_flag = json.loads(data['enable'])
try:
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, trid=trid,
datlastsysoid=self.datlastsysoid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
gettext("""Could not find the trigger in the table.""")
)
o_data = dict(res['rows'][0])
# If enable is set to true, we need SQL to enable the current
# trigger, which is currently disabled, so we need to
# alter the 'is_enable_trigger' flag so that we can render the
# correct SQL for the operation
o_data['is_enable_trigger'] = is_enable_flag
# Adding parent into data dict, will be using it while creating sql
o_data['schema'] = self.schema
o_data['table'] = self.table
SQL = render_template("/".join([self.template_path,
'enable_disable_trigger.sql']),
data=o_data, conn=self.conn)
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
return make_json_response(
success=1,
info="Trigger updated",
data={
'id': trid,
'tid': tid,
'scid': scid
}
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def dependents(self, gid, sid, did, scid, tid, trid):
"""
This function gets the dependents and returns an ajax response
for the trigger node.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
trid: Trigger ID
"""
dependents_result = self.get_dependents(
self.conn, trid
)
return ajax_response(
response=dependents_result,
status=200
)
@check_precondition
def dependencies(self, gid, sid, did, scid, tid, trid):
"""
This function gets the dependencies and returns an ajax response
for the trigger node.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
trid: Trigger ID
"""
dependencies_result = self.get_dependencies(
self.conn, trid
)
return ajax_response(
response=dependencies_result,
status=200
)
TriggerView.register_node_view(blueprint)
```
#### File: schemas/tests/test_schema_add.py
```python
import json
import uuid
from pgadmin.browser.server_groups.servers.databases.tests import utils as \
database_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
class SchemaAddTestCase(BaseTestGenerator):
""" This class will add new schema under database node. """
scenarios = [
# Fetching default URL for schema node.
('Check Schema Node URL', dict(url='/browser/schema/obj/'))
]
def runTest(self):
""" This function will add schema under database node. """
database_info = parent_node_dict["database"][-1]
server_id = database_info["server_id"]
db_id = database_info["db_id"]
db_con = database_utils.connect_database(self,
utils.SERVER_GROUP,
server_id,
db_id)
if not db_con["info"] == "Database connected.":
raise Exception("Could not connect to database to add the schema.")
db_user = self.server["username"]
data = {
"deffuncacl": [],
"defseqacl": [],
"deftblacl": [],
"deftypeacl": [],
"name": "test_schema_{0}".format(str(uuid.uuid4())[1:6]),
"namespaceowner": db_user,
"nspacl": [
{
"grantee": db_user,
"grantor": db_user,
"privileges":
[
{
"privilege_type": "C",
"privilege": True,
"with_grant": False
},
{
"privilege_type": "U",
"privilege": True,
"with_grant": False
}
]
}
],
"seclabels": []
}
response = self.tester.post(self.url + str(utils.SERVER_GROUP) + '/' +
str(server_id) + '/' + str(db_id) +
'/', data=json.dumps(data),
content_type='html/json')
self.assertEquals(response.status_code, 200)
```
#### File: roles/tests/test_role_add.py
```python
import json
from pgadmin.browser.server_groups.servers.tests import utils as server_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
from . import utils as roles_utils
class LoginRoleAddTestCase(BaseTestGenerator):
"""This class has add role scenario"""
scenarios = [
# Fetching default URL for roles node.
('Check Role Node', dict(url='/browser/role/obj/'))
]
def setUp(self):
pass
def runTest(self):
"""This function test the add role scenario"""
server_id = parent_node_dict["server"][-1]["server_id"]
server_response = server_utils.connect_server(self, server_id)
if not server_response['data']['connected']:
raise Exception("Server not found to add the role.")
data = roles_utils.get_role_data(self.server['db_password'])
self.role_name = data['rolname']
response = self.tester.post(self.url + str(utils.SERVER_GROUP) + '/'
+ str(server_id) + '/',
data=json.dumps(data),
content_type='html/json')
self.assertEquals(response.status_code, 200)
response_data = json.loads(response.data.decode('utf-8'))
role_id = response_data['node']['_id']
role_dict = {"server_id": server_id, "role_id": role_id,
"role_name": self.role_name}
utils.write_node_info("lrid", role_dict)
def tearDown(self):
"""This function delete the role from added server"""
connection = utils.get_db_connection(self.server['db'],
self.server['username'],
self.server['db_password'],
self.server['host'],
self.server['port'],
self.server['sslmode'])
roles_utils.delete_role(connection, self.role_name)
```
#### File: server_groups/tests/test_sg_get.py
```python
import json
from pgadmin.utils.route import BaseTestGenerator
from regression.test_setup import config_data
class SgNodeTestCase(BaseTestGenerator):
"""
This class will check available server groups in pgAdmin.
"""
scenarios = [
# Fetching the default url for server group node
('Check Server Group Node', dict(url='/browser/server_group/obj/'))
]
def runTest(self):
"""This function will check available server groups."""
server_group_id = config_data['server_group']
response = self.tester.get(self.url + str(server_group_id),
content_type='html/json')
self.assertEquals(response.status_code, 200)
response_data = json.loads(response.data.decode('utf8'))
self.assertEquals(response_data['id'], server_group_id)
```
#### File: browser/tests/test_version_in_range.py
```python
import json
import uuid
from pgadmin.utils.route import BaseTestGenerator
from regression.python_test_utils import test_utils
from regression.test_setup import config_data
from pgadmin.browser.utils import is_version_in_range
class VersionInRangeTestCase(BaseTestGenerator):
"""
This class validates the version-in-range functionality
by defining different version scenarios; each dict of
parameters describes a scenario and is preceded by the test name.
"""
scenarios = [
('TestCase for Validating pgversion 8.23 and min_version is 90100, should not show', dict(
sversion=82300,
min_version=90100,
max_version=1000000000,
scenario=2
)),
('TestCase for Validating pgversion 9.2 and should show by default', dict(
sversion=90200,
min_version=0,
max_version=1000000000,
scenario=1
)),
('TestCase for Validating pgversion 9.2 and min/max are None, should show by default', dict(
sversion=90200,
min_version=None,
max_version=None,
scenario=1
)),
('TestCase for Validating pgversion 9.6 and max is lower, should not show', dict(
sversion=90600,
min_version=None,
max_version=90400,
scenario=2
))
]
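# Note (added commentary): server versions are encoded as integers in these
# scenarios, e.g. 90600 corresponds to 9.6 and 90100 to 9.1, while
# 1000000000 effectively acts as "no upper bound".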
@classmethod
def setUpClass(cls):
pass
def runTest(self):
"""This function will check version in range functionality."""
if self.scenario == 1:
self.test_result_is_true()
if self.scenario == 2:
self.test_result_is_false()
def test_result_is_true(self):
self.assertTrue(is_version_in_range(self.sversion, self.min_version, self.max_version))
def test_result_is_false(self):
self.assertFalse(is_version_in_range(self.sversion, self.min_version, self.max_version))
```
#### File: pgadmin/feature_tests/pg_datatype_validation_test.py
```python
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from regression.python_test_utils import test_utils
from regression.feature_utils.base_feature_test import BaseFeatureTest
class PGDataypeFeatureTest(BaseFeatureTest):
"""
This feature test will test the different Postgres
data-type output.
"""
scenarios = [
("Test checks for PG data-types output", dict())
]
def before(self):
connection = test_utils.get_db_connection(self.server['db'],
self.server['username'],
self.server['db_password'],
self.server['host'],
self.server['port'],
self.server['sslmode'])
test_utils.drop_database(connection, "acceptance_test_db")
test_utils.create_database(self.server, "acceptance_test_db")
def runTest(self):
self.page.wait_for_spinner_to_disappear()
self.page.add_server(self.server)
self._schema_node_expandable()
# Check data types
self._check_datatype()
self.page.close_query_tool()
def after(self):
self.page.remove_server(self.server)
connection = test_utils.get_db_connection(self.server['db'],
self.server['username'],
self.server['db_password'],
self.server['host'],
self.server['port'],
self.server['sslmode'])
test_utils.drop_database(connection, "acceptance_test_db")
def _schema_node_expandable(self):
self.page.toggle_open_tree_item(self.server['name'])
self.page.toggle_open_tree_item('Databases')
self.page.toggle_open_tree_item('acceptance_test_db')
def _check_datatype(self):
query = r"SELECT -32767::smallint, 32767::smallint," \
r"-2147483647::integer, 2147483647::integer," \
r"9223372036854775807::bigint, 9223372036854775807::bigint," \
r"922337203685.4775807::decimal, 92203685.477::decimal," \
r"922337203685.922337203685::numeric, " \
r"-92233720368547758.08::numeric," \
r"ARRAY[1, 2, 3]::float[], ARRAY['nan', 'nan', 'nan']::float[]," \
r"'Infinity'::real, '{Infinity}'::real[]," \
r"E'\\xDEADBEEF'::bytea, ARRAY[E'\\xDEADBEEF', E'\\xDEADBEEF']::bytea[];"
expected_output = [
'-32767', '32767', '-2147483647', '2147483647',
'9223372036854775807', '9223372036854775807',
'922337203685.4775807', '92203685.477',
'922337203685.922337203685', '-92233720368547758.08',
'{1,2,3}', '{NaN,NaN,NaN}',
'Infinity', '{Infinity}',
'binary data', 'binary data[]'
]
self.page.open_query_tool()
self.page.fill_codemirror_area_with(query)
self.page.find_by_id("btn-flash").click()
wait = WebDriverWait(self.page.driver, 5)
canvas = wait.until(EC.presence_of_element_located(
(By.CSS_SELECTOR, "#datagrid .slick-viewport .grid-canvas"))
)
# For every sample data-type value, check the expected output.
cnt = 2
cells = canvas.find_elements_by_css_selector('.slick-cell')
# remove first element as it is row number.
cells.pop(0)
for val, cell in zip(expected_output, cells):
try:
source_code = cell.text
PGDataypeFeatureTest.check_result(
source_code,
expected_output[cnt - 2]
)
cnt += 1
except TimeoutException:
assert False, "{0} does not match with {1}".format(
val, expected_output[cnt]
)
@staticmethod
def check_result(source_code, string_to_find):
assert source_code.find(string_to_find) != -1,\
"{0} does not match with {1}".format(
source_code, string_to_find
)
```
#### File: misc/bgprocess/process_executor.py
```python
from __future__ import print_function
# To make print function compatible with python2 & python3
import sys
import os
from datetime import datetime, timedelta, tzinfo
from subprocess import Popen, PIPE
from threading import Thread
import signal
_IS_WIN = (os.name == 'nt')
_IS_PY2 = (sys.version_info[0] == 2)
_ZERO = timedelta(0)
_sys_encoding = None
_fs_encoding = None
_u = None
_out_dir = None
_log_file = None
if _IS_PY2:
def _log(msg):
with open(_log_file, 'a') as fp:
fp.write(('INFO:: %s\n' % str(msg)))
else:
def _log(msg):
with open(_log_file, 'a') as fp:
fp.write(('INFO:: %s\n' % msg.encode('ascii', 'xmlcharrefreplace')))
def _log_exception():
type_, value_, traceback_ = sys.exc_info()
with open(_log_file, 'a') as fp:
from traceback import format_exception
res = ''.join(
format_exception(type_, value_, traceback_)
)
fp.write('EXCEPTION::\n{0}'.format(res))
return res
# Copied the 'UTC' class from the 'pytz' package so that this script can run
# without any external dependency library; it can be used with any python
# version.
class UTC(tzinfo):
"""UTC
Optimized UTC implementation. It unpickles using the single module global
instance defined beneath this class declaration.
"""
zone = "UTC"
_utcoffset = _ZERO
_dst = _ZERO
_tzname = zone
def fromutc(self, dt):
if dt.tzinfo is None:
return self.localize(dt)
return super(UTC.__class__, self).fromutc(dt)
def utcoffset(self, dt):
return _ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return _ZERO
def localize(self, dt, is_dst=False):
'''Convert naive time to local time'''
if dt.tzinfo is not None:
raise ValueError('Not naive datetime (tzinfo is already set)')
return dt.replace(tzinfo=self)
def normalize(self, dt, is_dst=False):
'''Correct the timezone information on the given datetime'''
if dt.tzinfo is self:
return dt
if dt.tzinfo is None:
raise ValueError('Naive time - no tzinfo set')
return dt.astimezone(self)
def __repr__(self):
return "<UTC>"
def __str__(self):
return "UTC"
def get_current_time(format='%Y-%m-%d %H:%M:%S.%f %z'):
return datetime.utcnow().replace(
tzinfo=UTC()
).strftime(format)
class ProcessLogger(Thread):
"""
This class definition is responsible for capturing & logging
stdout & stderr messages from subprocess
Methods:
--------
* __init__(stream_type)
- This method is use to initlize the ProcessLogger class object
* log(msg)
- Log message in the orderly manner.
* run()
- Reads the stdout/stderr for messages and sent them to logger
"""
def __init__(self, stream_type):
"""
        This method is used to initialize the ProcessLogger class object
        Args:
            stream_type: Type of the stream ('out' or 'err')
Returns:
None
"""
import codecs
Thread.__init__(self)
self.process = None
self.stream = None
self.logger = open(os.path.join(_out_dir, stream_type), 'wb')
def attach_process_stream(self, process, stream):
"""
This function will attach a process and its stream with this thread.
Args:
process: Process
stream: Stream attached with the process
Returns:
None
"""
self.process = process
self.stream = stream
if not _IS_PY2:
def log(self, msg):
"""
This function will update log file
Args:
msg: message
Returns:
None
"""
# Write into log file
if self.logger:
if msg:
self.logger.write(get_current_time(format='%y%m%d%H%M%S%f').encode('utf-8'))
self.logger.write(b',')
self.logger.write(msg.lstrip(b'\r\n' if _IS_WIN else b'\n'))
self.logger.write(os.linesep.encode('utf-8'))
return True
return False
else:
def log(self, msg):
"""
This function will update log file
Args:
msg: message
Returns:
None
"""
# Write into log file
if self.logger:
if msg:
self.logger.write(
b'{0},{1}{2}'.format(
get_current_time(
format='%y%m%d%H%M%S%f'
),
msg.lstrip(b'\r\n' if _IS_WIN else b'\n'), os.linesep
)
)
return True
return False
def run(self):
if self.process and self.stream:
while True:
nextline = self.stream.readline()
if nextline:
self.log(nextline)
else:
if self.process.poll() is not None:
break
def release(self):
if self.logger:
self.logger.close()
self.logger = None
def update_status(**kw):
"""
    This function will update the process status
Args:
kwargs - Process configuration details
Returns:
None
"""
import json
if _out_dir:
status = dict(
(k, v) for k, v in kw.items() if k in [
'start_time', 'end_time', 'exit_code', 'pid'
]
)
_log('Updating the status:\n{0}'.format(json.dumps(status)))
with open(os.path.join(_out_dir, 'status'), 'w') as fp:
json.dump(status, fp)
else:
raise ValueError("Please verify pid and db_file arguments.")
def execute():
"""
This function will execute the background process
Returns:
None
"""
command = sys.argv[1:]
args = dict()
_log('Initialize the process execution: {0}'.format(command))
    # Create separate threads for stdout and stderr
process_stdout = ProcessLogger('out')
process_stderr = ProcessLogger('err')
process = None
try:
# update start_time
args.update({
'start_time': get_current_time(),
'stdout': process_stdout.log,
'stderr': process_stderr.log,
'pid': os.getpid()
})
# Update start time
update_status(**args)
_log('Status updated...')
if 'PROCID' in os.environ and os.environ['PROCID'] in os.environ:
os.environ['PGPASSWORD'] = os.environ[os.environ['PROCID']]
kwargs = dict()
kwargs['close_fds'] = False
kwargs['shell'] = True if _IS_WIN else False
# We need environment variables & values in string
if _IS_PY2:
_log('Converting the environment variable in the bytes format...')
kwargs['env'] = convert_environment_variables(os.environ.copy())
else:
kwargs['env'] = os.environ.copy()
_log('Starting the command execution...')
process = Popen(
command, stdout=PIPE, stderr=PIPE, stdin=None, **kwargs
)
_log('Attaching the loggers to stdout, and stderr...')
# Attach the stream to the process logger, and start logging.
process_stdout.attach_process_stream(process, process.stdout)
process_stdout.start()
process_stderr.attach_process_stream(process, process.stderr)
process_stderr.start()
# Join both threads together
process_stdout.join()
process_stderr.join()
_log('Waiting for the process to finish...')
# Child process return code
exitCode = process.wait()
if exitCode is None:
exitCode = process.poll()
_log('Process exited with code: {0}'.format(exitCode))
args.update({'exit_code': exitCode})
# Add end_time
args.update({'end_time': get_current_time()})
# Fetch last output, and error from process if it has missed.
data = process.communicate()
if data:
if data[0]:
process_stdout.log(data[0])
if data[1]:
process_stderr.log(data[1])
# If executable not found or invalid arguments passed
    except OSError as e:
        info = _log_exception()
        args.update({'exit_code': 500})
        if process_stderr:
            process_stderr.log(info)
        else:
            print("WARNING: ", e.strerror, file=sys.stderr)
        args.update({'end_time': get_current_time()})
        args.update({'exit_code': e.errno})
# Unknown errors
    except Exception as e:
        info = _log_exception()
        args.update({'exit_code': 501})
        if process_stderr:
            process_stderr.log(info)
        else:
            print("WARNING: ", str(e), file=sys.stderr)
        args.update({'end_time': get_current_time()})
        args.update({'exit_code': -1})
finally:
# Update the execution end_time, and exit-code.
update_status(**args)
_log('Exiting the process executor...')
if process_stderr:
process_stderr.release()
if process_stdout:
process_stdout.release()
_log('Bye!')
# Let's ignore all the signals coming to us.
def signal_handler(signal, msg):
pass
def convert_environment_variables(env):
"""
    This function is used to convert environment variables to strings,
    because environment variables must be strings when passed to Popen
:param env: Dict of environment variable
:return: Encoded environment variable as string
"""
temp_env = dict()
for key, value in env.items():
try:
if not isinstance(key, str):
key = key.encode(_sys_encoding)
if not isinstance(value, str):
value = value.encode(_sys_encoding)
temp_env[key] = value
except Exception as e:
_log_exception()
return temp_env
if __name__ == '__main__':
_sys_encoding = sys.getdefaultencoding()
if not _sys_encoding or _sys_encoding == 'ascii':
# Fall back to 'utf-8', if we couldn't determine the default encoding,
# or 'ascii'.
_sys_encoding = 'utf-8'
_fs_encoding = sys.getfilesystemencoding()
if not _fs_encoding or _fs_encoding == 'ascii':
# Fall back to 'utf-8', if we couldn't determine the file-system encoding,
# or 'ascii'.
_fs_encoding = 'utf-8'
def u(_s, _encoding=_sys_encoding):
if _IS_PY2:
if isinstance(_s, str):
return unicode(_s, _encoding)
return _s
_u = u
_out_dir = u(os.environ['OUTDIR'])
_log_file = os.path.join(_out_dir, ('log_%s' % os.getpid()))
_log('Starting the process executor...')
# Ignore any signals
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
_log('Disabled the SIGINT, SIGTERM signals...')
if _IS_WIN:
_log('Disable the SIGBREAKM signal (windows)...')
signal.signal(signal.SIGBREAK, signal_handler)
_log('Disabled the SIGBREAKM signal (windows)...')
# For windows:
# We would run the process_executor in the detached mode again to make
# the child process to run as a daemon. And, it would run without
# depending on the status of the web-server.
if 'PGA_BGP_FOREGROUND' in os.environ and \
os.environ['PGA_BGP_FOREGROUND'] == "1":
_log('[CHILD] Start process execution...')
# This is a child process running as the daemon process.
# Let's do the job assigning to it.
try:
_log('Executing the command now from the detached child...')
execute()
except:
_log_exception()
else:
from subprocess import CREATE_NEW_PROCESS_GROUP
DETACHED_PROCESS = 0x00000008
# Forward the standard input, output, and error stream to the
# 'devnull'.
stdin = open(os.devnull, "r")
stdout = open(os.devnull, "a")
stderr = open(os.devnull, "a")
env = os.environ.copy()
env['PGA_BGP_FOREGROUND'] = "1"
# We need environment variables & values in string
_log('[PARENT] Converting the environment variable in the bytes format...')
try:
env = convert_environment_variables(env)
except Exception as e:
_log_exception()
kwargs = {
'stdin': stdin.fileno(),
'stdout': stdout.fileno(),
'stderr': stderr.fileno(),
'creationflags': CREATE_NEW_PROCESS_GROUP | DETACHED_PROCESS,
'close_fds': False,
'cwd': _out_dir,
'env': env
}
cmd = [sys.executable]
cmd.extend(sys.argv)
            _log('[PARENT] Executing command: {0}'.format(cmd))
p = Popen(cmd, **kwargs)
exitCode = p.poll()
if exitCode is not None:
_log(
'[PARENT] Child exited with exit-code#{0}...'.format(
exitCode
)
)
else:
_log('[PARENT] Started the child with PID#{0}'.format(p.pid))
# Question: Should we wait for sometime?
# Answer: Looks the case...
from time import sleep
sleep(2)
_log('[PARENT] Exiting...')
sys.exit(0)
else:
r, w = os.pipe()
# For POSIX:
# We will fork the process, and run the child process as daemon, and
# let it do the job.
if os.fork() == 0:
_log('[CHILD] Forked the child process...')
# Hmm... So - I need to do the job now...
try:
os.close(r)
_log('[CHILD] Make the child process leader...')
# Let me be the process leader first.
os.setsid()
os.umask(0)
_log('[CHILD] Make the child process leader...')
w = os.fdopen(w, 'w')
# Let me inform my parent - I will do the job, do not worry
# now, and die peacefully.
_log('[CHILD] Inform parent about successful child forking...')
w.write('1')
w.close()
_log('[CHILD] Start executing the background process...')
execute()
except Exception:
_log_exception()
sys.exit(1)
else:
os.close(w)
r = os.fdopen(r)
# I do not care, what the child send.
r.read()
_log('[PARENT] Got message from the child...')
r.close()
_log('[PARENT] Exiting...')
sys.exit(0)
```
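The POSIX branch above detaches the worker by forking, calling `setsid()`, and using a pipe only as a one-byte handshake back to the parent. The following is a minimal, self-contained sketch of that pattern (POSIX only); `run_detached` and `do_work` are illustrative names, not part of pgAdmin.
```python
import os
import sys


def run_detached(do_work):
    """Run do_work() in a detached child process (POSIX only, minimal sketch)."""
    r, w = os.pipe()
    if os.fork() == 0:
        # Child: become a session leader so we no longer depend on the parent.
        os.close(r)
        os.setsid()
        os.umask(0)
        # Tell the parent we are alive, then do the actual job.
        with os.fdopen(w, 'w') as pipe_out:
            pipe_out.write('1')
        do_work()
        os._exit(0)
    else:
        # Parent: wait for the child's one-byte handshake, then return.
        os.close(w)
        with os.fdopen(r) as pipe_in:
            pipe_in.read()


if __name__ == '__main__':
    run_detached(lambda: print('child PID:', os.getpid(), file=sys.stderr))
```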
#### File: team10_project/message/forms.py
```python
from django import forms
from .models import Messages
from onlinestore.models import Item
class ComposeForm(forms.ModelForm):
class Meta:
model = Messages
fields = ('item', 'receiver', 'msg',)
labels = {
'msg': ('Message'),
}
def __init__(self, *args, **kwargs):
super(ComposeForm, self).__init__(*args, **kwargs)
# related items for the messages can only be approved items
self.fields['item'].queryset = Item.objects.filter(approved=True)
```
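For context, a `ModelForm` like this is normally driven from a view. The sketch below is an assumed usage, not code from the repository: the `sender` field, the `message:inbox` URL name, and the template path are placeholders.
```python
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect, render

from .forms import ComposeForm


@login_required
def compose(request):
    # Bind the form to POST data on submit, otherwise render it empty.
    form = ComposeForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        message = form.save(commit=False)
        message.sender = request.user  # assumes Messages has a 'sender' field
        message.save()
        return redirect('message:inbox')  # placeholder URL name
    return render(request, 'message/compose.html', {'form': form})
```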
#### File: team10_project/onlinestore/models.py
```python
from django.core.urlresolvers import reverse
from django.db import models
from django.contrib.auth import get_user_model
from sorl.thumbnail import ImageField
from django.utils.text import slugify # used to generate valid url based on a field of the model
from users.models import User
from onlinestore.formatChecker import ContentTypeRestrictedFileField
class Category(models.Model):
name = models.CharField(max_length=30, unique=True, blank=False)
slug = models.SlugField(unique=True, blank=True)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
"""Create slug based on category's name and save it for url."""
self.slug = slugify(self.name)
super().save(*args, **kwargs)
def get_absolute_url(self):
return reverse("onlinestore:item_list", kwargs={"slug":self.slug})
class Meta:
# define category's plural form, otherwise Django will use "Categorys"
verbose_name_plural = "Categories"
class Item(models.Model):
title = models.CharField(max_length=100, blank=False)
price = models.DecimalField(decimal_places=2, max_digits=12, blank=False)
description = models.TextField(blank=True, default='')
category = models.ForeignKey(Category, related_name="items", blank=False, null=True)
create_time = models.DateTimeField(auto_now=True)
image = ContentTypeRestrictedFileField(upload_to='uploads/', content_types=['image/jpg', 'image/jpeg', 'image/pdf', 'image/pneg', 'image/png', 'image/tiff'],max_upload_size=3145728,blank=True, null=True)
seller = models.ForeignKey(User, null=True, blank=True)
approved = models.BooleanField(default=False, blank=False)
def __str__(self):
return self.title
class Meta:
ordering = ["title"]
``` |
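The `save()` override derives `slug` from `name` with `slugify`, and `get_absolute_url()` then reverses a URL with that slug. A tiny illustration of the transformation itself, run outside of any model or database:
```python
from django.utils.text import slugify

# The same transformation Category.save() applies to its name field.
print(slugify("Home & Garden"))    # -> "home-garden"
print(slugify("Phones, Tablets"))  # -> "phones-tablets"
```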
{
"source": "jhl13/tensorflow-yolov3",
"score": 3
} |
#### File: tensorflow-yolov3/scripts/split_dataset.py
```python
import os
import glob
import numpy as np
from shutil import copyfile
def get_img_paths(dir_path):
img_paths = glob.glob(os.path.join(dir_path, '*.jpeg'))
img_paths.extend(glob.glob(os.path.join(dir_path, '*.png')))
img_paths.extend(glob.glob(os.path.join(dir_path, '*.jpg')))
return img_paths
def get_ann_paths(dir_path):
ann_paths = glob.glob(os.path.join(dir_path, '*.xml'))
return ann_paths
if __name__ == '__main__':
image_dir = "/home/luo13/workspace/datasets/detection/food/VOC2007/JPEGImages"
ann_dir = "/home/luo13/workspace/datasets/detection/food/VOC2007/Annotations"
train_dir = "/home/luo13/workspace/datasets/detection/food/train"
test_dir = "/home/luo13/workspace/datasets/detection/food/test"
train_VOCdevkit_dir = os.path.join(train_dir, "VOCdevkit")
test_VOCdevkit_dir = os.path.join(test_dir, "VOCdevkit")
if not os.path.exists(train_dir):
os.mkdir(train_dir)
if not os.path.exists(test_dir):
os.mkdir(test_dir)
if not os.path.exists(train_VOCdevkit_dir):
os.mkdir(train_VOCdevkit_dir)
if not os.path.exists(test_VOCdevkit_dir):
os.mkdir(test_VOCdevkit_dir)
train_image_dir = os.path.join(train_VOCdevkit_dir, "JPEGImages")
train_ann_dir = os.path.join(train_VOCdevkit_dir, "Annotations")
test_image_dir = os.path.join(test_VOCdevkit_dir, "JPEGImages")
test_ann_dir = os.path.join(test_VOCdevkit_dir, "Annotations")
if not os.path.exists(train_image_dir):
os.mkdir(train_image_dir)
if not os.path.exists(train_ann_dir):
os.mkdir(train_ann_dir)
if not os.path.exists(test_image_dir):
os.mkdir(test_image_dir)
if not os.path.exists(test_ann_dir):
os.mkdir(test_ann_dir)
img_paths = get_img_paths(image_dir)
ann_paths = get_ann_paths(ann_dir)
assert len(img_paths) == len(ann_paths)
print("Length of images are equal to annotations.")
np.random.shuffle(img_paths)
train_size = int(0.7 * len(img_paths))
test_size = len(img_paths) - train_size
print("train_size: ", train_size, "\n", "test_size: ", test_size)
for image_index, image_path in enumerate(img_paths):
if image_index < train_size:
src_image_dir = train_image_dir
scr_ann_dir = train_ann_dir
else:
src_image_dir = test_image_dir
scr_ann_dir = test_ann_dir
        image_name = os.path.splitext(os.path.basename(image_path))[0]
        xml_path = os.path.join(ann_dir, image_name + '.xml')
target_image_path = os.path.join(src_image_dir, image_name + '.jpg')
target_ann_path = os.path.join(scr_ann_dir, image_name + '.xml')
copyfile(image_path, target_image_path)
copyfile(xml_path, target_ann_path)
```
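One caveat with the split above: `np.random.shuffle` is unseeded, so each run produces a different 70/30 partition. If reproducibility matters, seeding a dedicated generator before shuffling is enough. This is only a suggested tweak, not something the repository does; the sample list below stands in for the real image paths.
```python
import numpy as np

# Example list standing in for the paths returned by get_img_paths().
img_paths = ['a.jpg', 'b.jpg', 'c.jpg', 'd.jpg', 'e.jpg',
             'f.jpg', 'g.jpg', 'h.jpg', 'i.jpg', 'j.jpg']

# Seed a dedicated generator so the 70/30 split is identical on every run.
rng = np.random.RandomState(42)
rng.shuffle(img_paths)

train_size = int(0.7 * len(img_paths))
train_paths, test_paths = img_paths[:train_size], img_paths[train_size:]
print(len(train_paths), len(test_paths))  # 7 3
```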
#### File: tensorflow-yolov3/scripts/vis_voc_label.py
```python
import os
import cv2
import glob
import numpy as np
from PIL import Image
import xml.etree.ElementTree as ET
def get_img_paths(dir_path):
img_paths = glob.glob(os.path.join(dir_path, '*.jpeg'))
img_paths.extend(glob.glob(os.path.join(dir_path, '*.png')))
img_paths.extend(glob.glob(os.path.join(dir_path, '*.jpg')))
return img_paths
def get_ann_paths(dir_path):
ann_paths = glob.glob(os.path.join(dir_path, '*.xml'))
return ann_paths
if __name__ == "__main__":
image_dir = "/home/luo13/workspace/datasets/detection/food/VOC2007/JPEGImages"
ann_dir = "/home/luo13/workspace/datasets/detection/food/VOC2007/Annotations"
img_paths = get_img_paths(image_dir)
ann_paths = get_ann_paths(ann_dir)
assert len(img_paths) == len(ann_paths)
print("Length of images are equal to annotations.")
print("Visualizing.")
food_names = []
for image_index, image_path in enumerate(img_paths):
image = cv2.imread(image_path)
# cv2.imshow("image", image)
# cv2.waitKey(0)
# print ("%d/%d Drawing %s"%(image_index + 1, len(img_paths), image_path))
        xml_path = os.path.join(ann_dir, os.path.splitext(os.path.basename(image_path))[0] + '.xml')
xml_tree = ET.parse(xml_path)
root = xml_tree.getroot()
all_object = []
for ann in root.iter("annotation"):
for ann_object in ann.iter("object"):
object_dict = {"name": "none", "location": []}
location = []
for name in ann_object.iter("name"):
object_dict["name"] = name.text
if name.text == "虫草":
print (image_path)
if name.text not in food_names:
food_names.append(name.text)
for bndbox in ann_object.iter("bndbox"):
for xmin in bndbox.iter("xmin"):
location.append(int(xmin.text))
for ymin in bndbox.iter("ymin"):
location.append(int(ymin.text))
for xmax in bndbox.iter("xmax"):
location.append(int(xmax.text))
for ymax in bndbox.iter("ymax"):
location.append(int(ymax.text))
object_dict["location"] = location
all_object.append(object_dict)
image = cv2.rectangle(image, (location[0], location[1]), (location[2], location[3]), (255, 0, 0), 2)
# cv2.imshow("image", image)
# cv2.waitKey(0)
print (food_names)
```
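The nested `iter()` loops assume the standard Pascal VOC annotation layout. Below is a self-contained sketch of that layout and an equivalent (slightly simplified) ElementTree traversal, using an inline XML string instead of a real annotation file.
```python
import xml.etree.ElementTree as ET

# Minimal Pascal VOC style annotation, inlined for illustration only.
VOC_XML = """
<annotation>
  <filename>example.jpg</filename>
  <object>
    <name>apple</name>
    <bndbox>
      <xmin>10</xmin><ymin>20</ymin><xmax>110</xmax><ymax>220</ymax>
    </bndbox>
  </object>
</annotation>
"""

root = ET.fromstring(VOC_XML)
for obj in root.iter("object"):
    name = obj.find("name").text
    box = obj.find("bndbox")
    location = [int(box.find(tag).text) for tag in ("xmin", "ymin", "xmax", "ymax")]
    print(name, location)  # apple [10, 20, 110, 220]
```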
#### File: jhl13/tensorflow-yolov3/train.py
```python
import os
import time
import shutil
import numpy as np
import tensorflow as tf
import core.utils as utils
from tqdm import trange
from core.dataset import Dataset
from core.yolov3 import YOLOV3
from core.config import cfg
os.environ["CUDA_VISIBLE_DEVICES"] = cfg.TRAIN.GPU
class YoloTrain(object):
def __init__(self):
self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
self.num_classes = len(self.classes)
self.learn_rate_init = cfg.TRAIN.LEARN_RATE_INIT
self.learn_rate_end = cfg.TRAIN.LEARN_RATE_END
self.first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
self.second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
self.warmup_periods = cfg.TRAIN.WARMUP_EPOCHS
self.initial_weight = cfg.TRAIN.INITIAL_WEIGHT
self.time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
self.max_bbox_per_scale = 150
self.train_logdir = "./data/log/train"
self.trainset = Dataset('train')
self.testset = Dataset('test')
self.gpus = utils.get_available_gpus(cfg.TRAIN.GPU_NUM)
self.steps_per_period = len(self.trainset)
self.steps_test = len(self.testset)
self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
self.batch_size_per_gpu = cfg.TRAIN.BATCH_SIZE // cfg.TRAIN.GPU_NUM
self.clone_scopes = ['clone_%d'%(idx) for idx in range(len(self.gpus))]
        # Purpose of warmup_steps:
        # Early in training the loss can easily blow up to NaN, so the initial learning rate is set very low to avoid that,
        # but a low rate also slows training. A gradually increasing (warmup) learning rate largely avoids NaN at the start
        # and lets the rate grow once training has stabilized.
with tf.name_scope('learn_rate'):
self.global_step = tf.Variable(1.0, dtype=tf.float64, trainable=False, name='global_step')
warmup_steps = tf.constant(self.warmup_periods * self.steps_per_period,
dtype=tf.float64, name='warmup_steps')
train_steps = tf.constant( (self.first_stage_epochs + self.second_stage_epochs)* self.steps_per_period,
dtype=tf.float64, name='train_steps')
            # A conditional: in TensorFlow the if/else is written as tf.cond with two branch functions.
self.learn_rate = tf.cond(
pred=self.global_step < warmup_steps,
true_fn=lambda: self.global_step / warmup_steps * self.learn_rate_init,
false_fn=lambda: self.learn_rate_end + 0.5 * (self.learn_rate_init - self.learn_rate_end) *
(1 + tf.cos(
(self.global_step - warmup_steps) / (train_steps - warmup_steps) * np.pi))
)
self.global_step_update = tf.assign_add(self.global_step, 1.0)
with tf.name_scope('define_input'):
self.trainable = tf.placeholder(dtype=tf.bool, name='training')
        # In the first stage only the specified layers are trained; would that not get messy?
with tf.name_scope("define_first_stage_train"):
self.first_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate)#.minimize(loss,
#var_list=self.first_stage_trainable_var_list)
with tf.name_scope("define_second_stage_train"):
self.second_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate)#.minimize(loss,
#var_list=second_stage_trainable_var_list)
with tf.device('/cpu:0'):
train_dataset = tf.data.Dataset.from_generator(lambda: self.trainset, \
output_types=(tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.float32))
train_dataset = train_dataset.repeat()
train_dataset = train_dataset.prefetch(buffer_size=50)
train_dataset_iter = train_dataset.make_one_shot_iterator()
test_dataset = tf.data.Dataset.from_generator(lambda: self.testset, \
output_types=(tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.float32))
test_dataset = test_dataset.repeat()
test_dataset = test_dataset.prefetch(2)
test_dataset_iter = test_dataset.make_one_shot_iterator()
input_data, label_sbbox, label_mbbox, label_lbbox, \
true_sbboxes, true_mbboxes, true_lbboxes, batch_bboxes_gt = \
tf.cond(self.trainable, lambda: train_dataset_iter.get_next(), lambda: test_dataset_iter.get_next())
        self.total_loss = 0  # for summary only
        self.giou_loss = 0
        self.conf_loss = 0
        self.prob_loss = 0
first_stage_gradients = []
second_stage_gradients = []
for clone_idx, gpu in enumerate(self.gpus):
reuse = clone_idx > 0
with tf.variable_scope(tf.get_variable_scope(), reuse = reuse):
with tf.name_scope(self.clone_scopes[clone_idx]) as clone_scope:
with tf.device(gpu) as clone_device:
model = YOLOV3(input_data[clone_idx*self.batch_size_per_gpu:(clone_idx+1)*self.batch_size_per_gpu, :, :, :], self.trainable)
# self.net_var = tf.global_variables()
label_sbbox_per_gpu = label_sbbox[clone_idx*self.batch_size_per_gpu:(clone_idx+1)*self.batch_size_per_gpu, :, :, :, :]
label_mbbox_per_gpu = label_mbbox[clone_idx*self.batch_size_per_gpu:(clone_idx+1)*self.batch_size_per_gpu, :, :, :, :]
label_lbbox_per_gpu = label_lbbox[clone_idx*self.batch_size_per_gpu:(clone_idx+1)*self.batch_size_per_gpu, :, :, :, :]
true_sbboxes_per_gpu = true_sbboxes[clone_idx*self.batch_size_per_gpu:(clone_idx+1)*self.batch_size_per_gpu, :, :]
true_mbboxes_per_gpu = true_mbboxes[clone_idx*self.batch_size_per_gpu:(clone_idx+1)*self.batch_size_per_gpu, :, :]
true_lbboxes_per_gpu = true_lbboxes[clone_idx*self.batch_size_per_gpu:(clone_idx+1)*self.batch_size_per_gpu, :, :]
giou_loss, conf_loss, prob_loss = model.compute_loss(
label_sbbox_per_gpu, label_mbbox_per_gpu, label_lbbox_per_gpu,
true_sbboxes_per_gpu, true_mbboxes_per_gpu, true_lbboxes_per_gpu)
loss = giou_loss + conf_loss + prob_loss
self.total_loss += loss
self.giou_loss += giou_loss
self.conf_loss += conf_loss
self.prob_loss += prob_loss
conv_lbbox_p = model.pred_conf_l
conv_mbbox_p = model.pred_conf_m
conv_sbbox_p = model.pred_conf_s
batch_image_gt = input_data[clone_idx*self.batch_size_per_gpu:(clone_idx+1)*self.batch_size_per_gpu, :, :, :]
batch_image_gt = tf.image.draw_bounding_boxes(batch_image_gt, batch_bboxes_gt[clone_idx*self.batch_size_per_gpu:(clone_idx+1)*self.batch_size_per_gpu, :, :])
tf.summary.image("batch_image_gt", batch_image_gt, 3)
tf.summary.image("conv_lbbox_p", tf.reshape(conv_lbbox_p[:, :, :, tf.cast(self.global_step % self.anchor_per_scale, dtype=tf.int32), :], \
(tf.shape(conv_lbbox_p)[0] , tf.shape(conv_lbbox_p)[1], tf.shape(conv_lbbox_p)[1], 1)), 3)
tf.summary.image("conv_mbbox_p", tf.reshape(conv_mbbox_p[:, :, :, tf.cast(self.global_step % self.anchor_per_scale, dtype=tf.int32), :], \
(tf.shape(conv_mbbox_p)[0] , tf.shape(conv_mbbox_p)[1], tf.shape(conv_mbbox_p)[1], 1)), 3)
tf.summary.image("conv_sbbox_p", tf.reshape(conv_sbbox_p[:, :, :, tf.cast(self.global_step % self.anchor_per_scale, dtype=tf.int32), :], \
(tf.shape(conv_sbbox_p)[0] , tf.shape(conv_sbbox_p)[1], tf.shape(conv_sbbox_p)[1], 1)), 3)
# compute clone gradients
self.first_stage_trainable_var_list = []
for var in tf.trainable_variables():
var_name = var.op.name
var_name_mess = str(var_name).split('/')
if var_name_mess[0] in ['conv_sbbox', 'conv_mbbox', 'conv_lbbox']:
self.first_stage_trainable_var_list.append(var)
first_clone_gradients = self.first_stage_optimizer.compute_gradients(loss, var_list=self.first_stage_trainable_var_list)
first_stage_gradients.append(first_clone_gradients)
second_stage_trainable_var_list = tf.trainable_variables()
second_clone_gradients = self.second_stage_optimizer.compute_gradients(loss, var_list=second_stage_trainable_var_list)
second_stage_gradients.append(second_clone_gradients)
averaged_first_stage_gradients = self.sum_gradients(first_stage_gradients)
first_stage_apply_grad_op = self.first_stage_optimizer.apply_gradients(averaged_first_stage_gradients)
# shadow_variable = decay * shadow_variable + (1 - decay) * variable
with tf.name_scope("define_weight_decay"):
self.moving_ave = tf.train.ExponentialMovingAverage(self.moving_ave_decay).apply(tf.trainable_variables())
        # Ops listed as control dependencies run before the op defined inside the block.
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
with tf.control_dependencies([first_stage_apply_grad_op, self.global_step_update]):
with tf.control_dependencies([self.moving_ave]):
self.train_op_with_frozen_variables = tf.no_op()
averaged_second_stage_gradients = self.sum_gradients(second_stage_gradients)
second_stage_apply_grad_op = self.second_stage_optimizer.apply_gradients(averaged_second_stage_gradients)
        # Ops listed as control dependencies run before the op defined inside the block.
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
with tf.control_dependencies([second_stage_apply_grad_op, self.global_step_update]):
with tf.control_dependencies([self.moving_ave]):
self.train_op_with_all_variables = tf.no_op()
with tf.name_scope('summary'):
tf.summary.scalar("learn_rate", self.learn_rate)
tf.summary.scalar("giou_loss", self.giou_loss)
tf.summary.scalar("conf_loss", self.conf_loss)
tf.summary.scalar("prob_loss", self.prob_loss)
tf.summary.scalar("total_loss", self.total_loss)
logdir = "./data/log/"
if os.path.exists(logdir): shutil.rmtree(logdir)
os.mkdir(logdir)
self.write_op = tf.summary.merge_all()
self.summary_writer = tf.summary.FileWriter(logdir, graph=self.sess.graph)
with tf.name_scope('loader_and_saver'):
self.loader = tf.train.Saver(tf.global_variables())
self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=3)
def sum_gradients(self, clone_grads):
"""计算梯度
Arguments:
clone_grads -- 每个GPU所对应的梯度
Returns:
averaged_grads -- 平均梯度
"""
averaged_grads = []
for grad_and_vars in zip(*clone_grads):
grads = []
var = grad_and_vars[0][1]
try:
for g, v in grad_and_vars:
assert v == var
grads.append(g)
grad = tf.add_n(grads, name = v.op.name + '_summed_gradients')
except:
import pdb
pdb.set_trace()
averaged_grads.append((grad, v))
return averaged_grads
def train(self):
        test_best_loss = 0
self.sess.run(tf.global_variables_initializer())
try:
print('=> Restoring weights from: %s ... ' % self.initial_weight)
self.loader.restore(self.sess, self.initial_weight)
except:
print('=> %s does not exist !!!' % self.initial_weight)
print('=> Now it starts to train YOLOV3 from scratch ...')
self.first_stage_epochs = 0
for epoch in range(1, 1+self.first_stage_epochs+self.second_stage_epochs):
if epoch <= self.first_stage_epochs:
print("first")
train_op = self.train_op_with_frozen_variables
else:
print("second")
train_op = self.train_op_with_all_variables
pbar = trange(self.steps_per_period)
test = trange(self.steps_test)
train_epoch_loss, test_epoch_loss = [], []
for i in pbar:
_, summary, train_step_loss, global_step_val = self.sess.run(
[train_op, self.write_op, self.total_loss, self.global_step],feed_dict={self.trainable: True})
train_epoch_loss.append(train_step_loss)
pbar.set_description("train loss: %.2f" %train_step_loss)
if int(global_step_val) % 100 == 0:
self.summary_writer.add_summary(summary, global_step_val)
for j in test:
test_step_loss, test_step_giou_loss, test_step_conf_loss, test_step_prob_loss \
= self.sess.run([self.total_loss, self.giou_loss, self.conf_loss, self.prob_loss],feed_dict={self.trainable: False})
test_epoch_loss.append(test_step_loss)
test.set_description("test loss: %.2f, giou_loss: %.2f, conf_loss: %.2f, prob_loss: %.2f" %(test_step_loss, test_step_giou_loss, test_step_conf_loss, test_step_prob_loss))
train_epoch_loss, test_epoch_loss = np.mean(train_epoch_loss), np.mean(test_epoch_loss)
ckpt_file = "./checkpoint/yolov3_test_loss=%.4f.ckpt" % test_epoch_loss
log_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
if epoch == 1:
test_best_loss = test_epoch_loss
if test_epoch_loss <= test_best_loss:
self.saver.save(self.sess, ckpt_file, global_step=epoch)
print("=> Epoch: %2d Time: %s Train loss: %.2f Test loss: %.2f Saving %s ..."
%(epoch, log_time, train_epoch_loss, test_epoch_loss, ckpt_file))
test_best_loss = test_epoch_loss
else:
print("=> Epoch: %2d Time: %s we don't save model this epoch ..."
%(epoch, log_time))
if __name__ == '__main__': YoloTrain().train()
``` |
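The `learn_rate` graph above is linear warmup followed by cosine decay. A plain-NumPy sketch of the same formula can help visualize the schedule outside TensorFlow; the epoch counts, step counts, and rates below are illustrative, not values from `core/config.py`.
```python
import numpy as np


def lr_at(step, warmup_steps, train_steps, lr_init=1e-4, lr_end=1e-6):
    """Linear warmup up to lr_init, then cosine decay down to lr_end."""
    if step < warmup_steps:
        return step / warmup_steps * lr_init
    progress = (step - warmup_steps) / (train_steps - warmup_steps)
    return lr_end + 0.5 * (lr_init - lr_end) * (1 + np.cos(progress * np.pi))


# Illustrative numbers: 2 warmup epochs, 30 total epochs, 1000 steps per epoch.
warmup, total = 2 * 1000, 30 * 1000
for step in (1, 1000, 2000, 16000, 30000):
    print(step, lr_at(step, warmup, total))
```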