| code (string, 20 – 1.05M chars) | apis (sequence) | extract_api (string, 75 – 5.24M chars) |
|---|---|---|
"""
Author: <NAME>
Selenium script that logs into banking website
and retrieves banking information
"""
#__Start_script__#
from selenium import webdriver
import time
#Load and open chrome with cookies/data
url = "https://secure07a.chase.com/"
#chase bank info
username = ''#Username
password = ''#password
class Account(object):
    def __init__(self, url, username, password):
self.url = url
self.username = username
self.password = password
self.balance = None
def results(self):
print("Account_Holder: {}".format(self.username))
print("Balance: {}".format(self.balance))
class Scraper():
def __init__(self):
self.driver = None
def driver_setup(self):
options = webdriver.ChromeOptions()
        #Note: change the next line if you are running this as a user other than the author
        #How to: if using Chrome, open "chrome://version/" in the browser
        #and copy the profile ("user data") path shown for your machine
        options.add_argument("user-data-dir=C:/Users/Chris/AppData/Local/Google/Chrome/User Data")
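        # Editorial sketch (not part of the original script): the hard-coded username could be
        # avoided by building the path from the current user's home directory, e.g.
        #   import os  # would need to be added at the top of the script
        #   options.add_argument("user-data-dir=" + os.path.expanduser("~/AppData/Local/Google/Chrome/User Data"))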
self.driver = webdriver.Chrome(chrome_options=options)
self.driver.get("https://secure07a.chase.com/web/auth/dashboard#/dashboard/accounts/summary/dda;params=dda,379901328")
def login(self):
self.driver.switch_to.frame("logonbox")
self.driver.find_element_by_name('userId').send_keys(username)
self.driver.find_element_by_name('password').send_keys(password)
self.driver.find_element_by_id('signin-button').click()
def fetch_balance(self):
#Fetch balance
account_balance = self.driver.find_element_by_class_name("balance").text
        return account_balance
def main():
chase_account = Account(url,username,password)
chase_scraper = Scraper()
chase_scraper.driver_setup()
time.sleep(5)
chase_scraper.login()
time.sleep(5)
chase_account.balance = chase_scraper.fetch_balance()
chase_account.results()
#__End_script__#
if __name__ == "__main__":
main()
| [
"selenium.webdriver.Chrome",
"selenium.webdriver.ChromeOptions",
"time.sleep"
] | [((1921, 1934), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1931, 1934), False, 'import time\n'), ((1967, 1980), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1977, 1980), False, 'import time\n'), ((788, 813), 'selenium.webdriver.ChromeOptions', 'webdriver.ChromeOptions', ([], {}), '()\n', (811, 813), False, 'from selenium import webdriver\n'), ((1152, 1192), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'chrome_options': 'options'}), '(chrome_options=options)\n', (1168, 1192), False, 'from selenium import webdriver\n')] |
"""This module contains tests for pandas benchmarks.
"""
import time
import pytest
import numpy as np
import pandas as pd
from pywrangler.benchmark import allocate_memory
from pywrangler.pandas.base import PandasSingleNoFit
from pywrangler.pandas.benchmark import (
PandasMemoryProfiler,
PandasTimeProfiler
)
from pywrangler.util.testing.util import concretize_abstract_wrangler
pytestmark = pytest.mark.pandas
MIB = 2 ** 20
@pytest.fixture
def test_wrangler():
"""Helper fixture to generate PandasWrangler instances with parametrization
of transform output and sleep.
"""
def create_wrangler(size=None, result=None, sleep=0):
"""Return instance of PandasWrangler.
Parameters
----------
size: float
Memory size in MiB to allocate during transform step.
result: pd.DataFrame
            Define exact return value of transform step.
sleep: float
Define sleep interval.
"""
class DummyWrangler(PandasSingleNoFit):
def transform(self, df):
if size is not None:
df_out = pd.DataFrame(allocate_memory(size))
else:
df_out = pd.DataFrame(result)
time.sleep(sleep)
return df_out
return concretize_abstract_wrangler(DummyWrangler)()
return create_wrangler
def test_pandas_memory_profiler_memory_usage_dfs():
df1 = pd.DataFrame(np.random.rand(10))
df2 = pd.DataFrame(np.random.rand(10))
test_input = [df1, df2]
test_output = int(df1.memory_usage(index=True, deep=True).sum() +
df2.memory_usage(index=True, deep=True).sum())
assert PandasMemoryProfiler._memory_usage_dfs(*test_input) == test_output
def test_pandas_memory_profiler_return_self(test_wrangler):
memory_profiler = PandasMemoryProfiler(test_wrangler())
assert memory_profiler is memory_profiler.profile(pd.DataFrame())
@pytest.mark.xfail(reason="Succeeds locally but sometimes fails remotely due "
"to non deterministic memory management.")
def test_pandas_memory_profiler_usage_median(test_wrangler):
wrangler = test_wrangler(size=30, sleep=0.01)
memory_profiler = PandasMemoryProfiler(wrangler)
assert memory_profiler.profile(pd.DataFrame()).median > 29 * MIB
def test_pandas_memory_profiler_usage_input_output(test_wrangler):
df_input = pd.DataFrame(np.random.rand(1000))
df_output = pd.DataFrame(np.random.rand(10000))
test_df_input = df_input.memory_usage(index=True, deep=True).sum()
test_df_output = df_output.memory_usage(index=True, deep=True).sum()
wrangler = test_wrangler(result=df_output)
memory_profiler = PandasMemoryProfiler(wrangler).profile(df_input)
assert memory_profiler.input == test_df_input
assert memory_profiler.output == test_df_output
@pytest.mark.xfail(reason="Succeeds locally but sometimes fails remotely due "
"to non deterministic memory management.")
def test_pandas_memory_profiler_ratio(test_wrangler):
usage_mib = 30
df_input = pd.DataFrame(np.random.rand(1000000))
usage_input = df_input.memory_usage(index=True, deep=True).sum()
test_output = ((usage_mib - 1) * MIB) / usage_input
wrangler = test_wrangler(size=usage_mib, sleep=0.01)
memory_profiler = PandasMemoryProfiler(wrangler)
assert memory_profiler.profile(df_input).ratio > test_output
def test_pandas_time_profiler_best(test_wrangler):
"""Basic test for pandas time profiler ensuring fastest timing is slower
than forced sleep.
"""
sleep = 0.0001
wrangler = test_wrangler(sleep=sleep)
time_profiler = PandasTimeProfiler(wrangler, 1).profile(pd.DataFrame())
assert time_profiler.best >= sleep
| [
"numpy.random.rand",
"pytest.mark.xfail",
"pywrangler.util.testing.util.concretize_abstract_wrangler",
"pywrangler.benchmark.allocate_memory",
"pywrangler.pandas.benchmark.PandasMemoryProfiler",
"time.sleep",
"pywrangler.pandas.benchmark.PandasTimeProfiler",
"pandas.DataFrame",
"pywrangler.pandas.benchmark.PandasMemoryProfiler._memory_usage_dfs"
] | [((1987, 2114), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Succeeds locally but sometimes fails remotely due to non deterministic memory management."""'}), "(reason=\n 'Succeeds locally but sometimes fails remotely due to non deterministic memory management.'\n )\n", (2004, 2114), False, 'import pytest\n'), ((2909, 3036), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Succeeds locally but sometimes fails remotely due to non deterministic memory management."""'}), "(reason=\n 'Succeeds locally but sometimes fails remotely due to non deterministic memory management.'\n )\n", (2926, 3036), False, 'import pytest\n'), ((2267, 2297), 'pywrangler.pandas.benchmark.PandasMemoryProfiler', 'PandasMemoryProfiler', (['wrangler'], {}), '(wrangler)\n', (2287, 2297), False, 'from pywrangler.pandas.benchmark import PandasMemoryProfiler, PandasTimeProfiler\n'), ((3388, 3418), 'pywrangler.pandas.benchmark.PandasMemoryProfiler', 'PandasMemoryProfiler', (['wrangler'], {}), '(wrangler)\n', (3408, 3418), False, 'from pywrangler.pandas.benchmark import PandasMemoryProfiler, PandasTimeProfiler\n'), ((1481, 1499), 'numpy.random.rand', 'np.random.rand', (['(10)'], {}), '(10)\n', (1495, 1499), True, 'import numpy as np\n'), ((1524, 1542), 'numpy.random.rand', 'np.random.rand', (['(10)'], {}), '(10)\n', (1538, 1542), True, 'import numpy as np\n'), ((1724, 1775), 'pywrangler.pandas.benchmark.PandasMemoryProfiler._memory_usage_dfs', 'PandasMemoryProfiler._memory_usage_dfs', (['*test_input'], {}), '(*test_input)\n', (1762, 1775), False, 'from pywrangler.pandas.benchmark import PandasMemoryProfiler, PandasTimeProfiler\n'), ((2465, 2485), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (2479, 2485), True, 'import numpy as np\n'), ((2516, 2537), 'numpy.random.rand', 'np.random.rand', (['(10000)'], {}), '(10000)\n', (2530, 2537), True, 'import numpy as np\n'), ((3157, 3180), 'numpy.random.rand', 'np.random.rand', (['(1000000)'], {}), '(1000000)\n', (3171, 3180), True, 'import numpy as np\n'), ((3769, 3783), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3781, 3783), True, 'import pandas as pd\n'), ((1330, 1373), 'pywrangler.util.testing.util.concretize_abstract_wrangler', 'concretize_abstract_wrangler', (['DummyWrangler'], {}), '(DummyWrangler)\n', (1358, 1373), False, 'from pywrangler.util.testing.util import concretize_abstract_wrangler\n'), ((1968, 1982), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1980, 1982), True, 'import pandas as pd\n'), ((2754, 2784), 'pywrangler.pandas.benchmark.PandasMemoryProfiler', 'PandasMemoryProfiler', (['wrangler'], {}), '(wrangler)\n', (2774, 2784), False, 'from pywrangler.pandas.benchmark import PandasMemoryProfiler, PandasTimeProfiler\n'), ((3729, 3760), 'pywrangler.pandas.benchmark.PandasTimeProfiler', 'PandasTimeProfiler', (['wrangler', '(1)'], {}), '(wrangler, 1)\n', (3747, 3760), False, 'from pywrangler.pandas.benchmark import PandasMemoryProfiler, PandasTimeProfiler\n'), ((1266, 1283), 'time.sleep', 'time.sleep', (['sleep'], {}), '(sleep)\n', (1276, 1283), False, 'import time\n'), ((2334, 2348), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2346, 2348), True, 'import pandas as pd\n'), ((1228, 1248), 'pandas.DataFrame', 'pd.DataFrame', (['result'], {}), '(result)\n', (1240, 1248), True, 'import pandas as pd\n'), ((1154, 1175), 'pywrangler.benchmark.allocate_memory', 'allocate_memory', (['size'], {}), '(size)\n', (1169, 1175), False, 'from pywrangler.benchmark import allocate_memory\n')] |
from fenics import *
from fenics_adjoint import *
import numpy as np
from pyadjoint import Block
from pyadjoint.overloaded_function import overload_function
import scipy.interpolate
def read_rosco_curves():
filename = "Cp_Ct_Cq.DTU10MW.txt"
with open(filename, "r") as f:
datafile = f.readlines()
for idx in range(len(datafile)):
if "Pitch angle" in datafile[idx]:
pitch_array = np.loadtxt(filename, skiprows=idx + 1, max_rows=1)
if "TSR vector" in datafile[idx]:
tsr_array = np.loadtxt(filename, skiprows=idx + 1, max_rows=1)
if "Wind speed" in datafile[idx]:
wind_speed = np.loadtxt(filename, skiprows=idx + 1, max_rows=1)
if "Power coefficient" in datafile[idx]:
cp_array = np.loadtxt(filename, skiprows=idx + 2, max_rows=len(tsr_array))
if "Thrust coefficient" in datafile[idx]:
ct_array = np.loadtxt(filename, skiprows=idx + 2, max_rows=len(tsr_array))
if "Torque coefficent" in datafile[idx]:
cq_array = np.loadtxt(filename, skiprows=idx + 2, max_rows=len(tsr_array))
pitch_grid, tsr_grid = np.meshgrid(pitch_array, tsr_array)
return pitch_grid, tsr_grid, ct_array, cp_array
def lookup_field(pitch_grid, tsr_grid, ct_array, cp_array):
# construct function space
sw_corner = Point(np.min(pitch_grid), np.min(tsr_grid))
ne_corner = Point(np.max(pitch_grid), np.max(tsr_grid))
(n_tsr, n_pitch) = pitch_grid.shape
# set function in function space
m = RectangleMesh(sw_corner, ne_corner, n_pitch + 1, n_tsr + 1)
fe = FiniteElement("Lagrange", m.ufl_cell(), 1)
fs = FunctionSpace(m, fe)
# assign values to function
dof_coords = fs.tabulate_dof_coordinates()
ct = Function(fs)
ct_interp = scipy.interpolate.interp2d(pitch_grid[0, :], tsr_grid[:, 0], ct_array, kind='linear')
ct_values = ct.vector().get_local()
cp = Function(fs)
cp_interp = scipy.interpolate.interp2d(pitch_grid[0, :], tsr_grid[:, 0], cp_array, kind='linear')
cp_values = cp.vector().get_local()
# logger.warning("Limiting 0<=ct<=1 for axial induction calculations")
for idx in range(len(dof_coords)):
pitch, tsr = dof_coords[idx]
ct_values[idx] = np.min((np.max((ct_interp(pitch, tsr), 0.)), 1.))
cp_values[idx] = np.min((np.max((cp_interp(pitch, tsr), 0.)), 1.))
a = 0.5 - 0.5 * (np.sqrt(1 - ct_values[idx]))
# convert to local
ct_values[idx] = ct_values[idx] / (1 - a)
cp_values[idx] = cp_values[idx] / (1 - a) ** 2
ct.vector().set_local(ct_values)
cp.vector().set_local(cp_values)
# write ct and cp field to output file for visual inspection
# ct_file = File("ct.pvd")
# cp_file = File("cp.pvd")
# ct_file.write(ct)
# cp_file.write(cp)
return ct, cp
def get_coefficient(func, pitch, torque, rotor_speed, power_aero, disc_velocity):
dt = 1.
inertia = 10.
wn = (dt / inertia) * (power_aero / rotor_speed - torque) + rotor_speed
radius = 90.
tsr = (wn * radius) / disc_velocity
# tsr = float(torque * rotor_speed)
# tsr = torque*rotor_speed
return func(pitch, tsr)
def get_coefficient_derivative(func_grad, pitch, torque, rotor_speed, power_aero, disc_velocity, idx=0):
dt = 1.
inertia = 10.
radius = 90.
wn = (dt / inertia) * (power_aero / rotor_speed - torque) + rotor_speed
tsr = (wn * radius) / disc_velocity
# tsr = torque*rotor_speed
p1 = func_grad[int(np.min((idx, 1)))]
if idx == 0:
p2 = float(1.) # pitch
elif idx == 1: # torque derivative
# p2 = float(rotor_speed)
p2 = AdjFloat((radius / disc_velocity) * (-1 * dt / inertia))
elif idx == 2: # rotor speed
# p2 = float(torque)
p2 = AdjFloat((radius / disc_velocity) * (- (dt * power_aero) / (inertia * rotor_speed ** 2) + 1.))
elif idx == 3:
p2 = AdjFloat((radius / disc_velocity) * (dt / (inertia * rotor_speed)))
elif idx == 4:
p2 = AdjFloat(-1 * (wn * radius) / disc_velocity ** 2)
else:
raise ValueError("Derivative index out of bounds.")
return p1(pitch, tsr) * p2
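# Editorial comments (added, not in the original source): get_coefficient evaluates the
# interpolated coefficient field at (pitch, tsr), where
#     wn  = (dt / inertia) * (power_aero / rotor_speed - torque) + rotor_speed
#     tsr = wn * radius / disc_velocity
# get_coefficient_derivative applies the chain rule: p1 is d(func)/d(pitch) for idx=0 and
# d(func)/d(tsr) otherwise (func_grad[min(idx, 1)]), and p2 is the inner derivative of tsr
# with respect to the chosen input:
#     idx=1 (torque):        p2 = (radius / disc_velocity) * (-dt / inertia)
#     idx=2 (rotor_speed):   p2 = (radius / disc_velocity) * (1 - dt * power_aero / (inertia * rotor_speed ** 2))
#     idx=3 (power_aero):    p2 = (radius / disc_velocity) * (dt / (inertia * rotor_speed))
#     idx=4 (disc_velocity): p2 = -(wn * radius) / disc_velocity ** 2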
backend_get_coefficient = get_coefficient
class CoefficientBlock(Block):
def __init__(self, func, pitch, torque, rotor_speed, power_aero, disc_velocity, **kwargs):
super(CoefficientBlock, self).__init__()
self.kwargs = kwargs
self.func = func
self.add_dependency(pitch)
self.add_dependency(torque)
self.add_dependency(rotor_speed)
self.add_dependency(power_aero)
self.add_dependency(disc_velocity)
# self.rotor_speed = rotor_speed
# self.power_aero = power_aero
# self.disc_velocity = disc_velocity
degree = func.function_space().ufl_element().degree()
family = func.function_space().ufl_element().family()
mesh = func.function_space().mesh()
if np.isin(family, ["CG", "Lagrange"]):
self.V = FunctionSpace(mesh, "DG", degree - 1)
else:
raise NotImplementedError(
"Not implemented for other elements than Lagrange")
self.func_grad = [project(func.dx(x), self.V) for x in range(2)]
def __str__(self):
return "CoefficientBlock"
def evaluate_adj_component(self, inputs, adj_inputs, block_variable, idx, prepared=None):
# output = get_derivative(inputs[0], inputs[1], idx) * adj_inputs[0]
# idx2 = int(np.min((idx, 1)))
# print(idx2)
# grad_idx = project(self.func.dx(idx2), self.V)
# output = grad_idx(inputs[0], inputs[1]) * adj_inputs[0]\
# * rotor_speed_derivative(func=self.func_grad,
# pitch=inputs[0],
# torque=inputs[1],
# rotor_speed=inputs[2],
# power_aero=self.power_aero, #inputs[3],
# disc_velocity=self.disc_velocity, #inputs[4],
# idx=idx) \
output = get_coefficient_derivative(func_grad=self.func_grad,
pitch=inputs[0],
torque=inputs[1],
rotor_speed=inputs[2],
power_aero=inputs[3],
disc_velocity=inputs[4],
idx=idx) * adj_inputs[0]
print(output)
print([float(ix) for ix in inputs])
return output
def recompute_component(self, inputs, block_variable, idx, prepared):
return backend_get_coefficient(func=self.func,
pitch=inputs[0],
torque=inputs[1],
rotor_speed=inputs[2],
power_aero=inputs[3],
disc_velocity=inputs[4])
# return backend_get_coefficient(self.func, inputs[0], inputs[1], inputs[2], self.power_aero, self.disc_velocity)
get_coefficient = overload_function(get_coefficient, CoefficientBlock)
pitch_grid, tsr_grid, ct_array, cp_array = read_rosco_curves()
ct, cp = lookup_field(pitch_grid, tsr_grid, ct_array, cp_array)
# float((1 / inertia) * (power_aero / rotor_speed - torque) + rotor_speed)
w = Constant(5.)
q = Constant(10.)
pa = Constant(10.)
# iner = Constant(5.)
b = Constant(0.)
ud = Constant(10.)
# tsr = w
# wn = (1 / iner) * (pa / q - q) + w
w.assign(0.8)
q.assign(1.)
pa.assign(5.)
b.assign(2.)
ctp = get_coefficient(ct, b, q, w, pa, ud)
J = ctp ** 2
controls = [b, q, w, pa, ud]
m = [Control(c) for c in controls]
Jh = ReducedFunctional(J, m)
Jh.derivative()
# print([float(g) for g in Jh.derivative()])
h = [Constant(0.01 * np.random.rand()) for c in controls]
taylor_test(Jh, controls, h)
| [
"pyadjoint.overloaded_function.overload_function",
"numpy.sqrt",
"numpy.random.rand",
"numpy.isin",
"numpy.max",
"numpy.min",
"numpy.meshgrid",
"numpy.loadtxt"
] | [((7342, 7394), 'pyadjoint.overloaded_function.overload_function', 'overload_function', (['get_coefficient', 'CoefficientBlock'], {}), '(get_coefficient, CoefficientBlock)\n', (7359, 7394), False, 'from pyadjoint.overloaded_function import overload_function\n'), ((1145, 1180), 'numpy.meshgrid', 'np.meshgrid', (['pitch_array', 'tsr_array'], {}), '(pitch_array, tsr_array)\n', (1156, 1180), True, 'import numpy as np\n'), ((1348, 1366), 'numpy.min', 'np.min', (['pitch_grid'], {}), '(pitch_grid)\n', (1354, 1366), True, 'import numpy as np\n'), ((1368, 1384), 'numpy.min', 'np.min', (['tsr_grid'], {}), '(tsr_grid)\n', (1374, 1384), True, 'import numpy as np\n'), ((1408, 1426), 'numpy.max', 'np.max', (['pitch_grid'], {}), '(pitch_grid)\n', (1414, 1426), True, 'import numpy as np\n'), ((1428, 1444), 'numpy.max', 'np.max', (['tsr_grid'], {}), '(tsr_grid)\n', (1434, 1444), True, 'import numpy as np\n'), ((4951, 4986), 'numpy.isin', 'np.isin', (['family', "['CG', 'Lagrange']"], {}), "(family, ['CG', 'Lagrange'])\n", (4958, 4986), True, 'import numpy as np\n'), ((421, 471), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'skiprows': '(idx + 1)', 'max_rows': '(1)'}), '(filename, skiprows=idx + 1, max_rows=1)\n', (431, 471), True, 'import numpy as np\n'), ((538, 588), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'skiprows': '(idx + 1)', 'max_rows': '(1)'}), '(filename, skiprows=idx + 1, max_rows=1)\n', (548, 588), True, 'import numpy as np\n'), ((656, 706), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'skiprows': '(idx + 1)', 'max_rows': '(1)'}), '(filename, skiprows=idx + 1, max_rows=1)\n', (666, 706), True, 'import numpy as np\n'), ((3509, 3525), 'numpy.min', 'np.min', (['(idx, 1)'], {}), '((idx, 1))\n', (3515, 3525), True, 'import numpy as np\n'), ((8047, 8063), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (8061, 8063), True, 'import numpy as np\n'), ((2409, 2436), 'numpy.sqrt', 'np.sqrt', (['(1 - ct_values[idx])'], {}), '(1 - ct_values[idx])\n', (2416, 2436), True, 'import numpy as np\n')] |
from pyspark import SparkContext
def func(x,y):
result = x-y
return result
if __name__ == '__main__':
    # create the Spark context
sc = SparkContext('local[2]','topncount')
rdd1 = sc.textFile('./access.log')
    # split each line on whitespace and keep only the rows with more than 10 fields
rdd0 = rdd1.map(lambda x: x.split()).filter(lambda x: len(x) > 10)
rdd2 = rdd0.map(lambda x:(x[10],1))
rdd3 = rdd2.reduceByKey(lambda a,b:a+b) # (x:2)
rdd4 = rdd3.sortBy(lambda x:x[1],ascending=False).filter(lambda x:len(x[0])>6)
result = rdd4.take(1)
print(rdd0.collect())
print(rdd2.collect())
print(rdd3.collect())
print(rdd4.collect())
print(result)
# rdd1 = sc.parallelize([1,2,3,4,5,6])
# result = rdd1.reduce(func)
# print(result) | [
"pyspark.SparkContext"
] | [((140, 177), 'pyspark.SparkContext', 'SparkContext', (['"""local[2]"""', '"""topncount"""'], {}), "('local[2]', 'topncount')\n", (152, 177), False, 'from pyspark import SparkContext\n')] |
# Bokeh library
from bokeh.plotting import show
from bokeh.io import output_file
from bokeh.layouts import column
# Output to file
output_file(
"east-west-top-2-standings-race.html",
title="Conference Top 2 Teams Wins Race",
)
# Plot the two visualizations in a vertical configuration
show(column(west_fig, east_fig)) # noqa
| [
"bokeh.io.output_file",
"bokeh.layouts.column"
] | [((132, 229), 'bokeh.io.output_file', 'output_file', (['"""east-west-top-2-standings-race.html"""'], {'title': '"""Conference Top 2 Teams Wins Race"""'}), "('east-west-top-2-standings-race.html', title=\n 'Conference Top 2 Teams Wins Race')\n", (143, 229), False, 'from bokeh.io import output_file\n'), ((300, 326), 'bokeh.layouts.column', 'column', (['west_fig', 'east_fig'], {}), '(west_fig, east_fig)\n', (306, 326), False, 'from bokeh.layouts import column\n')] |
import os
from flask import Flask, jsonify
from flask_restful import Api
from flask_sqlalchemy import SQLAlchemy
from flask_jwt_extended import JWTManager
app = Flask(__name__)
if 'APP_CONFIG_FILE' in os.environ:
app.config.from_envvar('APP_CONFIG_FILE')
else:
app.config.from_pyfile('config/production.py')
db = SQLAlchemy(app)
@app.before_first_request
def create_tables():
db.create_all()
jwt = JWTManager(app)
import views, models
@jwt.token_in_blacklist_loader
def check_if_token_in_blacklist(decrypted_token):
jti = decrypted_token['jti']
return models.RevokedTokenModel.is_jti_blacklisted(jti)
api = Api(app)
api.add_resource(views.ImageProcessor, '/send/image')
api.add_resource(views.UserRegistration, '/registration')
api.add_resource(views.UserLogin, '/login')
api.add_resource(views.UserLogoutAccess, '/logout/access')
api.add_resource(views.UserLogoutRefresh, '/logout/refresh')
api.add_resource(views.TokenRefresh, '/token/refresh')
api.add_resource(views.AllUsers, '/users') | [
"flask_jwt_extended.JWTManager",
"flask_restful.Api",
"flask.Flask",
"flask_sqlalchemy.SQLAlchemy",
"models.RevokedTokenModel.is_jti_blacklisted"
] | [((164, 179), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (169, 179), False, 'from flask import Flask, jsonify\n'), ((320, 335), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', (['app'], {}), '(app)\n', (330, 335), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((410, 425), 'flask_jwt_extended.JWTManager', 'JWTManager', (['app'], {}), '(app)\n', (420, 425), False, 'from flask_jwt_extended import JWTManager\n'), ((630, 638), 'flask_restful.Api', 'Api', (['app'], {}), '(app)\n', (633, 638), False, 'from flask_restful import Api\n'), ((574, 622), 'models.RevokedTokenModel.is_jti_blacklisted', 'models.RevokedTokenModel.is_jti_blacklisted', (['jti'], {}), '(jti)\n', (617, 622), False, 'import views, models\n')] |
# Copyright 2015-2016 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import logging
import re
LOG = logging.getLogger(__name__)
def copy_tree(src, dest, ignore=None, include_parent=False):
ignore = ignore or []
if os.path.isfile(src):
raise Exception('Cannot use copy_tree with a file as the src')
LOG.info('Copying source files')
if include_parent:
# if src is foo, make dest/foo and copy files there
nested_dest = os.path.normpath(
os.path.join(dest, os.path.basename(src)))
if not os.path.isdir(nested_dest):
os.makedirs(nested_dest)
else:
nested_dest = dest
# Re-create directory structure
for root, _, files in os.walk(src):
for filename in files:
path = os.path.join(root, filename)
path_relative_to_the_source_dir = os.path.relpath(path, src)
if _ignore_file(path_relative_to_the_source_dir, ignore):
continue
sub_dirs = os.path.dirname(os.path.relpath(path,
start=src))
pkg_path = os.path.join(nested_dest, sub_dirs)
if not os.path.isdir(pkg_path):
os.makedirs(pkg_path)
LOG.debug("Copying %s to %s" % (path, pkg_path))
if os.path.islink(path):
linkto = os.readlink(path)
os.symlink(linkto.replace(src, dest, 1),
os.path.join(pkg_path, filename))
else:
shutil.copy(path, pkg_path)
# Iterate through every item in ignore
# and check for matches in the path
def _ignore_file(path, ignore=None):
ignore = ignore or []
if not ignore:
return False
for ign in ignore:
if re.search(ign, path):
return True
return False
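# Editorial usage sketch (assumption, not part of the original module): the ignore patterns
# are regular expressions matched with re.search against each path relative to src, so a
# call could look like:
#
#   copy_tree('/tmp/src_project', '/tmp/dest_project',
#             ignore=[r'\.pyc$', r'^tests/'],
#             include_parent=True)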
| [
"logging.getLogger",
"os.readlink",
"os.makedirs",
"os.path.join",
"os.path.isfile",
"os.path.isdir",
"os.path.basename",
"shutil.copy",
"os.path.relpath",
"os.path.islink",
"os.walk",
"re.search"
] | [((644, 671), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (661, 671), False, 'import logging\n'), ((768, 787), 'os.path.isfile', 'os.path.isfile', (['src'], {}), '(src)\n', (782, 787), False, 'import os\n'), ((1260, 1272), 'os.walk', 'os.walk', (['src'], {}), '(src)\n', (1267, 1272), False, 'import os\n'), ((2327, 2347), 're.search', 're.search', (['ign', 'path'], {}), '(ign, path)\n', (2336, 2347), False, 'import re\n'), ((1095, 1121), 'os.path.isdir', 'os.path.isdir', (['nested_dest'], {}), '(nested_dest)\n', (1108, 1121), False, 'import os\n'), ((1135, 1159), 'os.makedirs', 'os.makedirs', (['nested_dest'], {}), '(nested_dest)\n', (1146, 1159), False, 'import os\n'), ((1324, 1352), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (1336, 1352), False, 'import os\n'), ((1399, 1425), 'os.path.relpath', 'os.path.relpath', (['path', 'src'], {}), '(path, src)\n', (1414, 1425), False, 'import os\n'), ((1673, 1708), 'os.path.join', 'os.path.join', (['nested_dest', 'sub_dirs'], {}), '(nested_dest, sub_dirs)\n', (1685, 1708), False, 'import os\n'), ((1868, 1888), 'os.path.islink', 'os.path.islink', (['path'], {}), '(path)\n', (1882, 1888), False, 'import os\n'), ((1056, 1077), 'os.path.basename', 'os.path.basename', (['src'], {}), '(src)\n', (1072, 1077), False, 'import os\n'), ((1561, 1593), 'os.path.relpath', 'os.path.relpath', (['path'], {'start': 'src'}), '(path, start=src)\n', (1576, 1593), False, 'import os\n'), ((1728, 1751), 'os.path.isdir', 'os.path.isdir', (['pkg_path'], {}), '(pkg_path)\n', (1741, 1751), False, 'import os\n'), ((1769, 1790), 'os.makedirs', 'os.makedirs', (['pkg_path'], {}), '(pkg_path)\n', (1780, 1790), False, 'import os\n'), ((1915, 1932), 'os.readlink', 'os.readlink', (['path'], {}), '(path)\n', (1926, 1932), False, 'import os\n'), ((2085, 2112), 'shutil.copy', 'shutil.copy', (['path', 'pkg_path'], {}), '(path, pkg_path)\n', (2096, 2112), False, 'import shutil\n'), ((2017, 2049), 'os.path.join', 'os.path.join', (['pkg_path', 'filename'], {}), '(pkg_path, filename)\n', (2029, 2049), False, 'import os\n')] |
# This program outputs today's date in the format Monday, January 10th 2019 at 1:15pm
# <NAME> 2019-03-12
# Revisited to add the suffix after the date in the final output
import datetime as dt # I have imported the datetime module and given it the shortened name dt
today = dt.datetime.today() # today() is used to return the local date and time and it is set equal to the variable today
# I need to have a variable to assign a suffix to the date in order to have my output in the required format
daynumber= int(today.strftime("%d")) # I introduce the variable daynumber here and set equal to the integer equivalent of the day of the month
# From my additional reading on strftime formatting I learnt %d returns an integer equivalent of the day of the month
# I had to add int to have the integer value %d be recognized and work properly in the below if, elif, else statements
# First I had just - daynumber= today.strftime(%d) and this didn't work correctly
if daynumber in (1, 21, 31): # I first had == and this didn't work so I have used the in keyword instead
daynumber = 'st' # if statement used to assign the value 'st' to the 1st, 21st and 31st day of the month
elif daynumber in (2, 22):
daynumber = 'nd' # elif used to assign the value 'nd' to the 2nd and 22nd day of the month
elif daynumber in (3, 23):
daynumber = 'rd' # Another elif used to assign the value of 'rd' to the 3rd and 23rd day of the month
else:
daynumber = 'th' # else statement used to assign the value 'th' to all other days of the month
print(f"The current date and time is: {today.strftime('%A, %B %d')}{daynumber} {today.strftime('%Y at %H:%M%p')}")
# I have used f strings to print the full sentence to the screen as output
# I have asked for today to be printed using strftime to return the required results
# I read about format tokens to format the date output in strftime here: https://stackabuse.com/how-to-format-dates-in-python/ and http://strftime.org/
# I used these format tokens to print the current date and time to the screen in the format requested in the problem
# %A - Weekday full, %B - month name in full, %d - day of the week, %Y- year in full, %H - hour, %M - minute and %p - AM/PM
# I introduced a variable called daynumber which was used to assign a value for the suffix to the date and printed in output
# I returned to the solution and added if, elif, else to assign value to the variable daynumber and also to add fstring for printing output | [
"datetime.datetime.today"
] | [((275, 294), 'datetime.datetime.today', 'dt.datetime.today', ([], {}), '()\n', (292, 294), True, 'import datetime as dt\n')] |
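The suffix logic above can also be written as a small reusable helper; a minimal sketch (editorial illustration, not part of the dataset sample) using the same day-to-suffix mapping described in the comments:

def day_suffix(day):
    """Return the ordinal suffix for a day of the month, e.g. day_suffix(23) == 'rd'."""
    if day in (1, 21, 31):
        return 'st'
    elif day in (2, 22):
        return 'nd'
    elif day in (3, 23):
        return 'rd'
    return 'th'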
from Modules_Ex import Ex112
from Modules import my_inputs
from termcolor2 import colored
preco = my_inputs.inputfloat('Escreve o preço de um produto, em euros: ', colored('ERRO: Escreva só o preço!\n', 'red'))
aumento = my_inputs.inputfloat('Escreve o aumento do preço do produto, em percentagem: ', colored('ERRO: Escreva só o número!\n', 'red'))
desconto = my_inputs.inputfloat('Escreve o desconto do preço do produto, em percentagem: ', colored('ERRO: Escreva só o número!\n', 'red'))
print('')
print(Ex112.price_manipulation(preco, aumento, desconto))
| [
"termcolor2.colored",
"Modules_Ex.Ex112.price_manipulation"
] | [((166, 211), 'termcolor2.colored', 'colored', (['"""ERRO: Escreva só o preço!\n"""', '"""red"""'], {}), "('ERRO: Escreva só o preço!\\n', 'red')\n", (173, 211), False, 'from termcolor2 import colored\n'), ((303, 349), 'termcolor2.colored', 'colored', (['"""ERRO: Escreva só o número!\n"""', '"""red"""'], {}), "('ERRO: Escreva só o número!\\n', 'red')\n", (310, 349), False, 'from termcolor2 import colored\n'), ((442, 488), 'termcolor2.colored', 'colored', (['"""ERRO: Escreva só o número!\n"""', '"""red"""'], {}), "('ERRO: Escreva só o número!\\n', 'red')\n", (449, 488), False, 'from termcolor2 import colored\n'), ((505, 555), 'Modules_Ex.Ex112.price_manipulation', 'Ex112.price_manipulation', (['preco', 'aumento', 'desconto'], {}), '(preco, aumento, desconto)\n', (529, 555), False, 'from Modules_Ex import Ex112\n')] |
import pygame
from pygame.sprite import Sprite
class Bullet(Sprite):
"""manage the bullet fired by ship"""
def __init__(self, ai_settings, screen, ship):
"""create a bullet object at the position of ship"""
super().__init__()
self.screen = screen
# create a bullet rectangle at (0, 0), then set the right location
self.rect = pygame.Rect(0, 0, ai_settings.bullet_width,
ai_settings.bullet_height)
self.rect.centerx = ship.rect.centerx
self.rect.top = ship.rect.top
# store bullet position in float
self.y = float(self.rect.y)
self.color = ai_settings.bullet_color
self.speed_factor = ai_settings.bullet_speed_factor
def update(self):
"""move up"""
# update the bullet y coordinate
self.y -= self.speed_factor
# update the bullet rect
self.rect.y = self.y
def draw_bullet(self):
"""draw bullet on screen"""
pygame.draw.rect(self.screen, self.color, self.rect)
| [
"pygame.draw.rect",
"pygame.Rect"
] | [((391, 461), 'pygame.Rect', 'pygame.Rect', (['(0)', '(0)', 'ai_settings.bullet_width', 'ai_settings.bullet_height'], {}), '(0, 0, ai_settings.bullet_width, ai_settings.bullet_height)\n', (402, 461), False, 'import pygame\n'), ((1039, 1091), 'pygame.draw.rect', 'pygame.draw.rect', (['self.screen', 'self.color', 'self.rect'], {}), '(self.screen, self.color, self.rect)\n', (1055, 1091), False, 'import pygame\n')] |
import re
import numpy as np
from itertools import combinations
class Treell:
def __init__(self, tnt_file):
self.list = []
self.lengths = {}
self.node_count = 0
self.labels = {}
self.taxa = {}
self.adj_table = None
with open(tnt_file , "r") as fh:
for line in fh:
line = line.strip()
if line.startswith("("):
line = re.sub(r"\s+\)", ")", line)
node_pointer = -1
label = ""
in_br_len = False
br_len = ""
for char in line:
if in_br_len:
if char == " " or char == ")":
self.lengths[node_pointer] = float(br_len)
br_len = ""
in_br_len = False
elif char == "[" or char == "]":
continue
else:
br_len += char
if not in_br_len:
if char == '(':
pa = node_pointer
node_pointer = self.node_count
self.node_count += 1
self.list.append([pa, node_pointer])
#print(f"(: {papa_pointer} to {node_pointer}")
elif char == ')':
if len(label) > 0:
self.labels[node_pointer] = label
label = ""
node_pointer = self.get_parent(node_pointer)
#print(f"): back to {node_pointer}")
elif char == " ":
if len(label) > 0:
self.labels[node_pointer] = label
label = ""
pa = self.get_parent(node_pointer)
node_pointer = self.node_count
self.node_count += 1
self.list.append([pa, node_pointer])
#print(f"Space: {pa} to {node_pointer}")
elif char == "=":
if len(label) > 0:
self.labels[node_pointer] = label
label = ""
in_br_len = True
elif char == ';':
break
else:
label += char
for node in self.labels:
self.taxa[node] = self.labels[node].split('#')[0]
def get_parent(self, node):
for no, des in self.list:
if des == node:
return no
def unroot(self):
root_edges_idx = []
root_descendants = []
for idx, edge in enumerate(self.list):
if edge[0] == -1:
root_descendants.append(edge[1])
root_edges_idx.append(idx)
for i,d in combinations(root_descendants, 2):
self.list.append([i, d])
self.list = [x for i,x in enumerate(self.list) if not i in root_edges_idx]
self.adj_table = np.zeros((self.node_count, self.node_count))
for i,d in self.list:
self.adj_table[i,d] = 1
self.adj_table[d,i] = 1
return None
def orthology_test(self, target_node, excluded_node):
pass_test = True
r = self.adj_table[target_node]
icr = np.where(r == 1)[0]
icr = icr[icr != excluded_node]
print(icr)
names = []
name_origin = {}
for child in icr:
if child in self.taxa:
names.append(self.taxa[child])
name_origin[self.taxa[child]] = 1
else:
thnames = self.orthology_test(child, target_node)
if len(thnames) == 0:
pass_test = False
name_origin = {}
break
else:
names += thnames
for tn in thnames:
if tn in name_origin:
name_origin[tn] += 1
else:
name_origin[tn] = 1
print(names)
print(name_origin)
if len(name_origin) == 1:
pass_test = True
elif len(name_origin) > 1:
for tn in name_origin:
if name_origin[tn] > 1:
pass_test = False
break
else:
pass_test = False
if pass_test:
names = list(set(names))
return names
else:
return []
"""
for descendants that are leaves:
get names
for descendants that are not leaves:
call function on descendants, target_node as excluded_node
get their name lists
if there is at least two taxon names in descendants AND one of them is repeated in different descendant lists:
fail, return an empty list
else:
pass, return list of unique names
"""
def ortholog_finder(self):
pass
"""
for all internal edges:
for each node in the edge:
if node pass orthology property:
append node to ortholog list
"""
# Ortholog identification in the tree is conducted using maximum inclusion, keeping
# multiple leaves for the same taxon only if they are monophyletic.
# Evaluation of the different techniques may be done by measuring phylogenetic noise
# in the final tree.
if __name__ == "__main__":
tntfile = "toy.tree"
al = Treell(tntfile)
al.unroot()
for pair in al.list:
print(pair)
for la in al.labels:
print(la, al.labels[la])
for a in al.taxa:
print(a, al.taxa[a])
for le in al.lengths:
print(le, al.lengths[le])
print(al.adj_table)
print(al.orthology_test(1, 3))
| [
"itertools.combinations",
"re.sub",
"numpy.zeros",
"numpy.where"
] | [((2149, 2182), 'itertools.combinations', 'combinations', (['root_descendants', '(2)'], {}), '(root_descendants, 2)\n', (2161, 2182), False, 'from itertools import combinations\n'), ((2309, 2353), 'numpy.zeros', 'np.zeros', (['(self.node_count, self.node_count)'], {}), '((self.node_count, self.node_count))\n', (2317, 2353), True, 'import numpy as np\n'), ((2566, 2582), 'numpy.where', 'np.where', (['(r == 1)'], {}), '(r == 1)\n', (2574, 2582), True, 'import numpy as np\n'), ((351, 379), 're.sub', 're.sub', (['"""\\\\s+\\\\)"""', '""")"""', 'line'], {}), "('\\\\s+\\\\)', ')', line)\n", (357, 379), False, 'import re\n')] |
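The ortholog_finder method above is left as pseudocode; a minimal sketch of one way to follow it (editorial assumption, not part of the dataset sample), collecting every internal node that passes orthology_test when tested away from one of its neighbours on an unrooted Treell:

def find_orthologs(tree):
    # tree: an unrooted Treell instance (tree.unroot() already called)
    orthologs = []
    leaves = set(tree.taxa)              # nodes that carry a taxon label
    for node_a, node_b in tree.list:     # tree.list holds the edges
        if node_a in leaves or node_b in leaves:
            continue                     # only internal edges are considered
        for target, excluded in ((node_a, node_b), (node_b, node_a)):
            if tree.orthology_test(target, excluded):
                orthologs.append((target, excluded))
    return orthologs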
import sys
import os.path
sys.path.append(os.path.abspath(os.path.join(os.pardir,os.pardir)))
import disaggregator as da
import disaggregator.PecanStreetDatasetAdapter as psda
import pickle
db_url = "postgresql://USERNAME:<EMAIL>@db.<EMAIL>:5432/postgres"
psda.set_url(db_url)
schema = 'shared'
table = 'validated_01_2014'
dataid = '3893'
sample_rate = '15T'
appliance_set = psda.generate_set_by_table_and_dataid(
schema, table, dataid, sample_rate)
appliance_set = appliance_set.generate_non_zero_set()
with open(os.path.join(os.pardir,os.pardir,'data','home_3893_set_01_2014.pkl'),'w') as f:
pickle.dump(appliance_set,f)
| [
"pickle.dump",
"disaggregator.PecanStreetDatasetAdapter.set_url",
"disaggregator.PecanStreetDatasetAdapter.generate_set_by_table_and_dataid"
] | [((257, 277), 'disaggregator.PecanStreetDatasetAdapter.set_url', 'psda.set_url', (['db_url'], {}), '(db_url)\n', (269, 277), True, 'import disaggregator.PecanStreetDatasetAdapter as psda\n'), ((377, 450), 'disaggregator.PecanStreetDatasetAdapter.generate_set_by_table_and_dataid', 'psda.generate_set_by_table_and_dataid', (['schema', 'table', 'dataid', 'sample_rate'], {}), '(schema, table, dataid, sample_rate)\n', (414, 450), True, 'import disaggregator.PecanStreetDatasetAdapter as psda\n'), ((601, 630), 'pickle.dump', 'pickle.dump', (['appliance_set', 'f'], {}), '(appliance_set, f)\n', (612, 630), False, 'import pickle\n')] |
# coding=utf-8
from os import path
import saml2
import saml2.saml
BASEDIR = path.dirname(path.abspath(__file__))
CONFIG = {
# full path to the xmlsec1 binary programm
'xmlsec_binary': '/usr/bin/xmlsec1',
# your entity id, usually your subdomain plus the url to the metadata view
'entityid': 'https://portulanclarin.net/saml2/metadata/',
# directory with attribute mapping
'attribute_map_dir': path.join(BASEDIR, 'attributemaps'),
# this block states what services we provide
'service': {
'sp': {
'name': 'PORTULAN / CLARIN',
'endpoints': {
# url and binding to the assertion consumer service view
# do not change the binding or service name
'assertion_consumer_service': [
('https://portulanclarin.net/saml2/acs/',
saml2.BINDING_HTTP_POST),
],
# url and binding to the single logout service view
# do not change the binding or service name
'single_logout_service': [
('https://portulanclarin.net/saml2/ls/',
saml2.BINDING_HTTP_REDIRECT),
('https://portulanclarin.net/saml2/ls/post',
saml2.BINDING_HTTP_POST),
],
},
'idp': {
# This is the address of a SimpleSAMLphp proxy, which acts as the sole IdP
# known to this repository and as SP to the CLARIN federation
'https://sso.portulanclarin.net/saml2/idp/metadata.php': {
'single_sign_on_service': {
saml2.BINDING_HTTP_REDIRECT: 'https://sso.portulanclarin.net/saml2/idp/SSOService.php',
},
'single_logout_service': {
saml2.BINDING_HTTP_REDIRECT: 'https://sso.portulanclarin.net/saml2/idp/SingleLogoutService.php',
},
},
},
# attributes that this project need to identify a user
'required_attributes': ['uid', 'displayName', 'cn', 'sn'],
# attributes that may be useful to have but not required
# 'optional_attributes': ['eduPersonAffiliation'],
},
},
# where the remote metadata is stored
'metadata': {
'local': [
# this file is not included in the repository
# you should create your own SAML metadata file ;-)
path.join(BASEDIR, 'saml-portulan-idp.xml'),
],
},
# set to 1 to output debugging information
'debug': 1,
# Signing
# these files are not included in the repository; you should create your own ;-)
'key_file': path.join(BASEDIR, 'saml-sp-priv.pem'),
'cert_file': path.join(BASEDIR, 'saml-sp-pub.pem'),
'contact_person': [
{
'given_name': u'Luís',
'sur_name': 'Gomes',
'company': 'University of Lisbon',
'email_address': '<EMAIL>',
'contact_type': 'technical'
},
{
'given_name': u'António',
'sur_name': 'Branco',
'company': 'University of Lisbon',
'email_address': '<EMAIL>',
'contact_type': 'administrative'
},
],
'organization': {
'name': [('PORTULAN / CLARIN -- Infrastructure for Science and Technology of the Portuguese Language', 'en')],
'display_name': [('PORTULAN / CLARIN', 'en')],
'url': [('http://portulanclarin.net/', 'en')],
},
'valid_for': 24, # how long is our metadata valid
}
| [
"os.path.abspath",
"os.path.join"
] | [((89, 111), 'os.path.abspath', 'path.abspath', (['__file__'], {}), '(__file__)\n', (101, 111), False, 'from os import path\n'), ((419, 454), 'os.path.join', 'path.join', (['BASEDIR', '"""attributemaps"""'], {}), "(BASEDIR, 'attributemaps')\n", (428, 454), False, 'from os import path\n'), ((2769, 2807), 'os.path.join', 'path.join', (['BASEDIR', '"""saml-sp-priv.pem"""'], {}), "(BASEDIR, 'saml-sp-priv.pem')\n", (2778, 2807), False, 'from os import path\n'), ((2826, 2863), 'os.path.join', 'path.join', (['BASEDIR', '"""saml-sp-pub.pem"""'], {}), "(BASEDIR, 'saml-sp-pub.pem')\n", (2835, 2863), False, 'from os import path\n'), ((2526, 2569), 'os.path.join', 'path.join', (['BASEDIR', '"""saml-portulan-idp.xml"""'], {}), "(BASEDIR, 'saml-portulan-idp.xml')\n", (2535, 2569), False, 'from os import path\n')] |
from fastapi import APIRouter, HTTPException
import pandas as pd
import sqlite3
# import plotly.express as px
router = APIRouter()
@router.get('/viz/') # check the documentation, assistance with router
async def viz():
def salt_rank():
"""
Querying the database for commenter, text, and sentiment for the text.
Parameters:
-----------
mode: string. query mode. returning the sentiment based on negative or positive
Output:
-----------
results: json format string with format
{"sentiment": str,
"author_comment_count": str,
"comment_text": string,
}
"""
conn = sqlite3.connect('hn_db.db')
# if conn.closed != 0:
# return app.response_class(response=json.dump({}),
# status=400,
# mimetype='application/json')
curs = conn.cursor()
# sql_mode = dict({"average": "AVG", "total": "SUM"})
query = f'''
SELECT comment_author, comment_text, sentiment
FROM hn_comments
LIMIT 100;
'''
curs.execute(query)
data = curs.fetchall()
# return data
| [
"fastapi.APIRouter",
"sqlite3.connect"
] | [((120, 131), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (129, 131), False, 'from fastapi import APIRouter, HTTPException\n'), ((718, 745), 'sqlite3.connect', 'sqlite3.connect', (['"""hn_db.db"""'], {}), "('hn_db.db')\n", (733, 745), False, 'import sqlite3\n')] |
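The docstring above describes a JSON-style output that the sample never actually builds or returns; a minimal sketch (editorial assumption — key names follow the SELECT columns, not a verified schema) of how the fetched rows could be shaped before being returned from the route:

def rows_to_records(rows):
    # rows come back from cursor.fetchall() in the SELECT order:
    # (comment_author, comment_text, sentiment)
    return [
        {"comment_author": author, "comment_text": text, "sentiment": sentiment}
        for author, text, sentiment in rows
    ]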
import json
import uuid
from django.http import HttpRequest
from django.test import TestCase
from mock import patch
from casexml.apps.case.mock import CaseBlock
from casexml.apps.case.models import CommCareCase
from casexml.apps.case.signals import case_post_save
from casexml.apps.case.tests.util import delete_all_cases
from casexml.apps.case.util import post_case_blocks
from corehq import toggles
from corehq.apps.domain.models import Domain
from corehq.apps.userreports import tasks
from corehq.apps.userreports.dbaccessors import delete_all_report_configs
from corehq.apps.userreports.models import (
DataSourceConfiguration,
ReportConfiguration,
)
from corehq.apps.userreports.reports.view import ConfigurableReportView
from corehq.apps.userreports.util import get_indicator_adapter
from corehq.apps.users.models import Permissions, UserRole, WebUser
from corehq.sql_db.connections import Session
from corehq.util.context_managers import drop_connected_signals
class ConfigurableReportTestMixin(object):
domain = "TEST_DOMAIN"
case_type = "CASE_TYPE"
@classmethod
def _new_case(cls, properties):
id = uuid.uuid4().hex
case_block = CaseBlock.deprecated_init(
create=True,
case_id=id,
case_type=cls.case_type,
update=properties,
).as_xml()
with drop_connected_signals(case_post_save):
post_case_blocks([case_block], {'domain': cls.domain})
return CommCareCase.get(id)
@classmethod
def _delete_everything(cls):
delete_all_cases()
for config in DataSourceConfiguration.all():
config.delete()
delete_all_report_configs()
class ConfigurableReportViewTest(ConfigurableReportTestMixin, TestCase):
def _build_report_and_view(self, request=HttpRequest()):
# Create report
data_source_config = DataSourceConfiguration(
domain=self.domain,
display_name='foo',
referenced_doc_type='CommCareCase',
table_id="woop_woop",
configured_filter={
"type": "boolean_expression",
"operator": "eq",
"expression": {
"type": "property_name",
"property_name": "type"
},
"property_value": self.case_type,
},
configured_indicators=[
{
"type": "expression",
"expression": {
"type": "property_name",
"property_name": 'fruit'
},
"column_id": 'indicator_col_id_fruit',
"display_name": 'indicator_display_name_fruit',
"datatype": "string"
},
{
"type": "expression",
"expression": {
"type": "property_name",
"property_name": 'num1'
},
"column_id": 'indicator_col_id_num1',
"datatype": "integer"
},
{
"type": "expression",
"expression": {
"type": "property_name",
"property_name": 'num2'
},
"column_id": 'indicator_col_id_num2',
"datatype": "integer"
},
],
)
data_source_config.validate()
data_source_config.save()
self.addCleanup(data_source_config.delete)
tasks.rebuild_indicators(data_source_config._id)
report_config = ReportConfiguration(
domain=self.domain,
config_id=data_source_config._id,
title='foo',
aggregation_columns=['doc_id'],
columns=[
{
"type": "field",
"display": "report_column_display_fruit",
"field": 'indicator_col_id_fruit',
'column_id': 'report_column_col_id_fruit',
'aggregation': 'simple'
},
{
"type": "percent",
"display": "report_column_display_percent",
'column_id': 'report_column_col_id_percent',
'format': 'percent',
"denominator": {
"type": "field",
"aggregation": "sum",
"field": "indicator_col_id_num1",
"column_id": "report_column_col_id_percent_num1"
},
"numerator": {
"type": "field",
"aggregation": "sum",
"field": "indicator_col_id_num2",
"column_id": "report_column_col_id_percent_num2"
}
},
],
)
report_config.save()
self.addCleanup(report_config.delete)
view = ConfigurableReportView(request=request)
view._domain = self.domain
view._lang = "en"
view._report_config_id = report_config._id
return report_config, view
@classmethod
def tearDownClass(cls):
cls.case.delete()
# todo: understand why this is necessary. the view call uses the session and the
# signal doesn't fire to kill it.
Session.remove()
super(ConfigurableReportViewTest, cls).tearDownClass()
@classmethod
def setUpClass(cls):
super(ConfigurableReportViewTest, cls).setUpClass()
cls.case = cls._new_case({'fruit': 'apple', 'num1': 4, 'num2': 6})
cls.case.save()
def test_export_table(self):
"""
Test the output of ConfigurableReportView.export_table()
"""
report, view = self._build_report_and_view()
expected = [
[
'foo',
[
['report_column_display_fruit', 'report_column_display_percent'],
['apple', '150%']
]
]
]
self.assertEqual(view.export_table, expected)
def test_paginated_build_table(self):
"""
Simulate building a report where chunking occurs
"""
with patch('corehq.apps.userreports.tasks.ID_CHUNK_SIZE', 1):
report, view = self._build_report_and_view()
expected = [
[
'foo',
[
['report_column_display_fruit', 'report_column_display_percent'],
['apple', '150%']
]
]
]
self.assertEqual(view.export_table, expected)
def test_redirect_custom_report(self):
report, view = self._build_report_and_view()
request = HttpRequest()
self.assertFalse(view.should_redirect_to_paywall(request))
def test_redirect_report_builder(self):
report, view = self._build_report_and_view()
report.report_meta.created_by_builder = True
report.save()
request = HttpRequest()
self.assertTrue(view.should_redirect_to_paywall(request))
def test_can_edit_report(self):
"""
Test whether ConfigurableReportView.page_context allows report editing
"""
domain = Domain(name='test_domain', is_active=True)
domain.save()
self.addCleanup(domain.delete)
def create_view(can_edit_reports):
rolename = 'edit_role' if can_edit_reports else 'view_role'
username = 'editor' if can_edit_reports else 'viewer'
toggles.USER_CONFIGURABLE_REPORTS.set(username, True, toggles.NAMESPACE_USER)
user_role = UserRole(
domain=domain.name,
name=rolename,
permissions=Permissions(edit_commcare_users=True,
view_commcare_users=True,
edit_groups=True,
view_groups=True,
edit_locations=True,
view_locations=True,
access_all_locations=False,
edit_data=True,
edit_reports=can_edit_reports,
view_reports=True
)
)
user_role.save()
# user_role should be deleted along with the domain.
web_user = WebUser.create(domain.name, username, '***', None, None)
web_user.set_role(domain.name, user_role.get_qualified_id())
web_user.current_domain = domain.name
web_user.save()
self.addCleanup(web_user.delete, deleted_by=None)
request = HttpRequest()
request.can_access_all_locations = True
request.user = web_user.get_django_user()
request.couch_user = web_user
request.session = {}
_, view = self._build_report_and_view(request=request)
return view
cannot_edit_view = create_view(False)
self.assertEqual(cannot_edit_view.page_context['can_edit_report'], False)
can_edit_view = create_view(True)
self.assertEqual(can_edit_view.page_context['can_edit_report'], True)
| [
"casexml.apps.case.models.CommCareCase.get",
"corehq.apps.userreports.dbaccessors.delete_all_report_configs",
"corehq.apps.userreports.tasks.rebuild_indicators",
"corehq.toggles.USER_CONFIGURABLE_REPORTS.set",
"corehq.sql_db.connections.Session.remove",
"casexml.apps.case.util.post_case_blocks",
"mock.patch",
"corehq.apps.userreports.models.DataSourceConfiguration",
"corehq.apps.userreports.models.DataSourceConfiguration.all",
"corehq.util.context_managers.drop_connected_signals",
"casexml.apps.case.mock.CaseBlock.deprecated_init",
"corehq.apps.userreports.models.ReportConfiguration",
"uuid.uuid4",
"corehq.apps.domain.models.Domain",
"corehq.apps.users.models.WebUser.create",
"django.http.HttpRequest",
"corehq.apps.userreports.reports.view.ConfigurableReportView",
"corehq.apps.users.models.Permissions",
"casexml.apps.case.tests.util.delete_all_cases"
] | [((1484, 1504), 'casexml.apps.case.models.CommCareCase.get', 'CommCareCase.get', (['id'], {}), '(id)\n', (1500, 1504), False, 'from casexml.apps.case.models import CommCareCase\n'), ((1564, 1582), 'casexml.apps.case.tests.util.delete_all_cases', 'delete_all_cases', ([], {}), '()\n', (1580, 1582), False, 'from casexml.apps.case.tests.util import delete_all_cases\n'), ((1605, 1634), 'corehq.apps.userreports.models.DataSourceConfiguration.all', 'DataSourceConfiguration.all', ([], {}), '()\n', (1632, 1634), False, 'from corehq.apps.userreports.models import DataSourceConfiguration, ReportConfiguration\n'), ((1672, 1699), 'corehq.apps.userreports.dbaccessors.delete_all_report_configs', 'delete_all_report_configs', ([], {}), '()\n', (1697, 1699), False, 'from corehq.apps.userreports.dbaccessors import delete_all_report_configs\n'), ((1821, 1834), 'django.http.HttpRequest', 'HttpRequest', ([], {}), '()\n', (1832, 1834), False, 'from django.http import HttpRequest\n'), ((1890, 2753), 'corehq.apps.userreports.models.DataSourceConfiguration', 'DataSourceConfiguration', ([], {'domain': 'self.domain', 'display_name': '"""foo"""', 'referenced_doc_type': '"""CommCareCase"""', 'table_id': '"""woop_woop"""', 'configured_filter': "{'type': 'boolean_expression', 'operator': 'eq', 'expression': {'type':\n 'property_name', 'property_name': 'type'}, 'property_value': self.case_type\n }", 'configured_indicators': "[{'type': 'expression', 'expression': {'type': 'property_name',\n 'property_name': 'fruit'}, 'column_id': 'indicator_col_id_fruit',\n 'display_name': 'indicator_display_name_fruit', 'datatype': 'string'},\n {'type': 'expression', 'expression': {'type': 'property_name',\n 'property_name': 'num1'}, 'column_id': 'indicator_col_id_num1',\n 'datatype': 'integer'}, {'type': 'expression', 'expression': {'type':\n 'property_name', 'property_name': 'num2'}, 'column_id':\n 'indicator_col_id_num2', 'datatype': 'integer'}]"}), "(domain=self.domain, display_name='foo',\n referenced_doc_type='CommCareCase', table_id='woop_woop',\n configured_filter={'type': 'boolean_expression', 'operator': 'eq',\n 'expression': {'type': 'property_name', 'property_name': 'type'},\n 'property_value': self.case_type}, configured_indicators=[{'type':\n 'expression', 'expression': {'type': 'property_name', 'property_name':\n 'fruit'}, 'column_id': 'indicator_col_id_fruit', 'display_name':\n 'indicator_display_name_fruit', 'datatype': 'string'}, {'type':\n 'expression', 'expression': {'type': 'property_name', 'property_name':\n 'num1'}, 'column_id': 'indicator_col_id_num1', 'datatype': 'integer'},\n {'type': 'expression', 'expression': {'type': 'property_name',\n 'property_name': 'num2'}, 'column_id': 'indicator_col_id_num2',\n 'datatype': 'integer'}])\n", (1913, 2753), False, 'from corehq.apps.userreports.models import DataSourceConfiguration, ReportConfiguration\n'), ((3644, 3692), 'corehq.apps.userreports.tasks.rebuild_indicators', 'tasks.rebuild_indicators', (['data_source_config._id'], {}), '(data_source_config._id)\n', (3668, 3692), False, 'from corehq.apps.userreports import tasks\n'), ((3718, 4460), 'corehq.apps.userreports.models.ReportConfiguration', 'ReportConfiguration', ([], {'domain': 'self.domain', 'config_id': 'data_source_config._id', 'title': '"""foo"""', 'aggregation_columns': "['doc_id']", 'columns': "[{'type': 'field', 'display': 'report_column_display_fruit', 'field':\n 'indicator_col_id_fruit', 'column_id': 'report_column_col_id_fruit',\n 'aggregation': 'simple'}, {'type': 'percent', 'display':\n 
'report_column_display_percent', 'column_id':\n 'report_column_col_id_percent', 'format': 'percent', 'denominator': {\n 'type': 'field', 'aggregation': 'sum', 'field': 'indicator_col_id_num1',\n 'column_id': 'report_column_col_id_percent_num1'}, 'numerator': {'type':\n 'field', 'aggregation': 'sum', 'field': 'indicator_col_id_num2',\n 'column_id': 'report_column_col_id_percent_num2'}}]"}), "(domain=self.domain, config_id=data_source_config._id,\n title='foo', aggregation_columns=['doc_id'], columns=[{'type': 'field',\n 'display': 'report_column_display_fruit', 'field':\n 'indicator_col_id_fruit', 'column_id': 'report_column_col_id_fruit',\n 'aggregation': 'simple'}, {'type': 'percent', 'display':\n 'report_column_display_percent', 'column_id':\n 'report_column_col_id_percent', 'format': 'percent', 'denominator': {\n 'type': 'field', 'aggregation': 'sum', 'field': 'indicator_col_id_num1',\n 'column_id': 'report_column_col_id_percent_num1'}, 'numerator': {'type':\n 'field', 'aggregation': 'sum', 'field': 'indicator_col_id_num2',\n 'column_id': 'report_column_col_id_percent_num2'}}])\n", (3737, 4460), False, 'from corehq.apps.userreports.models import DataSourceConfiguration, ReportConfiguration\n'), ((5121, 5160), 'corehq.apps.userreports.reports.view.ConfigurableReportView', 'ConfigurableReportView', ([], {'request': 'request'}), '(request=request)\n', (5143, 5160), False, 'from corehq.apps.userreports.reports.view import ConfigurableReportView\n'), ((5520, 5536), 'corehq.sql_db.connections.Session.remove', 'Session.remove', ([], {}), '()\n', (5534, 5536), False, 'from corehq.sql_db.connections import Session\n'), ((6939, 6952), 'django.http.HttpRequest', 'HttpRequest', ([], {}), '()\n', (6950, 6952), False, 'from django.http import HttpRequest\n'), ((7211, 7224), 'django.http.HttpRequest', 'HttpRequest', ([], {}), '()\n', (7222, 7224), False, 'from django.http import HttpRequest\n'), ((7448, 7490), 'corehq.apps.domain.models.Domain', 'Domain', ([], {'name': '"""test_domain"""', 'is_active': '(True)'}), "(name='test_domain', is_active=True)\n", (7454, 7490), False, 'from corehq.apps.domain.models import Domain\n'), ((1148, 1160), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1158, 1160), False, 'import uuid\n'), ((1362, 1400), 'corehq.util.context_managers.drop_connected_signals', 'drop_connected_signals', (['case_post_save'], {}), '(case_post_save)\n', (1384, 1400), False, 'from corehq.util.context_managers import drop_connected_signals\n'), ((1414, 1468), 'casexml.apps.case.util.post_case_blocks', 'post_case_blocks', (['[case_block]', "{'domain': cls.domain}"], {}), "([case_block], {'domain': cls.domain})\n", (1430, 1468), False, 'from casexml.apps.case.util import post_case_blocks\n'), ((6413, 6468), 'mock.patch', 'patch', (['"""corehq.apps.userreports.tasks.ID_CHUNK_SIZE"""', '(1)'], {}), "('corehq.apps.userreports.tasks.ID_CHUNK_SIZE', 1)\n", (6418, 6468), False, 'from mock import patch\n'), ((7746, 7823), 'corehq.toggles.USER_CONFIGURABLE_REPORTS.set', 'toggles.USER_CONFIGURABLE_REPORTS.set', (['username', '(True)', 'toggles.NAMESPACE_USER'], {}), '(username, True, toggles.NAMESPACE_USER)\n', (7783, 7823), False, 'from corehq import toggles\n'), ((8723, 8779), 'corehq.apps.users.models.WebUser.create', 'WebUser.create', (['domain.name', 'username', '"""***"""', 'None', 'None'], {}), "(domain.name, username, '***', None, None)\n", (8737, 8779), False, 'from corehq.apps.users.models import Permissions, UserRole, WebUser\n'), ((9016, 9029), 'django.http.HttpRequest', 'HttpRequest', ([], 
{}), '()\n', (9027, 9029), False, 'from django.http import HttpRequest\n'), ((1186, 1284), 'casexml.apps.case.mock.CaseBlock.deprecated_init', 'CaseBlock.deprecated_init', ([], {'create': '(True)', 'case_id': 'id', 'case_type': 'cls.case_type', 'update': 'properties'}), '(create=True, case_id=id, case_type=cls.case_type,\n update=properties)\n', (1211, 1284), False, 'from casexml.apps.case.mock import CaseBlock\n'), ((7954, 8203), 'corehq.apps.users.models.Permissions', 'Permissions', ([], {'edit_commcare_users': '(True)', 'view_commcare_users': '(True)', 'edit_groups': '(True)', 'view_groups': '(True)', 'edit_locations': '(True)', 'view_locations': '(True)', 'access_all_locations': '(False)', 'edit_data': '(True)', 'edit_reports': 'can_edit_reports', 'view_reports': '(True)'}), '(edit_commcare_users=True, view_commcare_users=True, edit_groups\n =True, view_groups=True, edit_locations=True, view_locations=True,\n access_all_locations=False, edit_data=True, edit_reports=\n can_edit_reports, view_reports=True)\n', (7965, 8203), False, 'from corehq.apps.users.models import Permissions, UserRole, WebUser\n')] |
"""
Copyright 2021 <NAME>
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this work except in compliance with the License. You may obtain a copy of the
License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import pandas as pd
import tkinter as tk
from tkinter import *
from tkinter import filedialog
from datetime import datetime
from tkinter.messagebox import showinfo
from tkinter.messagebox import showwarning
from tkinter.font import Font
from os import path
import sys
if getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS'):
print('running in a PyInstaller bundle')
else:
print('running in a normal Python process')
# ----------------------------------------------- GLOBAL VARIABLES
REF_FILE_NAME = ""
TEST_FILE_NAME = ""
MAIN_FILE_ONE_NAME = ""
MAIN_FILE_TWO_NAME = ""
SEQ_ONE_GAPS = []
SEQ_TWO_GAPS = []
SEQ_THREE_GAPS = []
SEQ_FOUR_GAPS = []
FOUR_SEQ_ALIGN = False
ALIGNMENT_WARNING = False
THRESHOLD = 1
LVL_SEL = "L1&L2"
PEP_COLUMNS = ["peptide", "Peptide", "Peptide sequence"]
START_COLUMNS = ["start", "Start", "Peptide start"]
REF_PEPTIDE_MAX_LENGTH = 50
TEST_PEPTIDE_MAX_LENGTH = 50
MAIN_PEPTIDE_MAX_LENGTH = 50
# ----------------------------------------------- CLASSES
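# Tkinter front end: collects prediction, database, alignment and results file paths plus run options, then drives the comparison.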
class MainApplication:
def __init__(self, master):
self.master = master
self.canvas = tk.Canvas(master, width=550, height=690) # width=550, height=690
# to make a frame
self.frame = tk.Frame(master, bg='white')
############################################################################################
# Frame Input
# this frame is placed in the original frame
title_font = Font(family="Calibri", size=12, weight="bold")
self.frame_input = tk.Frame(self.frame, bd='10', padx=3, pady=3)
self.label_input_files = tk.Label(self.frame_input, text='Input File Paths', bd='3', fg='blue', font=title_font)
self.label_epitope_predictions = tk.Label(self.frame_input, text='Epitope Predictions', bd='3', fg='blue')
self.label_ref = tk.Label(self.frame_input, text='Sequence A', bd='3')
self.label_test = tk.Label(self.frame_input, text='Sequence B', bd='3')
self.label_database_searches = tk.Label(self.frame_input, text='Database Searches', bd='3', fg='blue')
self.label_main_one = tk.Label(self.frame_input, text='Sequence A', bd='3')
self.label_main_two = tk.Label(self.frame_input, text='Sequence B', bd='3')
self.entry_ref = tk.Entry(self.frame_input, bd='3', justify="center")
self.entry_test = tk.Entry(self.frame_input, bd='3', justify="center")
self.entry_main_one = tk.Entry(self.frame_input, bd='3', justify="center")
self.entry_main_two = tk.Entry(self.frame_input, bd='3', justify="center")
self.button_ref = tk.Button(self.frame_input, text='Browse', command=self.browse_ref)
self.button_test = tk.Button(self.frame_input, text='Browse', command=self.browse_test)
self.button_main_one = tk.Button(self.frame_input, text='Browse', command=self.browse_main_one)
self.button_main_two = tk.Button(self.frame_input, text='Browse', command=self.browse_main_two)
self.label_indels_title = tk.Label(self.frame_input, text='CAVES Indel Search', bd='3', fg='blue')
self.label_indels_alignment = tk.Label(self.frame_input, text='Alignment', bd='3')
self.entry_indels_alignment = tk.Entry(self.frame_input, bd='3', justify="center")
self.button_indels_alignment = tk.Button(self.frame_input, text='Browse', command=self.browse_alignment)
self.label_threshold_title = tk.Label(self.frame_input, text='Minimum Peptide Length', bd='3', fg='blue',
font=title_font)
self.entry_threshold = tk.Entry(self.frame_input, bd='3', justify="center")
self.label_threshold_helper = tk.Label(self.frame_input,
text='Default minimum is 1 amino acid',
bd='3', fg='red')
self.label_radio_title = tk.Label(self.frame_input, text='Level Selection', bd='3', fg='blue',
font=title_font)
self.frame_radio_buttons = tk.Frame(self.frame_input, bd='0', padx=3, pady=3)
self.level_selection = IntVar()
self.level_selection.set(1)
self.radio_both_lvls = Radiobutton(self.frame_radio_buttons, text="Level 1 and 2",
command=self.config_L1L2_entries,
variable=self.level_selection, value=1).grid(row=0, column=1, padx=50)
self.radio_lvl_one_only = Radiobutton(self.frame_radio_buttons, text="Level 1 only",
command=self.config_L1_only_entries,
variable=self.level_selection, value=2).grid(row=0, column=2)
self.radio_lvl_two_only = Radiobutton(self.frame_radio_buttons, text="Level 2 only",
command=self.config_L2_only_entries,
variable=self.level_selection, value=3).grid(row=0, column=3, padx=50)
self.label_result_file_title = tk.Label(self.frame_input, text='Results File', bd='3', fg='blue',
font=title_font)
self.entry_result_file = tk.Entry(self.frame_input, bd='3', justify="center")
self.button_result_path = tk.Button(self.frame_input, text='Browse', command=self.browse_result_path)
# place used to place the widgets in the frame
self.label_input_files.place(relx=-0.005, rely=-0.01, relheight=0.05)
self.label_epitope_predictions.place(relx=0.025, rely=0.06, relheight=0.035)
self.label_ref.place(relx=0.05, rely=0.12, relheight=0.035)
self.entry_ref.place(relx=0.20, rely=0.12, relwidth=0.55, relheight=0.035)
self.button_ref.place(relx=0.80, rely=0.12, relheight=0.030)
self.label_test.place(relx=0.05, rely=0.18, relheight=0.035)
self.entry_test.place(relx=0.20, rely=0.18, relwidth=0.55, relheight=0.035)
self.button_test.place(relx=0.80, rely=0.18, relheight=0.030)
self.label_database_searches.place(relx=0.025, rely=0.26, relheight=0.035)
self.label_main_one.place(relx=0.05, rely=0.32, relheight=0.035)
self.entry_main_one.place(relx=0.20, rely=0.32, relwidth=0.55, relheight=0.035)
self.button_main_one.place(relx=0.80, rely=0.32, relheight=0.030)
self.label_main_two.place(relx=0.05, rely=0.38, relheight=0.035)
self.entry_main_two.place(relx=0.20, rely=0.38, relwidth=0.55, relheight=0.035)
self.button_main_two.place(relx=0.80, rely=0.38, relheight=0.030)
self.label_indels_title.place(relx=0.025, rely=0.46, relheight=0.035)
self.label_indels_alignment.place(relx=0.06, rely=0.52, relheight=0.035)
self.entry_indels_alignment.place(relx=0.20, rely=0.52, relwidth=0.55, relheight=0.035)
self.button_indels_alignment.place(relx=0.80, rely=0.52, relheight=0.030)
self.label_threshold_title.place(relx=-0.005, rely=0.60, relheight=0.05)
self.entry_threshold.place(relx=0.10, rely=0.69, relwidth=0.05, relheight=0.030)
self.label_threshold_helper.place(relx=0.175, rely=0.69, relheight=0.030)
self.label_radio_title.place(relx=-0.005, rely=0.76, relheight=0.05)
# Radio buttons are placed in their own frame (self.frame_radio_buttons)
self.label_result_file_title.place(relx=-0.005, rely=0.90, relheight=0.035)
self.entry_result_file.place(relx=0.20, rely=0.955, relwidth=0.55, relheight=0.035)
self.button_result_path.place(relx=0.80, rely=0.955, relheight=0.030)
############################################################################################
# placing the buttons below
submit_font = Font(family="Calibri", size=12)
self.frame_button = tk.Frame(self.frame, bd='3', padx=3, pady=3)
self.button_start = tk.Button(self.frame_button, text='Compare', font=submit_font, command=self.start_clicked)
self.button_cancel = tk.Button(self.frame_button, text='Cancel', font=submit_font, command=master.destroy)
self.button_cancel.place(relx=0.6, rely=0.22, relheight=0.6, relwidth=0.18)
self.button_start.place(relx=0.8, rely=0.22, relheight=0.6, relwidth=0.18)
###############################################################################################
# all the frames are placed in their respective positions
self.frame_input.place(relx=0.005, rely=0.005, relwidth=0.99, relheight=0.906)
self.frame_radio_buttons.place(relx=0.005, rely=0.8275, relwidth=1, relheight=1)
self.frame_button.place(relx=0.005, rely=0.915, relwidth=0.99, relheight=0.08)
self.frame.place(relx=0.02, rely=0.02, relwidth=0.96, relheight=0.96)
self.canvas.pack()
##############################################################################################
def start_clicked(self):
print("Compare Start")
init_objects(self.level_selection.get())
global LVL_SEL
print("Reading epitope predictions: Sequence A file")
ref_raw = init_ref_raw(self.entry_ref.get().strip())
if ref_raw is None:
return
if LVL_SEL != "L2Only":
print("Reading epitope predictions: Sequence B file")
test_raw = init_test_raw(self.entry_test.get().strip())
if test_raw is None:
return
if LVL_SEL != "L1Only":
print("Reading database searches: Sequence A file")
main_raw_one = init_main_raw(self.entry_main_one.get().strip())
if main_raw_one is None:
print("Unable to read database searches: Sequence A file")
return
global MAIN_FILE_ONE_NAME
MAIN_FILE_ONE_NAME = self.entry_main_one.get().split("/").pop()
if LVL_SEL == "L1&L2":
print("Reading database searches: Sequence B file")
main_raw_two = init_main_raw(self.entry_main_two.get().strip())
if main_raw_two is None:
print("Unable to read database searches: Sequence B file")
return
global MAIN_FILE_TWO_NAME
MAIN_FILE_TWO_NAME = self.entry_main_two.get().split("/").pop()
if self.entry_indels_alignment.get().strip() != "":
print("Reading alignment file")
if not init_alignment(self.entry_indels_alignment.get().strip()):
print("Unable to create gap character lists")
return
else:
print("Empty alignment file path")
return
if not init_threshold(self.entry_threshold.get().strip()):
print("Minimum peptide length input error: minimum length set to 1")
result_file = generate_result_file(self.entry_result_file.get())
ref_dictionary = create_test_comparison_dict(ref_raw.to_dict('split'), REF_FILE_NAME)
if LVL_SEL == "L1&L2":
test_dictionary = create_test_comparison_dict(test_raw.to_dict('split'), TEST_FILE_NAME)
main_dict_one = create_main_comparison_dict(main_raw_one.to_dict('split'), MAIN_FILE_ONE_NAME)
main_dict_two = create_main_comparison_dict(main_raw_two.to_dict('split'), MAIN_FILE_TWO_NAME)
generate_test_comparison_results(ref_dictionary, test_dictionary)
generate_main_comparison_results(L1_matched_dict, "L1m", main_dict_one, main_dict_two)
generate_main_comparison_results(L1_partial_dict, "L1p", main_dict_one, main_dict_two)
generate_main_comparison_results(L1_novel_dict, "L1n", main_dict_one, main_dict_two)
finalize_L1L2_results(result_file)
if LVL_SEL == "L1Only":
test_dictionary = create_test_comparison_dict(test_raw.to_dict('split'), TEST_FILE_NAME)
generate_test_comparison_results(ref_dictionary, test_dictionary)
finalize_L1Only_results(result_file)
if LVL_SEL == "L2Only":
main_dict_one = create_main_comparison_dict(main_raw_one.to_dict('split'), MAIN_FILE_ONE_NAME)
generate_main_comparison_results(ref_dictionary, "L2", main_dict_one)
finalize_L2Only_results(result_file)
print("Compared")
showinfo("CAVES", "Comparison Complete!")
def browse_ref(self):
filename = filedialog.askopenfilename(title="Select a File", filetypes=[("CSV files", "*.csv")])
self.entry_ref.delete(0, tk.END)
self.entry_ref.insert(0, filename)
def browse_test(self):
filename = filedialog.askopenfilename(title="Select a File", filetypes=[("CSV files", "*.csv")])
self.entry_test.delete(0, tk.END)
self.entry_test.insert(0, filename)
def browse_main_one(self):
filename = filedialog.askopenfilename(title="Select a File", filetypes=[("CSV files", "*.csv")])
self.entry_main_one.delete(0, tk.END)
self.entry_main_one.insert(0, filename)
def browse_main_two(self):
filename = filedialog.askopenfilename(title="Select a File", filetypes=[("CSV files", "*.csv")])
self.entry_main_two.delete(0, tk.END)
self.entry_main_two.insert(0, filename)
def browse_alignment(self):
fasta_exts = [("FASTA files", "*.fasta"), ("FASTA files", "*.fna"), ("FASTA files", "*.ffn"),
("FASTA files", "*.faa"), ("FASTA files", "*.frn"), ("FASTA files", "*.fa"),
("FASTA files", "*.fsa")]
filename = filedialog.askopenfilename(title="Select a File", filetypes=fasta_exts)
self.entry_indels_alignment.delete(0, tk.END)
self.entry_indels_alignment.insert(0, filename)
def browse_result_path(self):
time = datetime.now().strftime("%Y-%m-%d_%H%M%S")
filename = filedialog.asksaveasfilename(initialfile="results_"+time, title="Results File",
filetypes=[("Excel files", "*.xlsx")])
self.entry_result_file.delete(0, tk.END)
self.entry_result_file.insert(0, filename)
def config_L1L2_entries(self):
self.entry_ref.config(state='normal')
self.entry_test.config(state='normal')
self.entry_main_one.config(state='normal')
self.entry_main_two.config(state='normal')
def config_L1_only_entries(self):
self.entry_main_one.delete(0, tk.END)
self.entry_main_two.delete(0, tk.END)
self.entry_ref.config(state='normal')
self.entry_test.config(state='normal')
self.entry_main_one.config(state='disabled')
self.entry_main_two.config(state='disabled')
def config_L2_only_entries(self):
self.entry_test.delete(0, tk.END)
self.entry_main_two.delete(0, tk.END)
self.entry_ref.config(state='normal')
self.entry_test.config(state='disabled')
self.entry_main_one.config(state='normal')
self.entry_main_two.config(state='disabled')
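# Column-oriented container for one results sheet; each list below becomes a column in the Excel output.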
class ResultSheetObject:
def __init__(self):
self.origin_file_one = []
self.peptide_one = []
self.start_one = []
self.end_one = []
self.length_one = []
self.letters_matched = []
self.letters_matched_length = []
self.origin_file_two = []
self.peptide_two = []
self.start_two = []
self.end_two = []
self.length_two = []
self.mutated_pos = []
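# Record for a single peptide: source file, sequence, start/end positions, length, and an optional description suffix.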
class PeptideObject:
def __init__(self, new_file, new_pep, new_start, new_end, new_length, new_suffix):
self.origin_file = new_file
self.peptide = new_pep
self.start = new_start
self.end = new_end
self.length = new_length
self.suffix = new_suffix
# ----------------------------------------------- RESULT OBJECTS
L1_novel = ResultSheetObject()
L1_partial = ResultSheetObject()
L1_matched = ResultSheetObject()
L2_novel = ResultSheetObject()
L2_partial = ResultSheetObject()
L2_matched = ResultSheetObject()
L1_novel_L2_novel = ResultSheetObject()
L1_novel_L2_partial = ResultSheetObject()
L1_novel_L2_matched = ResultSheetObject()
L1_partial_L2_novel = ResultSheetObject()
L1_partial_L2_partial = ResultSheetObject()
L1_partial_L2_matched = ResultSheetObject()
L1_matched_L2_novel = ResultSheetObject()
L1_matched_L2_partial = ResultSheetObject()
L1_matched_L2_matched = ResultSheetObject()
# ----------------------------------------------- LEVEL 1 DICTIONARIES
L1_novel_dict = {}
L1_partial_dict = {}
L1_matched_dict = {}
# ----------------------------------------------- FUNCTIONS
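# Reset all module-level state and result objects so each run from the GUI starts clean.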
def init_objects(lvl_sel):
global REF_FILE_NAME
REF_FILE_NAME = ""
global TEST_FILE_NAME
TEST_FILE_NAME = ""
global MAIN_FILE_ONE_NAME
MAIN_FILE_ONE_NAME = ""
global MAIN_FILE_TWO_NAME
MAIN_FILE_TWO_NAME = ""
global SEQ_ONE_GAPS
SEQ_ONE_GAPS = []
global SEQ_TWO_GAPS
SEQ_TWO_GAPS = []
global SEQ_THREE_GAPS
SEQ_THREE_GAPS = []
global SEQ_FOUR_GAPS
SEQ_FOUR_GAPS = []
global FOUR_SEQ_ALIGN
FOUR_SEQ_ALIGN = False
global LVL_SEL
if lvl_sel == 1:
LVL_SEL = "L1&L2"
elif lvl_sel == 2:
LVL_SEL = "L1Only"
else:
LVL_SEL = "L2Only"
global L1_novel
L1_novel = ResultSheetObject()
global L1_partial
L1_partial = ResultSheetObject()
global L1_matched
L1_matched = ResultSheetObject()
global L2_novel
L2_novel = ResultSheetObject()
global L2_partial
L2_partial = ResultSheetObject()
global L2_matched
L2_matched = ResultSheetObject()
global L1_novel_L2_novel
L1_novel_L2_novel = ResultSheetObject()
global L1_novel_L2_partial
L1_novel_L2_partial = ResultSheetObject()
global L1_novel_L2_matched
L1_novel_L2_matched = ResultSheetObject()
global L1_partial_L2_novel
L1_partial_L2_novel = ResultSheetObject()
global L1_partial_L2_partial
L1_partial_L2_partial = ResultSheetObject()
global L1_partial_L2_matched
L1_partial_L2_matched = ResultSheetObject()
global L1_matched_L2_novel
L1_matched_L2_novel = ResultSheetObject()
global L1_matched_L2_partial
L1_matched_L2_partial = ResultSheetObject()
global L1_matched_L2_matched
L1_matched_L2_matched = ResultSheetObject()
global L1_novel_dict
L1_novel_dict = {}
global L1_partial_dict
L1_partial_dict = {}
global L1_matched_dict
L1_matched_dict = {}
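# Load the Sequence A epitope prediction CSV, trying each recognised start/peptide column name pair.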
def init_ref_raw(file_path):
if not path.exists(file_path):
print("Unable to find predictions file: " + file_path)
return None
global REF_FILE_NAME
REF_FILE_NAME = file_path.strip().split("/").pop() # gives last item in list which is file
ref_raw = None
for pep_col in PEP_COLUMNS:
for start_col in START_COLUMNS:
try:
ref_raw = pd.read_csv(file_path, index_col=False, usecols={start_col, pep_col})
break
except ValueError:
ref_raw = None
else:
continue
break
if ref_raw is None:
print("Unable to read epitope predictions: Sequence A file")
print("Value Error: Check to make sure the column names are among the following:")
print("Start Columns:", START_COLUMNS)
print("Peptide Columns:", PEP_COLUMNS)
return ref_raw
def init_test_raw(file_path):
if not path.exists(file_path):
print("Unable to find predictions file: " + file_path)
return None
global TEST_FILE_NAME
TEST_FILE_NAME = file_path.strip().split("/").pop() # gives last item in list which is file
test_raw = None
for pep_col in PEP_COLUMNS:
for start_col in START_COLUMNS:
try:
test_raw = pd.read_csv(file_path, index_col=False, usecols={start_col, pep_col})
break
except ValueError:
test_raw = None
else:
continue
break
if test_raw is None:
print("Unable to read epitope predictions: Sequence B file")
print("Value Error: Check to make sure the column names are among the following:")
print("Start Columns:", START_COLUMNS)
print("Peptide Columns:", PEP_COLUMNS)
return test_raw
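# Load a database search CSV; expects 'Description' and 'Starting Position' columns below a skipped header row.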
def init_main_raw(file_path):
if not path.exists(file_path):
print("Unable to find database search file: " + file_path)
return None
try:
main_raw = pd.read_csv(file_path, index_col=False, skiprows=1, usecols={"Description", "Starting Position"})
return main_raw
except ValueError:
print("Value Error: Check to make sure the column names are: 'Description' and 'Starting Position'")
return None
def init_alignment(file_path):
if not path.exists(file_path):
print("Unable to find alignment file from path: " + file_path)
return False
result = init_gap_chars(file_path)
return result
def init_gap_chars(file_path):
try:
with open(file_path) as my_file:
sequences = build_sequences(my_file)
global ALIGNMENT_WARNING
ALIGNMENT_WARNING = False
global SEQ_ONE_GAPS
SEQ_ONE_GAPS = find_gap_chars(sequences[0])
if LVL_SEL != "L2Only":
global SEQ_TWO_GAPS
SEQ_TWO_GAPS = find_gap_chars(sequences[1])
if LVL_SEL != "L1Only":
global SEQ_THREE_GAPS
SEQ_THREE_GAPS = find_gap_chars(sequences[2])
if sequences[3] and LVL_SEL == "L1&L2":
global SEQ_FOUR_GAPS
SEQ_FOUR_GAPS = find_gap_chars(sequences[3])
global FOUR_SEQ_ALIGN
FOUR_SEQ_ALIGN = True
if ALIGNMENT_WARNING:
showwarning("WARNING", "CAVES has detected a large amount of successive gap characters in your alignment "
"file. Epitopes predicted from highly dissimilar sequences are unlikely to produce "
"biologically relevant matches when compared due to inherent differences in the "
"amino acid composition. \n\nCAVES will still run but we caution against using "
"these results.")
    except Exception:
        print("Alignment file processing error")
        return False
return True
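# Collect the residue lines of each '>' record in the alignment file into up to four sequence strings.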
def build_sequences(file):
sequences = ["", "", "", ""]
curr_seq = -1
for line in file:
if line[0] == ">":
curr_seq += 1
else:
line = line.rstrip("\n")
sequences[curr_seq] += line
return sequences
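# Return the alignment positions of '-' characters; a run of ten or more triggers the dissimilarity warning.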
def find_gap_chars(seq):
gaps = []
amino_acid_count = 1
row = 0
for char in seq:
if char == '-':
row += 1
if row >= 10:
global ALIGNMENT_WARNING
ALIGNMENT_WARNING = True
gaps.append(amino_acid_count)
else:
row = 0
amino_acid_count += 1
return gaps
def init_threshold(threshold_entry):
global THRESHOLD
if not threshold_entry:
THRESHOLD = 1
return True
    try:
        if not str.isdigit(threshold_entry):
            raise ValueError("Minimum peptide length must be a whole number")
        THRESHOLD = int(threshold_entry)
    except ValueError:
        THRESHOLD = 1
        return False
return True
def generate_result_file(file_path):
try:
filename, file_extension = path.splitext(file_path)
if file_extension and file_extension == ".xlsx":
result_file = pd.ExcelWriter(file_path)
else:
result_file = pd.ExcelWriter(filename + ".xlsx")
except:
time = datetime.now().strftime("%Y-%m-%d_%H%M%S")
print("Results file input error: Default file name used "
"(" + "results_" + time + ".xlsx). File placed in directory with CAVES executable.")
result_file = pd.ExcelWriter("results_" + time + ".xlsx")
return result_file
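# Build a {start position: [PeptideObject, ...]} lookup from a database search file.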
def create_main_comparison_dict(main_dict_raw, main_file_name):
main_list = list(main_dict_raw['data'])
result_dict = {}
for item in main_list:
if isinstance(item[0], int):
result_dict = main_comparison_dict_insert(item[0], item[1], main_file_name, result_dict)
else:
result_dict = main_comparison_dict_insert(item[1], item[0], main_file_name, result_dict)
return result_dict
def main_comparison_dict_insert(start, pep, main_file_name, res_dict):
split = pep.split(" ")
suffix = ""
length = len(split[0])
if len(split) > 1:
suffix = " " + split[1] + " " + split[2]
new_peptide = PeptideObject(main_file_name, split[0], start, start + length - 1, length, suffix)
if start not in res_dict:
res_dict[start] = [new_peptide]
else:
res_dict[start].append(new_peptide)
global MAIN_PEPTIDE_MAX_LENGTH
MAIN_PEPTIDE_MAX_LENGTH = max(length, MAIN_PEPTIDE_MAX_LENGTH)
return res_dict
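# Build a {start position: PeptideObject} lookup from an epitope prediction file, enforcing the minimum peptide length.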
def create_test_comparison_dict(sample_dict_raw, test_file_name):
sample_list = list(sample_dict_raw['data'])
result_dict = {}
for item in sample_list:
if isinstance(item[0], int):
result_dict = test_comparison_dict_insert(item[0], item[1], test_file_name, result_dict)
else:
result_dict = test_comparison_dict_insert(item[1], item[0], test_file_name, result_dict)
return result_dict
def test_comparison_dict_insert(start, pep, test_file_name, res_dict):
global THRESHOLD
if len(pep) >= THRESHOLD:
res_dict[start] = PeptideObject(test_file_name, pep, start, start + len(pep) - 1, len(pep), "")
if test_file_name == REF_FILE_NAME:
global REF_PEPTIDE_MAX_LENGTH
REF_PEPTIDE_MAX_LENGTH = max(len(pep), REF_PEPTIDE_MAX_LENGTH)
else:
global TEST_PEPTIDE_MAX_LENGTH
TEST_PEPTIDE_MAX_LENGTH = max(len(pep), TEST_PEPTIDE_MAX_LENGTH)
return res_dict
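# Translate a position in one sequence into the corresponding position in another using their alignment gap lists.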
def align_pos_gaps(ref_position, seq_one, seq_two):
test_position = ref_position
for gap_one in seq_one:
if ref_position > gap_one:
test_position += 1
for gap_two in seq_two:
if ref_position > gap_two:
test_position -= 1
return test_position
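# Gap-aware offset between a position in the first sequence and a position in the second.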
def rel_diff_between_pos(ref_pos, seq_one_gaps, test_pos, seq_two_gaps):
total_ref_gaps = 0
total_test_gaps = 0
for gap_one in seq_one_gaps:
if ref_pos > gap_one:
total_ref_gaps += 1
for gap_two in seq_two_gaps:
if test_pos > gap_two:
total_test_gaps += 1
rel_dist = abs((test_pos+total_test_gaps) - (ref_pos+total_ref_gaps))
final_dist = rel_dist
for gap_one in seq_one_gaps:
if ref_pos <= gap_one <= ref_pos + rel_dist:
final_dist -= 1
for gap_two in seq_two_gaps:
if ref_pos <= gap_two <= ref_pos + rel_dist:
final_dist += 1
return final_dist
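# Select the ResultSheetObject that corresponds to the result type, source sheet and comparison level.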
def get_result_object(result_type, input_file, level):
if result_type == "matched" and level == 1:
return L1_matched
if result_type == "partial" and level == 1:
return L1_partial
if result_type == "novel" and level == 1:
return L1_novel
if result_type == "matched" and level == 2 and input_file == "L1m":
return L1_matched_L2_matched
if result_type == "partial" and level == 2 and input_file == "L1m":
return L1_matched_L2_partial
if result_type == "novel" and level == 2 and input_file == "L1m":
return L1_matched_L2_novel
if result_type == "matched" and level == 2 and input_file == "L1p":
return L1_partial_L2_matched
if result_type == "partial" and level == 2 and input_file == "L1p":
return L1_partial_L2_partial
if result_type == "novel" and level == 2 and input_file == "L1p":
return L1_partial_L2_novel
if result_type == "matched" and level == 2 and input_file == "L1n":
return L1_novel_L2_matched
if result_type == "partial" and level == 2 and input_file == "L1n":
return L1_novel_L2_partial
if result_type == "novel" and level == 2 and input_file == "L1n":
return L1_novel_L2_novel
if result_type == "matched" and level == 2 and input_file == "L2":
return L2_matched
if result_type == "partial" and level == 2 and input_file == "L2":
return L2_partial
if result_type == "novel" and level == 2 and input_file == "L2":
return L2_novel
def create_match_df(obj):
return pd.DataFrame({'Origin 1': obj.origin_file_one, 'Peptide 1': obj.peptide_one, 'Start 1': obj.start_one,
'End 1': obj.end_one, 'Length 1': obj.length_one, 'Origin 2': obj.origin_file_two,
'Peptide 2': obj.peptide_two, 'Start 2': obj.start_two, 'End 2': obj.end_two,
'Length 2': obj.length_two})
def create_partial_df(obj):
return pd.DataFrame({'Origin 1': obj.origin_file_one, 'Peptide 1': obj.peptide_one, 'Start 1': obj.start_one,
'End 1': obj.end_one, 'Length 1': obj.length_one, 'Letters Matched': obj.letters_matched,
'Matched Length': obj.letters_matched_length, 'Origin 2': obj.origin_file_two,
'Peptide 2': obj.peptide_two, 'Start 2': obj.start_two, 'End 2': obj.end_two,
'Length 2': obj.length_two})
def create_novel_df(obj, level):
if level == "L1":
return pd.DataFrame({'Origin': obj.origin_file_one, 'Peptide': obj.peptide_one, 'Start': obj.start_one,
'End': obj.end_one, 'Length': obj.length_one, 'Mutated Positions': obj.mutated_pos})
else:
return pd.DataFrame({'Origin': obj.origin_file_one, 'Peptide': obj.peptide_one, 'Start': obj.start_one,
'End': obj.end_one, 'Length': obj.length_one})
def insert_level_one_obj(lvl_one_dict, curr_pep):
if curr_pep.start in lvl_one_dict:
matched = False
for item in lvl_one_dict[curr_pep.start]:
if curr_pep.peptide == item.peptide and curr_pep.origin_file == item.origin_file\
and curr_pep.length == item.length:
matched = True
if not matched:
lvl_one_dict[curr_pep.start].append(curr_pep)
return True
else:
lvl_one_dict[curr_pep.start] = [curr_pep]
return True
return False
def insert_matched(result_obj, pep_one, pep_two):
result_obj.origin_file_one.append(pep_one.origin_file + pep_one.suffix)
result_obj.peptide_one.append(pep_one.peptide)
result_obj.start_one.append(pep_one.start)
result_obj.end_one.append(pep_one.end)
result_obj.length_one.append(pep_one.length)
result_obj.origin_file_two.append(pep_two.origin_file + pep_two.suffix)
result_obj.peptide_two.append(pep_two.peptide)
result_obj.start_two.append(pep_two.start)
result_obj.end_two.append(pep_two.end)
result_obj.length_two.append(pep_two.length)
def insert_partial(result_obj, pep_one, partial):
result_obj.origin_file_one.append(pep_one.origin_file + pep_one.suffix)
result_obj.peptide_one.append(pep_one.peptide)
result_obj.start_one.append(pep_one.start)
result_obj.end_one.append(pep_one.end)
result_obj.length_one.append(pep_one.length)
matched_letters = ""
for pos, letter in sorted(partial[1].items()):
matched_letters += letter
result_obj.letters_matched.append(matched_letters)
result_obj.letters_matched_length.append(len(matched_letters))
result_obj.origin_file_two.append(partial[0].origin_file + partial[0].suffix)
result_obj.peptide_two.append(partial[0].peptide)
result_obj.start_two.append(partial[0].start)
result_obj.end_two.append(partial[0].end)
result_obj.length_two.append(partial[0].length)
def insert_novel(result_obj, pep_one, mutated_pos):
result_obj.origin_file_one.append(pep_one.origin_file + pep_one.suffix)
result_obj.peptide_one.append(pep_one.peptide)
result_obj.start_one.append(pep_one.start)
result_obj.end_one.append(pep_one.end)
result_obj.length_one.append(pep_one.length)
result_obj.mutated_pos.append(mutated_pos)
def all_novel_positions_covered(partial_matches, novel_positions):
for partial_match in partial_matches:
for key in partial_match[1].keys():
if key in novel_positions:
del novel_positions[key]
return len(novel_positions) == 0
def find_most_mutations(novel_peps):
if not novel_peps:
return ""
longest_mutation_list = []
longest = 0
for pep in novel_peps:
if len(pep[1]) > longest:
longest = len(pep[1])
longest_mutation_list = pep[1]
longest_mutation_list_pos = ""
for pos in longest_mutation_list:
longest_mutation_list_pos += str(pos) + ", "
return longest_mutation_list_pos.rstrip(", ")
def input_main_comparison_result(curr_pep, results, input_file):
if "matched" in results:
res_obj = get_result_object("matched", input_file, 2)
for match in results["matched"]:
insert_matched(res_obj, curr_pep, match)
elif "partial" in results:
res_obj = get_result_object("partial", input_file, 2)
for partial in results["partial"]:
insert_partial(res_obj, curr_pep, partial)
else:
res_obj = get_result_object("novel", input_file, 2)
insert_novel(res_obj, curr_pep, "")
def input_test_comparison_result(curr_pep, results):
if "novel" in results:
insert_level_one_obj(L1_novel_dict, curr_pep)
most_mutations = find_most_mutations(results["novel"])
res_obj = get_result_object("novel", "", 1)
insert_novel(res_obj, curr_pep, most_mutations)
for novel in results["novel"]:
if insert_level_one_obj(L1_novel_dict, novel[0]):
mutated_list = ""
for pos in novel[2]:
mutated_list += str(pos) + ", "
insert_novel(res_obj, novel[0], mutated_list.rstrip(", "))
else:
if "matched" in results:
insert_level_one_obj(L1_matched_dict, curr_pep)
result_obj = get_result_object("matched", "", 1)
for match in results["matched"]:
insert_level_one_obj(L1_matched_dict, match)
insert_matched(result_obj, curr_pep, match)
if "partial" in results:
insert_level_one_obj(L1_partial_dict, curr_pep)
result_obj = get_result_object("partial", "", 1)
for partial in results["partial"]:
insert_level_one_obj(L1_partial_dict, partial[0])
insert_partial(result_obj, curr_pep, partial)
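# Determine aligned start positions and the number of residues to compare for a Level 2 (database) comparison.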
def calculate_main_comparison_parameters(test_peptide, main_peptide):
result = {}
if test_peptide.origin_file == TEST_FILE_NAME:
if FOUR_SEQ_ALIGN:
if align_pos_gaps(test_peptide.start, SEQ_TWO_GAPS, SEQ_FOUR_GAPS) > main_peptide.start:
result["test_start"] = test_peptide.start
result["main_start"] = align_pos_gaps(test_peptide.start, SEQ_TWO_GAPS, SEQ_FOUR_GAPS)
elif align_pos_gaps(test_peptide.start, SEQ_TWO_GAPS, SEQ_FOUR_GAPS) < main_peptide.start:
result["test_start"] = test_peptide.start + \
rel_diff_between_pos(test_peptide.start, SEQ_TWO_GAPS,
main_peptide.start, SEQ_FOUR_GAPS)
result["main_start"] = main_peptide.start
else:
result["test_start"] = test_peptide.start
result["main_start"] = main_peptide.start
else:
if align_pos_gaps(test_peptide.start, SEQ_TWO_GAPS, SEQ_THREE_GAPS) > main_peptide.start:
result["test_start"] = test_peptide.start
result["main_start"] = align_pos_gaps(test_peptide.start, SEQ_TWO_GAPS, SEQ_THREE_GAPS)
elif align_pos_gaps(test_peptide.start, SEQ_TWO_GAPS, SEQ_THREE_GAPS) < main_peptide.start:
result["test_start"] = test_peptide.start + rel_diff_between_pos(test_peptide.start, SEQ_TWO_GAPS,
main_peptide.start, SEQ_THREE_GAPS)
result["main_start"] = main_peptide.start
else:
result["test_start"] = test_peptide.start
result["main_start"] = main_peptide.start
else:
if align_pos_gaps(test_peptide.start, SEQ_ONE_GAPS, SEQ_THREE_GAPS) > main_peptide.start:
result["test_start"] = test_peptide.start
result["main_start"] = align_pos_gaps(test_peptide.start, SEQ_ONE_GAPS, SEQ_THREE_GAPS)
elif align_pos_gaps(test_peptide.start, SEQ_ONE_GAPS, SEQ_THREE_GAPS) < main_peptide.start:
result["test_start"] = test_peptide.start + rel_diff_between_pos(test_peptide.start, SEQ_ONE_GAPS,
main_peptide.start, SEQ_THREE_GAPS)
result["main_start"] = main_peptide.start
else:
result["test_start"] = test_peptide.start
result["main_start"] = main_peptide.start
if main_peptide.end - result["main_start"] <= test_peptide.end - result["test_start"]:
result["num_comp"] = main_peptide.end - result["main_start"] + 1
else:
result["num_comp"] = test_peptide.end - result["test_start"] + 1
return result
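# Determine aligned start positions and the number of residues to compare for a Level 1 comparison.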
def calculate_test_comparison_parameters(ref_peptide, test_peptide):
result = {}
if align_pos_gaps(ref_peptide.start, SEQ_ONE_GAPS, SEQ_TWO_GAPS) > test_peptide.start:
result["ref_start"] = ref_peptide.start
result["test_start"] = align_pos_gaps(ref_peptide.start, SEQ_ONE_GAPS, SEQ_TWO_GAPS)
elif align_pos_gaps(ref_peptide.start, SEQ_ONE_GAPS, SEQ_TWO_GAPS) < test_peptide.start:
result["ref_start"] = ref_peptide.start + \
rel_diff_between_pos(ref_peptide.start, SEQ_ONE_GAPS, test_peptide.start, SEQ_TWO_GAPS)
result["test_start"] = test_peptide.start
else:
result["ref_start"] = ref_peptide.start
result["test_start"] = test_peptide.start
if ref_peptide.end - result["ref_start"] <= test_peptide.end - result["test_start"]:
result["num_comp"] = ref_peptide.end - result["ref_start"] + 1
else:
result["num_comp"] = test_peptide.end - result["test_start"] + 1
return result
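# Residue-by-residue comparison of a Sequence A peptide against a Sequence B peptide: matched, partial or novel.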
def compare_to_test_string(ref_peptide, test_peptide):
results = []
novel_ref_positions = {}
novel_test_positions = {}
matched_positions = {}
smallest_max_length = ref_peptide.length if ref_peptide.length < test_peptide.length else test_peptide.length
comp_params = calculate_test_comparison_parameters(ref_peptide, test_peptide)
global SEQ_ONE_GAPS
global SEQ_TWO_GAPS
ref_curr = comp_params["ref_start"] - ref_peptide.start
test_curr = comp_params["test_start"] - test_peptide.start
for i in range(0, comp_params["num_comp"]):
overall_ref_pos = comp_params["ref_start"] + i
overall_test_pos = comp_params["test_start"] + i
if overall_ref_pos in SEQ_TWO_GAPS and overall_test_pos not in SEQ_ONE_GAPS:
novel_ref_positions[comp_params["ref_start"] + i] = ref_peptide.peptide[ref_curr]
novel_test_positions[comp_params["test_start"] + i] = '-'
ref_curr += 1
elif overall_ref_pos not in SEQ_TWO_GAPS and overall_test_pos in SEQ_ONE_GAPS:
novel_ref_positions[comp_params["ref_start"] + i] = '-'
novel_test_positions[comp_params["test_start"] + i] = test_peptide.peptide[test_curr]
test_curr += 1
elif ref_peptide.peptide[ref_curr] == test_peptide.peptide[test_curr]:
matched_positions[comp_params["ref_start"]+i] = ref_peptide.peptide[ref_curr]
ref_curr += 1
test_curr += 1
else:
novel_ref_positions[comp_params["ref_start"]+i] = ref_peptide.peptide[ref_curr]
novel_test_positions[comp_params["test_start"]+i] = test_peptide.peptide[test_curr]
ref_curr += 1
test_curr += 1
if len(matched_positions) == smallest_max_length:
results.append("matched")
elif len(novel_ref_positions) > 0:
results.append("novel")
results.append(novel_ref_positions)
results.append(novel_test_positions)
else:
results.append("partial")
results.append(matched_positions)
return results
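# Residue-by-residue comparison of a predicted peptide against a database search peptide.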
def compare_to_main_string(test_peptide, main_peptide):
results = []
novel_positions = {}
matched_positions = {}
comp_params = calculate_main_comparison_parameters(test_peptide, main_peptide)
test_curr = comp_params["test_start"] - test_peptide.start
main_curr = comp_params["main_start"] - main_peptide.start
for i in range(0, comp_params["num_comp"]):
if test_peptide.peptide[test_curr] == main_peptide.peptide[main_curr]:
matched_positions[comp_params["test_start"]+i] = test_peptide.peptide[test_curr]
test_curr += 1
main_curr += 1
else:
novel_positions[comp_params["test_start"]+i] = test_peptide.peptide[test_curr]
test_curr += 1
main_curr += 1
if len(matched_positions) == test_peptide.length:
results.append("matched")
elif len(novel_positions) > 0:
results.append("novel")
results.append(novel_positions)
else:
results.append("partial")
results.append(matched_positions)
return results
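# Classify every Sequence B peptide that overlaps the aligned span of the given Sequence A peptide.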
def generate_test_comparisons(dictionary, ref_peptide):
result = {}
comp_results = {"matched": [], "partial": [], "novel_test_peps": []}
aligned_test_start = align_pos_gaps(ref_peptide.start, SEQ_ONE_GAPS, SEQ_TWO_GAPS)
aligned_test_end = align_pos_gaps(ref_peptide.end, SEQ_ONE_GAPS, SEQ_TWO_GAPS)
curr_pos = max(1, aligned_test_start-TEST_PEPTIDE_MAX_LENGTH)
# gather all possible comparisons for testing string
while curr_pos <= aligned_test_end:
if curr_pos in dictionary:
test_peptide = dictionary[curr_pos]
if ((test_peptide.start <= aligned_test_start <= test_peptide.end) or
(test_peptide.start <= aligned_test_end <= test_peptide.end)) or \
((aligned_test_start <= test_peptide.start <= aligned_test_end) or
(aligned_test_start <= test_peptide.end <= aligned_test_end)):
comparison = compare_to_test_string(ref_peptide, test_peptide)
if comparison[0] == "matched":
comp_results["matched"].append(test_peptide)
elif comparison[0] == "partial":
comp_results["partial"].append([test_peptide, comparison[1]])
elif comparison[0] == "novel":
comp_results["novel_test_peps"].append([test_peptide, comparison[1], comparison[2]])
curr_pos += 1
if len(comp_results["novel_test_peps"]) > 0:
result["novel"] = []
for novel_test_pep in comp_results["novel_test_peps"]:
result["novel"].append(novel_test_pep)
elif len(comp_results["matched"]) == 0 and len(comp_results["partial"]) == 0:
result["novel"] = []
else:
if len(comp_results["matched"]) > 0:
result["matched"] = comp_results["matched"]
if len(comp_results["partial"]) > 0:
result["partial"] = comp_results["partial"]
return result
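# Classify every database search peptide that overlaps the aligned span of the given predicted peptide.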
def generate_main_comparisons(dictionary, test_peptide):
result = {}
comp_results = {"matched": [], "partial": [], "novel_pos_dict": {}}
if LVL_SEL == "L2Only":
aligned_test_start = align_pos_gaps(test_peptide.start, SEQ_ONE_GAPS, SEQ_TWO_GAPS)
aligned_test_end = align_pos_gaps(test_peptide.end, SEQ_ONE_GAPS, SEQ_TWO_GAPS)
else:
if test_peptide.origin_file != TEST_FILE_NAME:
aligned_test_start = align_pos_gaps(test_peptide.start, SEQ_ONE_GAPS, SEQ_THREE_GAPS)
aligned_test_end = align_pos_gaps(test_peptide.end, SEQ_ONE_GAPS, SEQ_THREE_GAPS)
else:
if FOUR_SEQ_ALIGN:
aligned_test_start = align_pos_gaps(test_peptide.start, SEQ_TWO_GAPS, SEQ_FOUR_GAPS)
aligned_test_end = align_pos_gaps(test_peptide.end, SEQ_TWO_GAPS, SEQ_FOUR_GAPS)
else:
aligned_test_start = align_pos_gaps(test_peptide.start, SEQ_TWO_GAPS, SEQ_THREE_GAPS)
aligned_test_end = align_pos_gaps(test_peptide.end, SEQ_TWO_GAPS, SEQ_THREE_GAPS)
curr_pos = max(1, aligned_test_start-MAIN_PEPTIDE_MAX_LENGTH)
# gather all possible comparisons for testing string
while curr_pos <= aligned_test_end:
if curr_pos in dictionary:
for main_peptide in dictionary[curr_pos]:
if ((main_peptide.start <= aligned_test_start <= main_peptide.end) or
(main_peptide.start <= aligned_test_end <= main_peptide.end)) or \
((aligned_test_start <= main_peptide.start <= aligned_test_end) or
(aligned_test_start <= main_peptide.end <= aligned_test_end)):
comparison = \
compare_to_main_string(test_peptide, main_peptide)
if comparison[0] == "matched":
comp_results["matched"].append(main_peptide)
elif comparison[0] == "partial":
comp_results["partial"].append([main_peptide, comparison[1]])
elif comparison[0] == "novel":
for pos, letter in comparison[1].items():
comp_results["novel_pos_dict"][pos] = letter
curr_pos += 1
if len(comp_results["matched"]) > 0:
result["matched"] = comp_results["matched"]
elif len(comp_results["matched"]) == 0 and len(comp_results["partial"]) == 0:
result["novel"] = test_peptide
elif len(comp_results["novel_pos_dict"]) > 0:
if len(comp_results["partial"]) == 0:
result["novel"] = test_peptide
else:
if all_novel_positions_covered(comp_results["partial"], comp_results["novel_pos_dict"]):
result["partial"] = comp_results["partial"]
else:
result["novel"] = test_peptide
elif len(comp_results["partial"]) > 0 and len(comp_results["matched"]) == 0 \
and len(comp_results["novel_pos_dict"]) == 0:
result["partial"] = comp_results["partial"]
return result
def calculate_input_novel_test_peps(test_dict):
for key, value in sorted(test_dict.items()):
matched = False
if key in L1_matched_dict:
for potential in L1_matched_dict[key]:
if potential.peptide == value.peptide and potential.origin_file == value.origin_file \
and potential.length == value.length:
matched = True
if key in L1_partial_dict:
for potential in L1_partial_dict[key]:
if potential.peptide == value.peptide and potential.origin_file == value.origin_file \
and potential.length == value.length:
matched = True
if key in L1_novel_dict:
for potential in L1_novel_dict[key]:
if potential.peptide == value.peptide and potential.origin_file == value.origin_file \
and potential.length == value.length:
matched = True
if not matched:
insert_level_one_obj(L1_novel_dict, value)
res_obj = get_result_object("novel", "", 1)
insert_novel(res_obj, value, "")
def generate_main_comparison_results(test_dict, input_name, main_dict_one, main_dict_two=None):
for key, value in sorted(test_dict.items()):
if isinstance(value, list):
for pep in value:
if pep.origin_file == REF_FILE_NAME:
results = generate_main_comparisons(main_dict_one, pep)
else: # pep.origin_file == TEST_FILE_NAME
results = generate_main_comparisons(main_dict_two, pep)
input_main_comparison_result(pep, results, input_name)
else:
if value.origin_file == REF_FILE_NAME:
results = generate_main_comparisons(main_dict_one, value)
else: # pep.origin_file == TEST_FILE_NAME
results = generate_main_comparisons(main_dict_two, value)
input_main_comparison_result(value, results, input_name)
def generate_test_comparison_results(ref_dict, test_dict):
for key, value in sorted(ref_dict.items()):
results = generate_test_comparisons(test_dict, value)
input_test_comparison_result(value, results)
calculate_input_novel_test_peps(test_dict)
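# Write every Level 1 and combined Level 1/Level 2 sheet to the Excel results file.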
def finalize_L1L2_results(result_file):
L1m_df = create_match_df(L1_matched)
L1p_df = create_partial_df(L1_partial)
L1n_df = create_novel_df(L1_novel, "L1")
L1m_L2m_df = create_match_df(L1_matched_L2_matched)
L1m_L2p_df = create_partial_df(L1_matched_L2_partial)
L1m_L2n_df = create_novel_df(L1_matched_L2_novel, "L2")
L1p_L2m_df = create_match_df(L1_partial_L2_matched)
L1p_L2p_df = create_partial_df(L1_partial_L2_partial)
L1p_L2n_df = create_novel_df(L1_partial_L2_novel, "L2")
L1n_L2m_df = create_match_df(L1_novel_L2_matched)
L1n_L2p_df = create_partial_df(L1_novel_L2_partial)
L1n_L2n_df = create_novel_df(L1_novel_L2_novel, "L2")
L1m_df.to_excel(result_file, sheet_name="L1E", index=False)
L1p_df.to_excel(result_file, sheet_name="L1P", index=False)
L1n_df.to_excel(result_file, sheet_name="L1N", index=False)
L1m_L2m_df.to_excel(result_file, sheet_name="L1E_L2E", index=False)
L1m_L2p_df.to_excel(result_file, sheet_name="L1E_L2P", index=False)
L1m_L2n_df.to_excel(result_file, sheet_name="L1E_L2N", index=False)
L1p_L2m_df.to_excel(result_file, sheet_name="L1P_L2E", index=False)
L1p_L2p_df.to_excel(result_file, sheet_name="L1P_L2P", index=False)
L1p_L2n_df.to_excel(result_file, sheet_name="L1P_L2N", index=False)
L1n_L2m_df.to_excel(result_file, sheet_name="L1N_L2E", index=False)
L1n_L2p_df.to_excel(result_file, sheet_name="L1N_L2P", index=False)
L1n_L2n_df.to_excel(result_file, sheet_name="L1N_L2N", index=False)
result_file.save()
def finalize_L1Only_results(result_file):
L1m_df = create_match_df(L1_matched)
L1p_df = create_partial_df(L1_partial)
L1n_df = create_novel_df(L1_novel, "L1")
L1m_df.to_excel(result_file, sheet_name="L1E", index=False)
L1p_df.to_excel(result_file, sheet_name="L1P", index=False)
L1n_df.to_excel(result_file, sheet_name="L1N", index=False)
result_file.save()
def finalize_L2Only_results(result_file):
L2m_df = create_match_df(L2_matched)
L2p_df = create_partial_df(L2_partial)
L2n_df = create_novel_df(L2_novel, "L2")
L2m_df.to_excel(result_file, sheet_name="L2E", index=False)
L2p_df.to_excel(result_file, sheet_name="L2P", index=False)
L2n_df.to_excel(result_file, sheet_name="L2N", index=False)
result_file.save()
# ----------------------------------------------- MAIN
if __name__ == '__main__':
window = tk.Tk()
font = Font(family="Calibri", size=10)
window.option_add("*Font", font)
window.title("CAVES 1.0")
app = MainApplication(window)
window.mainloop() | [
"tkinter.messagebox.showwarning",
"os.path.exists",
"pandas.ExcelWriter",
"tkinter.Entry",
"pandas.read_csv",
"tkinter.filedialog.asksaveasfilename",
"os.path.splitext",
"tkinter.Button",
"tkinter.font.Font",
"tkinter.Canvas",
"tkinter.Tk",
"datetime.datetime.now",
"tkinter.Label",
"pandas.DataFrame",
"tkinter.messagebox.showinfo",
"tkinter.Frame",
"tkinter.filedialog.askopenfilename"
] | [((29060, 29370), 'pandas.DataFrame', 'pd.DataFrame', (["{'Origin 1': obj.origin_file_one, 'Peptide 1': obj.peptide_one, 'Start 1':\n obj.start_one, 'End 1': obj.end_one, 'Length 1': obj.length_one,\n 'Origin 2': obj.origin_file_two, 'Peptide 2': obj.peptide_two,\n 'Start 2': obj.start_two, 'End 2': obj.end_two, 'Length 2': obj.length_two}"], {}), "({'Origin 1': obj.origin_file_one, 'Peptide 1': obj.peptide_one,\n 'Start 1': obj.start_one, 'End 1': obj.end_one, 'Length 1': obj.\n length_one, 'Origin 2': obj.origin_file_two, 'Peptide 2': obj.\n peptide_two, 'Start 2': obj.start_two, 'End 2': obj.end_two, 'Length 2':\n obj.length_two})\n", (29072, 29370), True, 'import pandas as pd\n'), ((29469, 29869), 'pandas.DataFrame', 'pd.DataFrame', (["{'Origin 1': obj.origin_file_one, 'Peptide 1': obj.peptide_one, 'Start 1':\n obj.start_one, 'End 1': obj.end_one, 'Length 1': obj.length_one,\n 'Letters Matched': obj.letters_matched, 'Matched Length': obj.\n letters_matched_length, 'Origin 2': obj.origin_file_two, 'Peptide 2':\n obj.peptide_two, 'Start 2': obj.start_two, 'End 2': obj.end_two,\n 'Length 2': obj.length_two}"], {}), "({'Origin 1': obj.origin_file_one, 'Peptide 1': obj.peptide_one,\n 'Start 1': obj.start_one, 'End 1': obj.end_one, 'Length 1': obj.\n length_one, 'Letters Matched': obj.letters_matched, 'Matched Length':\n obj.letters_matched_length, 'Origin 2': obj.origin_file_two,\n 'Peptide 2': obj.peptide_two, 'Start 2': obj.start_two, 'End 2': obj.\n end_two, 'Length 2': obj.length_two})\n", (29481, 29869), True, 'import pandas as pd\n'), ((52022, 52029), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (52027, 52029), True, 'import tkinter as tk\n'), ((52041, 52072), 'tkinter.font.Font', 'Font', ([], {'family': '"""Calibri"""', 'size': '(10)'}), "(family='Calibri', size=10)\n", (52045, 52072), False, 'from tkinter.font import Font\n'), ((1670, 1710), 'tkinter.Canvas', 'tk.Canvas', (['master'], {'width': '(550)', 'height': '(690)'}), '(master, width=550, height=690)\n', (1679, 1710), True, 'import tkinter as tk\n'), ((1783, 1811), 'tkinter.Frame', 'tk.Frame', (['master'], {'bg': '"""white"""'}), "(master, bg='white')\n", (1791, 1811), True, 'import tkinter as tk\n'), ((2011, 2057), 'tkinter.font.Font', 'Font', ([], {'family': '"""Calibri"""', 'size': '(12)', 'weight': '"""bold"""'}), "(family='Calibri', size=12, weight='bold')\n", (2015, 2057), False, 'from tkinter.font import Font\n'), ((2086, 2131), 'tkinter.Frame', 'tk.Frame', (['self.frame'], {'bd': '"""10"""', 'padx': '(3)', 'pady': '(3)'}), "(self.frame, bd='10', padx=3, pady=3)\n", (2094, 2131), True, 'import tkinter as tk\n'), ((2165, 2257), 'tkinter.Label', 'tk.Label', (['self.frame_input'], {'text': '"""Input File Paths"""', 'bd': '"""3"""', 'fg': '"""blue"""', 'font': 'title_font'}), "(self.frame_input, text='Input File Paths', bd='3', fg='blue', font\n =title_font)\n", (2173, 2257), True, 'import tkinter as tk\n'), ((2295, 2368), 'tkinter.Label', 'tk.Label', (['self.frame_input'], {'text': '"""Epitope Predictions"""', 'bd': '"""3"""', 'fg': '"""blue"""'}), "(self.frame_input, text='Epitope Predictions', bd='3', fg='blue')\n", (2303, 2368), True, 'import tkinter as tk\n'), ((2394, 2447), 'tkinter.Label', 'tk.Label', (['self.frame_input'], {'text': '"""Sequence A"""', 'bd': '"""3"""'}), "(self.frame_input, text='Sequence A', bd='3')\n", (2402, 2447), True, 'import tkinter as tk\n'), ((2474, 2527), 'tkinter.Label', 'tk.Label', (['self.frame_input'], {'text': '"""Sequence B"""', 'bd': '"""3"""'}), "(self.frame_input, text='Sequence 
B', bd='3')\n", (2482, 2527), True, 'import tkinter as tk\n'), ((2568, 2639), 'tkinter.Label', 'tk.Label', (['self.frame_input'], {'text': '"""Database Searches"""', 'bd': '"""3"""', 'fg': '"""blue"""'}), "(self.frame_input, text='Database Searches', bd='3', fg='blue')\n", (2576, 2639), True, 'import tkinter as tk\n'), ((2670, 2723), 'tkinter.Label', 'tk.Label', (['self.frame_input'], {'text': '"""Sequence A"""', 'bd': '"""3"""'}), "(self.frame_input, text='Sequence A', bd='3')\n", (2678, 2723), True, 'import tkinter as tk\n'), ((2754, 2807), 'tkinter.Label', 'tk.Label', (['self.frame_input'], {'text': '"""Sequence B"""', 'bd': '"""3"""'}), "(self.frame_input, text='Sequence B', bd='3')\n", (2762, 2807), True, 'import tkinter as tk\n'), ((2834, 2886), 'tkinter.Entry', 'tk.Entry', (['self.frame_input'], {'bd': '"""3"""', 'justify': '"""center"""'}), "(self.frame_input, bd='3', justify='center')\n", (2842, 2886), True, 'import tkinter as tk\n'), ((2913, 2965), 'tkinter.Entry', 'tk.Entry', (['self.frame_input'], {'bd': '"""3"""', 'justify': '"""center"""'}), "(self.frame_input, bd='3', justify='center')\n", (2921, 2965), True, 'import tkinter as tk\n'), ((2996, 3048), 'tkinter.Entry', 'tk.Entry', (['self.frame_input'], {'bd': '"""3"""', 'justify': '"""center"""'}), "(self.frame_input, bd='3', justify='center')\n", (3004, 3048), True, 'import tkinter as tk\n'), ((3079, 3131), 'tkinter.Entry', 'tk.Entry', (['self.frame_input'], {'bd': '"""3"""', 'justify': '"""center"""'}), "(self.frame_input, bd='3', justify='center')\n", (3087, 3131), True, 'import tkinter as tk\n'), ((3159, 3226), 'tkinter.Button', 'tk.Button', (['self.frame_input'], {'text': '"""Browse"""', 'command': 'self.browse_ref'}), "(self.frame_input, text='Browse', command=self.browse_ref)\n", (3168, 3226), True, 'import tkinter as tk\n'), ((3254, 3322), 'tkinter.Button', 'tk.Button', (['self.frame_input'], {'text': '"""Browse"""', 'command': 'self.browse_test'}), "(self.frame_input, text='Browse', command=self.browse_test)\n", (3263, 3322), True, 'import tkinter as tk\n'), ((3354, 3426), 'tkinter.Button', 'tk.Button', (['self.frame_input'], {'text': '"""Browse"""', 'command': 'self.browse_main_one'}), "(self.frame_input, text='Browse', command=self.browse_main_one)\n", (3363, 3426), True, 'import tkinter as tk\n'), ((3458, 3530), 'tkinter.Button', 'tk.Button', (['self.frame_input'], {'text': '"""Browse"""', 'command': 'self.browse_main_two'}), "(self.frame_input, text='Browse', command=self.browse_main_two)\n", (3467, 3530), True, 'import tkinter as tk\n'), ((3566, 3638), 'tkinter.Label', 'tk.Label', (['self.frame_input'], {'text': '"""CAVES Indel Search"""', 'bd': '"""3"""', 'fg': '"""blue"""'}), "(self.frame_input, text='CAVES Indel Search', bd='3', fg='blue')\n", (3574, 3638), True, 'import tkinter as tk\n'), ((3678, 3730), 'tkinter.Label', 'tk.Label', (['self.frame_input'], {'text': '"""Alignment"""', 'bd': '"""3"""'}), "(self.frame_input, text='Alignment', bd='3')\n", (3686, 3730), True, 'import tkinter as tk\n'), ((3769, 3821), 'tkinter.Entry', 'tk.Entry', (['self.frame_input'], {'bd': '"""3"""', 'justify': '"""center"""'}), "(self.frame_input, bd='3', justify='center')\n", (3777, 3821), True, 'import tkinter as tk\n'), ((3861, 3934), 'tkinter.Button', 'tk.Button', (['self.frame_input'], {'text': '"""Browse"""', 'command': 'self.browse_alignment'}), "(self.frame_input, text='Browse', command=self.browse_alignment)\n", (3870, 3934), True, 'import tkinter as tk\n'), ((3973, 4070), 'tkinter.Label', 'tk.Label', 
(['self.frame_input'], {'text': '"""Minimum Peptide Length"""', 'bd': '"""3"""', 'fg': '"""blue"""', 'font': 'title_font'}), "(self.frame_input, text='Minimum Peptide Length', bd='3', fg='blue',\n font=title_font)\n", (3981, 4070), True, 'import tkinter as tk\n'), ((4144, 4196), 'tkinter.Entry', 'tk.Entry', (['self.frame_input'], {'bd': '"""3"""', 'justify': '"""center"""'}), "(self.frame_input, bd='3', justify='center')\n", (4152, 4196), True, 'import tkinter as tk\n'), ((4235, 4323), 'tkinter.Label', 'tk.Label', (['self.frame_input'], {'text': '"""Default minimum is 1 amino acid"""', 'bd': '"""3"""', 'fg': '"""red"""'}), "(self.frame_input, text='Default minimum is 1 amino acid', bd='3',\n fg='red')\n", (4243, 4323), True, 'import tkinter as tk\n'), ((4448, 4539), 'tkinter.Label', 'tk.Label', (['self.frame_input'], {'text': '"""Level Selection"""', 'bd': '"""3"""', 'fg': '"""blue"""', 'font': 'title_font'}), "(self.frame_input, text='Level Selection', bd='3', fg='blue', font=\n title_font)\n", (4456, 4539), True, 'import tkinter as tk\n'), ((4613, 4663), 'tkinter.Frame', 'tk.Frame', (['self.frame_input'], {'bd': '"""0"""', 'padx': '(3)', 'pady': '(3)'}), "(self.frame_input, bd='0', padx=3, pady=3)\n", (4621, 4663), True, 'import tkinter as tk\n'), ((5639, 5727), 'tkinter.Label', 'tk.Label', (['self.frame_input'], {'text': '"""Results File"""', 'bd': '"""3"""', 'fg': '"""blue"""', 'font': 'title_font'}), "(self.frame_input, text='Results File', bd='3', fg='blue', font=\n title_font)\n", (5647, 5727), True, 'import tkinter as tk\n'), ((5804, 5856), 'tkinter.Entry', 'tk.Entry', (['self.frame_input'], {'bd': '"""3"""', 'justify': '"""center"""'}), "(self.frame_input, bd='3', justify='center')\n", (5812, 5856), True, 'import tkinter as tk\n'), ((5891, 5966), 'tkinter.Button', 'tk.Button', (['self.frame_input'], {'text': '"""Browse"""', 'command': 'self.browse_result_path'}), "(self.frame_input, text='Browse', command=self.browse_result_path)\n", (5900, 5966), True, 'import tkinter as tk\n'), ((8352, 8383), 'tkinter.font.Font', 'Font', ([], {'family': '"""Calibri"""', 'size': '(12)'}), "(family='Calibri', size=12)\n", (8356, 8383), False, 'from tkinter.font import Font\n'), ((8413, 8457), 'tkinter.Frame', 'tk.Frame', (['self.frame'], {'bd': '"""3"""', 'padx': '(3)', 'pady': '(3)'}), "(self.frame, bd='3', padx=3, pady=3)\n", (8421, 8457), True, 'import tkinter as tk\n'), ((8486, 8581), 'tkinter.Button', 'tk.Button', (['self.frame_button'], {'text': '"""Compare"""', 'font': 'submit_font', 'command': 'self.start_clicked'}), "(self.frame_button, text='Compare', font=submit_font, command=self\n .start_clicked)\n", (8495, 8581), True, 'import tkinter as tk\n'), ((8606, 8696), 'tkinter.Button', 'tk.Button', (['self.frame_button'], {'text': '"""Cancel"""', 'font': 'submit_font', 'command': 'master.destroy'}), "(self.frame_button, text='Cancel', font=submit_font, command=\n master.destroy)\n", (8615, 8696), True, 'import tkinter as tk\n'), ((12880, 12921), 'tkinter.messagebox.showinfo', 'showinfo', (['"""CAVES"""', '"""Comparison Complete!"""'], {}), "('CAVES', 'Comparison Complete!')\n", (12888, 12921), False, 'from tkinter.messagebox import showinfo\n'), ((12968, 13057), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {'title': '"""Select a File"""', 'filetypes': "[('CSV files', '*.csv')]"}), "(title='Select a File', filetypes=[('CSV files',\n '*.csv')])\n", (12994, 13057), False, 'from tkinter import filedialog\n'), ((13185, 13274), 'tkinter.filedialog.askopenfilename', 
'filedialog.askopenfilename', ([], {'title': '"""Select a File"""', 'filetypes': "[('CSV files', '*.csv')]"}), "(title='Select a File', filetypes=[('CSV files',\n '*.csv')])\n", (13211, 13274), False, 'from tkinter import filedialog\n'), ((13408, 13497), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {'title': '"""Select a File"""', 'filetypes': "[('CSV files', '*.csv')]"}), "(title='Select a File', filetypes=[('CSV files',\n '*.csv')])\n", (13434, 13497), False, 'from tkinter import filedialog\n'), ((13639, 13728), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {'title': '"""Select a File"""', 'filetypes': "[('CSV files', '*.csv')]"}), "(title='Select a File', filetypes=[('CSV files',\n '*.csv')])\n", (13665, 13728), False, 'from tkinter import filedialog\n'), ((14120, 14191), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {'title': '"""Select a File"""', 'filetypes': 'fasta_exts'}), "(title='Select a File', filetypes=fasta_exts)\n", (14146, 14191), False, 'from tkinter import filedialog\n'), ((14414, 14539), 'tkinter.filedialog.asksaveasfilename', 'filedialog.asksaveasfilename', ([], {'initialfile': "('results_' + time)", 'title': '"""Results File"""', 'filetypes': "[('Excel files', '*.xlsx')]"}), "(initialfile='results_' + time, title=\n 'Results File', filetypes=[('Excel files', '*.xlsx')])\n", (14442, 14539), False, 'from tkinter import filedialog\n'), ((19064, 19086), 'os.path.exists', 'path.exists', (['file_path'], {}), '(file_path)\n', (19075, 19086), False, 'from os import path\n'), ((19973, 19995), 'os.path.exists', 'path.exists', (['file_path'], {}), '(file_path)\n', (19984, 19995), False, 'from os import path\n'), ((20889, 20911), 'os.path.exists', 'path.exists', (['file_path'], {}), '(file_path)\n', (20900, 20911), False, 'from os import path\n'), ((21029, 21130), 'pandas.read_csv', 'pd.read_csv', (['file_path'], {'index_col': '(False)', 'skiprows': '(1)', 'usecols': "{'Description', 'Starting Position'}"}), "(file_path, index_col=False, skiprows=1, usecols={'Description',\n 'Starting Position'})\n", (21040, 21130), True, 'import pandas as pd\n'), ((21347, 21369), 'os.path.exists', 'path.exists', (['file_path'], {}), '(file_path)\n', (21358, 21369), False, 'from os import path\n'), ((24009, 24033), 'os.path.splitext', 'path.splitext', (['file_path'], {}), '(file_path)\n', (24022, 24033), False, 'from os import path\n'), ((30020, 30209), 'pandas.DataFrame', 'pd.DataFrame', (["{'Origin': obj.origin_file_one, 'Peptide': obj.peptide_one, 'Start': obj.\n start_one, 'End': obj.end_one, 'Length': obj.length_one,\n 'Mutated Positions': obj.mutated_pos}"], {}), "({'Origin': obj.origin_file_one, 'Peptide': obj.peptide_one,\n 'Start': obj.start_one, 'End': obj.end_one, 'Length': obj.length_one,\n 'Mutated Positions': obj.mutated_pos})\n", (30032, 30209), True, 'import pandas as pd\n'), ((30256, 30403), 'pandas.DataFrame', 'pd.DataFrame', (["{'Origin': obj.origin_file_one, 'Peptide': obj.peptide_one, 'Start': obj.\n start_one, 'End': obj.end_one, 'Length': obj.length_one}"], {}), "({'Origin': obj.origin_file_one, 'Peptide': obj.peptide_one,\n 'Start': obj.start_one, 'End': obj.end_one, 'Length': obj.length_one})\n", (30268, 30403), True, 'import pandas as pd\n'), ((22356, 22727), 'tkinter.messagebox.showwarning', 'showwarning', (['"""WARNING"""', '"""CAVES has detected a large amount of successive gap characters in your alignment file. 
Epitopes predicted from highly dissimilar sequences are unlikely to produce biologically relevant matches when compared due to inherent differences in the amino acid composition. \n\nCAVES will still run but we caution against using these results."""'], {}), '(\'WARNING\',\n """CAVES has detected a large amount of successive gap characters in your alignment file. Epitopes predicted from highly dissimilar sequences are unlikely to produce biologically relevant matches when compared due to inherent differences in the amino acid composition. \n\nCAVES will still run but we caution against using these results."""\n )\n', (22367, 22727), False, 'from tkinter.messagebox import showwarning\n'), ((24117, 24142), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['file_path'], {}), '(file_path)\n', (24131, 24142), True, 'import pandas as pd\n'), ((24183, 24217), 'pandas.ExcelWriter', 'pd.ExcelWriter', (["(filename + '.xlsx')"], {}), "(filename + '.xlsx')\n", (24197, 24217), True, 'import pandas as pd\n'), ((24475, 24518), 'pandas.ExcelWriter', 'pd.ExcelWriter', (["('results_' + time + '.xlsx')"], {}), "('results_' + time + '.xlsx')\n", (24489, 24518), True, 'import pandas as pd\n'), ((14352, 14366), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (14364, 14366), False, 'from datetime import datetime\n'), ((19428, 19497), 'pandas.read_csv', 'pd.read_csv', (['file_path'], {'index_col': '(False)', 'usecols': '{start_col, pep_col}'}), '(file_path, index_col=False, usecols={start_col, pep_col})\n', (19439, 19497), True, 'import pandas as pd\n'), ((20341, 20410), 'pandas.read_csv', 'pd.read_csv', (['file_path'], {'index_col': '(False)', 'usecols': '{start_col, pep_col}'}), '(file_path, index_col=False, usecols={start_col, pep_col})\n', (20352, 20410), True, 'import pandas as pd\n'), ((24245, 24259), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (24257, 24259), False, 'from datetime import datetime\n')] |
from django import forms
from django.db import models
from django.utils.translation import gettext as _
from django_filters import FilterSet, DateFilter
from django_filters import BooleanFilter
from irekua_database.models import Annotation
class Filter(FilterSet):
is_own = BooleanFilter(
method='user_owns_object',
label=_('Mine'),
widget=forms.CheckboxInput())
class Meta:
model = Annotation
fields = {
'item': ['exact'],
'annotation_type': ['exact'],
'event_type': ['exact'],
'created_by__username': ['icontains'],
'created_by__first_name': ['icontains'],
'created_by__last_name': ['icontains'],
'certainty' : ['gt', 'lt'],
'created_on': ['gt', 'lt'],
}
filter_overrides = {
models.DateTimeField: {
'filter_class': DateFilter,
'extra': lambda f: {
'widget': forms.DateInput(attrs={'class': 'datepicker'})
}
}
}
def user_owns_object(self, queryset, name, value):
if value:
user = self.request.user
return queryset.filter(created_by=user)
return queryset
search_fields = (
'annotation_type',
'event_type',
'created_by__username',
'created_on'
)
ordering_fields = (
('created_on', _('added on')),
('annotation_type', _('annotation type')),
('event_type', _('event type')),
)
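
# Usage sketch (added for illustration; not part of the original module).
# A FilterSet like this is normally instantiated inside a view; the view and
# template names below are placeholders, and `request` must be passed so that
# user_owns_object() can read request.user.
#
#     def annotation_list(request):
#         annotation_filter = Filter(
#             request.GET,
#             queryset=Annotation.objects.all(),
#             request=request,
#         )
#         return render(request, "annotations/list.html",
#                       {"filter": annotation_filter})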
| [
"django.forms.DateInput",
"django.forms.CheckboxInput",
"django.utils.translation.gettext"
] | [((1415, 1428), 'django.utils.translation.gettext', '_', (['"""added on"""'], {}), "('added on')\n", (1416, 1428), True, 'from django.utils.translation import gettext as _\n'), ((1455, 1475), 'django.utils.translation.gettext', '_', (['"""annotation type"""'], {}), "('annotation type')\n", (1456, 1475), True, 'from django.utils.translation import gettext as _\n'), ((1497, 1512), 'django.utils.translation.gettext', '_', (['"""event type"""'], {}), "('event type')\n", (1498, 1512), True, 'from django.utils.translation import gettext as _\n'), ((344, 353), 'django.utils.translation.gettext', '_', (['"""Mine"""'], {}), "('Mine')\n", (345, 353), True, 'from django.utils.translation import gettext as _\n'), ((370, 391), 'django.forms.CheckboxInput', 'forms.CheckboxInput', ([], {}), '()\n', (389, 391), False, 'from django import forms\n'), ((992, 1038), 'django.forms.DateInput', 'forms.DateInput', ([], {'attrs': "{'class': 'datepicker'}"}), "(attrs={'class': 'datepicker'})\n", (1007, 1038), False, 'from django import forms\n')] |
from .message import Message
from aioimaplib import aioimaplib
import asyncio
from asyncio import CancelledError, TimeoutError
import logging
import signal
# import ssl
import threading
import traceback
from typing import Any, Callable
HANDLED_SIGNALS = (
signal.SIGINT, # Unix signal 2. Sent by Ctrl+C.
signal.SIGTERM, # Unix signal 15. Sent by `kill <pid>`.
)
RECEIVER_STOPPED = "RECEIVER_STOPPED"
class Receiver(object):
def __init__(self):
self.imap_client_lock = asyncio.Lock()
self.should_exit = asyncio.Event()
self.exit_event = asyncio.Event()
async def run(
self,
host: str,
user: str,
password: str,
callback: Callable[[Message], Any],
mailbox: str = "INBOX",
install_signal_handlers: bool = True,
):
"""
Start running the main receiver loop.
"""
# create the imap client
self.imap_client = aioimaplib.IMAP4_SSL(host=host)
# callback for when connection is lost
def conn_lost_cb(exc):
loop = self.imap_client.protocol.loop
loop.create_task(self.reconnect())
self.imap_client.protocol.conn_lost_cb = conn_lost_cb
# signal handlers
if install_signal_handlers:
logging.debug("Receiver:run: Installing signal handlers")
self.install_signal_handlers()
logging.debug("Receiver:run: Clear exit and should_exit events")
self.exit_event.clear()
self.should_exit.clear()
self._task_wfnm = None
while not self.should_exit.is_set():
if await self.login(user, password):
# start waiting for new messages
try:
self._task_wfnm = asyncio.create_task(
self.wait_for_new_message(callback, mailbox))
await self._task_wfnm
except Exception as e:
if isinstance(e, CancelledError):
logging.debug("Receiver:run: wait_for_new_message task cancelled")
else:
logging.error(traceback.format_exc())
logging.debug("Receiver:run: Set should_exit event")
self.should_exit.set()
# send a receiver stopped
# message to the callback
logging.info("Receiver:run: Called callback with RECEIVER_STOPPED message")
try:
callback(RECEIVER_STOPPED)
except:
logging.error(traceback.format_exc())
# logout
try:
await self.logout()
except:
logging.error(traceback.format_exc())
logging.debug("Receiver:run: Set should_exit event")
self.should_exit.set()
if self.should_exit.is_set():
logging.info("Receiver:run: Graceful shutdown")
else:
logging.info("Receiver:run: Retrying")
logging.debug("Receiver:run: Set exit event")
self.exit_event.set()
async def login(self, user: str, password: str):
"""
Login to the IMAP server.
"""
try:
logging.debug("Receiver:login: Waiting for imap client lock")
async with self.imap_client_lock:
logging.debug("Receiver:login: Obtained imap client lock")
await self.imap_client.wait_hello_from_server()
response = await self.imap_client.login(user, password)
if response.result != "OK":
raise RuntimeError("Login failed.")
logging.info("Receiver:login: Logged in as {}".format(user))
except:
logging.error(traceback.format_exc())
return False
return True
async def change_mailbox(self, mailbox: str):
"""
Switch to another mailbox.
"""
if not (
isinstance(mailbox, str)
and len(mailbox) > 0
):
raise ValueError(
"Invalid input. `mailbox` must"
" be a non-empty string.")
# add double-quotes around mailbox name if it contains
# spaces
mailbox = f'"{mailbox}"' if " " in mailbox else mailbox
logging.debug("Receiver:change_mailbox: Waiting for imap client lock")
async with self.imap_client_lock:
logging.debug("Receiver:change_mailbox: Obtained imap client lock")
response = await self.imap_client.select(mailbox=mailbox)
if response.result != "OK":
raise RuntimeError(
f"Selecting mailbox '{mailbox}' failed with status '{response.result}'.")
logging.info(f"Receiver:change_mailbox: Selected mailbox '{mailbox}'")
self.current_mailbox = mailbox
return response
async def search_unseen(self):
"""
Get IDs of unseen messages in the current mailbox.
URL: https://tools.ietf.org/html/rfc3501#section-6.4.4
"""
unseen_ids = []
status, response = await self.imap_client.search("(UNSEEN)", charset=None)
if status == "OK":
unseen_ids.extend(response[0].split())
logging.info(f"Receiver:search_unseen: Number of unseen messages: {len(unseen_ids)}")
else:
logging.error(f"Receiver:search_unseen: Search for unseen messages completed with status '{status}'")
return unseen_ids
async def wait_for_new_message(
self, callback: Callable[[Message], Any], mailbox: str = "INBOX",
):
"""
Receiver infinite loop waiting for new messages.
"""
# select the mailbox
await self.change_mailbox(mailbox)
# if new messages are available, fetch
# them and let the callback handle it
for id in await self.search_unseen():
response = await self.imap_client.fetch(
str(id), "(RFC822)")
if len(response.lines) > 1:
try:
callback(Message(response.lines[1]))
except:
logging.error(traceback.format_exc())
while True:
logging.debug("Receiver:wait_for_new_message: Waiting for imap client lock")
async with self.imap_client_lock:
logging.debug("Receiver:wait_for_new_message: Obtained imap client lock")
# start IDLE waiting
# idle queue must be empty, otherwise we get race
# conditions between idle command status update
# and unsolicited server messages
if (
(not self.imap_client.has_pending_idle())
and self.imap_client.protocol.idle_queue.empty()
):
logging.debug("Receiver:wait_for_new_message: Start IDLE waiting")
self._idle = await self.imap_client.idle_start()
# wait for status update
msg = await self.imap_client.wait_server_push()
logging.debug(f"Receiver:wait_for_new_message: Received IDLE message: {msg}")
# send IDLE done to server; this has to happen
# before search or fetch or any other command
# for some reason.
# https://tools.ietf.org/html/rfc2177
if self.imap_client.has_pending_idle():
logging.debug("Receiver:wait_for_new_message: Send IDLE done")
self.imap_client.idle_done()
await asyncio.wait_for(self._idle, 10)
# https://tools.ietf.org/html/rfc3501#section-7.3.1
# EXISTS response occurs when size of the mailbox changes
if isinstance(msg, list) and any("EXISTS" in m for m in msg):
logging.debug("Receiver:wait_for_new_message: Mailbox size changed")
# if new messages are available, fetch
# them and let the callback handle it
for id in await self.search_unseen():
response = await self.imap_client.fetch(
str(id), "(RFC822)")
if len(response.lines) > 1:
try:
callback(Message(response.lines[1]))
except:
logging.error(traceback.format_exc())
logging.debug("Receiver:wait_for_new_message: Loop complete")
async def logout(self):
"""
Logout from the IMAP server.
"""
valid_states = aioimaplib.Commands.get('LOGOUT').valid_states
logging.debug("Receiver:logout: Waiting for lock")
async with self.imap_client_lock:
logging.debug("Receiver:logout: Obtained lock")
if self.imap_client.protocol.state in valid_states:
if self.imap_client.has_pending_idle():
# send IDLE done message to server
logging.debug("Receiver:logout: Send IDLE done")
self.imap_client.idle_done()
try:
if hasattr(self, '_idle') and self._idle is not None:
await asyncio.wait_for(self._idle, 10)
except TimeoutError:
logging.error(traceback.format_exc())
try:
await self.imap_client.logout()
logging.info("Receiver:logout: Logged out")
except TimeoutError:
logging.error(traceback.format_exc())
else:
logging.debug(f"Receiver:logout: Invalid state '{self.imap_client.protocol.state}'")
def install_signal_handlers(self):
"""
Install signal handlers to shut down
the receiver main loop when SIGINT / SIGTERM
signals are received.
"""
if threading.current_thread() is not threading.main_thread():
# Signals can only be listened to from the main thread.
return
loop = asyncio.get_running_loop()
try:
for sig in HANDLED_SIGNALS:
loop.add_signal_handler(sig, self.handle_exit, sig, None)
except NotImplementedError:
# Windows
for sig in HANDLED_SIGNALS:
signal.signal(sig, self.handle_exit)
def handle_exit(self, sig, frame):
"""
Handle exiting the receiver main loop.
"""
if not self.should_exit.is_set():
logging.debug("Receiver:handle_exit: Set should_exit event")
self.should_exit.set()
# cancel the wait_for_new_message task
if hasattr(self, '_task_wfnm') and self._task_wfnm is not None:
self._task_wfnm.cancel()
self._task_wfnm = None
async def reconnect(self):
# cancel the wait_for_new_message task
if hasattr(self, '_task_wfnm') and self._task_wfnm is not None:
logging.debug("Receiver:reconnect: Cancel wait_for_new_message task")
self._task_wfnm.cancel()
self._task_wfnm = None
# give some time for the consequential actions
# of cancelling the task to run
logging.debug("Receiver:reconnect: Sleep")
await asyncio.sleep(1)
if not self.should_exit.is_set():
# try reconnecting to the server every 5 seconds
logging.debug("Receiver:reconnect: Waiting for lock")
async with self.imap_client_lock:
logging.debug("Receiver:reconnect: Obtained lock")
while True:
try:
conn_lost_cb = self.imap_client.protocol.conn_lost_cb
self.imap_client = aioimaplib.IMAP4_SSL(
host=self.imap_client.host,
port=self.imap_client.port,
timeout=self.imap_client.timeout)
self.imap_client.protocol.conn_lost_cb = conn_lost_cb
logging.info("Receiver:reconnect: Connection recreated")
break
except OSError:
logging.error(traceback.format_exc())
await asyncio.sleep(5)
except:
logging.error(traceback.format_exc())
self.should_exit.set()
break
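
# Usage sketch (added for illustration; not part of the original module).  The
# host, credentials and callback below are placeholders: run() drives the IMAP
# IDLE loop and hands every new Message (or RECEIVER_STOPPED on shutdown) to
# the callback.
if __name__ == "__main__":
    def _print_message(msg):
        print("received:", msg)

    asyncio.run(Receiver().run(
        host="imap.example.com",
        user="user@example.com",
        password="secret",
        callback=_print_message,
    ))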
| [
"traceback.format_exc",
"signal.signal",
"logging.debug",
"threading.current_thread",
"asyncio.sleep",
"aioimaplib.aioimaplib.Commands.get",
"asyncio.Lock",
"aioimaplib.aioimaplib.IMAP4_SSL",
"asyncio.Event",
"threading.main_thread",
"asyncio.wait_for",
"logging.error",
"logging.info",
"asyncio.get_running_loop"
] | [((495, 509), 'asyncio.Lock', 'asyncio.Lock', ([], {}), '()\n', (507, 509), False, 'import asyncio\n'), ((537, 552), 'asyncio.Event', 'asyncio.Event', ([], {}), '()\n', (550, 552), False, 'import asyncio\n'), ((579, 594), 'asyncio.Event', 'asyncio.Event', ([], {}), '()\n', (592, 594), False, 'import asyncio\n'), ((949, 980), 'aioimaplib.aioimaplib.IMAP4_SSL', 'aioimaplib.IMAP4_SSL', ([], {'host': 'host'}), '(host=host)\n', (969, 980), False, 'from aioimaplib import aioimaplib\n'), ((1404, 1468), 'logging.debug', 'logging.debug', (['"""Receiver:run: Clear exit and should_exit events"""'], {}), "('Receiver:run: Clear exit and should_exit events')\n", (1417, 1468), False, 'import logging\n'), ((3100, 3145), 'logging.debug', 'logging.debug', (['"""Receiver:run: Set exit event"""'], {}), "('Receiver:run: Set exit event')\n", (3113, 3145), False, 'import logging\n'), ((4395, 4465), 'logging.debug', 'logging.debug', (['"""Receiver:change_mailbox: Waiting for imap client lock"""'], {}), "('Receiver:change_mailbox: Waiting for imap client lock')\n", (4408, 4465), False, 'import logging\n'), ((4827, 4897), 'logging.info', 'logging.info', (['f"""Receiver:change_mailbox: Selected mailbox \'{mailbox}\'"""'], {}), '(f"Receiver:change_mailbox: Selected mailbox \'{mailbox}\'")\n', (4839, 4897), False, 'import logging\n'), ((8840, 8890), 'logging.debug', 'logging.debug', (['"""Receiver:logout: Waiting for lock"""'], {}), "('Receiver:logout: Waiting for lock')\n", (8853, 8890), False, 'import logging\n'), ((10279, 10305), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (10303, 10305), False, 'import asyncio\n'), ((1294, 1351), 'logging.debug', 'logging.debug', (['"""Receiver:run: Installing signal handlers"""'], {}), "('Receiver:run: Installing signal handlers')\n", (1307, 1351), False, 'import logging\n'), ((3313, 3374), 'logging.debug', 'logging.debug', (['"""Receiver:login: Waiting for imap client lock"""'], {}), "('Receiver:login: Waiting for imap client lock')\n", (3326, 3374), False, 'import logging\n'), ((4520, 4587), 'logging.debug', 'logging.debug', (['"""Receiver:change_mailbox: Obtained imap client lock"""'], {}), "('Receiver:change_mailbox: Obtained imap client lock')\n", (4533, 4587), False, 'import logging\n'), ((5453, 5564), 'logging.error', 'logging.error', (['f"""Receiver:search_unseen: Search for unseen messages completed with status \'{status}\'"""'], {}), '(\n f"Receiver:search_unseen: Search for unseen messages completed with status \'{status}\'"\n )\n', (5466, 5564), False, 'import logging\n'), ((6315, 6391), 'logging.debug', 'logging.debug', (['"""Receiver:wait_for_new_message: Waiting for imap client lock"""'], {}), "('Receiver:wait_for_new_message: Waiting for imap client lock')\n", (6328, 6391), False, 'import logging\n'), ((8609, 8670), 'logging.debug', 'logging.debug', (['"""Receiver:wait_for_new_message: Loop complete"""'], {}), "('Receiver:wait_for_new_message: Loop complete')\n", (8622, 8670), False, 'import logging\n'), ((8784, 8817), 'aioimaplib.aioimaplib.Commands.get', 'aioimaplib.Commands.get', (['"""LOGOUT"""'], {}), "('LOGOUT')\n", (8807, 8817), False, 'from aioimaplib import aioimaplib\n'), ((8945, 8992), 'logging.debug', 'logging.debug', (['"""Receiver:logout: Obtained lock"""'], {}), "('Receiver:logout: Obtained lock')\n", (8958, 8992), False, 'import logging\n'), ((10117, 10143), 'threading.current_thread', 'threading.current_thread', ([], {}), '()\n', (10141, 10143), False, 'import threading\n'), ((10151, 10174), 
'threading.main_thread', 'threading.main_thread', ([], {}), '()\n', (10172, 10174), False, 'import threading\n'), ((10750, 10810), 'logging.debug', 'logging.debug', (['"""Receiver:handle_exit: Set should_exit event"""'], {}), "('Receiver:handle_exit: Set should_exit event')\n", (10763, 10810), False, 'import logging\n'), ((11217, 11286), 'logging.debug', 'logging.debug', (['"""Receiver:reconnect: Cancel wait_for_new_message task"""'], {}), "('Receiver:reconnect: Cancel wait_for_new_message task')\n", (11230, 11286), False, 'import logging\n'), ((11475, 11517), 'logging.debug', 'logging.debug', (['"""Receiver:reconnect: Sleep"""'], {}), "('Receiver:reconnect: Sleep')\n", (11488, 11517), False, 'import logging\n'), ((11669, 11722), 'logging.debug', 'logging.debug', (['"""Receiver:reconnect: Waiting for lock"""'], {}), "('Receiver:reconnect: Waiting for lock')\n", (11682, 11722), False, 'import logging\n'), ((2400, 2475), 'logging.info', 'logging.info', (['"""Receiver:run: Called callback with RECEIVER_STOPPED message"""'], {}), "('Receiver:run: Called callback with RECEIVER_STOPPED message')\n", (2412, 2475), False, 'import logging\n'), ((2970, 3017), 'logging.info', 'logging.info', (['"""Receiver:run: Graceful shutdown"""'], {}), "('Receiver:run: Graceful shutdown')\n", (2982, 3017), False, 'import logging\n'), ((3052, 3090), 'logging.info', 'logging.info', (['"""Receiver:run: Retrying"""'], {}), "('Receiver:run: Retrying')\n", (3064, 3090), False, 'import logging\n'), ((3437, 3495), 'logging.debug', 'logging.debug', (['"""Receiver:login: Obtained imap client lock"""'], {}), "('Receiver:login: Obtained imap client lock')\n", (3450, 3495), False, 'import logging\n'), ((6455, 6528), 'logging.debug', 'logging.debug', (['"""Receiver:wait_for_new_message: Obtained imap client lock"""'], {}), "('Receiver:wait_for_new_message: Obtained imap client lock')\n", (6468, 6528), False, 'import logging\n'), ((7196, 7273), 'logging.debug', 'logging.debug', (['f"""Receiver:wait_for_new_message: Received IDLE message: {msg}"""'], {}), "(f'Receiver:wait_for_new_message: Received IDLE message: {msg}')\n", (7209, 7273), False, 'import logging\n'), ((9829, 9918), 'logging.debug', 'logging.debug', (['f"""Receiver:logout: Invalid state \'{self.imap_client.protocol.state}\'"""'], {}), '(\n f"Receiver:logout: Invalid state \'{self.imap_client.protocol.state}\'")\n', (9842, 9918), False, 'import logging\n'), ((11536, 11552), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (11549, 11552), False, 'import asyncio\n'), ((11785, 11835), 'logging.debug', 'logging.debug', (['"""Receiver:reconnect: Obtained lock"""'], {}), "('Receiver:reconnect: Obtained lock')\n", (11798, 11835), False, 'import logging\n'), ((3842, 3864), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (3862, 3864), False, 'import traceback\n'), ((6938, 7004), 'logging.debug', 'logging.debug', (['"""Receiver:wait_for_new_message: Start IDLE waiting"""'], {}), "('Receiver:wait_for_new_message: Start IDLE waiting')\n", (6951, 7004), False, 'import logging\n'), ((7565, 7627), 'logging.debug', 'logging.debug', (['"""Receiver:wait_for_new_message: Send IDLE done"""'], {}), "('Receiver:wait_for_new_message: Send IDLE done')\n", (7578, 7627), False, 'import logging\n'), ((7977, 8045), 'logging.debug', 'logging.debug', (['"""Receiver:wait_for_new_message: Mailbox size changed"""'], {}), "('Receiver:wait_for_new_message: Mailbox size changed')\n", (7990, 8045), False, 'import logging\n'), ((9189, 9237), 'logging.debug', 'logging.debug', 
(['"""Receiver:logout: Send IDLE done"""'], {}), "('Receiver:logout: Send IDLE done')\n", (9202, 9237), False, 'import logging\n'), ((9655, 9698), 'logging.info', 'logging.info', (['"""Receiver:logout: Logged out"""'], {}), "('Receiver:logout: Logged out')\n", (9667, 9698), False, 'import logging\n'), ((10548, 10584), 'signal.signal', 'signal.signal', (['sig', 'self.handle_exit'], {}), '(sig, self.handle_exit)\n', (10561, 10584), False, 'import signal\n'), ((2815, 2867), 'logging.debug', 'logging.debug', (['"""Receiver:run: Set should_exit event"""'], {}), "('Receiver:run: Set should_exit event')\n", (2828, 2867), False, 'import logging\n'), ((7703, 7735), 'asyncio.wait_for', 'asyncio.wait_for', (['self._idle', '(10)'], {}), '(self._idle, 10)\n', (7719, 7735), False, 'import asyncio\n'), ((12011, 12125), 'aioimaplib.aioimaplib.IMAP4_SSL', 'aioimaplib.IMAP4_SSL', ([], {'host': 'self.imap_client.host', 'port': 'self.imap_client.port', 'timeout': 'self.imap_client.timeout'}), '(host=self.imap_client.host, port=self.imap_client.port,\n timeout=self.imap_client.timeout)\n', (12031, 12125), False, 'from aioimaplib import aioimaplib\n'), ((12310, 12366), 'logging.info', 'logging.info', (['"""Receiver:reconnect: Connection recreated"""'], {}), "('Receiver:reconnect: Connection recreated')\n", (12322, 12366), False, 'import logging\n'), ((2020, 2086), 'logging.debug', 'logging.debug', (['"""Receiver:run: wait_for_new_message task cancelled"""'], {}), "('Receiver:run: wait_for_new_message task cancelled')\n", (2033, 2086), False, 'import logging\n'), ((2199, 2251), 'logging.debug', 'logging.debug', (['"""Receiver:run: Set should_exit event"""'], {}), "('Receiver:run: Set should_exit event')\n", (2212, 2251), False, 'import logging\n'), ((2602, 2624), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (2622, 2624), False, 'import traceback\n'), ((2771, 2793), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (2791, 2793), False, 'import traceback\n'), ((6258, 6280), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (6278, 6280), False, 'import traceback\n'), ((9770, 9792), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (9790, 9792), False, 'import traceback\n'), ((2151, 2173), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (2171, 2173), False, 'import traceback\n'), ((9425, 9457), 'asyncio.wait_for', 'asyncio.wait_for', (['self._idle', '(10)'], {}), '(self._idle, 10)\n', (9441, 9457), False, 'import asyncio\n'), ((9537, 9559), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (9557, 9559), False, 'import traceback\n'), ((12472, 12494), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (12492, 12494), False, 'import traceback\n'), ((12526, 12542), 'asyncio.sleep', 'asyncio.sleep', (['(5)'], {}), '(5)\n', (12539, 12542), False, 'import asyncio\n'), ((12610, 12632), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (12630, 12632), False, 'import traceback\n'), ((8572, 8594), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (8592, 8594), False, 'import traceback\n')] |
""" Module classes:
SignupHandler - Handler for the signup page.
"""
from base import BaseHandler
from models.user import UserAccount
import validation
##############################################################################
class SignupHandler(BaseHandler):
"""Handler for the signup page."""
def get(self):
if self.logged_in():
self.render("error.html", account=self.account)
else:
self.render("signup.html")
def post(self):
form_ok = True
# Retrieve user data and strip white space from front and back:
self.username = self.request.get("username").strip()
self.password = self.request.get("password").strip()
self.verify = self.request.get("verify").strip()
self.email = self.request.get("email").strip()
display_data = dict(username = self.username,
email = self.email)
# Check username:
if not validation.valid_username(self.username):
display_data["username_error"] = "That's not a valid username."
form_ok = False
# Check password:
if not validation.valid_password(self.password):
display_data["password_error"] = "That's not a valid password."
form_ok = False
elif self.password != self.verify:
display_data["verify_error"] = "Your passwords didn't match."
form_ok = False
# Only check email if there is an email:
if self.email != "":
if not validation.valid_email(self.email):
display_data["email_error"] = "That's not a valid email."
form_ok = False
if form_ok:
self.done(display_data)
else:
# Re-render, keeping the username and email:
self.render("signup.html", **display_data)
def done(self, display_data):
user = UserAccount.by_username(self.username)
if user:
display_data["username_error"] = "That user already exists."
self.render("signup.html", **display_data)
else:
# Create new datastore entity, set secure Cookie for this
# user, and redirect to the user's home page.
user = UserAccount.create(self.username,
self.password,
self.email)
self.set_secure_userid_cookie(user)
self.redirect("/home/%s" % user.username)
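
# Routing sketch (added for illustration; not part of this module).  The
# framework details are assumptions, not taken from the source: a handler like
# this is typically mounted on a /signup URL in the WSGI application, e.g.
#
#     app = webapp2.WSGIApplication([('/signup', SignupHandler)], debug=True)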
| [
"models.user.UserAccount.by_username",
"validation.valid_email",
"validation.valid_username",
"validation.valid_password",
"models.user.UserAccount.create"
] | [((1920, 1958), 'models.user.UserAccount.by_username', 'UserAccount.by_username', (['self.username'], {}), '(self.username)\n', (1943, 1958), False, 'from models.user import UserAccount\n'), ((968, 1008), 'validation.valid_username', 'validation.valid_username', (['self.username'], {}), '(self.username)\n', (993, 1008), False, 'import validation\n'), ((1156, 1196), 'validation.valid_password', 'validation.valid_password', (['self.password'], {}), '(self.password)\n', (1181, 1196), False, 'import validation\n'), ((2266, 2326), 'models.user.UserAccount.create', 'UserAccount.create', (['self.username', 'self.password', 'self.email'], {}), '(self.username, self.password, self.email)\n', (2284, 2326), False, 'from models.user import UserAccount\n'), ((1545, 1579), 'validation.valid_email', 'validation.valid_email', (['self.email'], {}), '(self.email)\n', (1567, 1579), False, 'import validation\n')] |
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2012 <NAME>, <NAME>, <NAME>,
# <NAME>
# Copyright (c) 2009-2019 <NAME>, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
#
# Python Interpreter
# 95% of the code from 'Bruce: the presentation tool' by <NAME>
# http://code.google.com/p/bruce-tpt/
#
#
from __future__ import division, print_function, unicode_literals
__docformat__ = 'restructuredtext'
import sys
import os
import code
import pyglet
from pyglet import graphics
from pyglet import text
from pyglet.text import caret, document, layout
import cocos
from cocos.director import director
from .base_layers import Layer
from .util_layers import ColorLayer
__all__ = ['PythonInterpreterLayer']
class Output:
def __init__(self, display, realstdout):
self.out = display
self.realstdout = realstdout
self.data = ''
def write(self, data):
self.out(data)
class MyInterpreter(code.InteractiveInterpreter):
def __init__(self, locals, display):
self.write = display
code.InteractiveInterpreter.__init__(self, locals=locals)
def execute(self, input):
old_stdout = sys.stdout
sys.stdout = Output(self.write, old_stdout)
more = self.runsource(input)
sys.stdout = old_stdout
return more
class PythonInterpreterLayer(ColorLayer):
"""Runs an interactive Python interpreter as a child `Layer` of the current `Scene`.
"""
cfg = {'code.font_name': 'Arial',
'code.font_size': 12,
'code.color': (255, 255, 255, 255),
'caret.color': (255, 255, 255), }
name = 'py'
prompt = ">>> " #: python prompt
prompt_more = "... " #: python 'more' prompt
doing_more = False
is_event_handler = True #: enable pyglet's events
def __init__(self):
super(PythonInterpreterLayer, self).__init__(32, 32, 32, 192)
self.content = self.prompt
local_vars = director.interpreter_locals
local_vars["self"] = self
self.interpreter = MyInterpreter(
local_vars, self._write)
self.current_input = []
self.history = ['']
self.history_pos = 0
def on_enter(self):
super(PythonInterpreterLayer, self).on_enter()
vw, vh = cocos.director.director.get_window_size()
# format the code
self.document = document.FormattedDocument(self.content)
self.document.set_style(0, len(self.document.text), {
'font_name': self.cfg['code.font_name'],
'font_size': self.cfg['code.font_size'],
'color': self.cfg['code.color'],
})
self.batch = graphics.Batch()
# generate the document
self.layout = layout.IncrementalTextLayout(self.document,
vw, vh, multiline=True,
batch=self.batch)
self.layout.anchor_y = 'top'
self.caret = caret.Caret(self.layout, color=self.cfg['caret.color'])
self.caret.on_activate()
self.on_resize(vw, vh)
self.start_of_line = len(self.document.text)
def on_resize(self, x, y):
vw, vh = director.get_window_size()
self.layout.begin_update()
self.layout.height = vh
self.layout.x = 2
self.layout.width = vw - 4
self.layout.y = vh
self.layout.end_update()
# XXX: hack
x, y = director.window.width, director.window.height
self.layout.top_group._scissor_width = x - 4
self.caret.position = len(self.document.text)
def on_exit(self):
super(PythonInterpreterLayer, self).on_exit()
self.content = self.document.text
self.document = None
self.layout = None
self.batch = None
self.caret = None
def on_key_press(self, symbol, modifiers):
if symbol == pyglet.window.key.TAB:
return self.caret.on_text('\t')
elif symbol in (pyglet.window.key.ENTER, pyglet.window.key.NUM_ENTER):
# write the newline
self._write('\n')
line = self.document.text[self.start_of_line:]
if line.strip() == 'help()':
                line = 'print("help() not supported, sorry!")'
self.current_input.append(line)
self.history_pos = len(self.history)
if line.strip():
self.history[self.history_pos-1] = line.strip()
self.history.append('')
more = False
if not self.doing_more:
more = self.interpreter.execute('\n'.join(self.current_input))
if self.doing_more and not line.strip():
self.doing_more = False
self.interpreter.execute('\n'.join(self.current_input))
more = more or self.doing_more
if not more:
self.current_input = []
self._write(self.prompt)
else:
self.doing_more = True
self._write(self.prompt_more)
self.start_of_line = len(self.document.text)
self.caret.position = len(self.document.text)
elif symbol == pyglet.window.key.SPACE:
pass
else:
return pyglet.event.EVENT_UNHANDLED
return pyglet.event.EVENT_HANDLED
def on_text(self, symbol):
# squash carriage return - we already handle them above
if symbol == '\r':
return pyglet.event.EVENT_HANDLED
self._scroll_to_bottom()
return self.caret.on_text(symbol)
def on_text_motion(self, motion):
at_sol = self.caret.position == self.start_of_line
if motion == pyglet.window.key.MOTION_UP:
# move backward in history, storing the current line of input
# if we're at the very end of time
line = self.document.text[self.start_of_line:]
if self.history_pos == len(self.history)-1:
self.history[self.history_pos] = line
self.history_pos = max(0, self.history_pos-1)
self.document.delete_text(self.start_of_line,
len(self.document.text))
self._write(self.history[self.history_pos])
self.caret.position = len(self.document.text)
elif motion == pyglet.window.key.MOTION_DOWN:
# move forward in the history
self.history_pos = min(len(self.history)-1, self.history_pos+1)
self.document.delete_text(self.start_of_line,
len(self.document.text))
self._write(self.history[self.history_pos])
self.caret.position = len(self.document.text)
elif motion == pyglet.window.key.MOTION_BACKSPACE:
# can't delete the prompt
if not at_sol:
return self.caret.on_text_motion(motion)
elif motion == pyglet.window.key.MOTION_LEFT:
# can't move back beyond start of line
if not at_sol:
return self.caret.on_text_motion(motion)
elif motion == pyglet.window.key.MOTION_PREVIOUS_WORD:
# can't move back word beyond start of line
if not at_sol:
return self.caret.on_text_motion(motion)
else:
return self.caret.on_text_motion(motion)
return pyglet.event.EVENT_HANDLED
def _write(self, s):
self.document.insert_text(len(self.document.text), s, {
'font_name': self.cfg['code.font_name'],
'font_size': self.cfg['code.font_size'],
'color': self.cfg['code.color'],
})
self._scroll_to_bottom()
def _scroll_to_bottom(self):
# on key press always move the view to the bottom of the screen
if self.layout.height < self.layout.content_height:
self.layout.anchor_y = 'bottom'
self.layout.y = 0
self.layout.view_y = 0
if self.caret.position < self.start_of_line:
self.caret.position = len(self.document.text)
def draw(self):
super(PythonInterpreterLayer, self).draw()
self.batch.draw()
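
# Usage sketch (added for illustration; not part of the original module).  The
# layer is normally toggled through the director, but it can also be shown as
# part of a scene explicitly; names below are placeholders.
#
#     from cocos.scene import Scene
#     director.init()
#     director.run(Scene(PythonInterpreterLayer()))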
| [
"code.InteractiveInterpreter.__init__",
"pyglet.graphics.Batch",
"pyglet.text.document.FormattedDocument",
"pyglet.text.layout.IncrementalTextLayout",
"pyglet.text.caret.Caret",
"cocos.director.director.get_window_size"
] | [((2652, 2709), 'code.InteractiveInterpreter.__init__', 'code.InteractiveInterpreter.__init__', (['self'], {'locals': 'locals'}), '(self, locals=locals)\n', (2688, 2709), False, 'import code\n'), ((3906, 3947), 'cocos.director.director.get_window_size', 'cocos.director.director.get_window_size', ([], {}), '()\n', (3945, 3947), False, 'import cocos\n'), ((3999, 4039), 'pyglet.text.document.FormattedDocument', 'document.FormattedDocument', (['self.content'], {}), '(self.content)\n', (4025, 4039), False, 'from pyglet.text import caret, document, layout\n'), ((4286, 4302), 'pyglet.graphics.Batch', 'graphics.Batch', ([], {}), '()\n', (4300, 4302), False, 'from pyglet import graphics\n'), ((4358, 4448), 'pyglet.text.layout.IncrementalTextLayout', 'layout.IncrementalTextLayout', (['self.document', 'vw', 'vh'], {'multiline': '(True)', 'batch': 'self.batch'}), '(self.document, vw, vh, multiline=True, batch=\n self.batch)\n', (4386, 4448), False, 'from pyglet.text import caret, document, layout\n'), ((4605, 4660), 'pyglet.text.caret.Caret', 'caret.Caret', (['self.layout'], {'color': "self.cfg['caret.color']"}), "(self.layout, color=self.cfg['caret.color'])\n", (4616, 4660), False, 'from pyglet.text import caret, document, layout\n'), ((4829, 4855), 'cocos.director.director.get_window_size', 'director.get_window_size', ([], {}), '()\n', (4853, 4855), False, 'from cocos.director import director\n')] |
from hlwtadmin.models import Artist, GigFinderUrl, GigFinder, ConcertAnnouncement, Venue, Location, Organisation, Country, Concert, RelationConcertOrganisation, Location
from django.core.management.base import BaseCommand, CommandError
from pandas import read_excel
from json import load, dump
from codecs import open
class Command(BaseCommand):
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
i = 1
for organisation in Organisation.objects.filter(name__icontains="unknown"):
for rel in RelationConcertOrganisation.objects.filter(organisation=organisation):
concert = rel.concert
for ca in concert.concertannouncements():
if "songkick" in ca.gigfinder.name and ca.is_festival:
raw_venue = ca.raw_venue
venue_organisation = raw_venue.organisation
if venue_organisation and "unknown" not in venue_organisation.name.lower():
line = [str(i), "concert", str(concert), str(concert.pk), "with organisation", str(organisation), str(organisation.pk), "has ca", str(ca), str(ca.pk), "with raw venue", str(raw_venue), str(raw_venue.pk), "which has organisation", str(venue_organisation), str(venue_organisation.pk) if venue_organisation else str(None)]
print("\t".join(line))
i += 1
rel.organisation = venue_organisation
rel.save()
| [
"hlwtadmin.models.RelationConcertOrganisation.objects.filter",
"hlwtadmin.models.Organisation.objects.filter"
] | [((482, 536), 'hlwtadmin.models.Organisation.objects.filter', 'Organisation.objects.filter', ([], {'name__icontains': '"""unknown"""'}), "(name__icontains='unknown')\n", (509, 536), False, 'from hlwtadmin.models import Artist, GigFinderUrl, GigFinder, ConcertAnnouncement, Venue, Location, Organisation, Country, Concert, RelationConcertOrganisation, Location\n'), ((561, 630), 'hlwtadmin.models.RelationConcertOrganisation.objects.filter', 'RelationConcertOrganisation.objects.filter', ([], {'organisation': 'organisation'}), '(organisation=organisation)\n', (603, 630), False, 'from hlwtadmin.models import Artist, GigFinderUrl, GigFinder, ConcertAnnouncement, Venue, Location, Organisation, Country, Concert, RelationConcertOrganisation, Location\n')] |
import networkx as nx
from typing import List
def get_channel_with_minimal_fee_base(subgraph: nx.MultiGraph, source, target):
"""
Get a sub-graph containing exactly two nodes - one is the source and the other is the destination.
    The function calculates the channel with the minimal base-fee and returns it.
:param subgraph: The MultiGraph containing the nodes src and dest.
:param source: The source node.
:param target: The target node.
:return: The channel with the minimal base-fee.
"""
assert set(subgraph.nodes) == {source, target}, \
"BAD USAGE - you should give a graph containing only the two given nodes"
min_fee: float = float('inf')
min_fee_channel_id = None
for node1, node2, channel_data in subgraph.edges(data=True):
if source == channel_data['node1_pub']:
source_i, target_i = 1, 2
elif source == channel_data['node2_pub']:
source_i, target_i = 2, 1
else:
assert False, "WTF? Neither 'source' nor 'target' are in the channel."
# When money is transferred from the source node to the target node, the fee is paid for the target node
# (i.e. that target node keeps some of the money to himself, and passes the rest forwards).
channel_fee: float = channel_data[f'node{target_i}_policy']['fee_base_msat']
if channel_fee < min_fee:
min_fee = channel_fee
min_fee_channel_id = (channel_data[f'node{source_i}_pub'], channel_data[f'node{target_i}_pub'],
channel_data['channel_id'])
assert (min_fee_channel_id is not None) and (min_fee != float('inf')), "ERROR: no channel was chosen."
return min_fee_channel_id
def nodes_list_to_edges(graph: nx.MultiGraph, nodes_list: List) -> List:
"""
Get a graph and a list of nodes - the first node in the list is the source node, and the last is the target node.
The function calculates for every two adjacent nodes the edge with the minimal base-fee
(since there might be multiple edges between two nodes - it's a MultiGraph).
:param graph: The MultiGraph to process.
:param nodes_list: The list of nodes describing a path.
:return: A list of edges in the graph, each one is the edge in the path.
"""
edges_list = list()
for i in range(len(nodes_list) - 1):
node1 = nodes_list[i]
node2 = nodes_list[i + 1]
subgraph = graph.subgraph(nodes=(node1, node2))
min_fee_channel = get_channel_with_minimal_fee_base(subgraph, source=node1, target=node2)
edges_list.append(min_fee_channel)
return edges_list
def get_route(graph: nx.MultiGraph, source, target):
"""
A naive approach for getting the route between the source node and the destination node.
    It first gets the shortest path (fewest hops), and then, for each pair of adjacent nodes on
    that path, picks the channel with the minimal base-fee among the parallel channels.
:param graph: The Graph.
:param source: The source node.
:param target: The target node.
:return: The route chosen from the source to the target.
"""
try:
nodes_list = nx.shortest_path(graph, source, target)
except nx.exception.NetworkXNoPath:
print("Warning: | get_route | no path found between nodes")
return None
edges_list = nodes_list_to_edges(graph, nodes_list)
return edges_list
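
if __name__ == "__main__":
    # Self-contained demo (added for illustration; not part of the original
    # module).  A toy two-channel MultiGraph between placeholder nodes "A" and
    # "B"; the field names mirror what the functions above read
    # (node1_pub / node2_pub / nodeN_policy / channel_id).
    toy_graph = nx.MultiGraph()
    for channel_id, fee_msat in [("chan-1", 1000), ("chan-2", 200)]:
        toy_graph.add_edge(
            "A", "B",
            channel_id=channel_id,
            node1_pub="A", node2_pub="B",
            node1_policy={"fee_base_msat": 500},
            node2_policy={"fee_base_msat": fee_msat},
        )
    # Expected route: the single hop A-B over the cheaper channel "chan-2".
    print(get_route(toy_graph, "A", "B"))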
| [
"networkx.shortest_path"
] | [((3183, 3222), 'networkx.shortest_path', 'nx.shortest_path', (['graph', 'source', 'target'], {}), '(graph, source, target)\n', (3199, 3222), True, 'import networkx as nx\n')] |
from natto import MeCab
from gensim import corpora, matutils
import pandas as pd
import csv
import os
import sys
import time
from sklearn.ensemble import RandomForestClassifier
from sklearn.externals import joblib
class Dict2dense:
def __init__(self,
datapath=None,
dictionary_save_path=None,
no_below=None,
no_above=None,
keep_n=None,
data_number=None,
dict_filter=False):
self.datapath = datapath
self.dictionary_save_path=dictionary_save_path
self.no_below=no_below
self.no_above=no_above
self.keep_n=keep_n
self.data_number=data_number
self.dict_filter=dict_filter
def create_dict(self):
nm = MeCab('-Owakati')
train_raw_data =[]
train_label =[]
for k in range(self.data_number):
csvdata = pd.read_csv(os.path.join(self.datapath,'data{}.csv'.format(k)), header=None)
for i in csvdata[0]:
train_raw_data.append(i)
for i in csvdata[1]:
train_label.append(i)
raw_word_list =[]
with open(os.path.join(self.datapath,'sentences.csv'), 'r') as f:
reader = csv.reader(f)
header = next(reader)
for row in reader:
raw_word_list.append(row)
wakati_by_sentnces_list = []
for i in range(len(raw_word_list)):
wakati_by_sentnces_list.append([n.surface for n in nm.parse(raw_word_list[i][0], as_nodes=True) if n.is_nor()])
full_wakati_words = []
for i in range(len(raw_word_list)):
for j in [n.surface for n in nm.parse(raw_word_list[i][0], as_nodes=True) if n.is_nor()]:
full_wakati_words.append(j)
        print("number of sentences is ", len(raw_word_list))
        print("number of wakati-gaki tokens is ", len(full_wakati_words))
dictionary = corpora.Dictionary(wakati_by_sentnces_list)
print(dictionary)
print()
if self.dict_filter:
            print("filter is on. dictionary will be filtered")
dictionary.filter_extremes(no_below=self.no_below, no_above=self.no_above, keep_n=self.keep_n)
print(dictionary)
else:
            print("filter is off. dictionary will not be filtered")
print()
key_index=[]
for i in dictionary.token2id.keys():
key_index.append(i)
print()
print("part of new dict contents")
print("===========")
print("word:ID")
try:
for i in range(5):
print(key_index[i],":",dictionary.token2id[key_index[i]])
except:
print("number of words is very small")
print("===========")
print()
dictionary.save_as_text(os.path.join(self.dictionary_save_path,'dictionary.txt'))
print("new dict saved at {}".format(os.path.join(self.dictionary_save_path,'dictionary.txt')))
print()
dense =[]
for j in train_raw_data:
tmp = dictionary.doc2bow(list(j))
dense.append(list(matutils.corpus2dense([tmp], num_terms=len(dictionary)).T[0]))
print()
print("length of dense:",len(dense))
print("length of train_label:",len(train_label))
        print("all procedures finished successfully")
return dense, train_label, dictionary
def load_dict(self):
return corpora.Dictionary.load_from_text(os.path.join(self.dictionary_save_path,'dictionary.txt'))
class Predictor:
def __init__(self,
dense=None,
train_label=None,
dictionary=None,
estimator=None):
self.dense = dense
self.train_label = train_label
self.dictionary = dictionary
self.estimator = estimator
def randomforestmodel_initial_train(self):
self.estimator = RandomForestClassifier()
print("Random forest model initialized...")
self.estimator.fit(self.dense, self.train_label)
print("First-training of Random forest model is completed.")
def load_dict(self,dictionary_save_path=None):
return corpora.Dictionary.load_from_text(os.path.join(dictionary_save_path,'dictionary.txt'))
def randomforestmodel_save(self, estimator=None, path=None):
save_path = os.path.join(path,'estimator.pkl')
if os.path.isdir(path):
pass
else:
os.makedirs(path)
            print("save-path does not exist. save-path created successfully.")
joblib.dump(self.estimator, save_path)
print("existing model is saved successfully.")
def randomforestmodel_load(self, path=None):
load_path = os.path.join(path,'estimator.pkl')
self.estimator = joblib.load(load_path)
print("existing model is loaded successfully.")
def randomforestmodel_retrain(self,dense=None, train_label=None, dictionary=None, estimator=None):
self.estimator.fit(dense, train_label)
print("Re-training of Random forest model is completed.")
def onebyonepredict(self,analisys_words=None):
test_dense = []
test_tmp = self.dictionary.doc2bow(list(analisys_words))
test_dense.append(list(matutils.corpus2dense([test_tmp], num_terms=len(self.dictionary)).T[0])) #vector by doc<NAME>
        label_predict = self.estimator.predict(test_dense)  # predict expects a 2-D array (one row here)
print("Label prediction is complete. answer is :",label_predict[0])
return label_predict
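
# End-to-end usage sketch (added for illustration; not part of the original
# module).  Paths, counts and the sample text are placeholders.
#
#     d2d = Dict2dense(datapath="data", dictionary_save_path="dicts",
#                      data_number=3, no_below=2, no_above=0.5, keep_n=5000,
#                      dict_filter=True)
#     dense, labels, dictionary = d2d.create_dict()
#     clf = Predictor(dense=dense, train_label=labels, dictionary=dictionary)
#     clf.randomforestmodel_initial_train()
#     clf.onebyonepredict("text to classify")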
| [
"gensim.corpora.Dictionary",
"os.makedirs",
"sklearn.externals.joblib.load",
"os.path.join",
"sklearn.ensemble.RandomForestClassifier",
"natto.MeCab",
"os.path.isdir",
"sklearn.externals.joblib.dump",
"csv.reader"
] | [((636, 653), 'natto.MeCab', 'MeCab', (['"""-Owakati"""'], {}), "('-Owakati')\n", (641, 653), False, 'from natto import MeCab\n'), ((1680, 1723), 'gensim.corpora.Dictionary', 'corpora.Dictionary', (['wakati_by_sentnces_list'], {}), '(wakati_by_sentnces_list)\n', (1698, 1723), False, 'from gensim import corpora, matutils\n'), ((3338, 3362), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '()\n', (3360, 3362), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((3745, 3780), 'os.path.join', 'os.path.join', (['path', '"""estimator.pkl"""'], {}), "(path, 'estimator.pkl')\n", (3757, 3780), False, 'import os\n'), ((3785, 3804), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (3798, 3804), False, 'import os\n'), ((3916, 3954), 'sklearn.externals.joblib.dump', 'joblib.dump', (['self.estimator', 'save_path'], {}), '(self.estimator, save_path)\n', (3927, 3954), False, 'from sklearn.externals import joblib\n'), ((4066, 4101), 'os.path.join', 'os.path.join', (['path', '"""estimator.pkl"""'], {}), "(path, 'estimator.pkl')\n", (4078, 4101), False, 'import os\n'), ((4120, 4142), 'sklearn.externals.joblib.load', 'joblib.load', (['load_path'], {}), '(load_path)\n', (4131, 4142), False, 'from sklearn.externals import joblib\n'), ((1050, 1063), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (1060, 1063), False, 'import csv\n'), ((2405, 2462), 'os.path.join', 'os.path.join', (['self.dictionary_save_path', '"""dictionary.txt"""'], {}), "(self.dictionary_save_path, 'dictionary.txt')\n", (2417, 2462), False, 'import os\n'), ((2991, 3048), 'os.path.join', 'os.path.join', (['self.dictionary_save_path', '"""dictionary.txt"""'], {}), "(self.dictionary_save_path, 'dictionary.txt')\n", (3003, 3048), False, 'import os\n'), ((3615, 3667), 'os.path.join', 'os.path.join', (['dictionary_save_path', '"""dictionary.txt"""'], {}), "(dictionary_save_path, 'dictionary.txt')\n", (3627, 3667), False, 'import os\n'), ((3825, 3842), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (3836, 3842), False, 'import os\n'), ((979, 1023), 'os.path.join', 'os.path.join', (['self.datapath', '"""sentences.csv"""'], {}), "(self.datapath, 'sentences.csv')\n", (991, 1023), False, 'import os\n'), ((2501, 2558), 'os.path.join', 'os.path.join', (['self.dictionary_save_path', '"""dictionary.txt"""'], {}), "(self.dictionary_save_path, 'dictionary.txt')\n", (2513, 2558), False, 'import os\n')] |
from typing import Any
from apps.accounts.models import PendingAction
from apps.accounts.response_codes import INVALID_TOKEN
from apps.contrib.api.exceptions import SimpleValidationError
class PendingActionSelector(object):
@classmethod
def get_by_token(cls, token: str, category: str) -> Any:
try:
return PendingAction.objects.get(token=token, category=category)
except PendingAction.DoesNotExist:
raise SimpleValidationError(**INVALID_TOKEN)
| [
"apps.accounts.models.PendingAction.objects.get",
"apps.contrib.api.exceptions.SimpleValidationError"
] | [((338, 395), 'apps.accounts.models.PendingAction.objects.get', 'PendingAction.objects.get', ([], {'token': 'token', 'category': 'category'}), '(token=token, category=category)\n', (363, 395), False, 'from apps.accounts.models import PendingAction\n'), ((458, 496), 'apps.contrib.api.exceptions.SimpleValidationError', 'SimpleValidationError', ([], {}), '(**INVALID_TOKEN)\n', (479, 496), False, 'from apps.contrib.api.exceptions import SimpleValidationError\n')] |
#!/usr/bin/python
## 6/16/2017 - remove PyQuery dependency
## 5/19/2016 - update to allow for authentication based on api-key, rather than username/pw
## See https://documentation.uts.nlm.nih.gov/rest/authentication.html for full explanation
import requests
# from pyquery import PyQuery as pq
from lxml.html import fromstring
from SmartAnno.utils.ConfigReader import ConfigReader
uri = "https://utslogin.nlm.nih.gov"
# option 1 - username/pw authentication at /cas/v1/tickets
# auth_endpoint = "/cas/v1/tickets/"
# option 2 - api key authentication at /cas/v1/api-key
auth_endpoint = "/cas/v1/api-key"
class Authentication:
# def __init__(self, username,password):
def __init__(self, apikey=None):
# self.username=username
# self.password=password
if apikey is not None:
self.apikey = apikey
else:
self.apikey = ConfigReader.getValue('api_key')
self.service = "http://umlsks.nlm.nih.gov"
def gettgt(self):
# params = {'username': self.username,'password': self.password}
params = {'apikey': self.apikey}
h = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain", "User-Agent": "python"}
r = requests.post(uri + auth_endpoint, data=params, headers=h)
response = fromstring(r.text)
## extract the entire URL needed from the HTML form (action attribute) returned - looks similar to https://utslogin.nlm.nih.gov/cas/v1/tickets/TGT-36471-aYqNLN2rFIJPXKzxwdTNC5ZT7z3B3cTAKfSc5ndHQcUxeaDOLN-cas
## we make a POST call to this URL in the getst method
tgt = response.xpath('//form/@action')[0]
return tgt
def getst(self, tgt):
params = {'service': self.service}
h = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain", "User-Agent": "python"}
r = requests.post(tgt, data=params, headers=h)
st = r.text
return st
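
# Usage sketch (added for illustration; not part of the original module).  The
# API key is a placeholder: a ticket-granting ticket (TGT) is fetched once and
# a fresh single-use service ticket (ST) is requested for each UMLS REST call.
if __name__ == "__main__":
    auth = Authentication(apikey="YOUR-UMLS-API-KEY")
    tgt = auth.gettgt()
    print("service ticket:", auth.getst(tgt))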
| [
"SmartAnno.utils.ConfigReader.ConfigReader.getValue",
"requests.post",
"lxml.html.fromstring"
] | [((1230, 1288), 'requests.post', 'requests.post', (['(uri + auth_endpoint)'], {'data': 'params', 'headers': 'h'}), '(uri + auth_endpoint, data=params, headers=h)\n', (1243, 1288), False, 'import requests\n'), ((1308, 1326), 'lxml.html.fromstring', 'fromstring', (['r.text'], {}), '(r.text)\n', (1318, 1326), False, 'from lxml.html import fromstring\n'), ((1871, 1913), 'requests.post', 'requests.post', (['tgt'], {'data': 'params', 'headers': 'h'}), '(tgt, data=params, headers=h)\n', (1884, 1913), False, 'import requests\n'), ((883, 915), 'SmartAnno.utils.ConfigReader.ConfigReader.getValue', 'ConfigReader.getValue', (['"""api_key"""'], {}), "('api_key')\n", (904, 915), False, 'from SmartAnno.utils.ConfigReader import ConfigReader\n')] |
from functools import partial
import pickle
import numpy as np
from ...utils import common_utils
from . import augmentor_utils, database_sampler, best_match_querier, multi_best_match_querier, sup_gt_sampling
SPECIAL_NAMES = ["bm_points", "miss_points", "self_points", "other_points", "miss_occ_points", "self_occ_points", "other_occ_points", "self_limit_occ_mask", "miss_full_occ_points", "other_full_occ_points"]
class DataAugmentor(object):
def __init__(self, root_path, augmentor_configs, class_names, logger=None):
self.root_path = root_path
self.class_names = class_names
self.logger = logger
self.data_augmentor_queue = []
aug_config_list = augmentor_configs if isinstance(augmentor_configs, list) \
else augmentor_configs.AUG_CONFIG_LIST
self.db_infos = {}
for cur_cfg in aug_config_list:
if not isinstance(augmentor_configs, list):
if cur_cfg.NAME in augmentor_configs.DISABLE_AUG_LIST:
continue
if (cur_cfg.NAME in ["waymo_gt_sampling", "gt_sampling", "add_best_match", "sup_gt_sampling"]) and len(self.db_infos.keys()) == 0:
for class_name in class_names:
self.db_infos[class_name] = []
for db_info_path in cur_cfg.DB_INFO_PATH:
db_info_path = self.root_path.resolve() / db_info_path
with open(str(db_info_path), 'rb') as f:
infos = pickle.load(f)
[self.db_infos[cur_class].extend(infos[cur_class]) for cur_class in class_names]
print("self.db_infos", self.db_infos.keys())
cur_augmentor = getattr(self, cur_cfg.NAME)(config=cur_cfg)
self.data_augmentor_queue.append(cur_augmentor)
def sup_gt_sampling(self, config=None):
db_sampler = sup_gt_sampling.SupGTSampling(
root_path=self.root_path,
sampler_cfg=config,
db_infos=self.db_infos,
class_names=self.class_names,
logger=self.logger
)
return db_sampler
def gt_sampling(self, config=None):
db_sampler = database_sampler.DataBaseSampler(
root_path=self.root_path,
sampler_cfg=config,
db_infos=self.db_infos,
class_names=self.class_names,
logger=self.logger
)
return db_sampler
def waymo_gt_sampling(self, config=None):
db_sampler = waymo_database_sampler.DataBaseSampler(
root_path=self.root_path,
sampler_cfg=config,
db_infos=self.db_infos,
class_names=self.class_names,
logger=self.logger
)
return db_sampler
def waymo_obj_gt_sampling(self, config=None):
db_sampler = waymo_obj_database_sampler.DataBaseSampler(
root_path=self.root_path,
sampler_cfg=config,
class_names=self.class_names,
logger=self.logger
)
return db_sampler
def add_best_match(self, config=None):
bm_querier = best_match_querier.BestMatchQuerier(
root_path=self.root_path,
querier_cfg=config,
class_names=self.class_names,
db_infos=self.db_infos,
logger=self.logger
)
return bm_querier
def add_multi_best_match(self, config=None):
bm_querier = multi_best_match_querier.MltBestMatchQuerier(
root_path=self.root_path,
querier_cfg=config,
class_names=self.class_names,
db_infos=self.db_infos,
logger=self.logger
)
return bm_querier
def add_waymo_multi_best_match(self, config=None):
bm_querier = waymo_multi_best_match_querier.MltBestMatchQuerier(
root_path=self.root_path,
querier_cfg=config,
class_names=self.class_names,
db_infos=self.db_infos,
logger=self.logger
)
return bm_querier
def __getstate__(self):
d = dict(self.__dict__)
del d['logger']
return d
def __setstate__(self, d):
self.__dict__.update(d)
def random_world_flip(self, data_dict=None, config=None, enable=None):
if data_dict is None:
return partial(self.random_world_flip, config=config, enable=enable)
gt_boxes, points, bm_points, miss_points, self_points, other_points = data_dict['gt_boxes'], data_dict['points'], data_dict['bm_points'] if "bm_points" in data_dict else None, data_dict['miss_points'] if "miss_points" in data_dict else None, data_dict['self_points'] if "self_points" in data_dict else None, data_dict['other_points'] if "other_points" in data_dict else None
special_points_lst = [data_dict[pt_key] for pt_key in SPECIAL_NAMES if pt_key in data_dict]
special_name_lst = [pt_key for pt_key in SPECIAL_NAMES if pt_key in data_dict]
for cur_axis in config['ALONG_AXIS_LIST']:
assert cur_axis in ['x', 'y']
gt_boxes, points, special_points_lst = getattr(augmentor_utils, 'random_flip_along_%s' % cur_axis)(
gt_boxes, points, special_points_lst=special_points_lst, enable=enable
)
for name,val in zip(special_name_lst, special_points_lst):
data_dict[name] = val
data_dict['gt_boxes'] = gt_boxes
data_dict['points'] = points
return data_dict
def abs_world_flip(self, data_dict=None, config=None):
return self.random_world_flip(data_dict=data_dict, config=config, enable=True)
def random_world_rotation(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.random_world_rotation, config=config)
rot_range = config['WORLD_ROT_ANGLE']
if not isinstance(rot_range, list):
rot_range = [-rot_range, rot_range]
pre_rot_points = data_dict['points']
special_points_lst = [data_dict[pt_key] for pt_key in SPECIAL_NAMES if pt_key in data_dict]
special_name_lst = [pt_key for pt_key in SPECIAL_NAMES if pt_key in data_dict]
gt_boxes, points, noise_rotation, special_points_lst = augmentor_utils.global_rotation(data_dict['gt_boxes'], pre_rot_points, rot_range=rot_range, special_points_lst=special_points_lst)
for name, val in zip(special_name_lst, special_points_lst):
data_dict[name] = val
data_dict['gt_boxes'] = gt_boxes
data_dict['points'] = points
if config.get("SAVE_PRE_ROT", False):
data_dict['pre_rot_points'] = pre_rot_points
data_dict['rot_z'] = noise_rotation * 180 / np.pi
return data_dict
def random_world_scaling(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.random_world_scaling, config=config)
special_points_lst = [data_dict[pt_key] for pt_key in SPECIAL_NAMES if pt_key in data_dict]
special_name_lst = [pt_key for pt_key in SPECIAL_NAMES if pt_key in data_dict]
gt_boxes, points, special_points_lst = augmentor_utils.global_scaling(
data_dict['gt_boxes'], data_dict['points'], config['WORLD_SCALE_RANGE'], special_points_lst=special_points_lst)
for name, val in zip(special_name_lst, special_points_lst):
data_dict[name] = val
data_dict['gt_boxes'] = gt_boxes
data_dict['points'] = points
return data_dict
def forward(self, data_dict, validation=False):
"""
Args:
data_dict:
points: (N, 3 + C_in)
gt_boxes: optional, (N, 7) [x, y, z, dx, dy, dz, heading]
gt_names: optional, (N), string
...
        Returns:
            data_dict: the same dict with augmented 'points' / 'gt_boxes'; the
                bookkeeping keys 'gt_boxes_mask' and 'gt_boxes_inds' are removed.
        """
        data_dict["gt_boxes_inds"] = np.arange(data_dict["gt_boxes_mask"].shape[0])
for cur_augmentor in self.data_augmentor_queue:
if not validation or type(cur_augmentor).__name__ in ["BestMatchQuerier", "MltBestMatchQuerier"]:
data_dict = cur_augmentor(data_dict=data_dict)
data_dict['gt_boxes'][:, 6] = common_utils.limit_period(
data_dict['gt_boxes'][:, 6], offset=0.5, period=2 * np.pi
)
# if 'calib' in data_dict:
# data_dict.pop('calib')
if 'road_plane' in data_dict:
data_dict.pop('road_plane')
if 'gt_boxes_mask' in data_dict:
gt_boxes_mask = data_dict['gt_boxes_mask']
data_dict['gt_boxes'] = data_dict['gt_boxes'][gt_boxes_mask]
data_dict['gt_names'] = data_dict['gt_names'][gt_boxes_mask]
if "obj_ids" in data_dict:
data_dict['obj_ids'] = data_dict['obj_ids'][gt_boxes_mask]
data_dict.pop('gt_boxes_mask')
data_dict.pop('gt_boxes_inds', None)
return data_dict
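# Usage sketch (illustrative only): augmentor classes like this one are driven by
# the dataset config; the class name, constructor arguments and the EasyDict
# import below are assumptions, not part of this fragment.
#
#   from easydict import EasyDict
#   aug_cfg = EasyDict({'AUG_CONFIG_LIST': [
#       EasyDict({'NAME': 'random_world_flip', 'ALONG_AXIS_LIST': ['x']})]})
#   augmentor = DataAugmentor(root_path, aug_cfg, class_names=['Car'], logger=logger)
#   data_dict = augmentor.forward(data_dict)  # augmented 'points' / 'gt_boxes'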
| [
"pickle.load",
"functools.partial"
] | [((4346, 4407), 'functools.partial', 'partial', (['self.random_world_flip'], {'config': 'config', 'enable': 'enable'}), '(self.random_world_flip, config=config, enable=enable)\n', (4353, 4407), False, 'from functools import partial\n'), ((5752, 5802), 'functools.partial', 'partial', (['self.random_world_rotation'], {'config': 'config'}), '(self.random_world_rotation, config=config)\n', (5759, 5802), False, 'from functools import partial\n'), ((6855, 6904), 'functools.partial', 'partial', (['self.random_world_scaling'], {'config': 'config'}), '(self.random_world_scaling, config=config)\n', (6862, 6904), False, 'from functools import partial\n'), ((1495, 1509), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1506, 1509), False, 'import pickle\n')] |
#! /usr/bin/python3
import sys
import os
import time
HOME = os.path.expanduser('~')
FILENAME = "%s/.timertime" % HOME
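# On-disk format written by the functions below: one Unix timestamp per line,
# alternating start/resume and pause/stop times, e.g. (values illustrative)
#   1700000000   <- start
#   1700000120   <- pause
#   1700000300   <- resume
# An odd number of lines therefore means the timer is currently running.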
def starttimer():
if (checkifexists()):
response = input("Timer is already going! Overwrite previous timer? [y/n] ")
if (not (response == "y" or response == "Y" or response == "yes")):
print("Aborting")
return
f = open(FILENAME, "w")
f.write(str(int(time.time())) + "\n")
f.close()
def pausetimer(command):
if (not checkifexists()):
print("No timer going! Use 'timer start' to initialize timer!")
return
if (filelen() % 2 == 0 and command == "pause"):
print("Timer already paused! Use 'timer resume' to resume")
return
if (filelen() % 2 == 1 and command == "resume"):
print("Cannot resume, timer not paused!")
return
f = open(FILENAME, "a")
f.write(str(int(time.time())) + "\n")
f.close()
if (command == "pause"):
print("Timer paused at %s" % display())
def checkifexists():
return os.path.isfile(FILENAME)
def filelen():
f = open(FILENAME)
for i, l in enumerate(f):
pass
f.close()
return i + 1
def stoptimer():
if (not checkifexists()):
print("Timer not started!")
return
print(display())
os.remove(FILENAME)
def display():
lines = []
f = open(FILENAME, "r")
for line in f:
lines.append(int(line.rstrip()))
if (len(lines) % 2 == 1):
lines.append(int(time.time()))
stoptimes = [lines[i] for i in range(1, len(lines), 2)]
starttimes = [lines[i] for i in range(0, len(lines), 2)]
secnum = sum(stoptimes) - sum(starttimes)
m, s = divmod(secnum, 60)
h, m = divmod(m, 60)
if h > 23:
d, h = divmod(h, 24)
return "%d d, %d:%02d:%02d" % (d, h, m, s)
else:
return "%d:%02d:%02d" % (h, m, s)
def showtimer():
if (not checkifexists()):
print("Timer not started!")
return
print(display())
def statustimer():
if (not checkifexists()):
print("Timer inactive")
else:
if filelen() % 2 == 0:
print("Timer paused")
else:
print("Timer running at %s" % display())
def main():
try:
command = sys.argv[1]
if (command == "start"):
starttimer()
elif (command == "stop"):
stoptimer()
elif (command == "pause" or command == "resume"):
pausetimer(command)
elif (command == "show"):
showtimer()
elif (command == "status"):
statustimer()
else:
print("Timer: Unknown command '%s'" % command)
except IndexError:
print("No command given. Proper use:")
print("\n > timer [cmd]\n")
print("Where [cmd] is either start, stop, pause, resume, status or show.")
if __name__ == "__main__":
main()
| [
"os.path.isfile",
"time.time",
"os.path.expanduser",
"os.remove"
] | [((62, 85), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (80, 85), False, 'import os\n'), ((1083, 1107), 'os.path.isfile', 'os.path.isfile', (['FILENAME'], {}), '(FILENAME)\n', (1097, 1107), False, 'import os\n'), ((1362, 1381), 'os.remove', 'os.remove', (['FILENAME'], {}), '(FILENAME)\n', (1371, 1381), False, 'import os\n'), ((1567, 1578), 'time.time', 'time.time', ([], {}), '()\n', (1576, 1578), False, 'import time\n'), ((439, 450), 'time.time', 'time.time', ([], {}), '()\n', (448, 450), False, 'import time\n'), ((935, 946), 'time.time', 'time.time', ([], {}), '()\n', (944, 946), False, 'import time\n')] |
from django.contrib import admin
from .models import Event
from chat.models import Chat
# A TabularInline allows for an object reference (with a Foreign Key to the main object) to be displayed
# within the admin backend under the main object
# Django TabularInline: https://docs.djangoproject.com/en/3.0/ref/contrib/admin/#django.contrib.admin.TabularInline
class ChatInline(admin.TabularInline):
model = Chat
# extra: This flag allows for no additional chats to be displayed other than those connected to the event
# (Default is 3 additional objects not necessarily connected to the particular event. However, we want 0)
extra = 0
# A ModelAdmin allows you to configure the display of a model in the admin backend
# Django ModelAdmin: https://docs.djangoproject.com/en/3.0/ref/contrib/admin/
class EventAdmin(admin.ModelAdmin):
model = Event
# list_display: Configures which event fields to display on the event manager list in the admin backend
list_display = ['name', 'start_time', 'end_time', 'description']
# fields: Configures which event fields to display on an event in the admin backend
fields = ['name', 'start_time', 'end_time', 'description', 'users']
# inlines: Configures the TabularInlines to display with the fields
inlines = [
ChatInline,
]
# Configures which models to display in the admin backend
# Must register each model in events/models.py here:
admin.site.register(Event, EventAdmin) | [
"django.contrib.admin.site.register"
] | [((1430, 1468), 'django.contrib.admin.site.register', 'admin.site.register', (['Event', 'EventAdmin'], {}), '(Event, EventAdmin)\n', (1449, 1468), False, 'from django.contrib import admin\n')] |
from Algorithms.Search.BinarySearch.binary_search_recursive import (
binary_search_recursive,
)
class TestBinarySearchRecursive:
def test_element_found(self):
array = [1, 2, 3, 4, 6, 7]
expected = 3
index = binary_search_recursive(array, 4)
assert index == expected
def test_element_not_found(self):
array = [1, 2, 3, 4, 6, 7]
expected = None
index = binary_search_recursive(array, 10)
assert index == expected
| [
"Algorithms.Search.BinarySearch.binary_search_recursive.binary_search_recursive"
] | [((242, 275), 'Algorithms.Search.BinarySearch.binary_search_recursive.binary_search_recursive', 'binary_search_recursive', (['array', '(4)'], {}), '(array, 4)\n', (265, 275), False, 'from Algorithms.Search.BinarySearch.binary_search_recursive import binary_search_recursive\n'), ((425, 459), 'Algorithms.Search.BinarySearch.binary_search_recursive.binary_search_recursive', 'binary_search_recursive', (['array', '(10)'], {}), '(array, 10)\n', (448, 459), False, 'from Algorithms.Search.BinarySearch.binary_search_recursive import binary_search_recursive\n')] |
import sqlite3, os
from ..args import args
from ..log import getLogger
from ..const import TABLE_FITS, TABLE_OBSV, HDR_KEYS
from .. import calc
# Create module's logger
logger = getLogger(__name__)
# IMPORTANT: put KEYWORDS in double-quotes, some have special characters (e.g. 'DATE-OBS').
# Note: Use ..fits.printFitsHdr() for inspectation of fits files
### OBSERVATION
# Inserts row to observation table
# see obsCreateTable for datatypes
#
class ObservatoryDB:
'''class for handling sqlite3 database file
'''
conn = None
cursor = None
obsvTable = None
fitsTable = None
def __init__(self, dbfile, obsvTable, fitsTable):
''':param dbfile: path for .db file
:param obsvTable: tablename for Obsv objects
:param fitsTable: tablename for FitsFile objects
'''
self.conn = sqlite3.connect(dbfile)
self.cursor = self.conn.cursor()
self.obsvTable = obsvTable
self.fitsTable = fitsTable
# def __query(self, string):
# '''This method is for internal use only. Lets any command to be executed
# in sqlite3 database.
# :param string: SQL command to be issued.
# :returns: result of the query (list, print for details)
# '''
# self.cursor.execute(string)
# return self.cursor.fetchall()
def queryHashRange(self, column, lowerHash, upperHash):
        '''Return the given column(s) for rows whose HASH lies in [lowerHash, upperHash)
        :param column: column(s) to be fetched
        :param lowerHash: lowest hash allowed (inclusive)
        :param upperHash: upper limit of hashes (exclusive)
        :returns: result of the query (list, print for details)
        '''
        self.cursor.execute(
            f'SELECT {column} FROM {self.obsvTable} WHERE ({lowerHash} <= HASH and HASH < {upperHash})'
)
return self.cursor.fetchall()
#
def queryObsv(self, hash, column):
''':param hash: hash for Obsv to be queried
:param column: column(s) to be fetched
:returns: True if successful
'''
self.cursor.execute(
f'SELECT {column} FROM {self.obsvTable} WHERE HASH = {hash};'
)
return self.cursor.fetchall()
#
def queryFits(self, hash, column):
''':param hash: hash for FitsFile to be queried
:param column: column(s) to be fetched
:returns: True if successful
'''
self.cursor.execute(
f'SELECT {column} FROM {self.fitsTable} WHERE HASH = {hash};'
)
return self.cursor.fetchall()
#
def insertObsv(self, obsv):
''':param obsv: Obsv object to be inserted into database
:returns: True if successful
'''
try:
self.cursor.execute(
f'INSERT INTO {self.obsvTable} VALUES ('
f'"{obsv.hash}",'
f'"{obsv.date}",'
f'"{obsv.tlscp}",'
f'"{obsv.objct}",'
f'"{obsv.path}"'
f');'
)
self.conn.commit()
return True
except Exception as e:
logger.warning(f'Could not insert: {e}')
return False
# Not for stand-alone use, to be used when an Obsv is being inserted
def insertFits(self, fitsFile):
''':param fitsFile: FitsFile object to be inserted into database
:returns: True if successful
'''
try:
hdrItems = ''
for j in range(1, len(HDR_KEYS)): # 'SIMPLE' boundary cases
hdrItems += (f',"{fitsFile.hdr[HDR_KEYS[j]]}"' if HDR_KEYS[j] in fitsFile.hdr else f',"NULL"')
self.cursor.execute(
f'INSERT INTO {self.fitsTable} VALUES ('
f'"{fitsFile.hash}"'
f',"{fitsFile.obsvHash}"'
f',"{fitsFile.path}"' # absolute path of file
# HEADER KEYWORDS BELOW
# Insert integer inplace of boolean (SQLite3 specific)
f',"{(1 if fitsFile.hdr["SIMPLE"] else 0)}"'
# Insert from second element of HDR_KEYS
f'{hdrItems}'
f');'
)
self.conn.commit()
return True
except Exception as e:
logger.warning(f'Could not insert: {fitsFile.path}')
logger.warning(e)
return False
def deleteObsv(self, obsv):
'''Deletes entries for Obsv and corresponding FitsFiles from their respective tables
:param obsv: Obsv object to be deleted
:returns: True if successful
'''
try:
self.cursor.execute(
f'DELETE FROM {self.obsvTable} WHERE "HASH" = {obsv.hash}'
)
self.cursor.execute(
f'DELETE FROM {self.fitsTable} WHERE "OBSV-HASH" = {obsv.hash}'
)
self.conn.commit()
return True
except Exception as e:
logger.warning(f'{e}')
return False
def deleteObsvByRef(self, ref):
'''Deletes entries for Obsv and corresponding FitsFiles from their respective tables
:param ref: ref of Obsv to be deleted
:returns: True if successful
'''
try:
self.cursor.execute(
f'DELETE FROM {self.obsvTable} WHERE "HASH" = {calc.hash(ref)}'
)
self.cursor.execute(
f'DELETE FROM {self.fitsTable} WHERE "OBSV-HASH" = {calc.hash(ref)}'
)
self.conn.commit()
return True
        except Exception as err:
            logger.warning(f'{type(err).__name__}: {err}')
            return False
# Creates single-table for observations, with four essential columns.
# Shall be used for creation/migration only.
def createObsvTable(self): #returns boolean
'''Creates table for storing Obsv object information (named as self.obsvTable value)
:returns: True if successful
'''
try:
self.cursor.execute(
f'CREATE TABLE IF NOT EXISTS {self.obsvTable} (\n'
f'"HASH" INTEGER PRIMARY KEY\n'
f',"DATE" TEXT NOT NULL\n' # YYYY-MM-DD, also foldername in
f',"TELESCOP" TEXT NOT NULL\n'
f',"OBJECT" TEXT NOT NULL\n'
f',"PATH" TEXT NOT NULL\n'
f');'
)
self.conn.commit()
return True
except Exception as e:
logger.warning(f'{e}')
return False
# Creates single-table for FITS-Headers
# Shall be used at creation/migration only
def createFitsTable(self): #returns boolean
'''Creates table for storing FitsFile object information (named as self.fitsTable value)
:returns: True if successful
'''
try:
self.cursor.execute(
#print( # for debugging when table not created, print the string
f'CREATE TABLE IF NOT EXISTS {self.fitsTable} (\n'
f'"HASH" INTEGER PRIMARY KEY,\n' # file's hash
f'"OBSV-HASH" INTEGER NOT NULL,\n' # hash for parent Obsv
f'"PATH" TEXT NOT NULL,\n' # absolute path of file
# HEADER KEYWORDS BELOW (AUKR-REF in header for archived files)
f'"{HDR_KEYS[0]}" INTEGER,\n' #for bool
f'"{HDR_KEYS[1]}" INTEGER,\n'
f'"{HDR_KEYS[2]}" INTEGER,\n'
f'"{HDR_KEYS[3]}" INTEGER,\n'
f'"{HDR_KEYS[4]}" INTEGER,\n'
f'"{HDR_KEYS[5]}" REAL,\n' #float
f'"{HDR_KEYS[6]}" REAL,\n' #float
f'"{HDR_KEYS[7]}" TEXT NOT NULL,\n' #str "DATE-OBS"
f'"{HDR_KEYS[8]}" REAL,\n' #float
f'"{HDR_KEYS[9]}" REAL,\n' #float
f'"{HDR_KEYS[10]}" REAL,\n' #float
f'"{HDR_KEYS[11]}" REAL,\n' #float
f'"{HDR_KEYS[12]}" REAL,\n' #float
f'"{HDR_KEYS[13]}" REAL,\n' #float
f'"{HDR_KEYS[14]}" INTEGER,\n'
f'"{HDR_KEYS[15]}" INTEGER,\n'
f'"{HDR_KEYS[16]}" INTEGER,\n'
f'"{HDR_KEYS[17]}" INTEGER,\n'
f'"{HDR_KEYS[18]}" TEXT,\n' #str
f'"{HDR_KEYS[19]}" TEXT,\n' #str
f'"{HDR_KEYS[20]}" TEXT,\n' #str
f'"{HDR_KEYS[21]}" INTEGER,\n'
f'"{HDR_KEYS[22]}" REAL,\n' #float
f'"{HDR_KEYS[23]}" TEXT,\n' #str
f'"{HDR_KEYS[24]}" TEXT,\n' #str
f'"{HDR_KEYS[25]}" TEXT,\n' #str
f'"{HDR_KEYS[26]}" TEXT,\n' #str
f'"{HDR_KEYS[27]}" TEXT,\n' #str
f'"{HDR_KEYS[28]}" TEXT,\n' #str
f'"{HDR_KEYS[29]}" TEXT,\n' #str
f'"{HDR_KEYS[30]}" REAL,\n' #float
f'"{HDR_KEYS[31]}" REAL,\n' #float
f'"{HDR_KEYS[32]}" REAL,\n' #float
f'"{HDR_KEYS[33]}" REAL,\n' #float
f'"{HDR_KEYS[34]}" REAL,\n' #float
f'"{HDR_KEYS[35]}" REAL,\n' #float
f'"{HDR_KEYS[36]}" TEXT,\n' #str
# the image #astropy.io.fits.header._HeaderCommentaryCards
f'"{HDR_KEYS[37]}" TEXT,\n' #str
f'"{HDR_KEYS[38]}" TEXT NOT NULL,\n' #str "OBJECT"
f'"{HDR_KEYS[39]}" TEXT NOT NULL,\n' #str "TELESCOP"
f'"{HDR_KEYS[40]}" TEXT,\n' #str
f'"{HDR_KEYS[41]}" TEXT,\n' #str
f'"{HDR_KEYS[42]}" TEXT,\n' #str
f'"{HDR_KEYS[43]}" TEXT,\n' #str
f'"{HDR_KEYS[44]}" TEXT,\n' #str
f'"{HDR_KEYS[45]}" TEXT,\n' #float
f'"{HDR_KEYS[46]}" TEXT,\n' #str
f'"{HDR_KEYS[47]}" TEXT,\n' #str
f'"{HDR_KEYS[48]}" TEXT,\n' #str #was null in sample
f'"{HDR_KEYS[49]}" TEXT,\n' #str #was null in sample
f'"{HDR_KEYS[50]}" TEXT,\n' #float
f'"{HDR_KEYS[51]}" TEXT,\n' #float
f'"{HDR_KEYS[52]}" TEXT,\n' #float
f'"{HDR_KEYS[53]}" TEXT,\n' #float
f'"{HDR_KEYS[54]}" TEXT NOT NULL\n' #str "AUKR-REF"
# END of HEADER KEYWORDS
f');'
)
self.conn.commit()
return True
except Exception as e:
logger.warning(f'{e}')
return False
# Create database object to connect provided sqlite3.db file
archiveDB = ObservatoryDB(args.dbfile, TABLE_OBSV, TABLE_FITS)
if not archiveDB.createFitsTable():
logger.info(f'Table "{TABLE_FITS}" could not be created')
if not archiveDB.createObsvTable():
logger.info(f'Table "{TABLE_OBSV}" could not be created')
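# Usage sketch (illustrative only; how Obsv and FitsFile objects are built is
# assumed to live elsewhere in this package):
#
#   obsv = Obsv(...)                          # hypothetical observation object
#   archiveDB.insertObsv(obsv)                # one row in TABLE_OBSV
#   rows = archiveDB.queryObsv(obsv.hash, column='*')
#   archiveDB.deleteObsv(obsv)                # also removes its FITS rows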
| [
"sqlite3.connect"
] | [((865, 888), 'sqlite3.connect', 'sqlite3.connect', (['dbfile'], {}), '(dbfile)\n', (880, 888), False, 'import sqlite3, os\n')] |
import os
import json
import pickle
import numpy as np
import pandas as pd
from nltk import word_tokenize
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
import ijson
from sentiments import (
parse_sentiwordnet,
parse_mpqa,
parse_senticnet,
parse_vader,
parse_huliu,
parse_general_inquirer,
parse_vae,
FPATHS,
)
imdb_dir = './data/sentiment-data/imdb'
imdb_train = f'{imdb_dir}/train'
imdb_test = f'{imdb_dir}/test'
yelp_dir = './data/sentiment-data/yelp'
yelp_train = f'{yelp_dir}/yelp_academic_dataset_review_train.json'
yelp_test = f'{yelp_dir}/yelp_academic_dataset_review_test.json'
semeval_dir = './data/sentiment-data/SemEval-2017-Task4A'
semeval_train = f'{semeval_dir}/SemEval2017-task4-dev.subtask-A.english.INPUT_train.txt'
semeval_test = f'{semeval_dir}/SemEval2017-task4-dev.subtask-A.english.INPUT_test.txt'
multidom_dir = './data/sentiment-data/multi-domain-sentiment'
multidom_train = f'{multidom_dir}/multi-domain-sentiment_indomain_train.txt'
multidom_test = f'{multidom_dir}/multi-domain-sentiment_indomain_test.txt'
peerread_acl_dir = './data/sentiment-data/PeerRead/acl_2017'
peerread_acl_train = f'{peerread_acl_dir}/train'
peerread_acl_test = f'{peerread_acl_dir}/test'
peerread_iclr_dir = './data/sentiment-data/PeerRead/iclr_2017'
peerread_iclr_train = f'{peerread_iclr_dir}/train'
peerread_iclr_test = f'{peerread_iclr_dir}/test'
SPLIT_SEED = 11235
SPLIT = False
SAVE = True
def save_object(obj, fpath):
"""
Pickle an object and save it to file
"""
with open(fpath, 'wb') as o:
pickle.dump(obj, o)
def load_object(fpath):
"""
Load a pickled object from file
"""
with open(fpath, 'rb') as i:
return pickle.load(i)
def split_file(dir, in_fname, split_prop, seed=None):
"""
Split train and test data
"""
np.random.seed(seed)
fname, ext = os.path.splitext(in_fname)
with open(f'{dir}/{in_fname}', 'r', encoding='utf-8') as f,\
open(f'{dir}/{fname}_train{ext}', 'w', encoding='utf-8') as train,\
open(f'{dir}/{fname}_test{ext}', 'w', encoding='utf-8') as test:
for i, line in enumerate(f):
print(i, end='\r')
train_split = np.random.uniform() < split_prop
if train_split:
train.write(line)
else:
test.write(line)
def split_files(dir, split_prop, seed=None):
"""
Split train and test data
"""
np.random.seed(seed)
datadir = os.listdir(dir)
for fold in datadir:
if not fold in ["books", "dvd", "electronics", "kitchen"]:
continue
split2label = {"negative.review": "0", "positive.review": "1"}
for datasplit in ["negative.review", "positive.review"]:
in_fname = os.path.join(dir, fold, datasplit)
fname = os.path.join(dir, "multi-domain-sentiment_indomain")
ext = ".txt"
with open(f'{in_fname}', 'r', encoding='utf-8') as f,\
open(f'{fname}_train{ext}', 'a', encoding='utf-8') as train,\
open(f'{fname}_test{ext}', 'a', encoding='utf-8') as test:
for i, line in enumerate(f):
print(i, end='\r')
line = line.strip("\n")
toks = line.split(" ")
toks_final = []
for tok in toks:
if "_" in tok or tok.startswith("#label"):
continue
toks_final.append(tok.split(":")[0])
text = " ".join(toks_final)
train_split = np.random.uniform() < split_prop
if train_split:
train.write(text + "\t" + split2label[datasplit] + "\n")
else:
test.write(text + "\t" + split2label[datasplit] + "\n")
if SPLIT:
split_file(yelp_dir, 'yelp_academic_dataset_review.json', split_prop=0.8, seed=SPLIT_SEED)
split_file(semeval_dir, 'SemEval2017-task4-dev.subtask-A.english.INPUT.txt', split_prop=0.8, seed=SPLIT_SEED)
split_files(multidom_dir, split_prop=0.8, seed=SPLIT_SEED)
def gen_imdb_data(dir, sent_data, score_fn, limit_to=None):
"""
Create imdb dataset from sentiment lexicon
"""
pos_data = [('pos', fname) for fname in os.listdir(os.path.join(dir, 'pos'))][:limit_to]
neg_data = [('neg', fname) for fname in os.listdir(os.path.join(dir, 'neg'))][:limit_to]
n = len(pos_data) + len(neg_data)
y = np.concatenate([np.ones(len(pos_data)), np.zeros(len(neg_data))])
x = np.zeros((n, len(score_fn('good bad', sent_data))))
for i, (sent, fname) in enumerate(pos_data + neg_data):
with open(os.path.join(dir, sent, fname), 'r', encoding='latin1') as textfile:
text = textfile.read()
sent_score = score_fn(text, sent_data)
x[i] = sent_score
print(f'{i/n*100:0.2f}%', end='\r')
return x, y
def gen_yelp_data(fpath, sent_data, score_fn, limit_to, balance=False):
"""
Create yelp dataset from sentiment lexicon
All written under the assumption that we're never going to read in all data
"""
n = limit_to
y = np.zeros(n)
x = np.zeros((n, len(score_fn('good bad', sent_data))))
i = 0
with open(fpath, 'r', encoding='utf-8') as f:
for line in f:
data = json.loads(line)
sent_score = score_fn(data['text'], sent_data)
if balance and (np.sum(y == data['stars']) >= (limit_to // 5)):
continue
x[i] = sent_score
y[i] = data['stars']
i += 1
print(f'{i/n*100:0.2f}%', end='\r')
if i >= n:
break
return x, y
def gen_multidom_data(fpath, sent_data, score_fn, limit_to=None):
"""
Create multi-domain sentiment analysis dataset from sentiment lexicon
"""
n = limit_to
y = np.zeros(n)
x = np.zeros((n, len(score_fn('good bad', sent_data))))
i = 0
with open(fpath, 'r', encoding='utf-8') as f:
for line in f:
text, label = line.strip("\n").split("\t")
sent_score = score_fn(text, sent_data)
x[i] = sent_score
y[i] = int(label)
i += 1
print(f'{i/n*100:0.2f}%', end='\r')
if i >= n:
break
return x, y
def gen_semeval_data(fpath, sent_data, score_fn, limit_to=None, balance=False):
"""
Create yelp dataset from sentiment lexicon
"""
data = pd.read_csv(fpath, sep='\t', names=['id', 'sent', 'text', '_'], encoding='utf-8')
data['sent'] = data.sent.replace({'negative': 0, 'neutral': 1, 'positive': 2})
n = limit_to or len(data)
if balance:
class_counts = data.groupby('sent')['id'].count()
n = class_counts.min() * len(class_counts)
y = np.zeros(n)
x = np.zeros((n, len(score_fn('good bad', sent_data))))
i = 0
for _, row in data.iterrows():
sent_score = score_fn(row.text, sent_data)
if balance and (np.sum(y == row.sent) >= class_counts.min()):
continue
x[i] = sent_score
y[i] = row.sent
i += 1
print(f'{i/n*100:0.2f}%', end='\r')
if i >= n:
break
return x, y
def gen_acl_data(dir, sent_data, score_fn, limit_to=None, merge=True):
"""
Create PeerReview ACL dataset from sentiment lexicon
"""
acl_data = [fname for fname in os.listdir(os.path.join(dir, 'reviews'))][:limit_to]
n = limit_to or len(acl_data)
y = np.zeros(n)
x = np.zeros((n, len(score_fn('good bad', sent_data))))
if merge:
score2norm = {"1": 0, "2": 0, "3": 1, "4": 2, "5": 2, "6": 2}
else:
score2norm = {"1": 0, "2": 1, "3": 2, "4": 3, "5": 4, "6": 5}
largecnt = 0
for i, fname in enumerate(acl_data):
currpath = os.path.join(dir, 'reviews', fname)
f = open(currpath, encoding="utf-8")
objects = ijson.items(f, 'reviews')
for ii, obj in enumerate(objects):
for j, objj in enumerate(obj):
text = objj["comments"]
sent_score = score_fn(text, sent_data)
x[i] = sent_score
y[i] = score2norm[objj["RECOMMENDATION"]]
largecnt += 1
print(f'{i/n*100:0.2f}%', end='\r')
print(largecnt)
return x, y
def gen_iclr_data(dir, sent_data, score_fn, limit_to=None, merge=True):
"""
Create PeerReview ICLR dataset from sentiment lexicon
"""
iclr_data = [fname for fname in os.listdir(os.path.join(dir, 'reviews'))][:limit_to]
n = limit_to or len(iclr_data)
y = np.zeros(n)
x = np.zeros((n, len(score_fn('good bad', sent_data))))
if merge:
score2norm = {"1": 0, "2": 0, "3": 0, "4": 0, "5": 1, "6": 2, "7": 2, "8": 2, "9": 2, "10": 2}
else:
score2norm = {"1": 0, "2": 1, "3": 2, "4": 3, "5": 4, "6": 5, "7": 6, "8": 7, "9": 8, "10": 9}
largecnt = 0
for i, fname in enumerate(iclr_data):
currpath = os.path.join(dir, 'reviews', fname)
f = open(currpath, encoding="utf-8")
objects = ijson.items(f, 'reviews')
for ii, obj in enumerate(objects):
for j, objj in enumerate(obj):
# some are meta-reviews without scores
if not "RECOMMENDATION" in objj.keys():
continue
text = objj["comments"]
sent_score = score_fn(text, sent_data)
x[i] = sent_score
y[i] = score2norm[str(objj["RECOMMENDATION"])]
largecnt += 1
print(f'{i/n*100:0.2f}%', end='\r')
print(largecnt)
return x, y
def score_sent(text, sent_data, normalize=False):
"""
Evaluate the data
"""
test_sent = next(iter(sent_data.values()))
    sents = np.zeros_like(test_sent).astype(float).reshape(-1)
tokens = word_tokenize(text.lower())
for token in tokens:
try:
sent = np.array(sent_data[token])
except KeyError:
continue
if normalize:
sent = sent / sent.sum()
sents += sent
score = sents / len(tokens)
return score
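# Worked example (hypothetical two-word lexicon): with
# sent_data = {'good': [0, 0, 1], 'bad': [1, 0, 0]} the text "good good bad"
# tokenizes to 3 tokens, the summed vector is [1, 0, 2], and score_sent returns
# [1/3, 0, 2/3], that is, the mean lexicon vector over all tokens. Tokens missing
# from the lexicon are skipped but still count in the denominator.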
def read_lexica():
sent_to_dict = lambda x: x.set_index("word")["sent"].to_dict()
sentiments = {
'vae_3': sent_to_dict(parse_vae(
'./models/vae/sent_dict.csv',
sent_cols=[f'alpha_{i}' for i in range(1, 4)],
from_vae_only=True,
)),
'sentiwordnet': sent_to_dict(parse_sentiwordnet(FPATHS['sentiwordnet'], group=True)),
'mpqa': sent_to_dict(parse_mpqa(FPATHS['mpqa'])),
'senticnet': sent_to_dict(parse_senticnet(FPATHS['senticnet'])),
'huliu': sent_to_dict(parse_huliu(*FPATHS['huliu'])),
'gi': sent_to_dict(parse_general_inquirer(FPATHS['general_inquirer'])),
'vader_mean': sent_to_dict(parse_vader(FPATHS['vader'], group_mean=True)),
'vader_multi': sent_to_dict(parse_vader(FPATHS['vader'], group_multinomial=True)),
}
# binned vader dataset
sentiments['vader_binned'] = {
k: np.array([v[:4].sum(), v[4].sum(), v[5:].sum()])
for k, v in sentiments['vader_multi'].items()
}
print("VADER")
print(len(sentiments['vader_multi']))
return sentiments
def score_sentences(sentiments):
if SAVE:
imdb, yelp, semeval, multidom, acl3c, acl, iclr3c, iclr, \
imdb_testd, yelp_testd, semeval_testd, multidom_testd, acl3c_testd, acl_testd, iclr3c_testd, iclr_testd \
= {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}
for lexicon in sentiments:
print(f'On lexicon {lexicon}')
            # Both branches of the original if/else were identical, so a single
            # scorer suffices (normalization stays disabled for the VAE lexicon too).
            scorer = lambda text, sent_data: score_sent(text, sent_data, normalize=False)
print('IMDB')
imdb[lexicon], imdb['y'] = gen_imdb_data(
imdb_train, sentiments[lexicon], scorer, limit_to=None
)
print('Yelp')
yelp[lexicon], yelp['y'] = gen_yelp_data(
yelp_train, sentiments[lexicon], scorer, limit_to=int(1e5), balance=True
)
print('SemEval')
semeval[lexicon], semeval['y'] = gen_semeval_data(
semeval_train, sentiments[lexicon], scorer, limit_to=None, balance=True
)
print('MultiDom')
multidom[lexicon], multidom['y'] = gen_multidom_data(
multidom_train, sentiments[lexicon], scorer, limit_to=6500
)
print('ACL3c')
acl3c[lexicon], acl3c['y'] = gen_acl_data(
peerread_acl_train, sentiments[lexicon], scorer, limit_to=248, merge=True,
)
print('ACL')
acl[lexicon], acl['y'] = gen_acl_data(
peerread_acl_train, sentiments[lexicon], scorer, limit_to=248, merge=False,
)
print('ICLR3c')
iclr3c[lexicon], iclr3c['y'] = gen_iclr_data(
peerread_iclr_train, sentiments[lexicon], scorer, limit_to=2166, merge=True,
)
print('ICLR')
iclr[lexicon], iclr['y'] = gen_iclr_data(
peerread_iclr_train, sentiments[lexicon], scorer, limit_to=2166, merge=False,
)
print('ICLR test')
iclr_testd[lexicon], iclr_testd['y'] = gen_iclr_data(
peerread_iclr_test, sentiments[lexicon], scorer, limit_to=230, merge=False,
)
print('ICLR3C test')
iclr3c_testd[lexicon], iclr3c_testd['y'] = gen_iclr_data(
peerread_iclr_test, sentiments[lexicon], scorer, limit_to=230, merge=True,
)
print('ACL test')
acl_testd[lexicon], acl_testd['y'] = gen_acl_data(
peerread_acl_test, sentiments[lexicon], scorer, limit_to=15, merge=False,
)
print('ACL3C test')
acl3c_testd[lexicon], acl3c_testd['y'] = gen_acl_data(
peerread_acl_test, sentiments[lexicon], scorer, limit_to=15, merge=True,
)
print('Multidom test')
multidom_testd[lexicon], multidom_testd['y'] = gen_multidom_data(
multidom_test, sentiments[lexicon], scorer, limit_to=1575
)
print('SemEval test')
semeval_testd[lexicon], semeval_testd['y'] = gen_semeval_data(
semeval_test, sentiments[lexicon], scorer, limit_to=None
)
print('IMDB test')
imdb_testd[lexicon], imdb_testd['y'] = gen_imdb_data(
imdb_test, sentiments[lexicon], scorer, limit_to=None
)
print('Yelp test')
yelp_testd[lexicon], yelp_testd['y'] = gen_yelp_data(
yelp_test, sentiments[lexicon], scorer, limit_to=int(1e5)#int(1199429)
)
save_object(imdb, './models/evaluations/imdb.pkl')
save_object(yelp, './models/evaluations/yelp.pkl')
save_object(semeval, './models/evaluations/semeval.pkl')
save_object(multidom, './models/evaluations/multidom.pkl')
save_object(acl3c, './models/evaluations/acl_3class.pkl')
save_object(acl, './models/evaluations/acl.pkl')
save_object(iclr3c, './models/evaluations/iclr_3class.pkl')
save_object(iclr, './models/evaluations/iclr.pkl')
save_object(iclr_testd, './models/evaluations/iclr_test.pkl')
save_object(iclr3c_testd, './models/evaluations/iclr3c_test.pkl')
save_object(acl_testd, './models/evaluations/acl_test.pkl')
save_object(acl3c_testd, './models/evaluations/acl3c_test.pkl')
save_object(multidom_testd, './models/evaluations/multidom_test.pkl')
save_object(semeval_testd, './models/evaluations/semeval_test.pkl')
save_object(yelp_testd, './models/evaluations/yelp_test.pkl')
save_object(imdb_testd, './models/evaluations/imdb_test.pkl')
else:
imdb = load_object('./models/evaluations/imdb.pkl')
yelp = load_object('./models/evaluations/yelp.pkl')
semeval = load_object('./models/evaluations/semeval.pkl')
multidom = load_object('./models/evaluations/multidom.pkl')
acl3c = load_object('./models/evaluations/acl_3class.pkl')
acl = load_object('./models/evaluations/acl.pkl')
iclr3c = load_object('./models/evaluations/iclr_3class.pkl')
iclr = load_object('./models/evaluations/iclr.pkl')
imdb_testd = load_object('./models/evaluations/imdb_test.pkl')
yelp_testd = load_object('./models/evaluations/yelp_test.pkl')
semeval_testd = load_object('./models/evaluations/semeval_test.pkl')
multidom_testd = load_object('./models/evaluations/multidom_test.pkl')
acl3c_testd = load_object('./models/evaluations/acl3c_test.pkl')
acl_testd = load_object('./models/evaluations/acl_test.pkl')
iclr3c_testd = load_object('./models/evaluations/iclr3c_test.pkl')
iclr_testd = load_object('./models/evaluations/iclr_test.pkl')
return imdb, yelp, semeval, multidom, acl3c, acl, iclr3c, iclr, \
imdb_testd, yelp_testd, semeval_testd, multidom_testd, acl3c_testd, acl_testd, iclr3c_testd, iclr_testd
def make_combined_score(sentiments, imdb, yelp, semeval, multidom, acl3c, acl, iclr3c, iclr, imdb_testd, yelp_testd, semeval_testd, multidom_testd, acl3c_testd, acl_testd, iclr3c_testd, iclr_testd):
# which datasets do we *not* want in the combined version?
exclude = ['vae_3', 'vae_5', 'vae_9', 'vader_mean', 'vader_binned', 'combined', 'combined_binned']
imdb['combined'] = np.hstack([imdb[lexicon] for lexicon in sentiments if lexicon not in exclude])
yelp['combined'] = np.hstack([yelp[lexicon] for lexicon in sentiments if lexicon not in exclude])
semeval['combined'] = np.hstack([semeval[lexicon] for lexicon in sentiments if lexicon not in exclude])
multidom['combined'] = np.hstack([multidom[lexicon] for lexicon in sentiments if lexicon not in exclude])
acl3c['combined'] = np.hstack([acl3c[lexicon] for lexicon in sentiments if lexicon not in exclude])
acl['combined'] = np.hstack([acl[lexicon] for lexicon in sentiments if lexicon not in exclude])
iclr3c['combined'] = np.hstack([iclr3c[lexicon] for lexicon in sentiments if lexicon not in exclude])
iclr['combined'] = np.hstack([iclr[lexicon] for lexicon in sentiments if lexicon not in exclude])
imdb_testd['combined'] = np.hstack([imdb_testd[lexicon] for lexicon in sentiments if lexicon not in exclude])
yelp_testd['combined'] = np.hstack([yelp_testd[lexicon] for lexicon in sentiments if lexicon not in exclude])
semeval_testd['combined'] = np.hstack([semeval_testd[lexicon] for lexicon in sentiments if lexicon not in exclude])
multidom_testd['combined'] = np.hstack([multidom_testd[lexicon] for lexicon in sentiments if lexicon not in exclude])
acl3c_testd['combined'] = np.hstack([acl3c_testd[lexicon] for lexicon in sentiments if lexicon not in exclude])
acl_testd['combined'] = np.hstack([acl_testd[lexicon] for lexicon in sentiments if lexicon not in exclude])
iclr3c_testd['combined'] = np.hstack([iclr3c_testd[lexicon] for lexicon in sentiments if lexicon not in exclude])
iclr_testd['combined'] = np.hstack([iclr_testd[lexicon] for lexicon in sentiments if lexicon not in exclude])
sentiments['combined'] = None # dummy such that it's included in iterations
return sentiments, imdb, yelp, semeval, multidom, acl3c, acl, iclr3c, iclr, \
imdb_testd, yelp_testd, semeval_testd, multidom_testd, acl3c_testd, acl_testd, iclr3c_testd, iclr_testd
def make_binned_yelp(sentiments, yelp):
yelp_binned = {}
neutral = np.where(yelp['y'] == 3)[0]
n = neutral.shape[0]
neg = np.where(np.isin(yelp['y'], [1, 2]))[0][:n]
pos = np.where(np.isin(yelp['y'], [4, 5]))[0][:n]
yelp_binned_idx = np.concatenate([neutral, pos, neg])
y = yelp['y'][yelp_binned_idx]
yelp_binned['y'] = (y == 3) * 1 + (np.isin(y, [4, 5])) * 2
#print(sentiments.keys())
for lexicon in sentiments:
#print(lexicon)
yelp_binned[lexicon] = yelp[lexicon][yelp_binned_idx]
return yelp_binned
def eval_all(sentiments, imdb, yelp, yelp_binned, semeval, multidom, acl3c, acl, iclr3c, iclr,
imdb_testd, yelp_testd, yelp3c_testd, semeval_testd, multidom_testd, acl3c_testd, acl_testd, iclr3c_testd, iclr_testd):
evaluation_data = (
('imdb', imdb, imdb_testd),
('yelp', yelp, yelp_testd),
('yelp_binned', yelp_binned, yelp3c_testd),
('semeval', semeval, semeval_testd),
('multidom', multidom, multidom_testd),
('acl3c', acl3c, acl3c_testd),
('acl', acl, acl_testd),
('iclr3c', iclr3c, iclr3c_testd),
('iclr', iclr, iclr_testd),
)
for eval_name, train_data, eval_data in evaluation_data:
print(f'{eval_name} accuracy - {len(np.unique(train_data["y"]))} classes')
for lexicon in list(sentiments.keys()):
# making splits here
#x_train, x_dev, y_train, y_dev = train_test_split(
# eval_data[lexicon],
# eval_data['y'],
# random_state=SPLIT_SEED,
# test_size=0.1,
#)
# separate test
x_train = train_data[lexicon]
y_train = train_data['y']
x_dev = eval_data[lexicon]
y_dev = eval_data['y']
# print(len(y_train))
logit = LogisticRegression()
logit.fit(x_train, y_train)
pred = logit.predict(x_dev)
metric = np.mean(pred == y_dev)
print(f'{lexicon:15}{metric:0.3f}')
print('\n')
if __name__ == '__main__':
sentiments = read_lexica()
imdb, yelp, semeval, multidom, acl3c, acl, iclr3c, iclr, imdb_testd, \
yelp_testd, semeval_testd, multidom_testd, acl3c_testd, acl_testd, iclr3c_testd, iclr_testd \
= score_sentences(sentiments)
sentiments, imdb, yelp, semeval, multidom, acl3c, acl, iclr3c, iclr, \
imdb_testd, yelp_testd, semeval_testd, multidom_testd, acl3c_testd, acl_testd, iclr3c_testd, iclr_testd \
= make_combined_score(sentiments, imdb, yelp, semeval, multidom, acl3c, acl, iclr3c, iclr, imdb_testd, yelp_testd, semeval_testd, multidom_testd, acl3c_testd, acl_testd, iclr3c_testd, iclr_testd)
yelp_binned = make_binned_yelp(sentiments, yelp)
yelp3c_testd = make_binned_yelp(sentiments, yelp_testd)
eval_all(sentiments, imdb, yelp, yelp_binned, semeval, multidom, acl3c, acl, iclr3c, iclr,
imdb_testd, yelp_testd, yelp3c_testd, semeval_testd, multidom_testd, acl3c_testd, acl_testd, iclr3c_testd, iclr_testd)
| [
"pandas.read_csv",
"numpy.hstack",
"numpy.isin",
"numpy.array",
"numpy.mean",
"os.listdir",
"numpy.where",
"sentiments.parse_senticnet",
"numpy.random.seed",
"numpy.concatenate",
"sentiments.parse_vader",
"sentiments.parse_huliu",
"json.loads",
"os.path.splitext",
"pickle.load",
"sentiments.parse_sentiwordnet",
"ijson.items",
"sentiments.parse_mpqa",
"pickle.dump",
"numpy.unique",
"os.path.join",
"sklearn.linear_model.LogisticRegression",
"numpy.sum",
"numpy.zeros",
"numpy.random.uniform",
"sentiments.parse_general_inquirer",
"numpy.zeros_like"
] | [((1874, 1894), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1888, 1894), True, 'import numpy as np\n'), ((1910, 1936), 'os.path.splitext', 'os.path.splitext', (['in_fname'], {}), '(in_fname)\n', (1926, 1936), False, 'import os\n'), ((2432, 2452), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2446, 2452), True, 'import numpy as np\n'), ((2466, 2481), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (2476, 2481), False, 'import os\n'), ((5103, 5114), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (5111, 5114), True, 'import numpy as np\n'), ((5835, 5846), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (5843, 5846), True, 'import numpy as np\n'), ((6447, 6533), 'pandas.read_csv', 'pd.read_csv', (['fpath'], {'sep': '"""\t"""', 'names': "['id', 'sent', 'text', '_']", 'encoding': '"""utf-8"""'}), "(fpath, sep='\\t', names=['id', 'sent', 'text', '_'], encoding=\n 'utf-8')\n", (6458, 6533), True, 'import pandas as pd\n'), ((6777, 6788), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (6785, 6788), True, 'import numpy as np\n'), ((7482, 7493), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (7490, 7493), True, 'import numpy as np\n'), ((8594, 8605), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (8602, 8605), True, 'import numpy as np\n'), ((17718, 17796), 'numpy.hstack', 'np.hstack', (['[imdb[lexicon] for lexicon in sentiments if lexicon not in exclude]'], {}), '([imdb[lexicon] for lexicon in sentiments if lexicon not in exclude])\n', (17727, 17796), True, 'import numpy as np\n'), ((17820, 17898), 'numpy.hstack', 'np.hstack', (['[yelp[lexicon] for lexicon in sentiments if lexicon not in exclude]'], {}), '([yelp[lexicon] for lexicon in sentiments if lexicon not in exclude])\n', (17829, 17898), True, 'import numpy as np\n'), ((17925, 18010), 'numpy.hstack', 'np.hstack', (['[semeval[lexicon] for lexicon in sentiments if lexicon not in exclude]'], {}), '([semeval[lexicon] for lexicon in sentiments if lexicon not in\n exclude])\n', (17934, 18010), True, 'import numpy as np\n'), ((18034, 18120), 'numpy.hstack', 'np.hstack', (['[multidom[lexicon] for lexicon in sentiments if lexicon not in exclude]'], {}), '([multidom[lexicon] for lexicon in sentiments if lexicon not in\n exclude])\n', (18043, 18120), True, 'import numpy as np\n'), ((18141, 18220), 'numpy.hstack', 'np.hstack', (['[acl3c[lexicon] for lexicon in sentiments if lexicon not in exclude]'], {}), '([acl3c[lexicon] for lexicon in sentiments if lexicon not in exclude])\n', (18150, 18220), True, 'import numpy as np\n'), ((18243, 18320), 'numpy.hstack', 'np.hstack', (['[acl[lexicon] for lexicon in sentiments if lexicon not in exclude]'], {}), '([acl[lexicon] for lexicon in sentiments if lexicon not in exclude])\n', (18252, 18320), True, 'import numpy as np\n'), ((18346, 18431), 'numpy.hstack', 'np.hstack', (['[iclr3c[lexicon] for lexicon in sentiments if lexicon not in exclude]'], {}), '([iclr3c[lexicon] for lexicon in sentiments if lexicon not in exclude]\n )\n', (18355, 18431), True, 'import numpy as np\n'), ((18450, 18528), 'numpy.hstack', 'np.hstack', (['[iclr[lexicon] for lexicon in sentiments if lexicon not in exclude]'], {}), '([iclr[lexicon] for lexicon in sentiments if lexicon not in exclude])\n', (18459, 18528), True, 'import numpy as np\n'), ((18558, 18646), 'numpy.hstack', 'np.hstack', (['[imdb_testd[lexicon] for lexicon in sentiments if lexicon not in exclude]'], {}), '([imdb_testd[lexicon] for lexicon in sentiments if lexicon not in\n exclude])\n', (18567, 18646), True, 
'import numpy as np\n'), ((18672, 18760), 'numpy.hstack', 'np.hstack', (['[yelp_testd[lexicon] for lexicon in sentiments if lexicon not in exclude]'], {}), '([yelp_testd[lexicon] for lexicon in sentiments if lexicon not in\n exclude])\n', (18681, 18760), True, 'import numpy as np\n'), ((18789, 18880), 'numpy.hstack', 'np.hstack', (['[semeval_testd[lexicon] for lexicon in sentiments if lexicon not in exclude]'], {}), '([semeval_testd[lexicon] for lexicon in sentiments if lexicon not in\n exclude])\n', (18798, 18880), True, 'import numpy as np\n'), ((18910, 19002), 'numpy.hstack', 'np.hstack', (['[multidom_testd[lexicon] for lexicon in sentiments if lexicon not in exclude]'], {}), '([multidom_testd[lexicon] for lexicon in sentiments if lexicon not in\n exclude])\n', (18919, 19002), True, 'import numpy as np\n'), ((19029, 19118), 'numpy.hstack', 'np.hstack', (['[acl3c_testd[lexicon] for lexicon in sentiments if lexicon not in exclude]'], {}), '([acl3c_testd[lexicon] for lexicon in sentiments if lexicon not in\n exclude])\n', (19038, 19118), True, 'import numpy as np\n'), ((19143, 19230), 'numpy.hstack', 'np.hstack', (['[acl_testd[lexicon] for lexicon in sentiments if lexicon not in exclude]'], {}), '([acl_testd[lexicon] for lexicon in sentiments if lexicon not in\n exclude])\n', (19152, 19230), True, 'import numpy as np\n'), ((19258, 19348), 'numpy.hstack', 'np.hstack', (['[iclr3c_testd[lexicon] for lexicon in sentiments if lexicon not in exclude]'], {}), '([iclr3c_testd[lexicon] for lexicon in sentiments if lexicon not in\n exclude])\n', (19267, 19348), True, 'import numpy as np\n'), ((19374, 19462), 'numpy.hstack', 'np.hstack', (['[iclr_testd[lexicon] for lexicon in sentiments if lexicon not in exclude]'], {}), '([iclr_testd[lexicon] for lexicon in sentiments if lexicon not in\n exclude])\n', (19383, 19462), True, 'import numpy as np\n'), ((20001, 20036), 'numpy.concatenate', 'np.concatenate', (['[neutral, pos, neg]'], {}), '([neutral, pos, neg])\n', (20015, 20036), True, 'import numpy as np\n'), ((1616, 1635), 'pickle.dump', 'pickle.dump', (['obj', 'o'], {}), '(obj, o)\n', (1627, 1635), False, 'import pickle\n'), ((1761, 1775), 'pickle.load', 'pickle.load', (['i'], {}), '(i)\n', (1772, 1775), False, 'import pickle\n'), ((7802, 7837), 'os.path.join', 'os.path.join', (['dir', '"""reviews"""', 'fname'], {}), "(dir, 'reviews', fname)\n", (7814, 7837), False, 'import os\n'), ((7901, 7926), 'ijson.items', 'ijson.items', (['f', '"""reviews"""'], {}), "(f, 'reviews')\n", (7912, 7926), False, 'import ijson\n'), ((8977, 9012), 'os.path.join', 'os.path.join', (['dir', '"""reviews"""', 'fname'], {}), "(dir, 'reviews', fname)\n", (8989, 9012), False, 'import os\n'), ((9076, 9101), 'ijson.items', 'ijson.items', (['f', '"""reviews"""'], {}), "(f, 'reviews')\n", (9087, 9101), False, 'import ijson\n'), ((19817, 19841), 'numpy.where', 'np.where', (["(yelp['y'] == 3)"], {}), "(yelp['y'] == 3)\n", (19825, 19841), True, 'import numpy as np\n'), ((2742, 2776), 'os.path.join', 'os.path.join', (['dir', 'fold', 'datasplit'], {}), '(dir, fold, datasplit)\n', (2754, 2776), False, 'import os\n'), ((2795, 2847), 'os.path.join', 'os.path.join', (['dir', '"""multi-domain-sentiment_indomain"""'], {}), "(dir, 'multi-domain-sentiment_indomain')\n", (2807, 2847), False, 'import os\n'), ((5278, 5294), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (5288, 5294), False, 'import json\n'), ((9933, 9959), 'numpy.array', 'np.array', (['sent_data[token]'], {}), '(sent_data[token])\n', (9941, 9959), True, 'import numpy as 
np\n'), ((10469, 10523), 'sentiments.parse_sentiwordnet', 'parse_sentiwordnet', (["FPATHS['sentiwordnet']"], {'group': '(True)'}), "(FPATHS['sentiwordnet'], group=True)\n", (10487, 10523), False, 'from sentiments import parse_sentiwordnet, parse_mpqa, parse_senticnet, parse_vader, parse_huliu, parse_general_inquirer, parse_vae, FPATHS\n'), ((10555, 10581), 'sentiments.parse_mpqa', 'parse_mpqa', (["FPATHS['mpqa']"], {}), "(FPATHS['mpqa'])\n", (10565, 10581), False, 'from sentiments import parse_sentiwordnet, parse_mpqa, parse_senticnet, parse_vader, parse_huliu, parse_general_inquirer, parse_vae, FPATHS\n'), ((10618, 10654), 'sentiments.parse_senticnet', 'parse_senticnet', (["FPATHS['senticnet']"], {}), "(FPATHS['senticnet'])\n", (10633, 10654), False, 'from sentiments import parse_sentiwordnet, parse_mpqa, parse_senticnet, parse_vader, parse_huliu, parse_general_inquirer, parse_vae, FPATHS\n'), ((10687, 10716), 'sentiments.parse_huliu', 'parse_huliu', (["*FPATHS['huliu']"], {}), "(*FPATHS['huliu'])\n", (10698, 10716), False, 'from sentiments import parse_sentiwordnet, parse_mpqa, parse_senticnet, parse_vader, parse_huliu, parse_general_inquirer, parse_vae, FPATHS\n'), ((10746, 10796), 'sentiments.parse_general_inquirer', 'parse_general_inquirer', (["FPATHS['general_inquirer']"], {}), "(FPATHS['general_inquirer'])\n", (10768, 10796), False, 'from sentiments import parse_sentiwordnet, parse_mpqa, parse_senticnet, parse_vader, parse_huliu, parse_general_inquirer, parse_vae, FPATHS\n'), ((10835, 10880), 'sentiments.parse_vader', 'parse_vader', (["FPATHS['vader']"], {'group_mean': '(True)'}), "(FPATHS['vader'], group_mean=True)\n", (10846, 10880), False, 'from sentiments import parse_sentiwordnet, parse_mpqa, parse_senticnet, parse_vader, parse_huliu, parse_general_inquirer, parse_vae, FPATHS\n'), ((10919, 10971), 'sentiments.parse_vader', 'parse_vader', (["FPATHS['vader']"], {'group_multinomial': '(True)'}), "(FPATHS['vader'], group_multinomial=True)\n", (10930, 10971), False, 'from sentiments import parse_sentiwordnet, parse_mpqa, parse_senticnet, parse_vader, parse_huliu, parse_general_inquirer, parse_vae, FPATHS\n'), ((20112, 20130), 'numpy.isin', 'np.isin', (['y', '[4, 5]'], {}), '(y, [4, 5])\n', (20119, 20130), True, 'import numpy as np\n'), ((21604, 21624), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (21622, 21624), False, 'from sklearn.linear_model import LogisticRegression\n'), ((21726, 21748), 'numpy.mean', 'np.mean', (['(pred == y_dev)'], {}), '(pred == y_dev)\n', (21733, 21748), True, 'import numpy as np\n'), ((2225, 2244), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (2242, 2244), True, 'import numpy as np\n'), ((4618, 4648), 'os.path.join', 'os.path.join', (['dir', 'sent', 'fname'], {}), '(dir, sent, fname)\n', (4630, 4648), False, 'import os\n'), ((6971, 6992), 'numpy.sum', 'np.sum', (['(y == row.sent)'], {}), '(y == row.sent)\n', (6977, 6992), True, 'import numpy as np\n'), ((19889, 19915), 'numpy.isin', 'np.isin', (["yelp['y']", '[1, 2]'], {}), "(yelp['y'], [1, 2])\n", (19896, 19915), True, 'import numpy as np\n'), ((19943, 19969), 'numpy.isin', 'np.isin', (["yelp['y']", '[4, 5]'], {}), "(yelp['y'], [4, 5])\n", (19950, 19969), True, 'import numpy as np\n'), ((4234, 4258), 'os.path.join', 'os.path.join', (['dir', '"""pos"""'], {}), "(dir, 'pos')\n", (4246, 4258), False, 'import os\n'), ((4327, 4351), 'os.path.join', 'os.path.join', (['dir', '"""neg"""'], {}), "(dir, 'neg')\n", (4339, 4351), False, 'import os\n'), ((5383, 
5409), 'numpy.sum', 'np.sum', (["(y == data['stars'])"], {}), "(y == data['stars'])\n", (5389, 5409), True, 'import numpy as np\n'), ((7396, 7424), 'os.path.join', 'os.path.join', (['dir', '"""reviews"""'], {}), "(dir, 'reviews')\n", (7408, 7424), False, 'import os\n'), ((8507, 8535), 'os.path.join', 'os.path.join', (['dir', '"""reviews"""'], {}), "(dir, 'reviews')\n", (8519, 8535), False, 'import os\n'), ((9780, 9804), 'numpy.zeros_like', 'np.zeros_like', (['test_sent'], {}), '(test_sent)\n', (9793, 9804), True, 'import numpy as np\n'), ((3535, 3554), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (3552, 3554), True, 'import numpy as np\n'), ((21024, 21050), 'numpy.unique', 'np.unique', (["train_data['y']"], {}), "(train_data['y'])\n", (21033, 21050), True, 'import numpy as np\n')] |
from ctypes import c_long
from threading import Thread
from comtypes.automation import BSTR
from comtypes.client import CreateObject
from numpy import array
from corems.encapsulation.factory.parameters import default_parameters
from corems.encapsulation.constant import Labels
from corems.mass_spectra.factory.LC_Class import LCMSBase
from corems.mass_spectrum.factory.MassSpectrumClasses import MassSpecProfile
__author__ = "<NAME>"
__date__ = "July 10, 2019"
class ImportLCMSBrukerCompassXtract(Thread):
    '''Threaded importer for Bruker Compass Xtract LC-MS data; builds an LCMSBase object scan by scan.'''
def __init__(self, file_location, auto_process=True):
Thread.__init__(self)
self.lcms = LCMSBase(file_location)
"""Set up the COM object interface"""
self.Bruker_Library = CreateObject("EDAL.MSAnalysis")
self.res = self.Bruker_Library.Open(file_location)
self.check_load_sucess()
self._initial_scan_number = 1
self._final_scan_number = self.get_scans_numbers()
self.file_location = file_location
self.auto_process = auto_process
@property
def initial_scan_number(self):
return self._initial_scan_number
@property
def final_scan_number(self):
return self._final_scan_number
def check_scan(self, scan):
scan_numbers = self.get_scans_numbers()
return scan <= scan_numbers
@initial_scan_number.setter
def initial_scan_number(self, initial_scan_number):
if self.check_scan(initial_scan_number):
self._initial_scan_number = initial_scan_number
else:
raise Exception(
"startscan and finalscan should be less than %s"
% self.get_scans_numbers()
)
@final_scan_number.setter
def final_scan_number(self, final_scan_number):
if self.check_scan(final_scan_number):
self._final_scan_number = final_scan_number
else:
raise Exception(
"startscan and finalscan should be less than %s"
% self.get_scans_numbers()
)
def get_scans_numbers(self):
scan_numbers = self.Bruker_Library.MSSpectrumCollection.Count
return scan_numbers
def get_polarity_mode(self, spectrum):
polarity_symbol = spectrum.Polarity
if polarity_symbol == 0:
return 1
# return "POSITIVE_ION_MODE"
elif polarity_symbol == 1:
return -1
# return "NEGATIVE_ION_MODE"
else:
raise IOError("Could not read mass spectrum polarity mode")
def check_load_sucess(self):
""" 0 if successful; otherwise, see Error Codes """
if self.res == 0:
self.break_it = False
else:
raise ImportError(str(self.res))
def get_bruker_tics(self):
strAnalysisData = BSTR("SumIntensity")
if self.Bruker_Library.HasAnalysisData(strAnalysisData):
tics_array = self.Bruker_Library.GetAnalysisData(strAnalysisData)
tics_array = array(tics_array)
return tics_array
def get_bruker_retention_time(self):
strAnalysisData = BSTR("RetentionTime")
if self.Bruker_Library.HasAnalysisData(strAnalysisData):
tics_array = self.Bruker_Library.GetAnalysisData(strAnalysisData)
tics_array = array(tics_array)
else:
tics_array = [0]
return tics_array
@staticmethod
def get_data(spectra, scan):
"""init_variable_from_get_spectrums
# massList set up later
#retention_time = spectrum.RetentionTime
"""
spectrum = spectra[scan]
is_profile = c_long(1)
masslist = spectrum.GetMassIntensityValues(is_profile)
# index_to_cut = self.find_index_of_mass(1200, masslist[0])
data_dict = {
Labels.mz: array(masslist[0]),
Labels.abundance: array(masslist[1]),
Labels.rp: None,
Labels.s2n: None,
}
return data_dict
def run(self):
'''creates the lcms obj'''
d_parameters = default_parameters(self.file_location)
self._import_mass_spectra(d_parameters)
def _import_mass_spectra(self, d_params):
spectra = self.Bruker_Library.MSSpectrumCollection
list_rt = self.get_bruker_retention_time()
list_Tics = self.get_bruker_tics()
list_scans = list()
for scan_number in range(self.initial_scan_number, self.final_scan_number + 1):
if spectra[scan_number].MSMSStage == 1:
# this label needs to go inside a encapsulation class for consistence
d_params["label"] = Labels.bruker_profile
d_params["polarity"] = self.get_polarity_mode(spectra[scan_number])
d_params["rt"] = list_rt[scan_number - 1]
d_params["scan_number"] = scan_number
list_scans.append(scan_number)
data_dict = self.get_data(spectra, scan_number)
mass_spec = MassSpecProfile(data_dict, d_params, auto_process=self.auto_process)
mass_spec.process_mass_spec()
self.lcms.add_mass_spectrum(mass_spec)
self.lcms.retention_time = list_rt
self.lcms.tic = list_Tics
self.lcms.scans_number = list_scans
# return each_mass_spectrum
def get_lcms_obj(self):
"""get_lc_ms_class method should only be used when using this class as a Thread,
otherwise use the run() method to return the lcms class"""
if self.lcms.get(self._initial_scan_number):
return self.lcms
else:
raise Exception("returning a empty lcms class")
| [
"threading.Thread.__init__",
"comtypes.client.CreateObject",
"ctypes.c_long",
"corems.encapsulation.factory.parameters.default_parameters",
"corems.mass_spectrum.factory.MassSpectrumClasses.MassSpecProfile",
"numpy.array",
"comtypes.automation.BSTR",
"corems.mass_spectra.factory.LC_Class.LCMSBase"
] | [((604, 625), 'threading.Thread.__init__', 'Thread.__init__', (['self'], {}), '(self)\n', (619, 625), False, 'from threading import Thread\n'), ((647, 670), 'corems.mass_spectra.factory.LC_Class.LCMSBase', 'LCMSBase', (['file_location'], {}), '(file_location)\n', (655, 670), False, 'from corems.mass_spectra.factory.LC_Class import LCMSBase\n'), ((748, 779), 'comtypes.client.CreateObject', 'CreateObject', (['"""EDAL.MSAnalysis"""'], {}), "('EDAL.MSAnalysis')\n", (760, 779), False, 'from comtypes.client import CreateObject\n'), ((2854, 2874), 'comtypes.automation.BSTR', 'BSTR', (['"""SumIntensity"""'], {}), "('SumIntensity')\n", (2858, 2874), False, 'from comtypes.automation import BSTR\n'), ((3160, 3181), 'comtypes.automation.BSTR', 'BSTR', (['"""RetentionTime"""'], {}), "('RetentionTime')\n", (3164, 3181), False, 'from comtypes.automation import BSTR\n'), ((3686, 3695), 'ctypes.c_long', 'c_long', (['(1)'], {}), '(1)\n', (3692, 3695), False, 'from ctypes import c_long\n'), ((4118, 4156), 'corems.encapsulation.factory.parameters.default_parameters', 'default_parameters', (['self.file_location'], {}), '(self.file_location)\n', (4136, 4156), False, 'from corems.encapsulation.factory.parameters import default_parameters\n'), ((3046, 3063), 'numpy.array', 'array', (['tics_array'], {}), '(tics_array)\n', (3051, 3063), False, 'from numpy import array\n'), ((3353, 3370), 'numpy.array', 'array', (['tics_array'], {}), '(tics_array)\n', (3358, 3370), False, 'from numpy import array\n'), ((3875, 3893), 'numpy.array', 'array', (['masslist[0]'], {}), '(masslist[0])\n', (3880, 3893), False, 'from numpy import array\n'), ((3925, 3943), 'numpy.array', 'array', (['masslist[1]'], {}), '(masslist[1])\n', (3930, 3943), False, 'from numpy import array\n'), ((5064, 5132), 'corems.mass_spectrum.factory.MassSpectrumClasses.MassSpecProfile', 'MassSpecProfile', (['data_dict', 'd_params'], {'auto_process': 'self.auto_process'}), '(data_dict, d_params, auto_process=self.auto_process)\n', (5079, 5132), False, 'from corems.mass_spectrum.factory.MassSpectrumClasses import MassSpecProfile\n')] |
from app import db
from base import Base
class Unit(db.Model, Base):
"""A model representing a unit (or segment) of text.
This can be either a section or chapter of a document, an act in a play, or
anything that is made of sentences.
Units are hierarchical; one unit can contain many children units.
Attributes:
type (str): the unit type (document, section, etc.).
number (int): a sequencing number (e.g. 2 for chapter 2).
parent (Unit): The ``Unit`` that owns this ``Unit``.
children (list of Units): ``Unit``s that this ``Unit`` owns.
sentences (list of Sentences): ``Sentences`` found in this ``Unit``.
properties (list of Properties): Metadata for this ``Unit``.
name (str): A name for this ``Unit``.
Relationships:
has one: parent
has many: children (Unit), sentences, properties
"""
# Attributes
# We need to redefine ID here for the children relationship
id = db.Column(db.Integer, primary_key=True)
type = db.Column(db.String(64))
number = db.Column(db.Integer)
parent_id = db.Column(db.Integer, db.ForeignKey("unit.id", ondelete='CASCADE'))
name = db.Column(db.String)
project_id = db.Column(db.Integer, db.ForeignKey("project.id", ondelete='CASCADE'))
# Relationships
children = db.relationship(
"Unit",
backref=db.backref("parent", remote_side=[id]))
sentences = db.relationship(
"Sentence",
backref="unit",
cascade="all, delete-orphan",
passive_deletes=True)
properties = db.relationship(
"Property",
backref="unit",
lazy="dynamic")
__mapper_args__ = {
"polymorphic_identity": "unit",
"polymorphic_on": type
}
def property(self, name):
"""Look up a property by its name.
:param str name: The name of the property
"""
return self.properties.filter_by(name=name).first()
def __repr__(self):
"""Return a string representation of a unit, which is its type followed
by its ordering number.
"""
return "<Unit: " + " ".join([str(self.type),
str(self.number)]) + ">"
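# Illustrative sketch of the parent/children relationship described in the
# docstring above. A minimal example assuming a configured Flask-SQLAlchemy
# `db.session`; the helper name and the example values are hypothetical.
def _example_unit_hierarchy(project_id):
    chapter = Unit(number=1, name="Chapter 1", project_id=project_id)
    section = Unit(number=2, name="Section 1.1", project_id=project_id)
    section.parent = chapter              # equivalent to chapter.children.append(section)
    db.session.add_all([chapter, section])
    db.session.commit()
    return chapter.children              # -> [section]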
| [
"app.db.String",
"app.db.backref",
"app.db.Column",
"app.db.ForeignKey",
"app.db.relationship"
] | [((985, 1024), 'app.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (994, 1024), False, 'from app import db\n'), ((1074, 1095), 'app.db.Column', 'db.Column', (['db.Integer'], {}), '(db.Integer)\n', (1083, 1095), False, 'from app import db\n'), ((1191, 1211), 'app.db.Column', 'db.Column', (['db.String'], {}), '(db.String)\n', (1200, 1211), False, 'from app import db\n'), ((1442, 1541), 'app.db.relationship', 'db.relationship', (['"""Sentence"""'], {'backref': '"""unit"""', 'cascade': '"""all, delete-orphan"""', 'passive_deletes': '(True)'}), "('Sentence', backref='unit', cascade='all, delete-orphan',\n passive_deletes=True)\n", (1457, 1541), False, 'from app import db\n'), ((1591, 1650), 'app.db.relationship', 'db.relationship', (['"""Property"""'], {'backref': '"""unit"""', 'lazy': '"""dynamic"""'}), "('Property', backref='unit', lazy='dynamic')\n", (1606, 1650), False, 'from app import db\n'), ((1046, 1059), 'app.db.String', 'db.String', (['(64)'], {}), '(64)\n', (1055, 1059), False, 'from app import db\n'), ((1134, 1178), 'app.db.ForeignKey', 'db.ForeignKey', (['"""unit.id"""'], {'ondelete': '"""CASCADE"""'}), "('unit.id', ondelete='CASCADE')\n", (1147, 1178), False, 'from app import db\n'), ((1251, 1298), 'app.db.ForeignKey', 'db.ForeignKey', (['"""project.id"""'], {'ondelete': '"""CASCADE"""'}), "('project.id', ondelete='CASCADE')\n", (1264, 1298), False, 'from app import db\n'), ((1386, 1424), 'app.db.backref', 'db.backref', (['"""parent"""'], {'remote_side': '[id]'}), "('parent', remote_side=[id])\n", (1396, 1424), False, 'from app import db\n')] |
import numpy as np
import imageio
import os
from shapely.geometry import Polygon
from scipy import ndimage
from skimage.transform import resize
from tensorflow.keras.utils import Sequence
from hds_module.utils import get_file_paths, path_leaf
# Normalize image by pixel depth by making it white on black instead of black on white
def normalize_image(image_file, pixel_depth):
try:
array = imageio.imread(image_file)
except ValueError:
raise
return 1.0 - (array.astype(float))/pixel_depth # (1 - x) will make it white on black
# Restore original image from normalized image
def unnormalize_image(image, pixel_depth):
return (pixel_depth*image).astype(np.uint8)
def is_correct_shape_to_process(shapeDir, correct_shape):
shape = os.path.basename(shapeDir)
if shape == correct_shape:
return True
return False
def replace_last(source_string, replace_what, replace_with):
head, _sep, tail = source_string.rpartition(replace_what)
return head + replace_with + tail
def find_lefmost_index_in_subrect(vertices, x_from, y_from, x_to_excluded, y_to_excluded, vertice_count):
count = 0
    min_x = 1
    y_of_min_x = 0  # y coordinate of the current leftmost vertex
leftmost_index = 0
very_close_range = 0.03
for index, vertice in enumerate(vertices):
if vertice[0] >= x_from and vertice[0] < x_to_excluded and \
vertice[1] >= y_from and vertice[1] < y_to_excluded and \
index < vertice_count:
count += 1
            if vertice[0] < min_x:
                min_x = vertice[0]
                y_of_min_x = vertice[1]  # remember the y of the current leftmost vertex
                leftmost_index = index
    # If another vertex is very close to the leftmost one, pick the lowest one in the image (higher y)
for index, vertice in enumerate(vertices):
if index != leftmost_index and abs(vertice[0] - min_x) <= very_close_range and vertice[1] > y_of_min_x:
min_x = vertice[0]
y_of_min_x = vertice[1]
leftmost_index = index
return (count, leftmost_index)
def find_nearest_index(vertices, x_from, y_from, vertice_count):
"""
Find the index of the vertex that is the closest to specified
coordinate (x_from, y_from).
"""
min_distance_squared = 1000000
nearest_index = 0
x_from *= 1000
y_from *= 1000
for index, vertice in enumerate(vertices):
dx = (vertice[0] * 1000) - x_from
dy = (vertice[1] * 1000) - y_from
square_dist = dx*dx + dy*dy
if square_dist < min_distance_squared and index < vertice_count:
nearest_index = index
min_distance_squared = square_dist
return nearest_index
def select_first_vertice_index(vertices, vertice_count, x_pos, y_pos):
"""
The first vertex will be the vertex that is closest to the anchor point
specified as (x_pos, y_pos)
"""
nearest_index = find_nearest_index(vertices, x_pos, y_pos, vertice_count)
return nearest_index
def sort_vertices_clockwize(vertices, first_vertice_index, vertice_count):
"""
Sort the vertices by navigating clockwise starting with the first vertex
"""
vertices_sorted = np.zeros(vertices.shape)
first_vertice_angle = 0
smaller_vertices = [] # contains an aray of tuple (index, angle) where the angle is smaller to the 1st vertice
bigger_vertices = [] # contains an aray of tuple (index, angle) where the angle is bigger to the 1st vertice
for index, vertice in enumerate(vertices):
if index < vertice_count:
vertice_angle = np.arctan2(0.5 - vertice[1], vertice[0] - 0.5) * 180 / np.pi
if vertice_angle < 0:
vertice_angle = 360 + vertice_angle
if index == first_vertice_index:
first_vertice_angle = vertice_angle
break
for index, vertice in enumerate(vertices):
if index < vertice_count:
vertice_angle = np.arctan2(0.5 - vertice[1], vertice[0] - 0.5) * 180 / np.pi
if vertice_angle < 0:
vertice_angle = 360 + vertice_angle
if index != first_vertice_index:
if vertice_angle < first_vertice_angle:
smaller_vertices.append((index, vertice_angle))
else:
bigger_vertices.append((index, vertice_angle))
# Ordered (clockwise) vertices that we need will be composed of:
# 1. The first vertice
# 2. The smaller vertice from the biggest angle to the smallest angle (0)
    # 3. The bigger vertice from the biggest angle to the smallest angle (first_vertice_angle)
vertices_ordered = [vertices[first_vertice_index]]
smaller_vertices.sort(key=lambda x: x[1], reverse=True)
bigger_vertices.sort( key=lambda x: x[1], reverse=True)
for index, vertice_angle in smaller_vertices:
vertices_ordered.append(vertices[index])
for index, vertice_angle in bigger_vertices:
vertices_ordered.append(vertices[index])
for i in range(0, vertice_count):
vertices_sorted[i] = vertices_ordered[i]
return vertices_sorted
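# Illustrative sketch of the re-ordering helpers above: pick the vertex nearest
# an anchor point, then walk the remaining vertices clockwise around the image
# centre (0.5, 0.5). The square below and the helper name are hypothetical.
def _example_sort_square():
    square = np.array([[0.2, 0.2], [0.8, 0.8], [0.2, 0.8], [0.8, 0.2]])
    first = select_first_vertice_index(square, vertice_count=4, x_pos=0.2, y_pos=1.0)
    return sort_vertices_clockwize(square, first_vertice_index=first, vertice_count=4)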
def load_images_for_shape(root, pixel_depth, user_images,
user_images_labels, user_images_paths,
min_nimages=1,
vertice_count=4,
x_pos=0.2, y_pos=1.0,
verbose=False):
"""
Load images and vertices for a specific user and shape.
"""
if verbose:
print("root for load_images_for_shape: ", root)
image_files = get_file_paths(root)
image_index = 0
for image_file in image_files:
try:
if path_leaf(image_file).startswith('.'): # skip files like .DSStore
continue
# Make sure that the corresponding vertice file exists
vertice_file = replace_last(image_file, "/images/", "/vertices/")
vertice_file = replace_last(vertice_file, ".png", ".csv")
if os.path.exists(vertice_file) == False:
raise FileNotFoundError(vertice_file)
# Load Vertices file as points
vertices = np.loadtxt(vertice_file, delimiter=",") #, max_rows=3)
# Re-order the vertices
first_vertice_index = select_first_vertice_index(vertices, vertice_count=vertice_count, x_pos=x_pos, y_pos=y_pos)
vertices_sorted = sort_vertices_clockwize(vertices, first_vertice_index=first_vertice_index, vertice_count=vertice_count)
vertices = vertices_sorted.ravel()
vertices = vertices.reshape(-1)
vertices = vertices[:vertice_count*2] # *2 because x and y are separate
image_data_all_channels = normalize_image(image_file, pixel_depth)
image_data = image_data_all_channels[:, :, 0]
user_images.append(image_data)
user_images_labels.append(vertices)
image_index += 1
except Exception as error:
print(error)
print('Skipping because of not being able to read: ', image_file)
if image_index < min_nimages:
raise Exception('Fewer images than expected: %d < %d' % (image_index, min_nimages))
# Coordinates translations
def cartesian_to_polar(x, y):
rho = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
return(rho, phi)
def polar_to_cartesian(rho, phi):
x = rho * np.cos(phi)
y = rho * np.sin(phi)
return(x, y)
# Calculate Accuracy
def calculate_IOU(label, pred, nb_vertices=4):
y_polygon = Polygon(label.reshape(nb_vertices, 2))
pred_polygon= Polygon(pred.reshape(nb_vertices, 2))
I = y_polygon.intersection(pred_polygon).area
U = y_polygon.union(pred_polygon).area
IOU = I / U
return IOU
def calculate_Dice(label, pred, nb_vertices=4):
y_polygon = Polygon(label.reshape(nb_vertices, 2))
pred_polygon= Polygon(pred.reshape(nb_vertices, 2))
I = y_polygon.intersection(pred_polygon).area
U = y_polygon.union(pred_polygon).area
dice = 2 * I / (y_polygon.area + pred_polygon.area)
return dice
def calculate_Dice_for_set(Y, Y_pred, nb_vertices):
nb_samples = Y.shape[0]
dice_sum = 0.0
valid_shapes_count = 0
for i in range(nb_samples):
try:
dice = calculate_Dice(Y[i], Y_pred[i], nb_vertices=nb_vertices)
dice_sum += dice
valid_shapes_count += 1
except Exception as e:
print(f"Cannot compute Dice for shape: {i}.")
return dice_sum / valid_shapes_count
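# Illustrative sanity check for the overlap metrics above: two unit squares
# offset by half a side give IOU = 1/3 and Dice = 1/2. The helper name is
# hypothetical.
def _example_overlap_metrics():
    a = np.array([0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0])        # unit square
    b = np.array([0.5, 0.0, 1.5, 0.0, 1.5, 1.0, 0.5, 1.0])        # shifted by 0.5
    return calculate_IOU(a, b), calculate_Dice(a, b)            # -> (0.333..., 0.5)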
class HdsDataGenerator(Sequence):
"""
Generates data for Keras while modifying the labels
as the images are flipped or rotated.
"""
def __init__(self, list_IDs, x_set, y_set, batch_size=32, dim=(70, 70), n_channels=1, n_vertices=4, x_pos=0.0, y_pos=0.65, shuffle=True):
"""
Initialization
"""
self.dim = dim
self.im_size = dim[0]
self.batch_size = batch_size
self.x_set = x_set
self.y_set = y_set
self.list_IDs = list_IDs
self.n_channels = n_channels
self.n_vertices = n_vertices
self.x_pos = x_pos
self.y_pos = y_pos
self.shuffle = shuffle
self.on_epoch_end()
def __len__(self):
"""
Denotes the number of batches per epoch.
"""
return int(np.floor(len(self.list_IDs) / self.batch_size))
def __getitem__(self, index):
"""
Generate one batch of data
"""
# Generate indexes of the batch
indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
# Find list of IDs
list_IDs_temp = [self.list_IDs[k] for k in indexes]
# Generate data
X, y = self.__data_generation(list_IDs_temp)
return X, y
def on_epoch_end(self):
"""
Updates indexes after each epoch
"""
self.indexes = np.arange(len(self.list_IDs))
if self.shuffle == True:
np.random.shuffle(self.indexes)
def rotate_vertices(self, label, angle, padding_is_used, padding):
"""
        Rotate vertices counter-clockwise by `angle` degrees.
"""
nb_label = len(label)
label_new = np.array(label, copy=True)
# rotate each point by angle
vertices = label_new.reshape((-1, 2))
for index, vertice in enumerate(vertices):
x = vertice[0]
y = vertice[1]
dx = x - 0.5
dy = y - 0.5
current_angle = np.arctan2(-dy, dx) * 180 / np.pi # Note Y is reversed
current_length = np.sqrt(dx*dx + dy*dy)
new_angle = current_angle + angle
new_rad_angle = new_angle * np.pi / 180
new_length = current_length
if padding_is_used:
new_length *= (1 - 2*padding/self.im_size)
new_dx, new_dy = polar_to_cartesian(new_length, new_rad_angle)
new_x = 0.5 + new_dx
new_y = 0.5 - new_dy # Note Y is de-reversed
label_new[2*index + 0] = new_x
label_new[2*index + 1] = new_y
return label_new
def shift_vertices(self, label, offset_h_px, offset_v_px):
label_new = np.array(label, copy=True)
vertices = label_new.reshape((-1, 2))
for index, vertice in enumerate(vertices):
x = vertice[0]
y = vertice[1]
new_x = x + offset_h_px/self.im_size
new_y = y + offset_v_px/self.im_size
label_new[2*index + 0] = new_x
label_new[2*index + 1] = new_y
return label_new
def get_margin_values(self, image_data):
image_size = self.im_size
# TODO Perform an analysis on the number of padding pixel.
# All black on the left, top, right or bottom.
# For each image Determine the smallest black margin
im = image_data.reshape((image_size, image_size))
mat_rows = np.all(im == 0, axis=1)
mat_cols = np.all(im == 0, axis=0)
rows = np.argwhere(mat_rows == False)
cols = np.argwhere(mat_cols == False)
top_margin = 0
bottom_margin = 0
left_margin = 0
right_margin = 0
if len(rows) > 0 and len(cols) > 0:
top_margin = rows[0]
bottom_margin = image_size - 1 - rows[-1]
left_margin = cols[0]
right_margin = image_size - 1 - cols[-1]
return top_margin, right_margin, bottom_margin, left_margin
def get_margin(self, image_data):
top_margin, right_margin, bottom_margin, left_margin = self.get_margin_values(image_data)
margin = [0]
if top_margin != 0 or right_margin != 0 or bottom_margin != 0 or left_margin != 0:
margin = min(top_margin + bottom_margin, left_margin + right_margin)
return margin[0]
def rotate_image(self, image, angle):
rotated_image = ndimage.rotate(image, angle, reshape=False)
cleaned_image = self.cleanup_image(rotated_image)
return cleaned_image
def center_image(self, image):
top_margin, right_margin, bottom_margin, left_margin = self.get_margin_values(image)
target_h_margin = (left_margin + right_margin) // 2
target_v_margin = (top_margin + bottom_margin) // 2
offset_h = target_h_margin - left_margin
offset_v = target_v_margin - top_margin
cleaned_image = image
if offset_h != 0 or offset_v != 0:
centered_image = ndimage.shift(image, (offset_v,offset_h))
cleaned_image = self.cleanup_image(centered_image)
return cleaned_image, offset_h, offset_v
def cleanup_image(self, image):
img = image.reshape(self.dim)
img[img > 1] = 1
img[img < 0.1] = 0
return img
def __data_generation(self, list_IDs_temp):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
X = np.empty((self.batch_size, *self.dim, self.n_channels))
y = np.empty((self.batch_size, self.n_vertices * 2))
indices_lr = np.random.choice(1000, self.batch_size, replace=False)
indices_ud = np.random.choice(1000, self.batch_size, replace=False)
indices_ro = np.random.choice(380, self.batch_size, replace=False)
# Generate data
for i, ID in enumerate(list_IDs_temp):
image = np.array(self.x_set[ID], copy=True)
label = np.array(self.y_set[ID], copy=True)
is_modified = False
# Perform modification on both X (the image) and y (the vertices)
if (indices_lr[i] < 450): # Flip Left - Right with probability 450/1000
image = np.fliplr(image)
for m in range(len(label)):
if m % 2 == 0:
label[m] = 1 - label[m]
is_modified = True
if (indices_ud[i] < 450): # Flip Up - Down with probability 450/1000
image = np.flipud(image)
nb_label = len(label)
for m in range(nb_label):
if m % 2 == 1:
label[m] = 1 - label[m]
is_modified = True
# Rotate counter-clockwize
if indices_ro[i] < 360: # Rotate with probability 360/380
angle = indices_ro[i]
margin_before = self.get_margin(image)
image = self.rotate_image(image, angle)
margin_rotated = self.get_margin(image)
padding = int((margin_before - margin_rotated) / 2)
padding_is_used = False
image = image.reshape(self.dim)
if margin_rotated < 10 and padding > 0:
image = np.pad(image, ((padding, padding), (padding, padding)), mode='constant')
image = resize(image, self.dim, anti_aliasing=True)
padding_is_used = True
margin_padded = self.get_margin(image)
label = self.rotate_vertices(label, angle, padding_is_used, padding)
image, offset_h_px, offset_v_px = self.center_image(image)
if offset_h_px != 0 or offset_v_px != 0:
label = self.shift_vertices(label, offset_h_px, offset_v_px)
image = image.reshape((self.im_size, self.im_size, 1))
is_modified = True
if is_modified:
# re-order the vertices in the labels
vertices = label.reshape((self.n_vertices, 2))
first_vertice_index = select_first_vertice_index(vertices, vertice_count=self.n_vertices, x_pos=self.x_pos, y_pos=self.y_pos)
vertices_sorted = sort_vertices_clockwize(vertices, first_vertice_index=first_vertice_index,
vertice_count=self.n_vertices)
label = vertices_sorted.reshape((self.n_vertices * 2,))
X[i,] = image
y[i] = label
return X, y
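# Illustrative sketch of wiring the generator above into Keras training.
# `model`, `X_train` and `Y_train` are assumed to exist elsewhere (70x70x1
# images and 8 flattened vertex coordinates per sample); the helper name is
# hypothetical.
def _example_train(model, X_train, Y_train, epochs=10):
    ids = list(range(len(X_train)))
    generator = HdsDataGenerator(ids, X_train, Y_train, batch_size=32,
                                 dim=(70, 70), n_channels=1, n_vertices=4)
    model.fit(generator, epochs=epochs)
    return model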
| [
"numpy.sqrt",
"numpy.array",
"numpy.loadtxt",
"numpy.arctan2",
"numpy.sin",
"scipy.ndimage.rotate",
"os.path.exists",
"numpy.empty",
"hds_module.utils.path_leaf",
"numpy.flipud",
"numpy.random.choice",
"numpy.fliplr",
"numpy.cos",
"imageio.imread",
"skimage.transform.resize",
"hds_module.utils.get_file_paths",
"scipy.ndimage.shift",
"numpy.zeros",
"numpy.argwhere",
"numpy.pad",
"os.path.basename",
"numpy.all",
"numpy.random.shuffle"
] | [((769, 795), 'os.path.basename', 'os.path.basename', (['shapeDir'], {}), '(shapeDir)\n', (785, 795), False, 'import os\n'), ((3090, 3114), 'numpy.zeros', 'np.zeros', (['vertices.shape'], {}), '(vertices.shape)\n', (3098, 3114), True, 'import numpy as np\n'), ((5495, 5515), 'hds_module.utils.get_file_paths', 'get_file_paths', (['root'], {}), '(root)\n', (5509, 5515), False, 'from hds_module.utils import get_file_paths, path_leaf\n'), ((7209, 7233), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (7216, 7233), True, 'import numpy as np\n'), ((7240, 7256), 'numpy.arctan2', 'np.arctan2', (['y', 'x'], {}), '(y, x)\n', (7250, 7256), True, 'import numpy as np\n'), ((404, 430), 'imageio.imread', 'imageio.imread', (['image_file'], {}), '(image_file)\n', (418, 430), False, 'import imageio\n'), ((7327, 7338), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (7333, 7338), True, 'import numpy as np\n'), ((7353, 7364), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (7359, 7364), True, 'import numpy as np\n'), ((10178, 10204), 'numpy.array', 'np.array', (['label'], {'copy': '(True)'}), '(label, copy=True)\n', (10186, 10204), True, 'import numpy as np\n'), ((11178, 11204), 'numpy.array', 'np.array', (['label'], {'copy': '(True)'}), '(label, copy=True)\n', (11186, 11204), True, 'import numpy as np\n'), ((11912, 11935), 'numpy.all', 'np.all', (['(im == 0)'], {'axis': '(1)'}), '(im == 0, axis=1)\n', (11918, 11935), True, 'import numpy as np\n'), ((11955, 11978), 'numpy.all', 'np.all', (['(im == 0)'], {'axis': '(0)'}), '(im == 0, axis=0)\n', (11961, 11978), True, 'import numpy as np\n'), ((11995, 12025), 'numpy.argwhere', 'np.argwhere', (['(mat_rows == False)'], {}), '(mat_rows == False)\n', (12006, 12025), True, 'import numpy as np\n'), ((12041, 12071), 'numpy.argwhere', 'np.argwhere', (['(mat_cols == False)'], {}), '(mat_cols == False)\n', (12052, 12071), True, 'import numpy as np\n'), ((12884, 12927), 'scipy.ndimage.rotate', 'ndimage.rotate', (['image', 'angle'], {'reshape': '(False)'}), '(image, angle, reshape=False)\n', (12898, 12927), False, 'from scipy import ndimage\n'), ((13964, 14019), 'numpy.empty', 'np.empty', (['(self.batch_size, *self.dim, self.n_channels)'], {}), '((self.batch_size, *self.dim, self.n_channels))\n', (13972, 14019), True, 'import numpy as np\n'), ((14032, 14080), 'numpy.empty', 'np.empty', (['(self.batch_size, self.n_vertices * 2)'], {}), '((self.batch_size, self.n_vertices * 2))\n', (14040, 14080), True, 'import numpy as np\n'), ((14102, 14156), 'numpy.random.choice', 'np.random.choice', (['(1000)', 'self.batch_size'], {'replace': '(False)'}), '(1000, self.batch_size, replace=False)\n', (14118, 14156), True, 'import numpy as np\n'), ((14178, 14232), 'numpy.random.choice', 'np.random.choice', (['(1000)', 'self.batch_size'], {'replace': '(False)'}), '(1000, self.batch_size, replace=False)\n', (14194, 14232), True, 'import numpy as np\n'), ((14254, 14307), 'numpy.random.choice', 'np.random.choice', (['(380)', 'self.batch_size'], {'replace': '(False)'}), '(380, self.batch_size, replace=False)\n', (14270, 14307), True, 'import numpy as np\n'), ((6084, 6123), 'numpy.loadtxt', 'np.loadtxt', (['vertice_file'], {'delimiter': '""","""'}), "(vertice_file, delimiter=',')\n", (6094, 6123), True, 'import numpy as np\n'), ((9946, 9977), 'numpy.random.shuffle', 'np.random.shuffle', (['self.indexes'], {}), '(self.indexes)\n', (9963, 9977), True, 'import numpy as np\n'), ((10559, 10585), 'numpy.sqrt', 'np.sqrt', (['(dx * dx + dy * dy)'], {}), '(dx * dx + dy * 
dy)\n', (10566, 10585), True, 'import numpy as np\n'), ((13482, 13524), 'scipy.ndimage.shift', 'ndimage.shift', (['image', '(offset_v, offset_h)'], {}), '(image, (offset_v, offset_h))\n', (13495, 13524), False, 'from scipy import ndimage\n'), ((14400, 14435), 'numpy.array', 'np.array', (['self.x_set[ID]'], {'copy': '(True)'}), '(self.x_set[ID], copy=True)\n', (14408, 14435), True, 'import numpy as np\n'), ((14456, 14491), 'numpy.array', 'np.array', (['self.y_set[ID]'], {'copy': '(True)'}), '(self.y_set[ID], copy=True)\n', (14464, 14491), True, 'import numpy as np\n'), ((5924, 5952), 'os.path.exists', 'os.path.exists', (['vertice_file'], {}), '(vertice_file)\n', (5938, 5952), False, 'import os\n'), ((14714, 14730), 'numpy.fliplr', 'np.fliplr', (['image'], {}), '(image)\n', (14723, 14730), True, 'import numpy as np\n'), ((15013, 15029), 'numpy.flipud', 'np.flipud', (['image'], {}), '(image)\n', (15022, 15029), True, 'import numpy as np\n'), ((3487, 3533), 'numpy.arctan2', 'np.arctan2', (['(0.5 - vertice[1])', '(vertice[0] - 0.5)'], {}), '(0.5 - vertice[1], vertice[0] - 0.5)\n', (3497, 3533), True, 'import numpy as np\n'), ((3863, 3909), 'numpy.arctan2', 'np.arctan2', (['(0.5 - vertice[1])', '(vertice[0] - 0.5)'], {}), '(0.5 - vertice[1], vertice[0] - 0.5)\n', (3873, 3909), True, 'import numpy as np\n'), ((5600, 5621), 'hds_module.utils.path_leaf', 'path_leaf', (['image_file'], {}), '(image_file)\n', (5609, 5621), False, 'from hds_module.utils import get_file_paths, path_leaf\n'), ((10475, 10494), 'numpy.arctan2', 'np.arctan2', (['(-dy)', 'dx'], {}), '(-dy, dx)\n', (10485, 10494), True, 'import numpy as np\n'), ((15792, 15864), 'numpy.pad', 'np.pad', (['image', '((padding, padding), (padding, padding))'], {'mode': '"""constant"""'}), "(image, ((padding, padding), (padding, padding)), mode='constant')\n", (15798, 15864), True, 'import numpy as np\n'), ((15894, 15937), 'skimage.transform.resize', 'resize', (['image', 'self.dim'], {'anti_aliasing': '(True)'}), '(image, self.dim, anti_aliasing=True)\n', (15900, 15937), False, 'from skimage.transform import resize\n')] |
import streamlit as st
from tweet_analyzer import twitter_actions as ta
from tweet_analyzer import display_elements as display
import login
st.set_page_config(page_title="Tweet Analyzer", page_icon=None, layout="wide")
if 'login' not in st.session_state:
    st.session_state['login'] = False
def ta_main():
if "submit" not in st.session_state:
st.session_state['submit'] = False
# Input Section
st.header("Tweet Analyzer")
username = st.text_input("Enter a Twitter handle name")
submit_button = st.button('Submit')
if submit_button or st.session_state['submit']:
st.session_state['submit'] = True
if not username:
st.warning("Please give an input")
st.stop()
with st.spinner('Reading Tweets.'):
try:
user_id = ta.get_user_id(username)
except:
st.error("User Not Found")
st.stop()
#fetch data
user = ta.user_details(username)
tweets = ta.get_original_tweets(user_id)
list_owned = ta.get_lists_owned(user_id)
list_followed = ta.get_lists_followed(user_id)
list_membership = ta.get_lists_membership(user_id)
display.draw_divider()
            # user profile and engagement chart
c1, c2 = st.columns((1, 3))
display.display_user_profile(user, c1)
st.session_state['days'] = display.display_days_selector(c2)
display.draw_engagement_chart(tweets, c2)
display.draw_divider()
#Lists
col1, col2, col3 = st.columns((3, 3, 3))
display.display_user_lists(list_followed, list_owned, list_membership, col1, col2, col3)
display.draw_divider()
if not tweets.empty:
#hashtags and piechart
c9, c10 = st.columns((2, 2))
with c9:
st.header("Top Hashtags")
top_hashtags = ta.find_hashtags(user_id)
st.table(top_hashtags)
# All Tweets
with c10:
st.header("Tweets")
tweet_container = st.container()
st.dataframe(tweets.loc[:, tweets.columns != 'Engagement'])
else:
st.warning("No Tweets to display")
#OAuth part
if st.session_state['login']:
ta_main() # main app
else:
if st.button("Sign in"):
logged_in = login.authrize()
if logged_in:
print("hi")
st.session_state['login'] = True
#ta_main() | [
"streamlit.table",
"tweet_analyzer.twitter_actions.user_details",
"streamlit.button",
"tweet_analyzer.display_elements.draw_divider",
"tweet_analyzer.twitter_actions.get_lists_owned",
"login.authrize",
"streamlit.text_input",
"streamlit.header",
"streamlit.warning",
"streamlit.stop",
"streamlit.set_page_config",
"streamlit.columns",
"tweet_analyzer.display_elements.display_days_selector",
"tweet_analyzer.display_elements.display_user_lists",
"tweet_analyzer.twitter_actions.get_original_tweets",
"tweet_analyzer.twitter_actions.get_lists_followed",
"streamlit.dataframe",
"streamlit.container",
"tweet_analyzer.display_elements.draw_engagement_chart",
"tweet_analyzer.twitter_actions.get_user_id",
"streamlit.spinner",
"tweet_analyzer.display_elements.display_user_profile",
"streamlit.error",
"tweet_analyzer.twitter_actions.find_hashtags",
"tweet_analyzer.twitter_actions.get_lists_membership"
] | [((140, 218), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Tweet Analyzer"""', 'page_icon': 'None', 'layout': '"""wide"""'}), "(page_title='Tweet Analyzer', page_icon=None, layout='wide')\n", (158, 218), True, 'import streamlit as st\n'), ((420, 447), 'streamlit.header', 'st.header', (['"""Tweet Analyzer"""'], {}), "('Tweet Analyzer')\n", (429, 447), True, 'import streamlit as st\n'), ((463, 507), 'streamlit.text_input', 'st.text_input', (['"""Enter a Twitter handle name"""'], {}), "('Enter a Twitter handle name')\n", (476, 507), True, 'import streamlit as st\n'), ((528, 547), 'streamlit.button', 'st.button', (['"""Submit"""'], {}), "('Submit')\n", (537, 547), True, 'import streamlit as st\n'), ((2574, 2594), 'streamlit.button', 'st.button', (['"""Sign in"""'], {}), "('Sign in')\n", (2583, 2594), True, 'import streamlit as st\n'), ((2616, 2632), 'login.authrize', 'login.authrize', ([], {}), '()\n', (2630, 2632), False, 'import login\n'), ((680, 714), 'streamlit.warning', 'st.warning', (['"""Please give an input"""'], {}), "('Please give an input')\n", (690, 714), True, 'import streamlit as st\n'), ((727, 736), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (734, 736), True, 'import streamlit as st\n'), ((750, 779), 'streamlit.spinner', 'st.spinner', (['"""Reading Tweets."""'], {}), "('Reading Tweets.')\n", (760, 779), True, 'import streamlit as st\n'), ((998, 1023), 'tweet_analyzer.twitter_actions.user_details', 'ta.user_details', (['username'], {}), '(username)\n', (1013, 1023), True, 'from tweet_analyzer import twitter_actions as ta\n'), ((1045, 1076), 'tweet_analyzer.twitter_actions.get_original_tweets', 'ta.get_original_tweets', (['user_id'], {}), '(user_id)\n', (1067, 1076), True, 'from tweet_analyzer import twitter_actions as ta\n'), ((1102, 1129), 'tweet_analyzer.twitter_actions.get_lists_owned', 'ta.get_lists_owned', (['user_id'], {}), '(user_id)\n', (1120, 1129), True, 'from tweet_analyzer import twitter_actions as ta\n'), ((1158, 1188), 'tweet_analyzer.twitter_actions.get_lists_followed', 'ta.get_lists_followed', (['user_id'], {}), '(user_id)\n', (1179, 1188), True, 'from tweet_analyzer import twitter_actions as ta\n'), ((1220, 1252), 'tweet_analyzer.twitter_actions.get_lists_membership', 'ta.get_lists_membership', (['user_id'], {}), '(user_id)\n', (1243, 1252), True, 'from tweet_analyzer import twitter_actions as ta\n'), ((1267, 1289), 'tweet_analyzer.display_elements.draw_divider', 'display.draw_divider', ([], {}), '()\n', (1287, 1289), True, 'from tweet_analyzer import display_elements as display\n'), ((1358, 1376), 'streamlit.columns', 'st.columns', (['(1, 3)'], {}), '((1, 3))\n', (1368, 1376), True, 'import streamlit as st\n'), ((1389, 1427), 'tweet_analyzer.display_elements.display_user_profile', 'display.display_user_profile', (['user', 'c1'], {}), '(user, c1)\n', (1417, 1427), True, 'from tweet_analyzer import display_elements as display\n'), ((1467, 1500), 'tweet_analyzer.display_elements.display_days_selector', 'display.display_days_selector', (['c2'], {}), '(c2)\n', (1496, 1500), True, 'from tweet_analyzer import display_elements as display\n'), ((1513, 1554), 'tweet_analyzer.display_elements.draw_engagement_chart', 'display.draw_engagement_chart', (['tweets', 'c2'], {}), '(tweets, c2)\n', (1542, 1554), True, 'from tweet_analyzer import display_elements as display\n'), ((1581, 1603), 'tweet_analyzer.display_elements.draw_divider', 'display.draw_divider', ([], {}), '()\n', (1601, 1603), True, 'from tweet_analyzer import display_elements 
as display\n'), ((1655, 1676), 'streamlit.columns', 'st.columns', (['(3, 3, 3)'], {}), '((3, 3, 3))\n', (1665, 1676), True, 'import streamlit as st\n'), ((1689, 1781), 'tweet_analyzer.display_elements.display_user_lists', 'display.display_user_lists', (['list_followed', 'list_owned', 'list_membership', 'col1', 'col2', 'col3'], {}), '(list_followed, list_owned, list_membership, col1,\n col2, col3)\n', (1715, 1781), True, 'from tweet_analyzer import display_elements as display\n'), ((1792, 1814), 'tweet_analyzer.display_elements.draw_divider', 'display.draw_divider', ([], {}), '()\n', (1812, 1814), True, 'from tweet_analyzer import display_elements as display\n'), ((824, 848), 'tweet_analyzer.twitter_actions.get_user_id', 'ta.get_user_id', (['username'], {}), '(username)\n', (838, 848), True, 'from tweet_analyzer import twitter_actions as ta\n'), ((1923, 1941), 'streamlit.columns', 'st.columns', (['(2, 2)'], {}), '((2, 2))\n', (1933, 1941), True, 'import streamlit as st\n'), ((2446, 2480), 'streamlit.warning', 'st.warning', (['"""No Tweets to display"""'], {}), "('No Tweets to display')\n", (2456, 2480), True, 'import streamlit as st\n'), ((885, 911), 'streamlit.error', 'st.error', (['"""User Not Found"""'], {}), "('User Not Found')\n", (893, 911), True, 'import streamlit as st\n'), ((928, 937), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (935, 937), True, 'import streamlit as st\n'), ((1987, 2012), 'streamlit.header', 'st.header', (['"""Top Hashtags"""'], {}), "('Top Hashtags')\n", (1996, 2012), True, 'import streamlit as st\n'), ((2048, 2073), 'tweet_analyzer.twitter_actions.find_hashtags', 'ta.find_hashtags', (['user_id'], {}), '(user_id)\n', (2064, 2073), True, 'from tweet_analyzer import twitter_actions as ta\n'), ((2094, 2116), 'streamlit.table', 'st.table', (['top_hashtags'], {}), '(top_hashtags)\n', (2102, 2116), True, 'import streamlit as st\n'), ((2222, 2241), 'streamlit.header', 'st.header', (['"""Tweets"""'], {}), "('Tweets')\n", (2231, 2241), True, 'import streamlit as st\n'), ((2280, 2294), 'streamlit.container', 'st.container', ([], {}), '()\n', (2292, 2294), True, 'import streamlit as st\n'), ((2315, 2374), 'streamlit.dataframe', 'st.dataframe', (["tweets.loc[:, tweets.columns != 'Engagement']"], {}), "(tweets.loc[:, tweets.columns != 'Engagement'])\n", (2327, 2374), True, 'import streamlit as st\n')] |
# Generated by Django 3.2.4 on 2021-06-13 15:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('publications', '0005_rename_published_publication_is_published'),
]
operations = [
migrations.AlterField(
model_name='publication',
name='category',
field=models.CharField(choices=[('GL', 'General'), ('CS', 'Closures'), ('VS', 'Vacancies'), ('CN', 'Consultations'), ('JS', 'Judicial Sales'), ('NR', 'Newsletters'), ('TR', 'Tenders')], default='CS', max_length=50),
),
migrations.CreateModel(
name='Department',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL)),
('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['name'],
'abstract': False,
},
),
]
| [
"django.db.models.ForeignKey",
"django.db.models.BigAutoField",
"django.db.models.DateTimeField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField"
] | [((227, 284), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (258, 284), False, 'from django.db import migrations, models\n'), ((504, 720), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('GL', 'General'), ('CS', 'Closures'), ('VS', 'Vacancies'), ('CN',\n 'Consultations'), ('JS', 'Judicial Sales'), ('NR', 'Newsletters'), (\n 'TR', 'Tenders')]", 'default': '"""CS"""', 'max_length': '(50)'}), "(choices=[('GL', 'General'), ('CS', 'Closures'), ('VS',\n 'Vacancies'), ('CN', 'Consultations'), ('JS', 'Judicial Sales'), ('NR',\n 'Newsletters'), ('TR', 'Tenders')], default='CS', max_length=50)\n", (520, 720), False, 'from django.db import migrations, models\n'), ((832, 928), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (851, 928), False, 'from django.db import migrations, models\n'), ((952, 997), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'unique': '(True)'}), '(max_length=255, unique=True)\n', (968, 997), False, 'from django.db import migrations, models\n'), ((1031, 1070), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1051, 1070), False, 'from django.db import migrations, models\n'), ((1104, 1139), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (1124, 1139), False, 'from django.db import migrations, models\n'), ((1173, 1287), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""+"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='+', to=settings.AUTH_USER_MODEL)\n", (1190, 1287), False, 'from django.db import migrations, models\n'), ((1316, 1412), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (1333, 1412), False, 'from django.db import migrations, models\n')] |
#!/usr/bin/env python3
"""Perform a full roundtrip of messages from thin-edge to Azure IoT.
We publish with thin-edge to Azure IoT; then route the messages to a
Service Bus Queue; from there we retrieve the messages via a REST
Interface and compare them with what we have sent in the beginning.
Alternatively, we can use the Azure SDK to access the IoT Hub directly.
When this script is called you need to be already connected to Azure.
Call example:
$ ./roundtrip_local_to_az.py -a 10 -p sas_policy -b thinedgebus -q testqueue
Set Env:
- SASKEYQUEUE : Shared Access Key to the service bus queue
Alternatively:
./ci/roundtrip_local_to_az.py eventhub
Set Env:
- AZUREENDPOINT : Endpoint description string copied from the Azure UI
- AZUREEVENTHUB : Name of the IoT Hub
"""
import argparse
import base64
import json
import json.decoder
import hashlib
import hmac
import os
import sys
import subprocess
import time
import urllib.parse
import requests
import logging
from azure.eventhub import EventHubConsumerClient
import datetime
debug = False
if debug:
logging.basicConfig(level=logging.INFO)
else:
logging.basicConfig()
logger = logging.getLogger("roundtrip")
logger.setLevel(level=logging.INFO)
def publish_az(amount, topic, key):
"""Publish to Azure topic"""
logger.info(f"Publishing messages to topic {topic}")
for i in range(amount):
message = f'{{"{key}": {i} }}'
cmd = ["/usr/bin/tedge", "mqtt", "pub", topic, message]
try:
ret = subprocess.run(cmd, check=True)
except subprocess.CalledProcessError as e:
logger.error("Failed to publish %s", e)
sys.exit(1)
ret.check_returncode()
logger.info("Published message: %s" % message)
time.sleep(0.05)
def get_auth_token(sb_name, eh_name, sas_name, sas_value):
"""Create authentication token
    Analogous to:
https://docs.microsoft.com/en-us/rest/api/eventhub/generate-sas-token
"""
newuri = urllib.parse.quote_plus(
f"https://{sb_name}.servicebus.windows.net/{eh_name}"
)
sas_enc = sas_value.encode("utf-8")
expiry = str(int(time.time()) + 10000)
str_sign = newuri + "\n" + expiry
signed_hmac = hmac.HMAC(sas_enc, str_sign.encode("utf-8"), hashlib.sha256)
signature = urllib.parse.quote(base64.b64encode(signed_hmac.digest()))
ret = {
"sb_name": sb_name,
"eh_name": eh_name,
"token": f"<PASSWORD> sr={newuri}&sig={signature}&se={expiry}&skn={sas_name}",
}
return ret
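# Illustrative sketch: building the Authorization header consumed by
# retrieve_queue_az() below, reusing the example names from the module
# docstring; SASKEYQUEUE must hold the shared access key and the helper
# name is hypothetical.
def _example_auth_header():
    token = get_auth_token("thinedgebus", "testqueue", "sas_policy",
                           os.environ["SASKEYQUEUE"])["token"]
    return {"Authorization": token}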
def retrieve_queue_az(
sas_policy_name, service_bus_name, queue_name, amount, verbose, key
):
"""Get the published messages back from a service bus queue
Probably soon obsolete.
"""
try:
sas_policy_primary_key = os.environ["SASKEYQUEUE"]
except KeyError:
print("Error environment variable SASKEYQUEUE not set")
sys.exit(1)
tokendict = get_auth_token(
service_bus_name, queue_name, sas_policy_name, sas_policy_primary_key
)
token = tokendict["token"]
if verbose:
print("Token", token)
# See also:
# https://docs.microsoft.com/en-us/rest/api/servicebus/receive-and-delete-message-destructive-read
url = (
f"https://{service_bus_name}.servicebus.windows.net/{queue_name}/messages/head"
)
print(f"Downloading mesages from {url}")
headers = {
"Accept": "application/json",
"Content-Type": "application/json;charset=utf-8",
"Authorization": token,
}
messages = []
while True:
try:
req = requests.delete(url, headers=headers)
except requests.exceptions.ConnectionError as e:
print("Exception: ", e)
print("Connection error: We wait for some seconds and then continue ...")
time.sleep(10)
continue
if req.status_code == 200:
text = req.text
props = json.loads(req.headers["BrokerProperties"])
number = props["SequenceNumber"]
queuetime = props["EnqueuedTimeUtc"]
try:
data = json.loads(text)
value = data[key]
except json.decoder.JSONDecodeError:
print("Json Parsing Error: ", text)
value = None
except KeyError:
print("Parsing Error: ", text)
value = None
print(
f'Got message {number} from {queuetime} message is "{text}" value: "{value}"'
)
messages.append(value)
elif req.status_code == 204:
print("Queue Empty: HTTP status: ", req.status_code)
break
elif req.status_code == 401:
print("Token Expired: HTTP status: ", req.status_code)
raise SystemError("Token Expired")
else:
print(req)
print("Error HTTP status: ", req.status_code)
raise SystemError("HTTP Error")
if messages == list(range(amount)):
print("Validation PASSED")
return True
else:
print("Validation FAILED")
return False
class EventHub:
"""Class to host all properties and access functions for an IoT Hub/ Eventhub
Needs https://pypi.org/project/azure-eventhub
Docs:
https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-devguide-messages-read-builtin
https://azuresdkdocs.blob.core.windows.net/$web/python/azure-eventhub/latest/azure.eventhub.html
https://azuresdkdocs.blob.core.windows.net/$web/python/azure-eventhub/latest/azure.eventhub.html#azure.eventhub.EventData
"""
def __init__(self, message_key, amount):
try:
connection_str = os.environ["AZUREENDPOINT"]
except KeyError:
logger.error("Error environment variable AZUREENDPOINT not set")
sys.exit(1)
try:
eventhub_name = os.environ["AZUREEVENTHUB"]
except KeyError:
logger.error("Error environment variable AZUREEVENTHUB not set")
sys.exit(1)
self.message_key = message_key
self.amount = amount
consumer_group = "$Default"
timeout = 10 # 10s : minimum timeout
self.client = EventHubConsumerClient.from_connection_string(
connection_str,
consumer_group,
eventhub_name=eventhub_name,
idle_timeout=timeout,
)
self.received_messages = []
def on_error(self, partition_context, event):
logger.error(
"Received Error from partition {}".format(partition_context.partition_id)
)
logger.error(f"Event: {event}")
def on_event(self, partition_context, event):
logger.debug(
"Received event from partition {}".format(partition_context.partition_id)
)
logger.debug(f"Event: {event}")
        if event is None:
logger.debug("Timeout: Exiting event loop ... ")
self.client.close()
return
partition_context.update_checkpoint(event)
jevent = event.body_as_json()
message = jevent.get(self.message_key)
        if message is not None:
logger.info("Matched key: %s" % message)
self.received_messages.append(message)
else:
logger.info("Not matched key: %s" % jevent)
def read_from_hub(self, start):
"""Read data from the event hub
Possible values for start:
start = "-1" : Read all messages
start = "@latest" : Read only the latest messages
        start = datetime.datetime.now(tz=datetime.timezone.utc) : use the current date
When no messages are received the client.receive will return.
"""
with self.client:
self.client.receive(
on_event=self.on_event,
on_error=self.on_error,
starting_position=start,
max_wait_time=10,
)
logger.info("Exiting event loop")
def validate(self):
"""Validate the messages that we have received against"""
if self.received_messages == list(range(self.amount)):
print("Validation PASSED")
return True
else:
print("Validation FAILED")
return False
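# Illustrative sketch: drain every retained message instead of only new ones,
# using the start="-1" option described in the read_from_hub docstring. The
# helper name is hypothetical.
def _example_drain_hub(message_key, amount):
    hub = EventHub(message_key=message_key, amount=amount)
    hub.read_from_hub("-1")
    return hub.validate()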
def main():
"""Main entry point"""
parser = argparse.ArgumentParser()
parser.add_argument("method", choices=["eventhub", "servicebus"])
parser.add_argument("-b", "--bus", help="Service Bus Name")
parser.add_argument("-p", "--policy", help="SAS Policy Name")
parser.add_argument("-q", "--queue", help="Queue Name")
parser.add_argument(
"-a", "--amount", help="Amount of messages to send", type=int, default=20
)
parser.add_argument("-v", "--verbose", help="Verbosity", action="count", default=0)
args = parser.parse_args()
amount = args.amount
sas_policy_name = args.policy
service_bus_name = args.bus
queue_name = args.queue
verbose = args.verbose
method = args.method
if method == "servicebus":
try:
os.environ["SASKEYQUEUE"]
except KeyError:
print("Error environment variable SASKEYQUEUE not set")
sys.exit(1)
try:
device = os.environ["C8YDEVICE"]
except KeyError:
print("Error environment variable C8YDEVICE not set")
sys.exit(1)
# Send roundtrip via the tedge mapper
mqtt_topic = "tedge/measurements"
# In case that we want to avoid the azure mapper
# mqtt_topic = "az/messages/events/"
message_key = "thin-edge-azure-roundtrip-" + device
if method == "eventhub":
eh = EventHub(message_key=message_key, amount=amount)
start = datetime.datetime.now(tz=datetime.timezone.utc)
publish_az(amount, mqtt_topic, message_key)
eh.read_from_hub(start)
if not eh.validate():
sys.exit(1)
elif method == "servicebus":
publish_az(amount, mqtt_topic, message_key)
result = retrieve_queue_az(
sas_policy_name, service_bus_name, queue_name, amount, verbose, message_key
)
if not result:
sys.exit(1)
if __name__ == "__main__":
main()
| [
"logging.getLogger",
"logging.basicConfig",
"json.loads",
"argparse.ArgumentParser",
"subprocess.run",
"time.sleep",
"requests.delete",
"datetime.datetime.now",
"sys.exit",
"azure.eventhub.EventHubConsumerClient.from_connection_string",
"time.time",
"urllib.parse.quote_plus"
] | [((1164, 1194), 'logging.getLogger', 'logging.getLogger', (['"""roundtrip"""'], {}), "('roundtrip')\n", (1181, 1194), False, 'import logging\n'), ((1082, 1121), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (1101, 1121), False, 'import logging\n'), ((1132, 1153), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (1151, 1153), False, 'import logging\n'), ((2006, 2084), 'urllib.parse.quote_plus', 'urllib.parse.quote_plus', (['f"""https://{sb_name}.servicebus.windows.net/{eh_name}"""'], {}), "(f'https://{sb_name}.servicebus.windows.net/{eh_name}')\n", (2029, 2084), False, 'import urllib\n'), ((8400, 8425), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8423, 8425), False, 'import argparse\n'), ((1779, 1795), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (1789, 1795), False, 'import time\n'), ((6273, 6405), 'azure.eventhub.EventHubConsumerClient.from_connection_string', 'EventHubConsumerClient.from_connection_string', (['connection_str', 'consumer_group'], {'eventhub_name': 'eventhub_name', 'idle_timeout': 'timeout'}), '(connection_str,\n consumer_group, eventhub_name=eventhub_name, idle_timeout=timeout)\n', (6318, 6405), False, 'from azure.eventhub import EventHubConsumerClient\n'), ((9786, 9833), 'datetime.datetime.now', 'datetime.datetime.now', ([], {'tz': 'datetime.timezone.utc'}), '(tz=datetime.timezone.utc)\n', (9807, 9833), False, 'import datetime\n'), ((1525, 1556), 'subprocess.run', 'subprocess.run', (['cmd'], {'check': '(True)'}), '(cmd, check=True)\n', (1539, 1556), False, 'import subprocess\n'), ((2912, 2923), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2920, 2923), False, 'import sys\n'), ((3610, 3647), 'requests.delete', 'requests.delete', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (3625, 3647), False, 'import requests\n'), ((3959, 4002), 'json.loads', 'json.loads', (["req.headers['BrokerProperties']"], {}), "(req.headers['BrokerProperties'])\n", (3969, 4002), False, 'import json\n'), ((9432, 9443), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (9440, 9443), False, 'import sys\n'), ((9962, 9973), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (9970, 9973), False, 'import sys\n'), ((1672, 1683), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1680, 1683), False, 'import sys\n'), ((2160, 2171), 'time.time', 'time.time', ([], {}), '()\n', (2169, 2171), False, 'import time\n'), ((3839, 3853), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (3849, 3853), False, 'import time\n'), ((4138, 4154), 'json.loads', 'json.loads', (['text'], {}), '(text)\n', (4148, 4154), False, 'import json\n'), ((5891, 5902), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5899, 5902), False, 'import sys\n'), ((6087, 6098), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6095, 6098), False, 'import sys\n'), ((9278, 9289), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (9286, 9289), False, 'import sys\n'), ((10232, 10243), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (10240, 10243), False, 'import sys\n')] |
from pprint import pprint
from collections import Counter  # used by the offer analysis block below
from api import ERMS
if __name__ == '__main__':
import json
with open('../../config/settings/secret_settings.json', 'r') as infile:
settings = json.load(infile)
erms = ERMS(settings['ERMS_API_URL'])
pprint(erms.fetch_objects(object_id=(574, 575, 603)))
# for idt in erms.fetch_endpoint(erms.EP_IDENTITY):
# pprint(idt)
# ref_keys = Counter()
# for per in erms.fetch_objects(erms.CLS_PERSON):
# for key in per['refs'].keys():
# ref_keys[key] += 1
# pprint(per)
# print(ref_keys)
# for org in erms.fetch_objects(erms.CLS_ORGANIZATION):
# for key, val in org.get('vals', {}).items():
# if len(val) > 1:
# print(key, val)
# pprint(erms.fetch_objects(cls=erms.CLS_PLATFORM))
# pprint(erms.fetch_endpoint(erms.EP_CONSORTIUM))
# pprint(erms.fetch_endpoint(erms.EP_CONSORTIUM_MEMBER))
# pprint(erms.fetch_endpoint(erms.EP_ACQUISITION))
# pprint(erms.fetch_objects(object_id=(670, 665)))
if False:
offers = Counter()
for rec in erms.fetch_endpoint(erms.EP_PROCUREMENT):
offer_id = rec['offer']
offers[offer_id] += 1
if offer_id:
offer = erms.fetch_endpoint(erms.EP_OFFER, object_id=offer_id)[0]
splits = erms.fetch_endpoint(erms.EP_OFFER_SPLIT, offer=offer_id)
for ppy in offer['price_per_year']:
year_splits = [split for split in splits if split['year'] == ppy['year']]
print("Year: {}, Price offer: {} {}; Price splits: {}+{} ({})+({})".format(
ppy['year'],
ppy['amount'],
ppy['currency'],
sum(ys['participation']['amount'] for ys in year_splits
if ys['participation']['amount']),
sum(ys['subsidy']['amount'] for ys in year_splits
if 'subsidy' in ys and ys['subsidy'] and ys['subsidy']['amount']),
', '.join(str(ys['participation']['amount']) for ys in year_splits),
', '.join(str(ys['subsidy']['amount']) for ys in year_splits
if 'subsidy' in ys and ys['subsidy']),
))
print(offers)
| [
"json.load",
"api.ERMS"
] | [((217, 247), 'api.ERMS', 'ERMS', (["settings['ERMS_API_URL']"], {}), "(settings['ERMS_API_URL'])\n", (221, 247), False, 'from api import ERMS\n'), ((188, 205), 'json.load', 'json.load', (['infile'], {}), '(infile)\n', (197, 205), False, 'import json\n')] |
import socket
import errno
import struct
try:
import thread
except ImportError: # for python3
import _thread as thread
import select
from raft.bijectivemap import create_map
def start(port, uuid):
tcp = TCP(port, uuid)
tcp.start()
return tcp
class TCP(object):
greeting = 'howdy!'
def __init__(self, port, uuid):
self.port = port
self.connections = {}
self.c2u, self.u2c = create_map()
self.data = {}
self.unknowns = set()
self.a2c, self.c2a = create_map()
self.uuid = uuid
def __contains__(self, uuid):
return uuid in self.u2c
def start(self):
self.running = True
self.srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.srv.bind(("", self.port))
thread.start_new_thread(self.accept, ())
def connect(self, addr):
if addr in self.a2c:
return
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
conn.connect(addr)
except socket.error as e:
if e.errno == errno.ECONNREFUSED:
return None
raise
conn.setblocking(0)
self.a2c[addr] = conn
self.add_unknown(conn)
return True
def accept(self):
self.srv.listen(5)
while self.running:
try:
conn, addr = self.srv.accept()
self.a2c[addr] = conn
conn.setblocking(0)
self.add_unknown(conn)
except socket.error as e:
if e.errno == errno.ECONNABORTED:
continue
def recv(self, timeout=0):
try:
recv, _, _ = select.select(list(self.c2u.keys()), [], [], timeout)
except select.error as e:
if e.args[0] == errno.EINTR:
return
raise
rcvd = []
for conn in recv:
msgs = self.read_conn_msg(conn)
if msgs is not None:
uuid = self.c2u[conn]
rcvd.append((uuid, msgs))
self.read_unknowns()
return rcvd
def add_unknown(self, conn):
self.unknowns.add(conn)
msgsize = struct.pack("!I", len(self.greeting) +
len(self.uuid) + struct.calcsize("!I"))
try:
sent = 0
msg = msgsize + self.greeting + self.uuid
while sent < len(msg):
sent += conn.send(msg[sent:], socket.MSG_DONTWAIT)
except socket.error:
self.unknowns.remove(conn)
return
self.read_unknowns()
def read_unknowns(self):
recv, _, _ = select.select(list(self.unknowns), [], [], 0)
for conn in recv:
uuid = self.read_conn_msg(conn, 1)
if uuid:
uuid = uuid[0]
assert uuid.startswith(self.greeting)
uuid = uuid[len(self.greeting):]
self.u2c[uuid] = conn
self.unknowns.remove(conn)
def read_conn_msg(self, conn, msgnum=0):
try:
data = conn.recv(4092)
except socket.error:
self.remconn(conn)
return
        if not data:  # empty read: peer closed the connection
self.remconn(conn)
if conn in self.c2u:
del self.c2u[conn]
if conn in self.data:
del self.data[conn]
return
buff = self.data.get(conn, '')
buff += data
self.data[conn] = buff
msgs = []
for count, msg in enumerate(self.extract_msg(conn)):
msgs.append(msg)
if msgnum and count >= msgnum:
return msgs
return msgs
def extract_msg(self, conn):
buff = self.data[conn]
isize = struct.calcsize("!I")
if len(buff) < isize:
# can't even get the length of the next message
return
while len(buff) > isize:
size = struct.unpack("!I", buff[0:isize])[0]
if len(buff) < size:
return
msg = buff[isize:size]
buff = buff[size:]
self.data[conn] = buff
yield msg
def send(self, msg, uuid):
msgsize = struct.pack("!I", len(msg) + struct.calcsize("!I"))
try:
conn = self.u2c[uuid]
except KeyError:
return
try:
sent = 0
msg = msgsize + msg
while sent < len(msg):
sent += conn.send(msg[sent:], socket.MSG_DONTWAIT)
except socket.error as e:
if e.errno == errno.EPIPE:
addr = conn.getsockname()
self.connect(addr)
def remconn(self, conn):
if conn in self.c2u:
del self.c2u[conn]
if conn in self.c2a:
del self.c2a[conn]
if conn in self.data:
del self.data[conn]
def shutdown(self):
try:
self.running = False
self.srv.shutdown(socket.SHUT_RDWR)
            self.srv.close()
except:
pass
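# Illustrative sketch of the transport API above; the port, address and uuids
# are placeholders and a peer is assumed to be listening on 127.0.0.1:7002.
def _example_usage():
    transport = start(7001, 'node-a')            # listen locally
    transport.connect(('127.0.0.1', 7002))       # dial the peer and exchange greetings
    for uuid, msgs in transport.recv(timeout=1.0):
        print(uuid, msgs)
    if 'node-b' in transport:
        transport.send('ping', 'node-b')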
| [
"struct.calcsize",
"socket.socket",
"struct.unpack",
"raft.bijectivemap.create_map",
"_thread.start_new_thread"
] | [((431, 443), 'raft.bijectivemap.create_map', 'create_map', ([], {}), '()\n', (441, 443), False, 'from raft.bijectivemap import create_map\n'), ((526, 538), 'raft.bijectivemap.create_map', 'create_map', ([], {}), '()\n', (536, 538), False, 'from raft.bijectivemap import create_map\n'), ((700, 749), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (713, 749), False, 'import socket\n'), ((868, 908), '_thread.start_new_thread', 'thread.start_new_thread', (['self.accept', '()'], {}), '(self.accept, ())\n', (891, 908), True, 'import _thread as thread\n'), ((1002, 1051), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (1015, 1051), False, 'import socket\n'), ((3853, 3874), 'struct.calcsize', 'struct.calcsize', (['"""!I"""'], {}), "('!I')\n", (3868, 3874), False, 'import struct\n'), ((2363, 2384), 'struct.calcsize', 'struct.calcsize', (['"""!I"""'], {}), "('!I')\n", (2378, 2384), False, 'import struct\n'), ((4036, 4070), 'struct.unpack', 'struct.unpack', (['"""!I"""', 'buff[0:isize]'], {}), "('!I', buff[0:isize])\n", (4049, 4070), False, 'import struct\n'), ((4332, 4353), 'struct.calcsize', 'struct.calcsize', (['"""!I"""'], {}), "('!I')\n", (4347, 4353), False, 'import struct\n')] |
import math, random
def generatePolygon( ctrX, ctrY, aveRadius, irregularity, spikeyness, numVerts ):
irregularity = clip( irregularity, 0,1 ) * 2*math.pi / numVerts
spikeyness = clip( spikeyness, 0,1 ) * aveRadius
# generate n angle steps
angleSteps = []
lower = (2*math.pi / numVerts) - irregularity
upper = (2*math.pi / numVerts) + irregularity
    angle_sum = 0
    for i in range(numVerts):
        tmp = random.uniform(lower, upper)
        angleSteps.append( tmp )
        angle_sum = angle_sum + tmp
    # normalize the steps so that point 0 and point n+1 are the same
    k = angle_sum / (2*math.pi)
for i in range(numVerts):
angleSteps[i] = angleSteps[i] / k
# now generate the points
points = []
angle = random.uniform(0, 2*math.pi)
for i in range(numVerts):
r_i = clip( random.gauss(aveRadius, spikeyness), 0, 2*aveRadius )
x = ctrX + r_i*math.cos(angle)
y = ctrY + r_i*math.sin(angle)
points.append( (int(x),int(y)) )
angle = angle + angleSteps[i]
return points
def clip(x, min, max):
if( min > max ): return x
elif( x < min ): return min
elif( x > max ): return max
else: return x
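# Illustrative example run; the centre, radius and shape parameters below are
# arbitrary values chosen only to demonstrate the call.
if __name__ == "__main__":
    verts = generatePolygon(ctrX=250, ctrY=250, aveRadius=100,
                            irregularity=0.35, spikeyness=0.2, numVerts=16)
    print(verts)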
| [
"math.cos",
"random.uniform",
"math.sin",
"random.gauss"
] | [((743, 773), 'random.uniform', 'random.uniform', (['(0)', '(2 * math.pi)'], {}), '(0, 2 * math.pi)\n', (757, 773), False, 'import math, random\n'), ((430, 458), 'random.uniform', 'random.uniform', (['lower', 'upper'], {}), '(lower, upper)\n', (444, 458), False, 'import math, random\n'), ((822, 857), 'random.gauss', 'random.gauss', (['aveRadius', 'spikeyness'], {}), '(aveRadius, spikeyness)\n', (834, 857), False, 'import math, random\n'), ((899, 914), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (907, 914), False, 'import math, random\n'), ((938, 953), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (946, 953), False, 'import math, random\n')] |
from django.db import models
from .base import CommonBase, ContactDetailBase, LinkBase, IdentifierBase, OtherNameBase
from .jurisdiction import Jurisdiction
class Organization(CommonBase):
id = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=300)
image = models.URLField(blank=True)
parent = models.ForeignKey('self', related_name='children', null=True)
jurisdiction = models.ForeignKey(Jurisdiction, related_name='organizations')
classification = models.CharField(max_length=100, blank=True) # enum
chamber = models.CharField(max_length=10, blank=True)
founding_date = models.CharField(max_length=10, blank=True) # YYYY[-MM[-DD]]
dissolution_date = models.CharField(max_length=10, blank=True) # YYYY[-MM[-DD]]
class OrganizationIdentifier(IdentifierBase):
organization = models.ForeignKey(Organization, related_name='identifiers')
class OrganizationName(OtherNameBase):
organization = models.ForeignKey(Organization, related_name='other_names')
class OrganizationContactDetail(ContactDetailBase):
organization = models.ForeignKey(Organization, related_name='contact_details')
class OrganizationLink(LinkBase):
organization = models.ForeignKey(Organization, related_name='links')
class OrganizationSource(LinkBase):
organization = models.ForeignKey(Organization, related_name='sources')
class Post(CommonBase):
id = models.CharField(max_length=100, primary_key=True)
label = models.CharField(max_length=300, blank=True)
role = models.CharField(max_length=300, blank=True)
organization = models.ForeignKey(Organization, related_name='posts')
start_date = models.CharField(max_length=10) # YYYY[-MM[-DD]]
end_date = models.CharField(max_length=10) # YYYY[-MM[-DD]]
class PostContactDetail(ContactDetailBase):
post = models.ForeignKey(Post, related_name='contact_details')
class PostLinks(LinkBase):
post = models.ForeignKey(Post, related_name='links')
class Person(CommonBase):
id = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=300)
image = models.URLField(blank=True)
gender = models.CharField(max_length=100)
summary = models.CharField(max_length=500)
national_identity = models.CharField(max_length=300)
biography = models.TextField()
birth_date = models.CharField(max_length=10, blank=True) # YYYY[-MM[-DD]]
death_date = models.CharField(max_length=10, blank=True) # YYYY[-MM[-DD]]
class PersonIdentifier(IdentifierBase):
person = models.ForeignKey(Person, related_name='identifiers')
class PersonName(OtherNameBase):
person = models.ForeignKey(Person, related_name='other_names')
class PersonContactDetail(ContactDetailBase):
person = models.ForeignKey(Person, related_name='contact_details')
class PersonLink(LinkBase):
person = models.ForeignKey(Person, related_name='links')
class PersonSource(LinkBase):
person = models.ForeignKey(Person, related_name='sources')
class Membership(CommonBase):
id = models.CharField(max_length=100, primary_key=True)
organization = models.ForeignKey(Organization, related_name='memberships')
person = models.ForeignKey(Person, related_name='memberships')
post = models.ForeignKey(Post, related_name='posts', null=True)
on_behalf_of = models.ForeignKey(Organization, related_name='memberships_on_behalf_of',
null=True)
label = models.CharField(max_length=300, blank=True)
role = models.CharField(max_length=300, blank=True)
start_date = models.CharField(max_length=10, blank=True) # YYYY[-MM[-DD]]
end_date = models.CharField(max_length=10, blank=True) # YYYY[-MM[-DD]]
class MembershipContactDetail(ContactDetailBase):
membership = models.ForeignKey(Membership, related_name='contact_details')
class MembershipLink(LinkBase):
membership = models.ForeignKey(Membership, related_name='links')
| [
"django.db.models.URLField",
"django.db.models.TextField",
"django.db.models.CharField",
"django.db.models.ForeignKey"
] | [((200, 250), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'primary_key': '(True)'}), '(max_length=100, primary_key=True)\n', (216, 250), False, 'from django.db import models\n'), ((262, 294), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(300)'}), '(max_length=300)\n', (278, 294), False, 'from django.db import models\n'), ((307, 334), 'django.db.models.URLField', 'models.URLField', ([], {'blank': '(True)'}), '(blank=True)\n', (322, 334), False, 'from django.db import models\n'), ((348, 409), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""self"""'], {'related_name': '"""children"""', 'null': '(True)'}), "('self', related_name='children', null=True)\n", (365, 409), False, 'from django.db import models\n'), ((429, 490), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Jurisdiction'], {'related_name': '"""organizations"""'}), "(Jurisdiction, related_name='organizations')\n", (446, 490), False, 'from django.db import models\n'), ((512, 556), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)'}), '(max_length=100, blank=True)\n', (528, 556), False, 'from django.db import models\n'), ((580, 623), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'blank': '(True)'}), '(max_length=10, blank=True)\n', (596, 623), False, 'from django.db import models\n'), ((644, 687), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'blank': '(True)'}), '(max_length=10, blank=True)\n', (660, 687), False, 'from django.db import models\n'), ((732, 775), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'blank': '(True)'}), '(max_length=10, blank=True)\n', (748, 775), False, 'from django.db import models\n'), ((861, 920), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Organization'], {'related_name': '"""identifiers"""'}), "(Organization, related_name='identifiers')\n", (878, 920), False, 'from django.db import models\n'), ((981, 1040), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Organization'], {'related_name': '"""other_names"""'}), "(Organization, related_name='other_names')\n", (998, 1040), False, 'from django.db import models\n'), ((1114, 1177), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Organization'], {'related_name': '"""contact_details"""'}), "(Organization, related_name='contact_details')\n", (1131, 1177), False, 'from django.db import models\n'), ((1233, 1286), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Organization'], {'related_name': '"""links"""'}), "(Organization, related_name='links')\n", (1250, 1286), False, 'from django.db import models\n'), ((1344, 1399), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Organization'], {'related_name': '"""sources"""'}), "(Organization, related_name='sources')\n", (1361, 1399), False, 'from django.db import models\n'), ((1435, 1485), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'primary_key': '(True)'}), '(max_length=100, primary_key=True)\n', (1451, 1485), False, 'from django.db import models\n'), ((1498, 1542), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(300)', 'blank': '(True)'}), '(max_length=300, blank=True)\n', (1514, 1542), False, 'from django.db import models\n'), ((1554, 1598), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(300)', 'blank': '(True)'}), '(max_length=300, blank=True)\n', (1570, 1598), False, 'from 
django.db import models\n'), ((1618, 1671), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Organization'], {'related_name': '"""posts"""'}), "(Organization, related_name='posts')\n", (1635, 1671), False, 'from django.db import models\n'), ((1689, 1720), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)'}), '(max_length=10)\n', (1705, 1720), False, 'from django.db import models\n'), ((1756, 1787), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)'}), '(max_length=10)\n', (1772, 1787), False, 'from django.db import models\n'), ((1865, 1920), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Post'], {'related_name': '"""contact_details"""'}), "(Post, related_name='contact_details')\n", (1882, 1920), False, 'from django.db import models\n'), ((1961, 2006), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Post'], {'related_name': '"""links"""'}), "(Post, related_name='links')\n", (1978, 2006), False, 'from django.db import models\n'), ((2044, 2094), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'primary_key': '(True)'}), '(max_length=100, primary_key=True)\n', (2060, 2094), False, 'from django.db import models\n'), ((2106, 2138), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(300)'}), '(max_length=300)\n', (2122, 2138), False, 'from django.db import models\n'), ((2151, 2178), 'django.db.models.URLField', 'models.URLField', ([], {'blank': '(True)'}), '(blank=True)\n', (2166, 2178), False, 'from django.db import models\n'), ((2192, 2224), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (2208, 2224), False, 'from django.db import models\n'), ((2239, 2271), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)'}), '(max_length=500)\n', (2255, 2271), False, 'from django.db import models\n'), ((2296, 2328), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(300)'}), '(max_length=300)\n', (2312, 2328), False, 'from django.db import models\n'), ((2345, 2363), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (2361, 2363), False, 'from django.db import models\n'), ((2381, 2424), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'blank': '(True)'}), '(max_length=10, blank=True)\n', (2397, 2424), False, 'from django.db import models\n'), ((2462, 2505), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'blank': '(True)'}), '(max_length=10, blank=True)\n', (2478, 2505), False, 'from django.db import models\n'), ((2581, 2634), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Person'], {'related_name': '"""identifiers"""'}), "(Person, related_name='identifiers')\n", (2598, 2634), False, 'from django.db import models\n'), ((2683, 2736), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Person'], {'related_name': '"""other_names"""'}), "(Person, related_name='other_names')\n", (2700, 2736), False, 'from django.db import models\n'), ((2798, 2855), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Person'], {'related_name': '"""contact_details"""'}), "(Person, related_name='contact_details')\n", (2815, 2855), False, 'from django.db import models\n'), ((2899, 2946), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Person'], {'related_name': '"""links"""'}), "(Person, related_name='links')\n", (2916, 2946), False, 'from django.db import models\n'), ((2992, 3041), 'django.db.models.ForeignKey', 
'models.ForeignKey', (['Person'], {'related_name': '"""sources"""'}), "(Person, related_name='sources')\n", (3009, 3041), False, 'from django.db import models\n'), ((3083, 3133), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'primary_key': '(True)'}), '(max_length=100, primary_key=True)\n', (3099, 3133), False, 'from django.db import models\n'), ((3153, 3212), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Organization'], {'related_name': '"""memberships"""'}), "(Organization, related_name='memberships')\n", (3170, 3212), False, 'from django.db import models\n'), ((3226, 3279), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Person'], {'related_name': '"""memberships"""'}), "(Person, related_name='memberships')\n", (3243, 3279), False, 'from django.db import models\n'), ((3291, 3347), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Post'], {'related_name': '"""posts"""', 'null': '(True)'}), "(Post, related_name='posts', null=True)\n", (3308, 3347), False, 'from django.db import models\n'), ((3367, 3454), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Organization'], {'related_name': '"""memberships_on_behalf_of"""', 'null': '(True)'}), "(Organization, related_name='memberships_on_behalf_of',\n null=True)\n", (3384, 3454), False, 'from django.db import models\n'), ((3500, 3544), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(300)', 'blank': '(True)'}), '(max_length=300, blank=True)\n', (3516, 3544), False, 'from django.db import models\n'), ((3556, 3600), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(300)', 'blank': '(True)'}), '(max_length=300, blank=True)\n', (3572, 3600), False, 'from django.db import models\n'), ((3618, 3661), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'blank': '(True)'}), '(max_length=10, blank=True)\n', (3634, 3661), False, 'from django.db import models\n'), ((3697, 3740), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'blank': '(True)'}), '(max_length=10, blank=True)\n', (3713, 3740), False, 'from django.db import models\n'), ((3832, 3893), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Membership'], {'related_name': '"""contact_details"""'}), "(Membership, related_name='contact_details')\n", (3849, 3893), False, 'from django.db import models\n'), ((3945, 3996), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Membership'], {'related_name': '"""links"""'}), "(Membership, related_name='links')\n", (3962, 3996), False, 'from django.db import models\n')] |
# Copyright 2019 Nokia
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
import json
from cmframework.server.cmprocessor import CMProcessor
from cmframework.apis.cmerror import CMError
from cmframework.server.cmcsn import CMCSN
from cmframework.server import cmchangemonitor
class CMProcessorAutomaticActivationTest(unittest.TestCase):
@staticmethod
def backend_get_property(key):
if key == 'cloud.cmframework':
return '{"csn": {"global": 101, "nodes": {"node-a": 99, "node-b": 100, "node-c": 101}}}'
elif key == 'foo':
return '{"foo": "bar"}'
@mock.patch('cmframework.utils.cmflagfile.os')
@mock.patch('cmframework.server.cmprocessor.logging')
def test_set_property_automatic_activation_disabled(self, mock_logging, mock_flagfile_os):
mock_backend = mock.MagicMock()
mock_backend.get_property = CMProcessorAutomaticActivationTest.backend_get_property
mock_validator = mock.MagicMock()
mock_activator = mock.MagicMock()
mock_changemonitor = mock.MagicMock()
mock_activationstate_handler = mock.MagicMock()
mock_snapshot_handler = mock.MagicMock()
mock_flagfile_os.path = mock.MagicMock()
mock_flagfile_os.path.exists = mock.MagicMock()
mock_flagfile_os.path.exists.return_value = True
processor = CMProcessor(mock_backend, mock_validator, mock_activator,
mock_changemonitor, mock_activationstate_handler,
mock_snapshot_handler)
processor.set_property('foo', 'barbar')
mock_validator.validate_set.assert_called_once_with({'foo': 'barbar'})
mock_backend.set_properties.called_once_with({'foo': 'barbar'})
mock_activator.add_work.assert_not_called()
@mock.patch('cmframework.utils.cmflagfile.os')
@mock.patch('cmframework.server.cmprocessor.logging')
def test_delete_property_automatic_activation_disabled(self, mock_logging, mock_flagfile_os):
mock_backend = mock.MagicMock()
mock_backend.get_property = CMProcessorAutomaticActivationTest.backend_get_property
mock_validator = mock.MagicMock()
mock_activator = mock.MagicMock()
mock_changemonitor = mock.MagicMock()
mock_activationstate_handler = mock.MagicMock()
mock_snapshot_handler = mock.MagicMock()
mock_flagfile_os.path = mock.MagicMock()
mock_flagfile_os.path.exists = mock.MagicMock()
mock_flagfile_os.path.exists.return_value = True
processor = CMProcessor(mock_backend, mock_validator, mock_activator,
mock_changemonitor, mock_activationstate_handler,
mock_snapshot_handler)
processor.delete_property('foo')
mock_validator.validate_delete.assert_called_once_with(['foo'])
mock_backend.delete_property.assert_called_once_with('foo')
mock_activator.add_work.assert_not_called()
@mock.patch('cmframework.utils.cmflagfile.os')
@mock.patch('cmframework.server.cmprocessor.logging')
@mock.patch('cmframework.server.cmprocessor.cmactivationwork.CMActivationWork')
@mock.patch.object(CMProcessor, '_clear_reboot_requests')
@mock.patch.object(CMCSN, 'sync_node_csn')
def test_activate_node_automatic_activation_disabled(self,
mock_sync_node_csn,
mock_clear_reboot_requests,
mock_work,
mock_logging,
mock_flagfile_os):
mock_backend = mock.MagicMock()
mock_backend.get_property = CMProcessorAutomaticActivationTest.backend_get_property
mock_validator = mock.MagicMock()
mock_activator = mock.MagicMock()
mock_changemonitor = mock.MagicMock()
mock_activationstate_handler = mock.MagicMock()
mock_snapshot_handler = mock.MagicMock()
mock_work.return_value.get_result = mock.MagicMock()
mock_work.return_value.get_result.return_value = None
mock_flagfile_os.path = mock.MagicMock()
mock_flagfile_os.path.exists = mock.MagicMock()
mock_flagfile_os.path.exists.return_value = True
mock_work.OPER_NODE = mock.MagicMock()
processor = CMProcessor(mock_backend, mock_validator, mock_activator,
mock_changemonitor, mock_activationstate_handler,
mock_snapshot_handler)
self.assertEqual(processor.activate_node('node-b'), False)
mock_clear_reboot_requests.assert_not_called()
mock_work.assert_not_called()
mock_activator.add_work.assert_not_called()
mock_sync_node_csn.assert_not_called()
if __name__ == '__main__':
unittest.main()
| [
"mock.patch",
"mock.patch.object",
"cmframework.server.cmprocessor.CMProcessor",
"unittest.main",
"mock.MagicMock"
] | [((1125, 1170), 'mock.patch', 'mock.patch', (['"""cmframework.utils.cmflagfile.os"""'], {}), "('cmframework.utils.cmflagfile.os')\n", (1135, 1170), False, 'import mock\n'), ((1176, 1228), 'mock.patch', 'mock.patch', (['"""cmframework.server.cmprocessor.logging"""'], {}), "('cmframework.server.cmprocessor.logging')\n", (1186, 1228), False, 'import mock\n'), ((2330, 2375), 'mock.patch', 'mock.patch', (['"""cmframework.utils.cmflagfile.os"""'], {}), "('cmframework.utils.cmflagfile.os')\n", (2340, 2375), False, 'import mock\n'), ((2381, 2433), 'mock.patch', 'mock.patch', (['"""cmframework.server.cmprocessor.logging"""'], {}), "('cmframework.server.cmprocessor.logging')\n", (2391, 2433), False, 'import mock\n'), ((3520, 3565), 'mock.patch', 'mock.patch', (['"""cmframework.utils.cmflagfile.os"""'], {}), "('cmframework.utils.cmflagfile.os')\n", (3530, 3565), False, 'import mock\n'), ((3571, 3623), 'mock.patch', 'mock.patch', (['"""cmframework.server.cmprocessor.logging"""'], {}), "('cmframework.server.cmprocessor.logging')\n", (3581, 3623), False, 'import mock\n'), ((3629, 3707), 'mock.patch', 'mock.patch', (['"""cmframework.server.cmprocessor.cmactivationwork.CMActivationWork"""'], {}), "('cmframework.server.cmprocessor.cmactivationwork.CMActivationWork')\n", (3639, 3707), False, 'import mock\n'), ((3713, 3769), 'mock.patch.object', 'mock.patch.object', (['CMProcessor', '"""_clear_reboot_requests"""'], {}), "(CMProcessor, '_clear_reboot_requests')\n", (3730, 3769), False, 'import mock\n'), ((3775, 3816), 'mock.patch.object', 'mock.patch.object', (['CMCSN', '"""sync_node_csn"""'], {}), "(CMCSN, 'sync_node_csn')\n", (3792, 3816), False, 'import mock\n'), ((5469, 5484), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5482, 5484), False, 'import unittest\n'), ((1347, 1363), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (1361, 1363), False, 'import mock\n'), ((1482, 1498), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (1496, 1498), False, 'import mock\n'), ((1524, 1540), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (1538, 1540), False, 'import mock\n'), ((1570, 1586), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (1584, 1586), False, 'import mock\n'), ((1626, 1642), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (1640, 1642), False, 'import mock\n'), ((1675, 1691), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (1689, 1691), False, 'import mock\n'), ((1725, 1741), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (1739, 1741), False, 'import mock\n'), ((1781, 1797), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (1795, 1797), False, 'import mock\n'), ((1876, 2010), 'cmframework.server.cmprocessor.CMProcessor', 'CMProcessor', (['mock_backend', 'mock_validator', 'mock_activator', 'mock_changemonitor', 'mock_activationstate_handler', 'mock_snapshot_handler'], {}), '(mock_backend, mock_validator, mock_activator,\n mock_changemonitor, mock_activationstate_handler, mock_snapshot_handler)\n', (1887, 2010), False, 'from cmframework.server.cmprocessor import CMProcessor\n'), ((2555, 2571), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (2569, 2571), False, 'import mock\n'), ((2690, 2706), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (2704, 2706), False, 'import mock\n'), ((2732, 2748), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (2746, 2748), False, 'import mock\n'), ((2778, 2794), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (2792, 2794), False, 'import mock\n'), ((2834, 2850), 'mock.MagicMock', 
'mock.MagicMock', ([], {}), '()\n', (2848, 2850), False, 'import mock\n'), ((2883, 2899), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (2897, 2899), False, 'import mock\n'), ((2933, 2949), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (2947, 2949), False, 'import mock\n'), ((2989, 3005), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (3003, 3005), False, 'import mock\n'), ((3084, 3218), 'cmframework.server.cmprocessor.CMProcessor', 'CMProcessor', (['mock_backend', 'mock_validator', 'mock_activator', 'mock_changemonitor', 'mock_activationstate_handler', 'mock_snapshot_handler'], {}), '(mock_backend, mock_validator, mock_activator,\n mock_changemonitor, mock_activationstate_handler, mock_snapshot_handler)\n', (3095, 3218), False, 'from cmframework.server.cmprocessor import CMProcessor\n'), ((4280, 4296), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (4294, 4296), False, 'import mock\n'), ((4415, 4431), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (4429, 4431), False, 'import mock\n'), ((4457, 4473), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (4471, 4473), False, 'import mock\n'), ((4503, 4519), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (4517, 4519), False, 'import mock\n'), ((4559, 4575), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (4573, 4575), False, 'import mock\n'), ((4608, 4624), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (4622, 4624), False, 'import mock\n'), ((4670, 4686), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (4684, 4686), False, 'import mock\n'), ((4782, 4798), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (4796, 4798), False, 'import mock\n'), ((4838, 4854), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (4852, 4854), False, 'import mock\n'), ((4943, 4959), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (4957, 4959), False, 'import mock\n'), ((4981, 5115), 'cmframework.server.cmprocessor.CMProcessor', 'CMProcessor', (['mock_backend', 'mock_validator', 'mock_activator', 'mock_changemonitor', 'mock_activationstate_handler', 'mock_snapshot_handler'], {}), '(mock_backend, mock_validator, mock_activator,\n mock_changemonitor, mock_activationstate_handler, mock_snapshot_handler)\n', (4992, 5115), False, 'from cmframework.server.cmprocessor import CMProcessor\n')] |
# GIL: Global Interpreter Lock (CPython)
# One Python thread corresponds to one C-level thread.
# The GIL ensures that only one thread at a time executes Python bytecode on a CPU.
# The GIL switches threads based on the number of bytecodes executed and on time slices
# (the length of the time slice); threads cannot be mapped onto multiple CPUs, and the
# GIL is released voluntarily when an I/O operation is encountered.
# For I/O-bound work, multithreading and multiprocessing perform about the same.
# Share state between threads via shared variables or a Queue.
# pipy: libraries/interpreters that remove the GIL
# The GIL is released based on executed bytecode count and time slices, and is also released on I/O.
# Differences of the GIL between Python 2 and Python 3
# Difference between Python and CPython
# +=============inspect the bytecode with dis===================
# import dis
#
#
# def add(a):
# a = a + 1
# return a
#
#
# print(dis.dis(add))
# +=============inspect the bytecode with dis===================
total = 0
def add():
global total
for i in range(1000000):
total += 1
def des():
global total
for i in range(1000000):
total -= 1
import threading
thread1 = threading.Thread(target=add)
thread2 = threading.Thread(target=des)
thread1.start()
thread2.start()
thread1.join()
thread2.join()
print(total)
| [
"threading.Thread"
] | [((690, 718), 'threading.Thread', 'threading.Thread', ([], {'target': 'add'}), '(target=add)\n', (706, 718), False, 'import threading\n'), ((729, 757), 'threading.Thread', 'threading.Thread', ([], {'target': 'des'}), '(target=des)\n', (745, 757), False, 'import threading\n')] |
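The comments in the sample above point out that the GIL switches threads between bytecodes, which is exactly why the unsynchronized total += 1 / total -= 1 loops rarely finish at 0. Below is a hedged sketch of the usual fix: guard the shared counter with threading.Lock so the read-modify-write becomes atomic (the structure mirrors the sample; the lock variable is an addition of this sketch).
# Same add/des race as above, but serialized with a Lock.
import threading
total = 0
lock = threading.Lock()
def add():
    global total
    for i in range(1000000):
        with lock:
            total += 1
def des():
    global total
    for i in range(1000000):
        with lock:
            total -= 1
thread1 = threading.Thread(target=add)
thread2 = threading.Thread(target=des)
thread1.start()
thread2.start()
thread1.join()
thread2.join()
print(total)  # now deterministically 0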
import argparse
import json
import random
import cargan
###############################################################################
# Partition
###############################################################################
def dataset(name):
"""Partition datasets and save partitions to disk"""
# Handle vctk
if name == 'vctk':
return vctk()
# Get the data indices
length = len(cargan.data.Dataset(name))
indices = list(range(length))
# Deterministically shuffle indices
random.seed(cargan.RANDOM_SEED)
if name != 'musdb':
random.shuffle(indices)
# Daps is eval-only
if name == 'daps':
return {'train': [], 'valid': [], 'test': indices}
# Get split locations
left = int(cargan.SPLIT_SIZE_TRAIN * length)
right = left + int(cargan.SPLIT_SIZE_VALID * length)
# Shuffle musdb test set
test = indices[right:]
# Split into partitions
return {
'train': indices[:left],
'valid': indices[left:right],
'test': test}
def vctk():
"""Partition the vctk dataset"""
# Get list of speakers
dataset = cargan.data.Dataset('vctk')
speakers = dataset.speakers()
# Shuffle speakers
random.seed(cargan.RANDOM_SEED)
random.shuffle(speakers)
# Select test speakers
test_speakers = speakers[:8]
# Get test partition indices
test_indices = [
i for i in range(len(dataset))
if dataset.speaker(i) in test_speakers]
# Shuffle so adjacent indices aren't always same speaker
random.shuffle(test_indices)
# Get residual indices
indices = list(range(len(dataset)))
indices = [i for i in indices if i not in test_indices]
random.shuffle(indices)
# Split into train/valid
split = int(.95 * len(indices))
train_indices = indices[:split]
valid_indices = indices[split:]
return {
'train': train_indices,
'valid': valid_indices,
'test': test_indices}
###############################################################################
# Entry point
###############################################################################
def main(datasets, overwrite):
"""Partition datasets and save to disk"""
for name in datasets:
# Check if partition already exists
file = cargan.PARTITION_DIR / f'{name}.json'
if file.exists():
if not overwrite:
print(f'Not overwriting existing partition {file}')
continue
# Save to disk
with open(file, 'w') as file:
json.dump(dataset(name), file, ensure_ascii=False, indent=4)
def parse_args():
"""Parse command-line arguments"""
parser = argparse.ArgumentParser(description='Partition datasets')
parser.add_argument(
'--datasets',
nargs='+',
help='The datasets to partition')
parser.add_argument(
'--overwrite',
action='store_true',
help='Whether to overwrite existing partitions')
return parser.parse_args()
if __name__ == '__main__':
main(**vars(parse_args()))
| [
"argparse.ArgumentParser",
"random.shuffle",
"cargan.data.Dataset",
"random.seed"
] | [((527, 558), 'random.seed', 'random.seed', (['cargan.RANDOM_SEED'], {}), '(cargan.RANDOM_SEED)\n', (538, 558), False, 'import random\n'), ((1143, 1170), 'cargan.data.Dataset', 'cargan.data.Dataset', (['"""vctk"""'], {}), "('vctk')\n", (1162, 1170), False, 'import cargan\n'), ((1233, 1264), 'random.seed', 'random.seed', (['cargan.RANDOM_SEED'], {}), '(cargan.RANDOM_SEED)\n', (1244, 1264), False, 'import random\n'), ((1269, 1293), 'random.shuffle', 'random.shuffle', (['speakers'], {}), '(speakers)\n', (1283, 1293), False, 'import random\n'), ((1575, 1603), 'random.shuffle', 'random.shuffle', (['test_indices'], {}), '(test_indices)\n', (1589, 1603), False, 'import random\n'), ((1740, 1763), 'random.shuffle', 'random.shuffle', (['indices'], {}), '(indices)\n', (1754, 1763), False, 'import random\n'), ((2749, 2806), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Partition datasets"""'}), "(description='Partition datasets')\n", (2772, 2806), False, 'import argparse\n'), ((421, 446), 'cargan.data.Dataset', 'cargan.data.Dataset', (['name'], {}), '(name)\n', (440, 446), False, 'import cargan\n'), ((591, 614), 'random.shuffle', 'random.shuffle', (['indices'], {}), '(indices)\n', (605, 614), False, 'import random\n')] |
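The partition code above depends on seeding the RNG before shuffling so that the train/valid/test split is reproducible across runs. A tiny standalone sketch of that idea (the seed value and split fraction are illustrative, not cargan's actual constants):
# Deterministic shuffle: the same seed always yields the same permutation and split.
import random
indices = list(range(10))
random.seed(1234)  # stand-in for cargan.RANDOM_SEED
random.shuffle(indices)
split = int(0.8 * len(indices))
print(indices[:split], indices[split:])  # identical output on every run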
import unittest
from wtforms import Form
from goslinks.forms import SlugField
class SlugFieldTestCase(unittest.TestCase):
class F(Form):
slug = SlugField()
def test_slug_field(self):
form = self.F(slug="foo")
self.assertTrue(form.validate())
def test_slug_field_cleans_special_characters(self):
form = self.F(slug="foo!bar")
self.assertTrue(form.validate())
self.assertEqual(form.data["slug"], "foobar")
| [
"goslinks.forms.SlugField"
] | [((160, 171), 'goslinks.forms.SlugField', 'SlugField', ([], {}), '()\n', (169, 171), False, 'from goslinks.forms import SlugField\n')] |
## module getCCDphoto
''' CCD_save(getphoto(),'name').
Take a photo with charge-coupled device.
You should change the screen magnification first.
The photo will save as 'name.png' in folder like "2021-07-22 19'05'57".
laplacian(imgPath) and tenengrad(imgPath)
will return the clarity of photo
'''
import win32gui, win32ui, win32con
import os
import cv2
from datetime import datetime
from ctypes import windll
from PIL import Image
def getphoto():
global im
hwnd = win32gui.FindWindow(None, 'Blackfly S BFS-U3-51S5C 19370957')
# Prevent windows from being minimized
win32gui.SendMessage(hwnd, win32con.WM_SYSCOMMAND, win32con.SC_RESTORE, 0)
# Change the line below depending on whether you want the whole window
# or just the client area.
left, top, right, bot = win32gui.GetClientRect(hwnd)
#left, top, right, bot = win32gui.GetWindowRect(hwnd)
# Change screen magnification
mag = 1
w = int((right - left)*mag)
h = int((bot - top)*mag)
hwndDC = win32gui.GetWindowDC(hwnd)
mfcDC = win32ui.CreateDCFromHandle(hwndDC)
saveDC = mfcDC.CreateCompatibleDC()
saveBitMap = win32ui.CreateBitmap()
saveBitMap.CreateCompatibleBitmap(mfcDC, w, h)
saveDC.SelectObject(saveBitMap)
# Change the line below depending on whether you want the whole window
# or just the client area.
result = windll.user32.PrintWindow(hwnd, saveDC.GetSafeHdc(), 1)
#result = windll.user32.PrintWindow(hwnd, saveDC.GetSafeHdc(), 0)
#if result == 1: print('screenshot succeeded')
bmpinfo = saveBitMap.GetInfo()
bmpstr = saveBitMap.GetBitmapBits(True)
im = Image.frombuffer(
'RGB',
(bmpinfo['bmWidth'], bmpinfo['bmHeight']),
bmpstr, 'raw', 'BGRX', 0, 1)
win32gui.DeleteObject(saveBitMap.GetHandle())
saveDC.DeleteDC()
mfcDC.DeleteDC()
win32gui.ReleaseDC(hwnd, hwndDC)
return result
def CCD_dir_save(result, name):
global im
if result == 1:
im.save(str(name) + ".png")
def CCD_save(result, imgPath):
global im
if result == 1:
im.save(imgPath)
def laplacian(imgPath):
image_ori = cv2.imread(imgPath)
#image = image_ori[83:1042,17:1163]
img2gray = cv2.cvtColor(image_ori, cv2.COLOR_BGR2GRAY)
imageVar = cv2.Laplacian(img2gray, cv2.CV_64F).var()
return imageVar
def tenengrad(imgPath):
image = cv2.imread(imgPath)
x = cv2.Sobel(image,cv2.CV_16S,1,0,ksize=3)
y = cv2.Sobel(image,cv2.CV_16S,0,1,ksize=3)
absX = cv2.convertScaleAbs(x)
absY = cv2.convertScaleAbs(y)
dst = cv2.addWeighted(absX,0.5,absY,0.5,0).var()
print(f'tenengrad:{dst:.1f}')
return dst
if __name__ == '__main__':
def main():
folder_name = str(datetime.now())[:19].replace(':', "'")
photo_name = 'test'
os.mkdir(folder_name)
imgPath = './'+ folder_name +'/'+ photo_name + '.png'
CCD_save(getphoto(), imgPath)
print('laplacian:', laplacian(imgPath), '\ntenengrad:', tenengrad(imgPath))
main()
| [
"cv2.Laplacian",
"win32gui.SendMessage",
"cv2.convertScaleAbs",
"win32gui.GetWindowDC",
"win32gui.FindWindow",
"win32gui.GetClientRect",
"win32ui.CreateDCFromHandle",
"cv2.addWeighted",
"datetime.datetime.now",
"os.mkdir",
"win32gui.ReleaseDC",
"cv2.cvtColor",
"PIL.Image.frombuffer",
"win32ui.CreateBitmap",
"cv2.imread",
"cv2.Sobel"
] | [((495, 556), 'win32gui.FindWindow', 'win32gui.FindWindow', (['None', '"""Blackfly S BFS-U3-51S5C 19370957"""'], {}), "(None, 'Blackfly S BFS-U3-51S5C 19370957')\n", (514, 556), False, 'import win32gui, win32ui, win32con\n'), ((604, 678), 'win32gui.SendMessage', 'win32gui.SendMessage', (['hwnd', 'win32con.WM_SYSCOMMAND', 'win32con.SC_RESTORE', '(0)'], {}), '(hwnd, win32con.WM_SYSCOMMAND, win32con.SC_RESTORE, 0)\n', (624, 678), False, 'import win32gui, win32ui, win32con\n'), ((819, 847), 'win32gui.GetClientRect', 'win32gui.GetClientRect', (['hwnd'], {}), '(hwnd)\n', (841, 847), False, 'import win32gui, win32ui, win32con\n'), ((1028, 1054), 'win32gui.GetWindowDC', 'win32gui.GetWindowDC', (['hwnd'], {}), '(hwnd)\n', (1048, 1054), False, 'import win32gui, win32ui, win32con\n'), ((1068, 1102), 'win32ui.CreateDCFromHandle', 'win32ui.CreateDCFromHandle', (['hwndDC'], {}), '(hwndDC)\n', (1094, 1102), False, 'import win32gui, win32ui, win32con\n'), ((1165, 1187), 'win32ui.CreateBitmap', 'win32ui.CreateBitmap', ([], {}), '()\n', (1185, 1187), False, 'import win32gui, win32ui, win32con\n'), ((1680, 1779), 'PIL.Image.frombuffer', 'Image.frombuffer', (['"""RGB"""', "(bmpinfo['bmWidth'], bmpinfo['bmHeight'])", 'bmpstr', '"""raw"""', '"""BGRX"""', '(0)', '(1)'], {}), "('RGB', (bmpinfo['bmWidth'], bmpinfo['bmHeight']), bmpstr,\n 'raw', 'BGRX', 0, 1)\n", (1696, 1779), False, 'from PIL import Image\n'), ((1903, 1935), 'win32gui.ReleaseDC', 'win32gui.ReleaseDC', (['hwnd', 'hwndDC'], {}), '(hwnd, hwndDC)\n', (1921, 1935), False, 'import win32gui, win32ui, win32con\n'), ((2202, 2221), 'cv2.imread', 'cv2.imread', (['imgPath'], {}), '(imgPath)\n', (2212, 2221), False, 'import cv2\n'), ((2277, 2320), 'cv2.cvtColor', 'cv2.cvtColor', (['image_ori', 'cv2.COLOR_BGR2GRAY'], {}), '(image_ori, cv2.COLOR_BGR2GRAY)\n', (2289, 2320), False, 'import cv2\n'), ((2435, 2454), 'cv2.imread', 'cv2.imread', (['imgPath'], {}), '(imgPath)\n', (2445, 2454), False, 'import cv2\n'), ((2463, 2506), 'cv2.Sobel', 'cv2.Sobel', (['image', 'cv2.CV_16S', '(1)', '(0)'], {'ksize': '(3)'}), '(image, cv2.CV_16S, 1, 0, ksize=3)\n', (2472, 2506), False, 'import cv2\n'), ((2512, 2555), 'cv2.Sobel', 'cv2.Sobel', (['image', 'cv2.CV_16S', '(0)', '(1)'], {'ksize': '(3)'}), '(image, cv2.CV_16S, 0, 1, ksize=3)\n', (2521, 2555), False, 'import cv2\n'), ((2563, 2585), 'cv2.convertScaleAbs', 'cv2.convertScaleAbs', (['x'], {}), '(x)\n', (2582, 2585), False, 'import cv2\n'), ((2597, 2619), 'cv2.convertScaleAbs', 'cv2.convertScaleAbs', (['y'], {}), '(y)\n', (2616, 2619), False, 'import cv2\n'), ((2876, 2897), 'os.mkdir', 'os.mkdir', (['folder_name'], {}), '(folder_name)\n', (2884, 2897), False, 'import os\n'), ((2336, 2371), 'cv2.Laplacian', 'cv2.Laplacian', (['img2gray', 'cv2.CV_64F'], {}), '(img2gray, cv2.CV_64F)\n', (2349, 2371), False, 'import cv2\n'), ((2630, 2670), 'cv2.addWeighted', 'cv2.addWeighted', (['absX', '(0.5)', 'absY', '(0.5)', '(0)'], {}), '(absX, 0.5, absY, 0.5, 0)\n', (2645, 2670), False, 'import cv2\n'), ((2801, 2815), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2813, 2815), False, 'from datetime import datetime\n')] |
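laplacian() and tenengrad() above each reduce an image to a single sharpness score, which is typically used to pick the best-focused frame out of several captures. A hedged sketch of that use (the frame file names are hypothetical; laplacian() is the function defined in the module above):
# Pick the sharpest of several saved CCD frames by Laplacian variance.
candidates = ['frame_0.png', 'frame_1.png', 'frame_2.png']  # hypothetical paths
scores = {path: laplacian(path) for path in candidates}
best = max(scores, key=scores.get)
print('sharpest frame: {} (score {:.1f})'.format(best, scores[best]))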
import ntpath
import numpy as np
from numpy.lib.stride_tricks import as_strided
class Loader:
"""
This class handles .npz data that has an embedded matrix-like object.
Its methods deal with loading/unpacking Numpy .npz data objects,
and can also create a tiled data which is averaging values in a window
of the matrix (i.e. averaging values in a sub-matrix).
"""
def __init__(self, path):
self.path = path
self.data = None
self.tiled_data = None
def __repr__(self):
return f'{type(self).__name__}(path="{self.path}")'
def load_file(self):
"""
Loads the contents of a compressed .npz file
The expected input file only has the contents of a single file (i.e. data.files[] has only 1 element)
That single element is an array with a single embedded array-of-arrays:
[[[0,1][2,3]]]
:return: None
"""
archive = np.load(self.path)
self.data = archive[archive.files[0]][0]
def get_data(self):
"""
Get the data
:return: Numpy array of arrays
"""
return self.data
def get_identifier(self):
"""
        Get the identifier encoded in the file name
        The input file name is expected to be in the format of [identifier]_[*]+.npz
        For example: identifier_example.npz
        :return: String, the identifier portion of the file name
"""
return ntpath.basename(self.path).split('_')[0]
def get_range(self):
"""
Gets the minimum and maximum values from the data array-of-arrays
:return: Tuple, min and max values in the data
"""
return np.amin(self.data), np.amax(self.data)
def create_tiled_data(self):
"""
        This method takes an input n x n numpy array-of-arrays (i.e. matrix) and repeatedly creates
        a new, smaller matrix in which each cell holds the average of an i x i window (sub-matrix) of the previous level
For example:
input = [[0,1],[2,3]]
output with a window of 2x2 = [[1.5]]
:return: Numpy array of arrays
"""
data = self.data
tiled_data = [data]
while len(data) > 300:
dimension = data.shape[0]
if dimension % 2 != 0:
trimmed_data = data[0:len(data)-1, 0:len(data)-1]
data = trimmed_data
data = self.sum_sub_matrices(data, 2)
tiled_data.append(data)
self.tiled_data = tiled_data
return tiled_data
@staticmethod
def as_sub_matrices(x, rows, cols=None, writeable=False):
"""
Create sub-matrices from an input Numpy array-of-arrays (i.e. matrix)
It uses "rows" and "cols" to set the window size for getting the sub-matrices
:param x: Numpy array (matrix)
:param rows: Number; the size of the window in terms of rows
:param cols: Number; the size of the window in terms of columns
:param writeable: Boolean
:return: Numpy array of sub-matrices
"""
if cols is None:
cols = rows
x = np.asarray(x)
x_rows, x_cols = x.shape
s1, s2 = x.strides
if x_rows % rows != 0 or x_cols % cols != 0:
print(x_rows, rows, x_cols, cols)
raise ValueError('Invalid dimensions.')
out_shape = (x_rows // rows, x_cols // cols, rows, cols)
out_strides = (s1 * rows, s2 * cols, s1, s2)
return as_strided(x, out_shape, out_strides, writeable=writeable)
def sum_sub_matrices(self, x, rows, cols=None):
"""
        Calculate the mean over each (rows x cols) window of a matrix
        (note: despite the name, this computes the mean of each sub-matrix, not the sum)
        :param x: Numpy array (matrix)
        :param rows: Number; the size of the window in terms of rows
        :param cols: Number; the size of the window in terms of columns
        :return: Numpy array (matrix); reduced to shape (x_rows // rows, x_cols // cols)
"""
if cols is None:
cols = rows
x = np.asarray(x)
x_sub = self.as_sub_matrices(x, rows, cols)
x_sum = np.mean(x_sub, axis=(2, 3))
return x_sum
| [
"numpy.mean",
"ntpath.basename",
"numpy.amin",
"numpy.asarray",
"numpy.lib.stride_tricks.as_strided",
"numpy.load",
"numpy.amax"
] | [((946, 964), 'numpy.load', 'np.load', (['self.path'], {}), '(self.path)\n', (953, 964), True, 'import numpy as np\n'), ((3054, 3067), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (3064, 3067), True, 'import numpy as np\n'), ((3412, 3470), 'numpy.lib.stride_tricks.as_strided', 'as_strided', (['x', 'out_shape', 'out_strides'], {'writeable': 'writeable'}), '(x, out_shape, out_strides, writeable=writeable)\n', (3422, 3470), False, 'from numpy.lib.stride_tricks import as_strided\n'), ((3896, 3909), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (3906, 3909), True, 'import numpy as np\n'), ((3978, 4005), 'numpy.mean', 'np.mean', (['x_sub'], {'axis': '(2, 3)'}), '(x_sub, axis=(2, 3))\n', (3985, 4005), True, 'import numpy as np\n'), ((1645, 1663), 'numpy.amin', 'np.amin', (['self.data'], {}), '(self.data)\n', (1652, 1663), True, 'import numpy as np\n'), ((1665, 1683), 'numpy.amax', 'np.amax', (['self.data'], {}), '(self.data)\n', (1672, 1683), True, 'import numpy as np\n'), ((1409, 1435), 'ntpath.basename', 'ntpath.basename', (['self.path'], {}), '(self.path)\n', (1424, 1435), False, 'import ntpath\n')] |
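The docstrings above describe 2x2-window averaging with the example [[0, 1], [2, 3]] -> [[1.5]]; that can be checked directly with the class as written. A small sketch (the path given to Loader is a dummy string; nothing is read from disk because sum_sub_matrices only touches the array argument):
# Quick check of the windowed averaging described in the docstrings above.
import numpy as np
x = np.array([[0, 1], [2, 3]])
loader = Loader(path='dummy.npz')
print(loader.sum_sub_matrices(x, 2))  # [[1.5]], matching the docstring example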
from app import app
from app.framework.controller import *
from flask_login import login_required, current_user
from app.model.todolist import *
from app.framework.requests.request import request
from flask import jsonify, Response
import json
from sqlalchemy.ext.declarative import DeclarativeMeta
class AlchemyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj.__class__, DeclarativeMeta):
# an SQLAlchemy class
fields = {}
for field in [x for x in dir(obj) if not x.startswith('_') and x != 'metadata']:
data = obj.__getattribute__(field)
try:
json.dumps(data) # this will fail on non-encodable values, like other classes
fields[field] = data
except TypeError:
fields[field] = None
# a json-encodable dict
return fields
return json.JSONEncoder.default(self, obj)
class ToDoListController(Controller):
route_prefix = '/api'
def construct(cls):
ToDoListController.register(app)
@route('/create', methods=['POST'])
@login_required
def create(self):
todolist = ToDoList()
data = request.get_json()
todolist.title = data['title']
todolist.description = data['description']
todolist.progress = data['progress']
todolist.author = current_user.user_id
try:
todolist.save()
except:
return jsonify(message="Failure")
data = json.dumps(todolist, cls=AlchemyEncoder)
data = json.loads(data)
data.update({'message': 'Success'})
return Response(json.dumps(data), mimetype="application/json")
@route('/todolist/<user_id>')
def fetch(self, user_id):
todolist = ToDoList.query.filter_by(_creator=user_id).all()
data = json.dumps(todolist, cls=AlchemyEncoder)
return Response(data, mimetype='application/json')
@route('/done/<item_id>', methods=['POST'])
def done(self, item_id):
todolist = ToDoList.query.filter_by(id=item_id).first()
todolist.done = True
todolist.progress = False
todolist.save()
return jsonify(message="Success")
@route('/undone/<item_id>', methods=['POST'])
def undone(self, item_id):
todolist = ToDoList.query.filter_by(id=item_id).first()
todolist.done = False
todolist.progress = True
try:
todolist.save()
except:
return jsonify(message="Failure")
return jsonify(message="Success")
@route("/delete/<item_id>", methods=['POST'])
def delete(self, item_id):
todolist = ToDoList.query.filter_by(id=item_id).first()
try:
todolist.delete()
except:
return jsonify(message="Failure")
return jsonify(message="Success")
    @route('item/<item_id>', methods=['GET'])
    def item(self, item_id):
        todolist = ToDoList.query.filter_by(id=item_id).first()
        # Serialize the single item the same way fetch() does
        data = json.dumps(todolist, cls=AlchemyEncoder)
        return Response(data, mimetype='application/json')
@route('<item_id>', methods=['POST'])
def edit(self, item_id):
data = request.get_json()
todolist = ToDoList.query.filter_by(id=item_id).first()
todolist.title = data['title']
todolist.description = data['description']
try:
todolist.save()
except:
return jsonify(message="Failure")
        return jsonify(message="Success")
| [
"json.loads",
"json.JSONEncoder.default",
"json.dumps",
"app.framework.requests.request.request.get_json",
"flask.Response",
"flask.jsonify"
] | [((939, 974), 'json.JSONEncoder.default', 'json.JSONEncoder.default', (['self', 'obj'], {}), '(self, obj)\n', (963, 974), False, 'import json\n'), ((1239, 1257), 'app.framework.requests.request.request.get_json', 'request.get_json', ([], {}), '()\n', (1255, 1257), False, 'from app.framework.requests.request import request\n'), ((1558, 1598), 'json.dumps', 'json.dumps', (['todolist'], {'cls': 'AlchemyEncoder'}), '(todolist, cls=AlchemyEncoder)\n', (1568, 1598), False, 'import json\n'), ((1614, 1630), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (1624, 1630), False, 'import json\n'), ((1899, 1939), 'json.dumps', 'json.dumps', (['todolist'], {'cls': 'AlchemyEncoder'}), '(todolist, cls=AlchemyEncoder)\n', (1909, 1939), False, 'import json\n'), ((1955, 1998), 'flask.Response', 'Response', (['data'], {'mimetype': '"""application/json"""'}), "(data, mimetype='application/json')\n", (1963, 1998), False, 'from flask import jsonify, Response\n'), ((2244, 2270), 'flask.jsonify', 'jsonify', ([], {'message': '"""Success"""'}), "(message='Success')\n", (2251, 2270), False, 'from flask import jsonify, Response\n'), ((2602, 2628), 'flask.jsonify', 'jsonify', ([], {'message': '"""Success"""'}), "(message='Success')\n", (2609, 2628), False, 'from flask import jsonify, Response\n'), ((2908, 2934), 'flask.jsonify', 'jsonify', ([], {'message': '"""Success"""'}), "(message='Success')\n", (2915, 2934), False, 'from flask import jsonify, Response\n'), ((3160, 3178), 'app.framework.requests.request.request.get_json', 'request.get_json', ([], {}), '()\n', (3176, 3178), False, 'from app.framework.requests.request import request\n'), ((3451, 3477), 'flask.jsonify', 'jsonify', ([], {'message': '"""Success"""'}), "(message='Success')\n", (3458, 3477), False, 'from flask import jsonify, Response\n'), ((1699, 1715), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (1709, 1715), False, 'import json\n'), ((1516, 1542), 'flask.jsonify', 'jsonify', ([], {'message': '"""Failure"""'}), "(message='Failure')\n", (1523, 1542), False, 'from flask import jsonify, Response\n'), ((2560, 2586), 'flask.jsonify', 'jsonify', ([], {'message': '"""Failure"""'}), "(message='Failure')\n", (2567, 2586), False, 'from flask import jsonify, Response\n'), ((2866, 2892), 'flask.jsonify', 'jsonify', ([], {'message': '"""Failure"""'}), "(message='Failure')\n", (2873, 2892), False, 'from flask import jsonify, Response\n'), ((3409, 3435), 'flask.jsonify', 'jsonify', ([], {'message': '"""Failure"""'}), "(message='Failure')\n", (3416, 3435), False, 'from flask import jsonify, Response\n'), ((667, 683), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (677, 683), False, 'import json\n')] |
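AlchemyEncoder above is a generic way to let json.dumps handle SQLAlchemy model instances: it walks the mapped object's public attributes and nulls out anything that is not JSON-encodable. A hedged, self-contained sketch of the same encoder applied to a toy declarative model (the model name and columns are made up for illustration; AlchemyEncoder itself is assumed to be in scope from the sample above):
# Toy demonstration of AlchemyEncoder with an in-memory declarative model.
import json
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Toy(Base):
    __tablename__ = 'toys'
    id = Column(Integer, primary_key=True)
    name = Column(String)
item = Toy(id=1, name='ball')
print(json.dumps(item, cls=AlchemyEncoder))  # e.g. {"id": 1, "name": "ball", ...}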
import os
import shutil
root = input("Enter the full path of directory you want to sort: \n")
# add more extension if needed
dic = {"doc": "Word File", "py": "Python File", "docx": "Word File", "exe": "EXE file", "zip": "Zip File",
"txt": "Text File", "html": "HTML File", "png": "Images", "jpg": "Images", "mp4": "Videos",
"pptx": "PPT file", "ppt": "PPT file", "cpp": "C++ File", "xlsx": "Excel File", "xlx": "Excel File",
"mp3": "Audio File", "c": "C File",
"pdf": "PDF File"}
flag = False
list_ = os.listdir(root)
for file_ in list_:
name, ext = os.path.splitext(file_)
ext = ext[1:] # store Extension
if ext == '': # if it is a directory then skip
continue
else:
if ext in dic.keys():
if os.path.exists(root + '/Sorted/' + dic[ext]): # if the extension name file already exists
shutil.move(root + '/' + file_,
root + '/Sorted/' + dic[ext] + '/' + file_) # then move the current file
else:
os.makedirs(root + '/Sorted/' + dic[ext]) # otherwise create the directory
shutil.move(root + '/' + file_, root + '/Sorted/' + dic[ext] + '/' + file_) # then move the file
else:
if not os.path.exists(root + '/Sorted/Extra File'):
os.makedirs(root + '/Sorted/Extra File') # otherwise create the directory named "Extra File"
shutil.move(root + '/' + file_, root + '/Sorted/Extra File/' + file_)
else:
if os.path.isfile(root + '/Sorted/Extra File/' + file_):
continue
shutil.move(root + '/' + file_, root + '/Sorted/Extra File/' + file_)
| [
"os.path.exists",
"os.listdir",
"os.makedirs",
"shutil.move",
"os.path.splitext",
"os.path.isfile"
] | [((543, 559), 'os.listdir', 'os.listdir', (['root'], {}), '(root)\n', (553, 559), False, 'import os\n'), ((598, 621), 'os.path.splitext', 'os.path.splitext', (['file_'], {}), '(file_)\n', (614, 621), False, 'import os\n'), ((791, 835), 'os.path.exists', 'os.path.exists', (["(root + '/Sorted/' + dic[ext])"], {}), "(root + '/Sorted/' + dic[ext])\n", (805, 835), False, 'import os\n'), ((899, 974), 'shutil.move', 'shutil.move', (["(root + '/' + file_)", "(root + '/Sorted/' + dic[ext] + '/' + file_)"], {}), "(root + '/' + file_, root + '/Sorted/' + dic[ext] + '/' + file_)\n", (910, 974), False, 'import shutil\n'), ((1070, 1111), 'os.makedirs', 'os.makedirs', (["(root + '/Sorted/' + dic[ext])"], {}), "(root + '/Sorted/' + dic[ext])\n", (1081, 1111), False, 'import os\n'), ((1163, 1238), 'shutil.move', 'shutil.move', (["(root + '/' + file_)", "(root + '/Sorted/' + dic[ext] + '/' + file_)"], {}), "(root + '/' + file_, root + '/Sorted/' + dic[ext] + '/' + file_)\n", (1174, 1238), False, 'import shutil\n'), ((1296, 1339), 'os.path.exists', 'os.path.exists', (["(root + '/Sorted/Extra File')"], {}), "(root + '/Sorted/Extra File')\n", (1310, 1339), False, 'import os\n'), ((1358, 1398), 'os.makedirs', 'os.makedirs', (["(root + '/Sorted/Extra File')"], {}), "(root + '/Sorted/Extra File')\n", (1369, 1398), False, 'import os\n'), ((1469, 1538), 'shutil.move', 'shutil.move', (["(root + '/' + file_)", "(root + '/Sorted/Extra File/' + file_)"], {}), "(root + '/' + file_, root + '/Sorted/Extra File/' + file_)\n", (1480, 1538), False, 'import shutil\n'), ((1596, 1648), 'os.path.isfile', 'os.path.isfile', (["(root + '/Sorted/Extra File/' + file_)"], {}), "(root + '/Sorted/Extra File/' + file_)\n", (1610, 1648), False, 'import os\n'), ((1697, 1766), 'shutil.move', 'shutil.move', (["(root + '/' + file_)", "(root + '/Sorted/Extra File/' + file_)"], {}), "(root + '/' + file_, root + '/Sorted/Extra File/' + file_)\n", (1708, 1766), False, 'import shutil\n')] |
# Generated by Django 2.0 on 2019-12-26 10:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gram', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='profile',
name='username',
field=models.CharField(default='UserName', max_length=50),
),
]
| [
"django.db.models.CharField"
] | [((321, 372), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""UserName"""', 'max_length': '(50)'}), "(default='UserName', max_length=50)\n", (337, 372), False, 'from django.db import migrations, models\n')] |
import fsspec
from pydantic import BaseModel
from .compression import COMPRESSORS
from .encryption import ENCRYPTORS
from .models import CommitRef, Tag, User
from .refs import Refs
from .remotes import Remote
from .serializers import SERIALIZERS
from .storage import (ContentAddressableStorage, FunctionStorage,
ObjectStorage, PydanticModelStorage,
SubfolderByKeyStorage, SubfolderStorage)
class Config(BaseModel):
user: User = None
main_branch: str = "master"
HEAD: str = main_branch
root_path: str = "file://./"
igit_path: str = ".igit"
tree_path: str = None
serialization: str = "msgpack-dill"
hash_func: str = "sha1"
compression: str = "noop"
encryption: str = "noop"
encryption_kwargs: dict = None
@classmethod
def from_path(cls, path):
with fsspec.open(path, "rb") as f:
cfg = cls.parse_raw(f.read())
return cfg
def get_compressor(self):
return COMPRESSORS[self.compression]
def get_encryptor(self):
kwargs = {}
if self.encryption_kwargs is not None:
kwargs.update(self.encryption_kwargs)
return ENCRYPTORS[self.encryption](**kwargs)
def get_serializer(self):
return SERIALIZERS[self.serialization]
def get_objects(self, store):
if isinstance(store, str):
store = fsspec.get_mapper(store)
store = SubfolderStorage(store, name='objects')
encryptor = self.get_encryptor()
store = FunctionStorage(
store,
encryptor.encrypt,
encryptor.decrypt,
)
compressor = self.get_compressor()
if compressor is not None:
store = FunctionStorage(store, compressor.compress,
compressor.decompress)
serializer = self.get_serializer()
if serializer is not None:
store = ObjectStorage(store, serializer=serializer)
store = SubfolderByKeyStorage(store)
store = ContentAddressableStorage(store)
return store
def get_index(self, store):
store = SubfolderStorage(store, name='index')
serializer = self.get_serializer()
return ObjectStorage(store, serializer=serializer)
def get_refs(self, store):
commits_store = PydanticModelStorage(store, CommitRef)
heads = SubfolderStorage(commits_store, "heads")
tags_store = PydanticModelStorage(store, Tag)
tags = SubfolderStorage(tags_store, "tags")
remotes_store = PydanticModelStorage(store, Remote)
remotes = SubfolderStorage(remotes_store, "remotes")
return Refs(heads, tags, remotes)
| [
"fsspec.get_mapper",
"fsspec.open"
] | [((856, 879), 'fsspec.open', 'fsspec.open', (['path', '"""rb"""'], {}), "(path, 'rb')\n", (867, 879), False, 'import fsspec\n'), ((1391, 1415), 'fsspec.get_mapper', 'fsspec.get_mapper', (['store'], {}), '(store)\n', (1408, 1415), False, 'import fsspec\n')] |
import json
import os
import firebase_admin
from dotenv import load_dotenv
from firebase_admin import credentials, firestore
load_dotenv()
if not firebase_admin._apps:
CREDENTIALS = credentials.Certificate({
'type': 'service_account',
'token_uri': 'https://oauth2.googleapis.com/token',
'project_id': os.environ['FIREBASE_PROJECT_ID'],
'client_email': os.environ['FIREBASE_CLIENT_EMAIL'],
'private_key': os.environ['FIREBASE_PRIVATE_KEY'].replace('\\n', '\n')
})
firebase_admin.initialize_app(CREDENTIALS,{'databaseURL': 'https://'+os.environ['FIREBASE_PROJECT_ID']+'.firebaseio.com'})
db = firestore.client()
def main():
itemNameList = json.load(open('json/ship.json', 'r', encoding="utf-8_sig"))["pagePosition"]
for channel in itemNameList:
docs = db.collection('shipPost').where('channel', '==', channel).get()
count = len(docs)
print(channel, count)
db.collection('count').document(channel).update({'count': count, 'update': firestore.SERVER_TIMESTAMP})
if __name__ == "__main__":
    main()
| [
"firebase_admin.firestore.client",
"firebase_admin.initialize_app",
"dotenv.load_dotenv"
] | [((127, 140), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (138, 140), False, 'from dotenv import load_dotenv\n'), ((626, 644), 'firebase_admin.firestore.client', 'firestore.client', ([], {}), '()\n', (642, 644), False, 'from firebase_admin import credentials, firestore\n'), ((498, 630), 'firebase_admin.initialize_app', 'firebase_admin.initialize_app', (['CREDENTIALS', "{'databaseURL': 'https://' + os.environ['FIREBASE_PROJECT_ID'] +\n '.firebaseio.com'}"], {}), "(CREDENTIALS, {'databaseURL': 'https://' + os.\n environ['FIREBASE_PROJECT_ID'] + '.firebaseio.com'})\n", (527, 630), False, 'import firebase_admin\n')] |
#!/usr/bin/env python3.7
import numpy as np
import os
import time
import dask
import dask.array as da
from distributed import Client
from dask_jobqueue import SLURMCluster
from ipflag import compute_uv_bins, gridflag, listobs_daskms, annulus_stats, plotgrid
from config_parser import validate_args as va
import logging
import bookkeeping
logger = logging.getLogger(__name__)
logging.basicConfig(format="%(asctime)-15s %(levelname)s: %(message)s", level=logging.INFO)
def map_field_name_id(fieldname, msfile):
"""Convert field name to field ID.
Parameters
----------
fieldname : string
Returns
-------
fieldid : int
"""
lo = listobs_daskms.ListObs(msfile, 'DATA')
field_info = lo.get_fields()
fnames = {ifield['Name']:ifield['ID'] for ifield in field_info}
try:
fid = fnames[fieldname]
except:
raise ValueError(f"The field name value of {fieldname} is not a valid field in this dataset.")
return fid
def run_gridflag(visname, fields):
stokes = 'Q'
n_workers = 1
username = os.environ["USER"]
logger.info('GridFlag: Setting up Dask Cluster with {0} workers.'.format(n_workers))
# Cluster configuration should be moved to ~/.config/dask/jobqueue.yaml
cluster = SLURMCluster(
queue="Main",
cores=16,
processes=1,
n_workers=n_workers,
memory="114GB",
interface="ens3",
shebang='#!/usr/bin/env bash',
walltime="02:00:00",
local_directory="temp/",
death_timeout="1m",
log_directory="logs/",
project="b09-mightee-ag",
python="singularity exec /idia/software/containers/gridflag_tools.simg /usr/bin/python3"
)
cluster.scale(jobs=n_workers)
time.sleep(30)
client = Client(cluster)
start = time.time()
fieldname = fields.targetfield
fieldid = map_field_name_id(fieldname, visname)
logger.info('Reading measurement set in dask-ms: {0}.'.format(visname))
ds_ind, uvbins = compute_uv_bins.load_ms_file(visname,
fieldid=fieldid,
bin_count_factor=1.0,
chunksize=2*10**7)
logger.info('Flagging field {0} ({1}).'.format(fieldid, fieldname, visname))
# Check existing flags in MS
# check_exising_flags(ds_ind, stokes=stokes, client=client)
    flag_list, median_grid, median_grid_flg = gridflag.compute_ipflag_grid(ds_ind,
                                                 uvbins,
                                                 stokes=stokes,
                                                 sigma=3.0,
                                                 partition_level=3,
                                                 client=client)
flag_vis_percentage = 100*len(flag_list)/len(ds_ind.DATA)
print("Percentage of rows flagged: {:.1f}% - {}/{} visibilities".format(flag_vis_percentage, len(flag_list), len(ds_ind.DATA)))
logger.info("Percentage of rows flagged {0:.2f} % conprising {1}/{2} visibilities.".format(flag_vis_percentage, len(flag_list), len(ds_ind.DATA)))
# Save UV-grid median plot
annulus_width = annulus_stats.compute_annulus_bins(median_grid, uvbins, 10)
plotgrid.plot_uv_grid(median_grid, uvbins, annulus_width, filename="uv_grid_unflagged.png")
plotgrid.plot_uv_grid(median_grid_flg, uvbins, annulus_width, filename="uv_grid_flagged.png")
compute_uv_bins.write_ms_file(visname, ds_ind, flag_list, fieldid, stokes=stokes, overwrite=True)
end = time.time()
print("GridFlag runtime: {} seconds.".format(end-start))
logger.info("Flagging completed. Runtime: {0} seconds.".format((end-start)))
client.close()
cluster.close()
def main(args, taskvals):
visname = va(taskvals, 'data', 'vis', str)
fields = bookkeeping.get_field_ids(taskvals['fields'])
run_gridflag(visname, fields)
if __name__ == '__main__':
bookkeeping.run_script(main)
| [
"logging.getLogger",
"logging.basicConfig",
"ipflag.compute_uv_bins.write_ms_file",
"ipflag.compute_uv_bins.load_ms_file",
"ipflag.gridflag.compute_ipflag_grid",
"config_parser.validate_args",
"bookkeeping.run_script",
"bookkeeping.get_field_ids",
"time.sleep",
"ipflag.listobs_daskms.ListObs",
"ipflag.annulus_stats.compute_annulus_bins",
"ipflag.plotgrid.plot_uv_grid",
"distributed.Client",
"time.time",
"dask_jobqueue.SLURMCluster"
] | [((354, 381), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (371, 381), False, 'import logging\n'), ((382, 477), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)-15s %(levelname)s: %(message)s"""', 'level': 'logging.INFO'}), "(format='%(asctime)-15s %(levelname)s: %(message)s',\n level=logging.INFO)\n", (401, 477), False, 'import logging\n'), ((684, 722), 'ipflag.listobs_daskms.ListObs', 'listobs_daskms.ListObs', (['msfile', '"""DATA"""'], {}), "(msfile, 'DATA')\n", (706, 722), False, 'from ipflag import compute_uv_bins, gridflag, listobs_daskms, annulus_stats, plotgrid\n'), ((1293, 1655), 'dask_jobqueue.SLURMCluster', 'SLURMCluster', ([], {'queue': '"""Main"""', 'cores': '(16)', 'processes': '(1)', 'n_workers': 'n_workers', 'memory': '"""114GB"""', 'interface': '"""ens3"""', 'shebang': '"""#!/usr/bin/env bash"""', 'walltime': '"""02:00:00"""', 'local_directory': '"""temp/"""', 'death_timeout': '"""1m"""', 'log_directory': '"""logs/"""', 'project': '"""b09-mightee-ag"""', 'python': '"""singularity exec /idia/software/containers/gridflag_tools.simg /usr/bin/python3"""'}), "(queue='Main', cores=16, processes=1, n_workers=n_workers,\n memory='114GB', interface='ens3', shebang='#!/usr/bin/env bash',\n walltime='02:00:00', local_directory='temp/', death_timeout='1m',\n log_directory='logs/', project='b09-mightee-ag', python=\n 'singularity exec /idia/software/containers/gridflag_tools.simg /usr/bin/python3'\n )\n", (1305, 1655), False, 'from dask_jobqueue import SLURMCluster\n'), ((1784, 1798), 'time.sleep', 'time.sleep', (['(30)'], {}), '(30)\n', (1794, 1798), False, 'import time\n'), ((1813, 1828), 'distributed.Client', 'Client', (['cluster'], {}), '(cluster)\n', (1819, 1828), False, 'from distributed import Client\n'), ((1842, 1853), 'time.time', 'time.time', ([], {}), '()\n', (1851, 1853), False, 'import time\n'), ((2041, 2144), 'ipflag.compute_uv_bins.load_ms_file', 'compute_uv_bins.load_ms_file', (['visname'], {'fieldid': 'fieldid', 'bin_count_factor': '(1.0)', 'chunksize': '(2 * 10 ** 7)'}), '(visname, fieldid=fieldid, bin_count_factor=1.0,\n chunksize=2 * 10 ** 7)\n', (2069, 2144), False, 'from ipflag import compute_uv_bins, gridflag, listobs_daskms, annulus_stats, plotgrid\n'), ((2516, 2639), 'ipflag.gridflag.compute_ipflag_grid', 'gridflag.compute_ipflag_grid', (['ds_ind', 'uvbins'], {'stokes': 'stokes', 'sigma': '(3.0)', 'partition_level': '(3)', 'client': 'client'}), '(ds_ind, uvbins, stokes=stokes, sigma=3.0,\n partition_level=3, stokes=stokes, client=client)\n', (2544, 2639), False, 'from ipflag import compute_uv_bins, gridflag, listobs_daskms, annulus_stats, plotgrid\n'), ((3492, 3551), 'ipflag.annulus_stats.compute_annulus_bins', 'annulus_stats.compute_annulus_bins', (['median_grid', 'uvbins', '(10)'], {}), '(median_grid, uvbins, 10)\n', (3526, 3551), False, 'from ipflag import compute_uv_bins, gridflag, listobs_daskms, annulus_stats, plotgrid\n'), ((3556, 3652), 'ipflag.plotgrid.plot_uv_grid', 'plotgrid.plot_uv_grid', (['median_grid', 'uvbins', 'annulus_width'], {'filename': '"""uv_grid_unflagged.png"""'}), "(median_grid, uvbins, annulus_width, filename=\n 'uv_grid_unflagged.png')\n", (3577, 3652), False, 'from ipflag import compute_uv_bins, gridflag, listobs_daskms, annulus_stats, plotgrid\n'), ((3652, 3750), 'ipflag.plotgrid.plot_uv_grid', 'plotgrid.plot_uv_grid', (['median_grid_flg', 'uvbins', 'annulus_width'], {'filename': '"""uv_grid_flagged.png"""'}), "(median_grid_flg, uvbins, annulus_width, filename=\n 
'uv_grid_flagged.png')\n", (3673, 3750), False, 'from ipflag import compute_uv_bins, gridflag, listobs_daskms, annulus_stats, plotgrid\n'), ((3751, 3853), 'ipflag.compute_uv_bins.write_ms_file', 'compute_uv_bins.write_ms_file', (['visname', 'ds_ind', 'flag_list', 'fieldid'], {'stokes': 'stokes', 'overwrite': '(True)'}), '(visname, ds_ind, flag_list, fieldid, stokes=\n stokes, overwrite=True)\n', (3780, 3853), False, 'from ipflag import compute_uv_bins, gridflag, listobs_daskms, annulus_stats, plotgrid\n'), ((3860, 3871), 'time.time', 'time.time', ([], {}), '()\n', (3869, 3871), False, 'import time\n'), ((4098, 4130), 'config_parser.validate_args', 'va', (['taskvals', '"""data"""', '"""vis"""', 'str'], {}), "(taskvals, 'data', 'vis', str)\n", (4100, 4130), True, 'from config_parser import validate_args as va\n'), ((4149, 4194), 'bookkeeping.get_field_ids', 'bookkeeping.get_field_ids', (["taskvals['fields']"], {}), "(taskvals['fields'])\n", (4174, 4194), False, 'import bookkeeping\n'), ((4265, 4293), 'bookkeeping.run_script', 'bookkeeping.run_script', (['main'], {}), '(main)\n', (4287, 4293), False, 'import bookkeeping\n')] |
#!/usr/bin/python3
import sys
import os
import argparse
import numpy as np
def main():
parser = argparse.ArgumentParser(description='')
parser.add_argument('inputFile', type=str, nargs=1, help='name of the input file')
parser.add_argument('outputFile',type=str, nargs=1, help='name of the output file')
# option
parser.add_argument('--opt_name',type=str,dest="destination",default="default name",help='')
args = parser.parse_args()
    inputFilename = args.inputFile[0]
    outputFilename = args.outputFile[0]  # read the positional output argument the parser defines
if __name__ == "__main__":
main()
| [
"argparse.ArgumentParser"
] | [((101, 140), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '""""""'}), "(description='')\n", (124, 140), False, 'import argparse\n')] |
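A quick, hedged illustration of how the parser defined above behaves; the argv list and file names are made-up placeholders, not taken from the original script.

import argparse

parser = argparse.ArgumentParser(description='')
parser.add_argument('inputFile', type=str, nargs=1, help='name of the input file')
parser.add_argument('outputFile', type=str, nargs=1, help='name of the output file')
parser.add_argument('--opt_name', type=str, dest="destination", default="default name", help='')
args = parser.parse_args(['in.txt', 'out.txt', '--opt_name', 'renamed'])
print(args.inputFile[0], args.outputFile[0], args.destination)  # in.txt out.txt renamed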
# -*- coding: utf-8 -*-
from django.db import models
from django.utils.html import strip_tags
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext, ugettext_lazy as _
from filebrowser.fields import FileBrowseField
from django.core.mail import send_mail
# Create your models here.
class MetaData(models.Model):
"""
Abstract model that provides meta data for content.
"""
_meta_title = models.CharField(_(u"Tytuł sekcji meta"), null=True, blank=True,
max_length=500, help_text=_(u"Opcjonalny HTML title tag."))
meta_description = models.TextField(_(u"Opis sekcji meta"), blank=True)
keywords = models.CharField(max_length=500, blank=True, null=True)
class Meta:
abstract = True
def save(self, *args, **kwargs):
"""
Set the description field on save.
"""
self.meta_description = strip_tags(self.meta_description)
super(MetaData, self).save(*args, **kwargs)
def meta_title(self):
"""
Accessor for the optional ``_meta_title`` field, which returns
the string version of the instance if not provided.
"""
return self._meta_title or str(self)
class Segment(models.Model):
title = models.CharField(max_length=254)
def __unicode__(self):
return self.title
#class Page(models.Model):
class Category(MetaData):
slug = models.SlugField(max_length=50)
title = models.CharField(_(u'Tytuł'), default='', max_length=254, blank=False, null=False)
items_title = models.CharField(_(u'Tytuł Katalogu'), default='', max_length=254, blank=True, null=True)
timestamp = models.DateTimeField(auto_now_add=True)
active = models.BooleanField(default=True)
slogan = models.CharField(max_length=255, default='', blank=True)
description = models.TextField(_(u'Opis'), blank=True, null=True)
picture = models.ImageField(_(u'Kategoria'), default='pics/default.png', upload_to='pics/',
blank=False, null=False)
header = models.CharField(_(u'Klasa obrazka'), max_length=50, default='sec1', blank=True, null=True)
order = models.IntegerField(default=0, blank=False, null=False)
parent_category = models.ForeignKey('self', blank=True, null=True, related_name="subcategories")
segment = models.ForeignKey(Segment, default='1', blank=False, null=False)
#products = models.ManyToManyField(Product, blank=True, null=True)
def __unicode__(self):
return self.title
def get_absolute_url(self):
return reverse("adoffice:detail", kwargs={"slug": self.slug})
class PageClass(models.Model):
name = models.CharField(max_length=255, default='homepage', blank=False, null=False)
def __unicode__(self):
return self.name
class Page(MetaData):
section1 = models.TextField(u'Sekcja-1', blank=True, null=True)
section2 = models.TextField(u'Sekcja-2', blank=True, null=True)
title = models.CharField(max_length=254, default='')
pageclass = models.ForeignKey(PageClass, default='1', blank=False, null=False)
def __unicode__(self):
return self.title
#subcategories = Category.objects.filter(parent_category__id=target_category.id)
class GroupCategory(models.Model):
title = models.CharField(max_length=254)
def __unicode__(self):
return self.title
class Finishing(models.Model):
title = models.CharField(max_length=254)
def __unicode__(self):
return self.title
class Accessories(models.Model):
title = models.CharField(max_length=254)
def __unicode__(self):
return self.title
class Product(MetaData):
_category = ''
title = models.CharField(max_length=254)
slug = models.SlugField(max_length=150, unique=True)
description = models.TextField(_(u'Opis'), blank=True, null=True)
timestamp = models.DateTimeField(auto_now_add=True)
active = models.BooleanField(default=True)
format_size = models.CharField(max_length=25, blank=True)
category = models.ManyToManyField(Category, blank=True, null=True, name='categories')
group_category = models.ManyToManyField(GroupCategory, blank=True, null=True)
finishing = models.ManyToManyField(Finishing, blank=True, null=True)
accessories = models.ManyToManyField(Accessories, blank=True, null=True)
picture = models.ImageField(_(u'Obrazek'), default='pics/default.png', upload_to='pics/',
blank=False, null=False)
document = FileBrowseField("PDF", max_length=200, directory="documents/", extensions=[".pdf"], blank=True, null=True)
def __unicode__(self):
return self.title
class Meta:
unique_together = ('title', 'slug')
def get_absolute_url(self):
return reverse("adoffice:product", kwargs={"slug": self.slug, "category": self.categories.all()[0].slug})
def get_next(self):
self._category = self.categories.all()[0]
next = Product.objects.filter(id__gt=self.id, categories__in=[self._category], active=True).order_by('id')
if next:
return next[0]
return False
def get_prev(self):
self._category = self.categories.all()[0]
prev = Product.objects.filter(id__lt=self.id, categories__in=[self._category], active=True).order_by('-id')
if prev:
return prev[0]
return False
def get_category(self):
self._category = self.categories.all()[0]
return self._category
class ProductImage(models.Model):
product = models.ForeignKey(Product)
picture = models.ImageField(_(u'Obrazek'), default='pics/default.png', upload_to='pics/',
blank=False, null=False)
featured = models.BooleanField(default=False)
thumbnail = models.BooleanField(default=False)
updated = models.DateTimeField(auto_now_add=False, auto_now=True)
def __unicode__(self):
return self.product.title
class Quota(models.Model):
email = models.EmailField()
ref = models.CharField(max_length=254, default='')
full_name = models.CharField(_(u'Imię i Nazwisko'), max_length=254, default='', blank=False, null=False)
company = models.CharField(_(u'Firma'), max_length=254, default='', blank=True, null=True)
phone = models.CharField(_(u'Telefon'), max_length=254, default='')
note = models.TextField(_(u'Wiadomość'), default='', blank=True, null=True)
newsletter = models.BooleanField(_(u'Newsletter'), default=True, help_text=_(u'Chcę być informowany o nowościach i ofertach firmy Achilles.'))
timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
def __unicode__(self):
return self.email
    def save(self, *args, **kwargs):  # accept the standard save() arguments so Django can pass them through
if self.email:
message = _("Zapytanie strona www ").format(self)
subject = _(u"Dziękujemy za kontakt")
#send_mail(subject, message, "<EMAIL>", [self.email, ])
#print('Stored: {}'.format(self))
        super(Quota, self).save(*args, **kwargs)
| [
"django.db.models.EmailField",
"django.utils.translation.ugettext_lazy",
"django.utils.html.strip_tags",
"django.db.models.TextField",
"django.db.models.IntegerField",
"django.db.models.ForeignKey",
"filebrowser.fields.FileBrowseField",
"django.db.models.ManyToManyField",
"django.core.urlresolvers.reverse",
"django.db.models.BooleanField",
"django.db.models.SlugField",
"django.db.models.DateTimeField",
"django.db.models.CharField"
] | [((692, 747), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)', 'blank': '(True)', 'null': '(True)'}), '(max_length=500, blank=True, null=True)\n', (708, 747), False, 'from django.db import models\n'), ((1282, 1314), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)'}), '(max_length=254)\n', (1298, 1314), False, 'from django.db import models\n'), ((1437, 1468), 'django.db.models.SlugField', 'models.SlugField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (1453, 1468), False, 'from django.db import models\n'), ((1688, 1727), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1708, 1727), False, 'from django.db import models\n'), ((1741, 1774), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (1760, 1774), False, 'from django.db import models\n'), ((1788, 1844), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'default': '""""""', 'blank': '(True)'}), "(max_length=255, default='', blank=True)\n", (1804, 1844), False, 'from django.db import models\n'), ((2185, 2240), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'blank': '(False)', 'null': '(False)'}), '(default=0, blank=False, null=False)\n', (2204, 2240), False, 'from django.db import models\n'), ((2263, 2341), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""self"""'], {'blank': '(True)', 'null': '(True)', 'related_name': '"""subcategories"""'}), "('self', blank=True, null=True, related_name='subcategories')\n", (2280, 2341), False, 'from django.db import models\n'), ((2356, 2420), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Segment'], {'default': '"""1"""', 'blank': '(False)', 'null': '(False)'}), "(Segment, default='1', blank=False, null=False)\n", (2373, 2420), False, 'from django.db import models\n'), ((2694, 2771), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'default': '"""homepage"""', 'blank': '(False)', 'null': '(False)'}), "(max_length=255, default='homepage', blank=False, null=False)\n", (2710, 2771), False, 'from django.db import models\n'), ((2864, 2916), 'django.db.models.TextField', 'models.TextField', (['u"""Sekcja-1"""'], {'blank': '(True)', 'null': '(True)'}), "(u'Sekcja-1', blank=True, null=True)\n", (2880, 2916), False, 'from django.db import models\n'), ((2932, 2984), 'django.db.models.TextField', 'models.TextField', (['u"""Sekcja-2"""'], {'blank': '(True)', 'null': '(True)'}), "(u'Sekcja-2', blank=True, null=True)\n", (2948, 2984), False, 'from django.db import models\n'), ((2997, 3041), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'default': '""""""'}), "(max_length=254, default='')\n", (3013, 3041), False, 'from django.db import models\n'), ((3058, 3124), 'django.db.models.ForeignKey', 'models.ForeignKey', (['PageClass'], {'default': '"""1"""', 'blank': '(False)', 'null': '(False)'}), "(PageClass, default='1', blank=False, null=False)\n", (3075, 3124), False, 'from django.db import models\n'), ((3311, 3343), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)'}), '(max_length=254)\n', (3327, 3343), False, 'from django.db import models\n'), ((3443, 3475), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)'}), '(max_length=254)\n', (3459, 3475), False, 'from django.db import models\n'), ((3577, 3609), 'django.db.models.CharField', 
'models.CharField', ([], {'max_length': '(254)'}), '(max_length=254)\n', (3593, 3609), False, 'from django.db import models\n'), ((3722, 3754), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)'}), '(max_length=254)\n', (3738, 3754), False, 'from django.db import models\n'), ((3766, 3811), 'django.db.models.SlugField', 'models.SlugField', ([], {'max_length': '(150)', 'unique': '(True)'}), '(max_length=150, unique=True)\n', (3782, 3811), False, 'from django.db import models\n'), ((3898, 3937), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (3918, 3937), False, 'from django.db import models\n'), ((3951, 3984), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (3970, 3984), False, 'from django.db import models\n'), ((4003, 4046), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(25)', 'blank': '(True)'}), '(max_length=25, blank=True)\n', (4019, 4046), False, 'from django.db import models\n'), ((4062, 4136), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Category'], {'blank': '(True)', 'null': '(True)', 'name': '"""categories"""'}), "(Category, blank=True, null=True, name='categories')\n", (4084, 4136), False, 'from django.db import models\n'), ((4158, 4218), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['GroupCategory'], {'blank': '(True)', 'null': '(True)'}), '(GroupCategory, blank=True, null=True)\n', (4180, 4218), False, 'from django.db import models\n'), ((4235, 4291), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Finishing'], {'blank': '(True)', 'null': '(True)'}), '(Finishing, blank=True, null=True)\n', (4257, 4291), False, 'from django.db import models\n'), ((4310, 4368), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Accessories'], {'blank': '(True)', 'null': '(True)'}), '(Accessories, blank=True, null=True)\n', (4332, 4368), False, 'from django.db import models\n'), ((4535, 4646), 'filebrowser.fields.FileBrowseField', 'FileBrowseField', (['"""PDF"""'], {'max_length': '(200)', 'directory': '"""documents/"""', 'extensions': "['.pdf']", 'blank': '(True)', 'null': '(True)'}), "('PDF', max_length=200, directory='documents/', extensions=[\n '.pdf'], blank=True, null=True)\n", (4550, 4646), False, 'from filebrowser.fields import FileBrowseField\n'), ((5594, 5620), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Product'], {}), '(Product)\n', (5611, 5620), False, 'from django.db import models\n'), ((5787, 5821), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (5806, 5821), False, 'from django.db import models\n'), ((5838, 5872), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (5857, 5872), False, 'from django.db import models\n'), ((5887, 5942), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(False)', 'auto_now': '(True)'}), '(auto_now_add=False, auto_now=True)\n', (5907, 5942), False, 'from django.db import models\n'), ((6046, 6065), 'django.db.models.EmailField', 'models.EmailField', ([], {}), '()\n', (6063, 6065), False, 'from django.db import models\n'), ((6076, 6120), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)', 'default': '""""""'}), "(max_length=254, default='')\n", (6092, 6120), False, 'from django.db import models\n'), ((6640, 6695), 
'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'auto_now': '(False)'}), '(auto_now_add=True, auto_now=False)\n', (6660, 6695), False, 'from django.db import models\n'), ((458, 481), 'django.utils.translation.ugettext_lazy', '_', (['u"""Tytuł sekcji meta"""'], {}), "(u'Tytuł sekcji meta')\n", (459, 481), True, 'from django.utils.translation import ugettext, ugettext_lazy as _\n'), ((641, 663), 'django.utils.translation.ugettext_lazy', '_', (['u"""Opis sekcji meta"""'], {}), "(u'Opis sekcji meta')\n", (642, 663), True, 'from django.utils.translation import ugettext, ugettext_lazy as _\n'), ((926, 959), 'django.utils.html.strip_tags', 'strip_tags', (['self.meta_description'], {}), '(self.meta_description)\n', (936, 959), False, 'from django.utils.html import strip_tags\n'), ((1498, 1509), 'django.utils.translation.ugettext_lazy', '_', (['u"""Tytuł"""'], {}), "(u'Tytuł')\n", (1499, 1509), True, 'from django.utils.translation import ugettext, ugettext_lazy as _\n'), ((1599, 1619), 'django.utils.translation.ugettext_lazy', '_', (['u"""Tytuł Katalogu"""'], {}), "(u'Tytuł Katalogu')\n", (1600, 1619), True, 'from django.utils.translation import ugettext, ugettext_lazy as _\n'), ((1880, 1890), 'django.utils.translation.ugettext_lazy', '_', (['u"""Opis"""'], {}), "(u'Opis')\n", (1881, 1890), True, 'from django.utils.translation import ugettext, ugettext_lazy as _\n'), ((1947, 1962), 'django.utils.translation.ugettext_lazy', '_', (['u"""Kategoria"""'], {}), "(u'Kategoria')\n", (1948, 1962), True, 'from django.utils.translation import ugettext, ugettext_lazy as _\n'), ((2098, 2117), 'django.utils.translation.ugettext_lazy', '_', (['u"""Klasa obrazka"""'], {}), "(u'Klasa obrazka')\n", (2099, 2117), True, 'from django.utils.translation import ugettext, ugettext_lazy as _\n'), ((2595, 2649), 'django.core.urlresolvers.reverse', 'reverse', (['"""adoffice:detail"""'], {'kwargs': "{'slug': self.slug}"}), "('adoffice:detail', kwargs={'slug': self.slug})\n", (2602, 2649), False, 'from django.core.urlresolvers import reverse\n'), ((3847, 3857), 'django.utils.translation.ugettext_lazy', '_', (['u"""Opis"""'], {}), "(u'Opis')\n", (3848, 3857), True, 'from django.utils.translation import ugettext, ugettext_lazy as _\n'), ((4401, 4414), 'django.utils.translation.ugettext_lazy', '_', (['u"""Obrazek"""'], {}), "(u'Obrazek')\n", (4402, 4414), True, 'from django.utils.translation import ugettext, ugettext_lazy as _\n'), ((5653, 5666), 'django.utils.translation.ugettext_lazy', '_', (['u"""Obrazek"""'], {}), "(u'Obrazek')\n", (5654, 5666), True, 'from django.utils.translation import ugettext, ugettext_lazy as _\n'), ((6154, 6175), 'django.utils.translation.ugettext_lazy', '_', (['u"""Imię i Nazwisko"""'], {}), "(u'Imię i Nazwisko')\n", (6155, 6175), True, 'from django.utils.translation import ugettext, ugettext_lazy as _\n'), ((6261, 6272), 'django.utils.translation.ugettext_lazy', '_', (['u"""Firma"""'], {}), "(u'Firma')\n", (6262, 6272), True, 'from django.utils.translation import ugettext, ugettext_lazy as _\n'), ((6354, 6367), 'django.utils.translation.ugettext_lazy', '_', (['u"""Telefon"""'], {}), "(u'Telefon')\n", (6355, 6367), True, 'from django.utils.translation import ugettext, ugettext_lazy as _\n'), ((6425, 6440), 'django.utils.translation.ugettext_lazy', '_', (['u"""Wiadomość"""'], {}), "(u'Wiadomość')\n", (6426, 6440), True, 'from django.utils.translation import ugettext, ugettext_lazy as _\n'), ((6514, 6530), 'django.utils.translation.ugettext_lazy', '_', 
(['u"""Newsletter"""'], {}), "(u'Newsletter')\n", (6515, 6530), True, 'from django.utils.translation import ugettext, ugettext_lazy as _\n'), ((567, 599), 'django.utils.translation.ugettext_lazy', '_', (['u"""Opcjonalny HTML title tag."""'], {}), "(u'Opcjonalny HTML title tag.')\n", (568, 599), True, 'from django.utils.translation import ugettext, ugettext_lazy as _\n'), ((6556, 6622), 'django.utils.translation.ugettext_lazy', '_', (['u"""Chcę być informowany o nowościach i ofertach firmy Achilles."""'], {}), "(u'Chcę być informowany o nowościach i ofertach firmy Achilles.')\n", (6557, 6622), True, 'from django.utils.translation import ugettext, ugettext_lazy as _\n'), ((6878, 6905), 'django.utils.translation.ugettext_lazy', '_', (['u"""Dziękujemy za kontakt"""'], {}), "(u'Dziękujemy za kontakt')\n", (6879, 6905), True, 'from django.utils.translation import ugettext, ugettext_lazy as _\n'), ((6816, 6842), 'django.utils.translation.ugettext_lazy', '_', (['"""Zapytanie strona www """'], {}), "('Zapytanie strona www ')\n", (6817, 6842), True, 'from django.utils.translation import ugettext, ugettext_lazy as _\n')] |
from src.model.luckyMoneyManagement import newLuckyMoney, deleteLuckyMoney, takeLuckyMoney, getLuckyMoney, editWhoTake
from src.model.makeDatabaseConnection import makeDatabaseConnection
def test_():
db = makeDatabaseConnection()
senderID = 12345
msgID = 34567
money = 10000
quantity = 5
luckMoneyInfo: tuple = getLuckyMoney(db, msgID)
if luckMoneyInfo is not None:
deleteLuckyMoney(db, luckMoneyInfo[0])
uuid = newLuckyMoney(db, senderID, msgID, quantity, money)
assert uuid != ''
assert takeLuckyMoney(db, msgID, 200) is True
assert editWhoTake(db, msgID, '12345')
luckMoneyInfo: tuple = getLuckyMoney(db, msgID)
assert luckMoneyInfo[2] == money - 200
assert luckMoneyInfo[3] == quantity - 1
assert luckMoneyInfo[5] == '12345'
assert deleteLuckyMoney(db, uuid) is True
db.close()
| [
"src.model.luckyMoneyManagement.getLuckyMoney",
"src.model.luckyMoneyManagement.deleteLuckyMoney",
"src.model.makeDatabaseConnection.makeDatabaseConnection",
"src.model.luckyMoneyManagement.newLuckyMoney",
"src.model.luckyMoneyManagement.editWhoTake",
"src.model.luckyMoneyManagement.takeLuckyMoney"
] | [((210, 234), 'src.model.makeDatabaseConnection.makeDatabaseConnection', 'makeDatabaseConnection', ([], {}), '()\n', (232, 234), False, 'from src.model.makeDatabaseConnection import makeDatabaseConnection\n'), ((336, 360), 'src.model.luckyMoneyManagement.getLuckyMoney', 'getLuckyMoney', (['db', 'msgID'], {}), '(db, msgID)\n', (349, 360), False, 'from src.model.luckyMoneyManagement import newLuckyMoney, deleteLuckyMoney, takeLuckyMoney, getLuckyMoney, editWhoTake\n'), ((453, 504), 'src.model.luckyMoneyManagement.newLuckyMoney', 'newLuckyMoney', (['db', 'senderID', 'msgID', 'quantity', 'money'], {}), '(db, senderID, msgID, quantity, money)\n', (466, 504), False, 'from src.model.luckyMoneyManagement import newLuckyMoney, deleteLuckyMoney, takeLuckyMoney, getLuckyMoney, editWhoTake\n'), ((588, 619), 'src.model.luckyMoneyManagement.editWhoTake', 'editWhoTake', (['db', 'msgID', '"""12345"""'], {}), "(db, msgID, '12345')\n", (599, 619), False, 'from src.model.luckyMoneyManagement import newLuckyMoney, deleteLuckyMoney, takeLuckyMoney, getLuckyMoney, editWhoTake\n'), ((648, 672), 'src.model.luckyMoneyManagement.getLuckyMoney', 'getLuckyMoney', (['db', 'msgID'], {}), '(db, msgID)\n', (661, 672), False, 'from src.model.luckyMoneyManagement import newLuckyMoney, deleteLuckyMoney, takeLuckyMoney, getLuckyMoney, editWhoTake\n'), ((403, 441), 'src.model.luckyMoneyManagement.deleteLuckyMoney', 'deleteLuckyMoney', (['db', 'luckMoneyInfo[0]'], {}), '(db, luckMoneyInfo[0])\n', (419, 441), False, 'from src.model.luckyMoneyManagement import newLuckyMoney, deleteLuckyMoney, takeLuckyMoney, getLuckyMoney, editWhoTake\n'), ((538, 568), 'src.model.luckyMoneyManagement.takeLuckyMoney', 'takeLuckyMoney', (['db', 'msgID', '(200)'], {}), '(db, msgID, 200)\n', (552, 568), False, 'from src.model.luckyMoneyManagement import newLuckyMoney, deleteLuckyMoney, takeLuckyMoney, getLuckyMoney, editWhoTake\n'), ((811, 837), 'src.model.luckyMoneyManagement.deleteLuckyMoney', 'deleteLuckyMoney', (['db', 'uuid'], {}), '(db, uuid)\n', (827, 837), False, 'from src.model.luckyMoneyManagement import newLuckyMoney, deleteLuckyMoney, takeLuckyMoney, getLuckyMoney, editWhoTake\n')] |
import time
from stem import Signal
from stem.control import Controller
from pytz import timezone
from datetime import datetime
from slack_webhook import Slack
from termcolor import cprint
from requests import get
from bs4 import BeautifulSoup as bs
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
TIMEZONE = timezone("Asia/Kolkata")
URL = "https://instagram.com/{}/"
SOCKSPort = 9050 ## Tor SOCKS listener port
ControlPort = 9051 ## Tor control listener port
OUTPUT_FOLDER = "./output/" ## Output Folder path
slack = Slack(url = "<<ADD_YOUR_SLACK_WEBHOOK_URL_HERE>>")  # closing ">>" of the placeholder was missing
## Proxies for request over Tor
proxies = {
"http" : f"socks5h://localhost:{SOCKSPort}",
"https": f"socks5h://localhost:{ControlPort}"
}
## dict of temporary instagram ids
ids = {
"temp/fake_id1" : "<PASSWORD>",
"temp/fake_id2" : "<PASSWORD>"
}
## list of instagram usernames to scrape/monitor.
usernames = ["<<ADD_INSTAGRAM_USERNAMES_IN_THIS_LIST>>"]
## Creating a Firefox profile and adding some privacy and security preferences.
profile = FirefoxProfile()
profile.set_preference("places.history.enabled", False)
profile.set_preference("privacy.clearOnShutdown.offlineApps", True)
profile.set_preference("privacy.clearOnShutdown.passwords", True)
profile.set_preference("privacy.clearOnShutdown.siteSettings", True)
profile.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
profile.set_preference("signon.rememberSignons", False)
profile.set_preference("network.cookie.lifetimePolicy", 2)
profile.set_preference("network.dns.disablePrefetch", True)
profile.set_preference("network.http.sendRefererHeader", 0)
profile.set_preference("network.proxy.type", 1)
profile.set_preference("network.proxy.socks_version", 5)
profile.set_preference("network.proxy.socks", '127.0.0.1')
profile.set_preference("network.proxy.socks_port", SOCKSPort) ## Replace local PORT value if your Tor service is running on another port.
profile.set_preference("network.proxy.socks_remote_dns", True)
profile.set_preference("permissions.default.image", 2) ## I Disabled image loading, because it will speed up our scraping.
def date():
return datetime.now(TIMEZONE).strftime("%d/%m/%Y")
def timee():
return datetime.now(TIMEZONE).strftime("%H:%M:%S")
def weekday():
return datetime.now(TIMEZONE).strftime("%A")
def lps(prefix, string, color):
cprint(f"{prefix}[{timee()}][{date()}] {string}", color)
slack.post(text = string)
def lprint(prefix, string, color):
cprint(f"{prefix}[{timee()}][{date()}] {string}", color)
def get_tor_ip():
return get("http://httpbin.org/ip", proxies = proxies).text.split('"')[3]
def renew_tor_ip():
ip_before_renewel = get_tor_ip()
    with Controller.from_port(port = ControlPort) as controller:  # use the configured control port rather than a hard-coded 9056
controller.authenticate()
controller.signal(Signal.NEWNYM)
lprint("[*]", "Sleeping for 5 secends...", "green")
time.sleep(5)
ip_after_renewel = get_tor_ip()
if (ip_before_renewel != ip_after_renewel):
lprint("[+]", f"Tor IP renewel confirmed, new ip appears to be {ip_after_renewel}", "green")
else:
lprint("[!]", "Tor IP renewel faild!, trying again...", "red")
renew_tor_ip()
## to verify Tor connectivity
def check_tor_conn():
title = str(bs(get("https://check.torproject.org", proxies = proxies).text, "html.parser").title).split("\n")[2]
if "Congratulations." in title:
lps("[+]", "Tor Connectivity Verified", "blue")
elif "not using Tor" in title:
lps("[-]", "Tor is not configured correctly, Try 'sudo service tor restart'. Aborting...", "red")
exit() | [
"pytz.timezone",
"selenium.webdriver.firefox.firefox_profile.FirefoxProfile",
"time.sleep",
"requests.get",
"datetime.datetime.now",
"slack_webhook.Slack",
"stem.control.Controller.from_port"
] | [((333, 357), 'pytz.timezone', 'timezone', (['"""Asia/Kolkata"""'], {}), "('Asia/Kolkata')\n", (341, 357), False, 'from pytz import timezone\n'), ((540, 586), 'slack_webhook.Slack', 'Slack', ([], {'url': '"""<<ADD_YOUR_SLACK_WEBHOOK_URL_HERE"""'}), "(url='<<ADD_YOUR_SLACK_WEBHOOK_URL_HERE')\n", (545, 586), False, 'from slack_webhook import Slack\n'), ((1047, 1063), 'selenium.webdriver.firefox.firefox_profile.FirefoxProfile', 'FirefoxProfile', ([], {}), '()\n', (1061, 1063), False, 'from selenium.webdriver.firefox.firefox_profile import FirefoxProfile\n'), ((2699, 2730), 'stem.control.Controller.from_port', 'Controller.from_port', ([], {'port': '(9056)'}), '(port=9056)\n', (2719, 2730), False, 'from stem.control import Controller\n'), ((2891, 2904), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (2901, 2904), False, 'import time\n'), ((2139, 2161), 'datetime.datetime.now', 'datetime.now', (['TIMEZONE'], {}), '(TIMEZONE)\n', (2151, 2161), False, 'from datetime import datetime\n'), ((2207, 2229), 'datetime.datetime.now', 'datetime.now', (['TIMEZONE'], {}), '(TIMEZONE)\n', (2219, 2229), False, 'from datetime import datetime\n'), ((2277, 2299), 'datetime.datetime.now', 'datetime.now', (['TIMEZONE'], {}), '(TIMEZONE)\n', (2289, 2299), False, 'from datetime import datetime\n'), ((2566, 2611), 'requests.get', 'get', (['"""http://httpbin.org/ip"""'], {'proxies': 'proxies'}), "('http://httpbin.org/ip', proxies=proxies)\n", (2569, 2611), False, 'from requests import get\n'), ((3265, 3317), 'requests.get', 'get', (['"""https://check.torproject.org"""'], {'proxies': 'proxies'}), "('https://check.torproject.org', proxies=proxies)\n", (3268, 3317), False, 'from requests import get\n')] |
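A minimal, hedged driver for the helpers above; it assumes a local Tor service is actually listening on the configured SOCKS and control ports.

if __name__ == "__main__":
    check_tor_conn()                                      # abort early if traffic is not routed through Tor
    lprint("[*]", f"Current exit IP: {get_tor_ip()}", "green")
    renew_tor_ip()                                        # request a new circuit and confirm the exit IP changed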
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import unittest
from pants.backend.jvm.targets.exclude import Exclude
from pants.backend.jvm.targets.jar_dependency import JarDependency
class JarDependencyTest(unittest.TestCase):
def test_jar_dependency_excludes_change_hash(self):
exclude = Exclude(org='example.com', name='foo-lib')
with_excludes = JarDependency(org='foo',
name='foo',
excludes=[exclude])
without_excludes = JarDependency(org='foo', name='foo')
self.assertNotEqual(with_excludes.cache_key(), without_excludes.cache_key())
| [
"pants.backend.jvm.targets.jar_dependency.JarDependency",
"pants.backend.jvm.targets.exclude.Exclude"
] | [((553, 595), 'pants.backend.jvm.targets.exclude.Exclude', 'Exclude', ([], {'org': '"""example.com"""', 'name': '"""foo-lib"""'}), "(org='example.com', name='foo-lib')\n", (560, 595), False, 'from pants.backend.jvm.targets.exclude import Exclude\n'), ((616, 672), 'pants.backend.jvm.targets.jar_dependency.JarDependency', 'JarDependency', ([], {'org': '"""foo"""', 'name': '"""foo"""', 'excludes': '[exclude]'}), "(org='foo', name='foo', excludes=[exclude])\n", (629, 672), False, 'from pants.backend.jvm.targets.jar_dependency import JarDependency\n'), ((764, 800), 'pants.backend.jvm.targets.jar_dependency.JarDependency', 'JarDependency', ([], {'org': '"""foo"""', 'name': '"""foo"""'}), "(org='foo', name='foo')\n", (777, 800), False, 'from pants.backend.jvm.targets.jar_dependency import JarDependency\n')] |
import unittest
import pcre2
class MatchTestCase(unittest.TestCase):
def setUp(self):
pass
def test_abc(self):
p = pcre2.PCRE2(r'hello.+'.encode())
match = p.search('this is hello world.'.encode())
self.assertIsNotNone(match)
match = p.search('this should be not found.'.encode())
self.assertIsNone(match)
def test_group(self):
content = 'this is hello world.'.encode()
p = pcre2.PCRE2('hello.+'.encode())
match = p.search(content)
self.assertEqual(match.group(0), b'hello world.')
p = pcre2.PCRE2(r'(hello)(.+)'.encode())
match = p.search(content)
self.assertEqual(match.group(0), b'hello world.')
self.assertEqual(match.group(1), b'hello')
self.assertEqual(match.group(2), b' world.')
self.assertListEqual(match.groups(), [b'hello', b' world.'])
def test_chinese(self):
content = '我来到北京敏感词广场,请遵守中华人民共和国法律.'.encode()
p = pcre2.PCRE2(r'共和国.+'.encode())
match = p.search(content)
self.assertEqual(match.group(0), '共和国法律.'.encode())
p = pcre2.PCRE2(r'(北京)(\w+)广场'.encode())
match = p.search(content)
self.assertIsNone(match)
p = pcre2.PCRE2(r'(北京)(\w+)广场'.encode(), pcre2.UTF | pcre2.UCP)
match = p.search(content)
self.assertEqual(match.group(0), '北京敏感词广场'.encode())
self.assertEqual(match.group(1), '北京'.encode())
self.assertEqual(match.group(2), '敏感词'.encode())
self.assertListEqual(
match.groups(), ['北京'.encode(), '敏感词'.encode()])
if __name__ == '__main__':
unittest.main()
| [
"unittest.main"
] | [((1636, 1651), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1649, 1651), False, 'import unittest\n')] |
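A short usage sketch built only from the pcre2 calls exercised in the test above (PCRE2, search, group, groups); the pattern and subject strings are invented examples.

import pcre2

pattern = pcre2.PCRE2(r'(\d+)-(\d+)'.encode())
match = pattern.search('range 10-20 applies'.encode())
if match is not None:
    print(match.group(0))   # b'10-20'
    print(match.groups())   # [b'10', b'20']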
import numpy as np
import matplotlib.pyplot as plt
def manual_label(array):
th = 0.1
for i in range(array.shape[0]):
img = (array[i,3,:,:]-array[i,4,:,:])/(array[i,3,:,:]+array[i,4,:,:])
plt.subplot(121)
plt.imshow(img>th)
plt.subplot(122)
plt.imshow(array[i, 5, :, :])
plt.show()
return array
if __name__=='__main__':
th = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
window_size = 3
array = np.load('../data/proj3_test_img.npy')
print(array.shape)
array = manual_label(array)
| [
"matplotlib.pyplot.imshow",
"numpy.load",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] | [((473, 510), 'numpy.load', 'np.load', (['"""../data/proj3_test_img.npy"""'], {}), "('../data/proj3_test_img.npy')\n", (480, 510), True, 'import numpy as np\n'), ((211, 227), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (222, 227), True, 'import matplotlib.pyplot as plt\n'), ((236, 256), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(img > th)'], {}), '(img > th)\n', (246, 256), True, 'import matplotlib.pyplot as plt\n'), ((263, 279), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (274, 279), True, 'import matplotlib.pyplot as plt\n'), ((288, 317), 'matplotlib.pyplot.imshow', 'plt.imshow', (['array[i, 5, :, :]'], {}), '(array[i, 5, :, :])\n', (298, 317), True, 'import matplotlib.pyplot as plt\n'), ((326, 336), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (334, 336), True, 'import matplotlib.pyplot as plt\n')] |
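A hedged standalone sketch of the normalized band ratio that manual_label() thresholds, computed on synthetic data so the step can be tried without the .npy file.

import numpy as np

bands = np.random.rand(6, 32, 32)                    # stand-in for one array[i] entry (6 bands)
ratio = (bands[3] - bands[4]) / (bands[3] + bands[4])
mask = ratio > 0.1                                   # same threshold as in manual_label
print(mask.sum(), "pixels above threshold")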
import numpy as np
from lagom.core.multiprocessing import BaseIterativeMaster
class BaseExperimentMaster(BaseIterativeMaster):
"""
Base class of the master for parallelized experiment.
For details about master in general, please refer to
the documentation of the class, BaseIterativeMaster.
All inherited subclasses should implement the following function:
1. process_algo_result(self, config, result)
2. make_configs(self)
"""
def __init__(self,
worker_class,
num_worker,
daemonic_worker=None):
"""
Args:
worker_class (BaseWorker): a callable worker class. Note that it is not recommended to
send instantiated object of the worker class, but send class instead.
num_worker (int): number of workers. Recommended to be the same as number of CPU cores.
daemonic_worker (bool): If True, then set all workers to be daemonic.
Because if main process crashes, we should not cause things to hang.
"""
self.configs = self.make_configs()
num_iteration = int(np.ceil(len(self.configs)/num_worker))
assert len(self.configs) <= num_iteration*num_worker, 'More configurations than capacity. '
assert len(self.configs) > (num_iteration - 1)*num_worker, 'Too many unused iterations. '
super().__init__(num_iteration=num_iteration,
worker_class=worker_class,
num_worker=num_worker,
init_seed=0, # Don't use this internal seeder, but set it in configuration
daemonic_worker=daemonic_worker)
self.splitted_configs = np.array_split(self.configs, num_iteration)
for config in self.splitted_configs:
assert len(config.tolist()) <= num_worker
def make_tasks(self, iteration):
tasks = self.splitted_configs.pop(0).tolist()
return tasks
def _process_workers_result(self, tasks, workers_result):
for config, (task_id, result) in zip(tasks, workers_result):
self.process_algo_result(config, result)
def process_algo_result(self, config, result):
"""
User-defined function to process the result of the execution
of the algorithm given the configuration.
Args:
config (dict): dictionary of configurations.
result (object): result of algorithm execution returned from Algorithm.__call__().
"""
raise NotImplementedError
def make_configs(self):
"""
User-defined function to define all configurations.
e.g. hyperparameters and algorithm settings.
        It is recommended to use the Config class: define the different
        configuration options and call make_configs() to return a list of
        all automatically generated combinations of configurations.
Returns:
configs (list): output from config.make_configs
Examples:
config = Config()
config.add_item(name='algo', val='RL')
config.add_item(name='iter', val=30)
config.add_item(name='hidden_sizes', val=[64, 32, 16])
config.add_random_eps(name='lr', base=10, low=-6, high=0, num_sample=10)
config.add_random_continuous(name='values', low=-5, high=5, num_sample=5)
config.add_random_discrete(name='select', list_val=[43223, 5434, 21314], num_sample=10, replace=True)
configs = config.make_configs()
return configs
"""
raise NotImplementedError | [
"numpy.array_split"
] | [((1776, 1819), 'numpy.array_split', 'np.array_split', (['self.configs', 'num_iteration'], {}), '(self.configs, num_iteration)\n', (1790, 1819), True, 'import numpy as np\n')] |
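A hedged sketch of the two overrides that the docstrings above require from subclasses; the configuration values are invented, and the worker class passed at construction time is not shown.

class MyExperimentMaster(BaseExperimentMaster):
    def make_configs(self):
        # a tiny hand-rolled grid instead of the Config helper mentioned in the docstring
        return [{'ID': i, 'lr': lr, 'seed': i} for i, lr in enumerate([1e-2, 1e-3, 1e-4])]

    def process_algo_result(self, config, result):
        print('config {} finished with result {}'.format(config['ID'], result))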
from typing import List
from fastapi import Depends, FastAPI, HTTPException
from sqlalchemy.orm import Session
import time
from fastapi.middleware.cors import CORSMiddleware
from starlette.responses import RedirectResponse
from typing import TypeVar, Generic, Type, Any
from xml.etree.ElementTree import fromstring
import xml.etree.cElementTree as ET
from starlette.requests import Request
import sys
from pydantic import BaseModel
import os
import json
from model import get_bsimg_pred
# Start the FastAPI app
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=['*'],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
class Message(BaseModel):
bsimg: str
@app.get("/")
async def main():
return RedirectResponse('/docs')
@app.post("/message/")
async def m(msg: Message):
content = get_bsimg_pred(bsimg=msg.bsimg)
return {
'content' : content
}
# Start the service
if __name__ == '__main__':
import uvicorn
uvicorn.run(app, host="127.0.0.1", port=8000) | [
"starlette.responses.RedirectResponse",
"model.get_bsimg_pred",
"fastapi.FastAPI",
"uvicorn.run"
] | [((503, 512), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (510, 512), False, 'from fastapi import Depends, FastAPI, HTTPException\n'), ((746, 771), 'starlette.responses.RedirectResponse', 'RedirectResponse', (['"""/docs"""'], {}), "('/docs')\n", (762, 771), False, 'from starlette.responses import RedirectResponse\n'), ((837, 868), 'model.get_bsimg_pred', 'get_bsimg_pred', ([], {'bsimg': 'msg.bsimg'}), '(bsimg=msg.bsimg)\n', (851, 868), False, 'from model import get_bsimg_pred\n'), ((974, 1019), 'uvicorn.run', 'uvicorn.run', (['app'], {'host': '"""127.0.0.1"""', 'port': '(8000)'}), "(app, host='127.0.0.1', port=8000)\n", (985, 1019), False, 'import uvicorn\n')] |
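A hedged client-side sketch for the /message/ endpoint above; it assumes the service is running locally on port 8000, and the bsimg value is a placeholder for base64-encoded image data.

import requests

resp = requests.post("http://127.0.0.1:8000/message/", json={"bsimg": "<base64 image data>"})
print(resp.json()["content"])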
import tkinter
from PIL import ImageTk
from PIL import Image
import os
root = tkinter.Tk()
img = ImageTk.PhotoImage(Image.open(
"C:\\Users\\mem_d\\Downloads\\external-content.duckduckgo.com.png"))
panel = tkinter.Label(root, image=img)
panel.pack(side="bottom", fill="both", expand="yes")
label1 = tkinter.Label(root, text="some text")
label2 = tkinter.Label(root, text="syou ugly ")
# pack() is used for every widget: pack and grid cannot be mixed in the same container
label1.pack()
label2.pack()
entry = tkinter.Entry(root)  # renamed from "input" so the built-in is not shadowed
entry.pack()
def butt_click():
    lab = tkinter.Label(root, text="Moan and groan")
    lab.pack()
butt = tkinter.Button(root, text="moo", command=butt_click)
butt.pack()
root.mainloop()
| [
"tkinter.Tk",
"tkinter.Label",
"PIL.Image.open"
] | [((79, 91), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (89, 91), False, 'import tkinter\n'), ((211, 241), 'tkinter.Label', 'tkinter.Label', (['root'], {'image': 'img'}), '(root, image=img)\n', (224, 241), False, 'import tkinter\n'), ((117, 195), 'PIL.Image.open', 'Image.open', (['"""C:\\\\Users\\\\mem_d\\\\Downloads\\\\external-content.duckduckgo.com.png"""'], {}), "('C:\\\\Users\\\\mem_d\\\\Downloads\\\\external-content.duckduckgo.com.png')\n", (127, 195), False, 'from PIL import Image\n')] |
from django.contrib import admin
from models import UserProfile
class UserProfileAdmin(admin.ModelAdmin):
pass
admin.site.register(UserProfile, UserProfileAdmin)
| [
"django.contrib.admin.site.register"
] | [((120, 170), 'django.contrib.admin.site.register', 'admin.site.register', (['UserProfile', 'UserProfileAdmin'], {}), '(UserProfile, UserProfileAdmin)\n', (139, 170), False, 'from django.contrib import admin\n')] |
import numpy as np
import tensorflow as tf
import os
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import _pickle as cPickle
from scipy.io import savemat
from scipy.io import loadmat
from alexnet_face_classifier import *
from utils import *
class transfer_learning_graph:
def __init__(self, num_classes, nhid, cnn):
self.num_classes = num_classes
self.f_maps = tf.placeholder(tf.float32, shape = [None, 43264])
self.keep_prob = tf.placeholder(tf.float32)
self.labels_1hot = tf.placeholder(tf.float32, shape=[None, self.num_classes])
self.cnn = cnn(None, self.f_maps, self.num_classes, self.keep_prob)
self.cnn.fc_layers(transfer_learning=True, nhid=nhid)
def train_graph(self, rate, decay_lam=0):
# cost function
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = self.labels_1hot, logits=self.cnn.fc2))
decay_penalty = decay_lam*(tf.reduce_sum(tf.square(self.cnn.fc1W))+tf.reduce_sum(tf.square(self.cnn.fc2W)))
self.cost = cross_entropy + decay_penalty
self.train_step = tf.train.AdamOptimizer(rate).minimize(self.cost)
def predict_graph(self):
correct_prediction = tf.equal(tf.argmax(self.cnn.fc2,1), tf.argmax(self.labels_1hot,1))
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
def train(graph, batch_size, training_set, training_labels, validation_set, validation_labels, decay_lambda, rate, keep_prob, iter, weight_file1, weight_file2, sess):
# create learning graph
graph.train_graph(rate, decay_lambda)
sess.run(tf.global_variables_initializer())
# keep track of best performance
best_validation_cost = 1E10
best_validation_accuracy = 0
# keep track of learning curves
train_accuracies = []
train_costs = []
validation_accuracies = []
validation_costs = []
weight_names = ['fc1W', 'fc1b', 'fc2W', 'fc2b']
for j in range(iter):
batch_xs, batch_ys = get_batch(training_set, training_labels, batch_size, graph.num_classes)
graph.train_step.run(feed_dict={graph.f_maps:batch_xs, graph.labels_1hot:batch_ys, graph.keep_prob:keep_prob})
# if j%2 == 0:
# batch_xs, batch_ys = get_batch(training_set, training_labels, batch_size, graph.num_classes)
# graph.train_step.run(feed_dict={graph.f_maps:batch_xs, graph.labels_1hot:batch_ys, graph.keep_prob:keep_prob})
#
# else:
# batch_xs, batch_ys = get_random_batch(training_set, training_labels, batch_size)
# graph.train_step.run(feed_dict={graph.f_maps:batch_xs, graph.labels_1hot:batch_ys, graph.keep_prob:keep_prob})
# evaluate every 5 steps
if (j+1)%5 == 0:
print('iteration'+str(j+1))
train_accuracy = sess.run(graph.accuracy, feed_dict={graph.f_maps:batch_xs, graph.labels_1hot:batch_ys, graph.keep_prob:1.0})
train_cost = sess.run(graph.cost, feed_dict={graph.f_maps:batch_xs, graph.labels_1hot:batch_ys, graph.keep_prob:1.0})/batch_size/batch_xs.shape[0]
validation_accuracy = sess.run(graph.accuracy, feed_dict={graph.f_maps:validation_set, graph.labels_1hot:validation_labels, graph.keep_prob:1.0})
validation_cost = sess.run(graph.cost, feed_dict={graph.f_maps:validation_set, graph.labels_1hot:validation_labels, graph.keep_prob:1.0})/validation_set.shape[0]
print('est training accuracy is {}'.format(train_accuracy))
print('est training cost is {}'.format(train_cost))
print('validation accuracy is {}'.format(validation_accuracy))
print('validation cost is {}'.format(validation_cost))
train_accuracies.append(train_accuracy)
validation_accuracies.append(validation_accuracy)
train_costs.append(train_cost)
validation_costs.append(validation_cost)
# keep track of weight data for best performance
if validation_accuracy >= best_validation_accuracy:
best_validation_accuracy = validation_accuracy
best_validation_cost = validation_cost
best_weights = {}
for i in range(len(graph.cnn.fc_parameters)):
best_weights[weight_names[i]] = sess.run(graph.cnn.fc_parameters[i])
# plot learning curves
cPickle.dump(best_weights, open('transfer_learning_fc_weights_17.pkl', 'wb'))
f1 = plt.figure(1)
plt.plot(range(5, iter+1, 5), train_accuracies, color='blue', linestyle='solid')
plt.plot(range(5, iter+1, 5), validation_accuracies, color='red', linestyle='solid')
f1.savefig("tl_accuracies_17faces.pdf", bbox_inches='tight')
f2 = plt.figure(2)
plt.plot(range(5, iter+1, 5), train_costs, color='blue', linestyle='solid')
plt.plot(range(5, iter+1, 5), validation_costs, color='red', linestyle='solid')
f2.savefig('tl_costs_17faces.pdf', bbox_inches='tight')
print('best validation accuracy is {}'.format(best_validation_accuracy))
print('best validation cost is {}'.format(best_validation_cost))
print('corresponding training accuracy is {}'.format(sess.run(graph.accuracy, feed_dict={graph.f_maps:batch_xs, graph.labels_1hot:batch_ys, graph.keep_prob:1.0})))
print('corresponding training cost is {}'.format(sess.run(graph.cost, feed_dict={graph.f_maps:batch_xs, graph.labels_1hot:batch_ys, graph.keep_prob:1.0})))
def test(graph, test_set, test_labels, weight_file, sess):
graph.cnn.load_weights(weight_file, sess, fc_only=True)
test_accuracy = sess.run(graph.accuracy, feed_dict={graph.f_maps:test_set, graph.labels_1hot:test_labels, graph.keep_prob:1.0})
print('test accuracy is {}'.format(test_accuracy))
###
full_sets, label_sets = get_data_and_labels('training_set_17_conv5.pkl', 'validation_set_17_conv5.pkl', 'test_set_17_conv5.pkl')
training_set, validation_set, test_set = full_sets
training_labels, validation_labels, test_labels = label_sets
tl_graph = transfer_learning_graph(17, 100, alexnet_face_classifier)
tl_graph.predict_graph()
with tf.Session() as sess:
train(tl_graph, 30, training_set, training_labels, validation_set, validation_labels, 1E-2, 5E-4, 0.5, 1000,'alexnet_weights.pkl', None, sess)
with tf.Session() as sess:
test(tl_graph, test_set, test_labels, 'transfer_learning_fc_weights_17.pkl', sess)
| [
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.global_variables_initializer",
"matplotlib.pyplot.figure",
"tensorflow.argmax",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"matplotlib.pyplot.switch_backend",
"tensorflow.train.AdamOptimizer",
"tensorflow.cast",
"tensorflow.square"
] | [((85, 110), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""agg"""'], {}), "('agg')\n", (103, 110), True, 'import matplotlib.pyplot as plt\n'), ((4638, 4651), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (4648, 4651), True, 'import matplotlib.pyplot as plt\n'), ((4913, 4926), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (4923, 4926), True, 'import matplotlib.pyplot as plt\n'), ((6330, 6342), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (6340, 6342), True, 'import tensorflow as tf\n'), ((6509, 6521), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (6519, 6521), True, 'import tensorflow as tf\n'), ((398, 445), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 43264]'}), '(tf.float32, shape=[None, 43264])\n', (412, 445), True, 'import tensorflow as tf\n'), ((473, 499), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (487, 499), True, 'import tensorflow as tf\n'), ((527, 585), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.num_classes]'}), '(tf.float32, shape=[None, self.num_classes])\n', (541, 585), True, 'import tensorflow as tf\n'), ((1666, 1699), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1697, 1699), True, 'import tensorflow as tf\n'), ((847, 937), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'labels': 'self.labels_1hot', 'logits': 'self.cnn.fc2'}), '(labels=self.labels_1hot, logits=\n self.cnn.fc2)\n', (886, 937), True, 'import tensorflow as tf\n'), ((1262, 1288), 'tensorflow.argmax', 'tf.argmax', (['self.cnn.fc2', '(1)'], {}), '(self.cnn.fc2, 1)\n', (1271, 1288), True, 'import tensorflow as tf\n'), ((1289, 1319), 'tensorflow.argmax', 'tf.argmax', (['self.labels_1hot', '(1)'], {}), '(self.labels_1hot, 1)\n', (1298, 1319), True, 'import tensorflow as tf\n'), ((1359, 1398), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (1366, 1398), True, 'import tensorflow as tf\n'), ((1137, 1165), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['rate'], {}), '(rate)\n', (1159, 1165), True, 'import tensorflow as tf\n'), ((985, 1009), 'tensorflow.square', 'tf.square', (['self.cnn.fc1W'], {}), '(self.cnn.fc1W)\n', (994, 1009), True, 'import tensorflow as tf\n'), ((1025, 1049), 'tensorflow.square', 'tf.square', (['self.cnn.fc2W'], {}), '(self.cnn.fc2W)\n', (1034, 1049), True, 'import tensorflow as tf\n')] |
import heapq
class Solution:
def maxPerformance(self, n, speed, efficient, k):
sq = []
ans = sSum = 0
teams = sorted([(e, s) for e, s in zip(efficient, speed)], reverse=True)
for i, (e, s) in enumerate(teams):
sSum += s
if i >= k:
sSum -= heapq.heappushpop(sq, s)
else:
heapq.heappush(sq, s)
ans = max(ans, sSum * e)
return ans % (10 ** 9 + 7)
| [
"heapq.heappush",
"heapq.heappushpop"
] | [((317, 341), 'heapq.heappushpop', 'heapq.heappushpop', (['sq', 's'], {}), '(sq, s)\n', (334, 341), False, 'import heapq\n'), ((376, 397), 'heapq.heappush', 'heapq.heappush', (['sq', 's'], {}), '(sq, s)\n', (390, 397), False, 'import heapq\n')] |
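A small check of the solution above on the standard team-performance example (pick at most k engineers to maximize sum(speed) * min(efficiency)); for this input the expected answer is 60.

sol = Solution()
print(sol.maxPerformance(6, [2, 10, 3, 1, 5, 8], [5, 4, 3, 9, 7, 2], 2))  # 60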
import requests
from pyowm.owm import OWM
from pyowm.utils import timestamps
import geocoder
import json
import os
from pathlib import Path
#os.chdir('pac')
cwd=Path(__file__).parent
try:
from pac import voice_io
except ModuleNotFoundError:
import voice_io
g = geocoder.ip('me')
ct=(g.city)
with open(f"{cwd}\creds.json", "r") as f:
data = json.load(f)
api=data['apis'][1]['owm']
#weather
def weather_curr():
base_url = "http://api.openweathermap.org/data/2.5/weather?"
url = base_url + "&q=" + ct + "&appid=" + api
response = requests.get(url)
x = response.json()
if x["cod"] == "404":
voice_io.show("Oops! it looks like i ran into a problem fetching your request, maybe try again later?")
else:
y = x["main"]
curr_temperature = y["temp"]
curr_pressure = y["pressure"]
curr_humidity = y["humidity"]
z = x["weather"]
weather_desc = z[0]["description"]
voice_io.show(f"The current temperatre in {ct} is {str(round(curr_temperature-273))}°C" + ". It's a " +str(weather_desc))
#weather forecaster
def weather_forec():
voice_io.show("Sorry i am currently restricted to show weather forecast for tomorrow only. \nLook out for future updates and see if my handcuffs are set free. Here's tomorrow's weather forecast anyway.")
owm = OWM(api)
mgr=owm.weather_manager()
loc = mgr.weather_at_place(ct)
weather = loc.weather
temp = weather.temperature(unit='celsius')
for key,val in temp.items():
if key=="temp":
voice_io.show(f'\nThe temperature tommorow will be around {val}°C.')
else:
continue
loa = mgr.forecast_at_place(ct,'3h')
tomorrow=timestamps.tomorrow()
forecasttt=loa.get_weather_at(tomorrow)
status=(forecasttt.status)
voice_io.show(f'And the sky would remain {status}')
| [
"geocoder.ip",
"pyowm.owm.OWM",
"pathlib.Path",
"pyowm.utils.timestamps.tomorrow",
"requests.get",
"voice_io.show",
"json.load"
] | [((272, 289), 'geocoder.ip', 'geocoder.ip', (['"""me"""'], {}), "('me')\n", (283, 289), False, 'import geocoder\n'), ((162, 176), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (166, 176), False, 'from pathlib import Path\n'), ((357, 369), 'json.load', 'json.load', (['f'], {}), '(f)\n', (366, 369), False, 'import json\n'), ((563, 580), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (575, 580), False, 'import requests\n'), ((1152, 1368), 'voice_io.show', 'voice_io.show', (['"""Sorry i am currently restricted to show weather forecast for tomorrow only. \nLook out for future updates and see if my handcuffs are set free. Here\'s tomorrow\'s weather forecast anyway."""'], {}), '(\n """Sorry i am currently restricted to show weather forecast for tomorrow only. \nLook out for future updates and see if my handcuffs are set free. Here\'s tomorrow\'s weather forecast anyway."""\n )\n', (1165, 1368), False, 'import voice_io\n'), ((1366, 1374), 'pyowm.owm.OWM', 'OWM', (['api'], {}), '(api)\n', (1369, 1374), False, 'from pyowm.owm import OWM\n'), ((1740, 1761), 'pyowm.utils.timestamps.tomorrow', 'timestamps.tomorrow', ([], {}), '()\n', (1759, 1761), False, 'from pyowm.utils import timestamps\n'), ((1841, 1892), 'voice_io.show', 'voice_io.show', (['f"""And the sky would remain {status}"""'], {}), "(f'And the sky would remain {status}')\n", (1854, 1892), False, 'import voice_io\n'), ((645, 758), 'voice_io.show', 'voice_io.show', (['"""Oops! it looks like i ran into a problem fetching your request, maybe try again later?"""'], {}), "(\n 'Oops! it looks like i ran into a problem fetching your request, maybe try again later?'\n )\n", (658, 758), False, 'import voice_io\n'), ((1582, 1653), 'voice_io.show', 'voice_io.show', (['f"""\nThe temperature tommorow will be around {val}°C."""'], {}), '(f"""\nThe temperature tommorow will be around {val}°C.""")\n', (1595, 1653), False, 'import voice_io\n')] |
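A minimal, hedged driver for the two helpers above; it assumes creds.json holds a valid OpenWeatherMap key and that voice_io.show() simply displays its argument.

if __name__ == "__main__":
    weather_curr()     # current conditions for the geolocated city
    weather_forec()    # tomorrow's forecast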
"""
Module for face detection using face_recognition library.
"""
import os
import time
import cv2
import face_recognition
rect_line_color = (0, 255, 0)
rect_line_width = 2
def detect_face_face_recognition(model, image, save_false_finding=True,
location="face_recognition"):
"""
Detects faces on the received image using face_recognition library.
:param model: model to be used to detect faces
:param image: image on which the face should be detected
:param save_false_finding: flag if incorrectly classified images should be
saved to the disc
:param location: folder name where false findings shall be saved
"""
image_copy = image.copy()
face_locations = face_recognition.face_locations(image_copy, model=model)
if save_false_finding:
save_false_findings_face_recognition(
face_locations, image_copy, location)
return len(face_locations)
def save_false_findings_face_recognition(face_locations, image, location):
"""
Saves images that contain exactly one face, but the algorithm either did
    not find any face on them, or found more than one. In the latter case,
    rectangles are drawn around every place where a face was found.
    :param face_locations: locations of faces found by the algorithm
:param image: image under evaluation
:param location: folder name where false findings shall be saved
"""
if len(face_locations) == 1:
return
if not len(face_locations):
path_none = os.path.join("./false_findings", location, "none/")
if not os.path.exists(path_none):
os.makedirs(path_none)
cv2.imwrite(
path_none + str(time.time()) + ".jpg",
cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
if len(face_locations) > 1:
face_landmarks_list = face_recognition.face_landmarks(image)
for landmark in face_landmarks_list:
for face_element in landmark:
for point in landmark[face_element]:
cv2.circle(
image, point, 1, rect_line_color, rect_line_width)
path_too_many = os.path.join(
"./false_findings", location, "too_many/")
if not os.path.exists(path_too_many):
os.makedirs(path_too_many)
cv2.imwrite(
path_too_many + str(time.time()) + ".jpg",
cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
| [
"face_recognition.face_locations",
"os.path.exists",
"os.makedirs",
"face_recognition.face_landmarks",
"os.path.join",
"cv2.circle",
"cv2.cvtColor",
"time.time"
] | [((741, 797), 'face_recognition.face_locations', 'face_recognition.face_locations', (['image_copy'], {'model': 'model'}), '(image_copy, model=model)\n', (772, 797), False, 'import face_recognition\n'), ((1547, 1598), 'os.path.join', 'os.path.join', (['"""./false_findings"""', 'location', '"""none/"""'], {}), "('./false_findings', location, 'none/')\n", (1559, 1598), False, 'import os\n'), ((1862, 1900), 'face_recognition.face_landmarks', 'face_recognition.face_landmarks', (['image'], {}), '(image)\n', (1893, 1900), False, 'import face_recognition\n'), ((2173, 2228), 'os.path.join', 'os.path.join', (['"""./false_findings"""', 'location', '"""too_many/"""'], {}), "('./false_findings', location, 'too_many/')\n", (2185, 2228), False, 'import os\n'), ((1614, 1639), 'os.path.exists', 'os.path.exists', (['path_none'], {}), '(path_none)\n', (1628, 1639), False, 'import os\n'), ((1653, 1675), 'os.makedirs', 'os.makedirs', (['path_none'], {}), '(path_none)\n', (1664, 1675), False, 'import os\n'), ((1760, 1798), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (1772, 1798), False, 'import cv2\n'), ((2257, 2286), 'os.path.exists', 'os.path.exists', (['path_too_many'], {}), '(path_too_many)\n', (2271, 2286), False, 'import os\n'), ((2300, 2326), 'os.makedirs', 'os.makedirs', (['path_too_many'], {}), '(path_too_many)\n', (2311, 2326), False, 'import os\n'), ((2415, 2453), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (2427, 2453), False, 'import cv2\n'), ((2062, 2123), 'cv2.circle', 'cv2.circle', (['image', 'point', '(1)', 'rect_line_color', 'rect_line_width'], {}), '(image, point, 1, rect_line_color, rect_line_width)\n', (2072, 2123), False, 'import cv2\n'), ((1725, 1736), 'time.time', 'time.time', ([], {}), '()\n', (1734, 1736), False, 'import time\n'), ((2380, 2391), 'time.time', 'time.time', ([], {}), '()\n', (2389, 2391), False, 'import time\n')] |
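A hedged usage sketch for the detector above; the image path is a hypothetical placeholder, and "hog" is one of the detection models face_recognition accepts (the other being "cnn").

import face_recognition

image = face_recognition.load_image_file("samples/person.jpg")    # hypothetical path
n_faces = detect_face_face_recognition("hog", image, save_false_finding=False)
print(f"{n_faces} face(s) found")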
# -*- coding: utf-8 -*-
#
# This file is part of django-email-change.
#
# django-email-change adds support for email address change and confirmation.
#
# Development Web Site:
# - http://www.codetrax.org/projects/django-email-change
# Public Source Code Repository:
# - https://source.codetrax.org/hgroot/django-email-change
#
# Copyright 2010 <NAME> <gnot [at] g-loaded.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from django.apps import apps
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect
from django.template.loader import render_to_string
from django.core.mail import send_mail
from email_change.forms import EmailChangeForm
from email_change.utils import generate_key
@login_required
def email_change_view(request, extra_context={},
success_url='email_verification_sent',
template_name='email_change/email_change_form.html',
email_message_template_name='email_change/emails/verification_email_message.html',
email_subject_template_name='email_change/emails/verification_email_subject.html',
form_class=EmailChangeForm):
"""Allow a user to change the email address associated with the
user account.
"""
if request.method == 'POST':
form = form_class(username=request.user.username,
data=request.POST,
files=request.FILES)
if form.is_valid():
EmailChangeRequest = apps.get_model('email_change',
'EmailChangeRequest')
email = form.cleaned_data.get('email')
verification_key = generate_key(request.user, email)
site_name = getattr(settings, 'SITE_NAME',
'Please define settings.SITE_NAME')
domain = getattr(settings, 'SITE_URL', None)
if domain is None:
Site = apps.get_model('sites', 'Site')
current_site = Site.objects.get_current()
site_name = current_site.name
domain = current_site.domain
protocol = 'http'
if request.is_secure():
protocol = 'https'
# First clean all email change requests made by this user
qs = EmailChangeRequest.objects.filter(user=request.user)
qs.delete()
# Create an email change request
EmailChangeRequest.objects.create(
user=request.user,
verification_key=verification_key,
email=email
)
# Prepare context
c = {
'email': email,
'site_domain': domain,
'site_name': site_name,
'support_email': settings.SUPPORT_EMAIL,
'user': request.user,
'verification_key': verification_key,
'protocol': protocol,
}
c.update(extra_context)
# Send success email
subject = render_to_string(email_subject_template_name, c).strip()
message = render_to_string(email_message_template_name, c)
send_mail(subject, message, None, [email])
# Redirect
return redirect(success_url)
else:
form = form_class(username=request.user.username)
extra_context['form'] = form
return render(request, template_name, extra_context)
@login_required
def email_verify_view(request, verification_key, extra_context={},
success_url='email_change_complete',
template_name='email_change/email_verify.html'):
"""
"""
EmailChangeRequest = apps.get_model('email_change', 'EmailChangeRequest')
try:
ecr = EmailChangeRequest.objects.get(
user=request.user, verification_key=verification_key)
except EmailChangeRequest.DoesNotExist:
# Return failure response
return render(request, template_name, extra_context)
else:
# Check if the email change request has expired
if ecr.has_expired():
ecr.delete()
# Return failure response
            return render(request, template_name, extra_context)
# Success. Replace the user's email with the new email
request.user.email = ecr.email
request.user.save()
# Delete the email change request
ecr.delete()
# Redirect to success URL
return redirect(success_url)
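# A hypothetical urls.py wiring for the two views above (route names and URL
# patterns are illustrative and not taken from the package):
#
#   from django.urls import path
#   from email_change import views
#
#   urlpatterns = [
#       path('email/change/', views.email_change_view, name='email_change'),
#       path('email/verify/<verification_key>/', views.email_verify_view,
#            name='email_verify'),
#   ]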
| [
"django.shortcuts.render",
"django.apps.apps.get_model",
"django.core.mail.send_mail",
"django.shortcuts.redirect",
"email_change.utils.generate_key",
"django.template.loader.render_to_string"
] | [((4037, 4082), 'django.shortcuts.render', 'render', (['request', 'template_name', 'extra_context'], {}), '(request, template_name, extra_context)\n', (4043, 4082), False, 'from django.shortcuts import render, redirect\n'), ((4339, 4391), 'django.apps.apps.get_model', 'apps.get_model', (['"""email_change"""', '"""EmailChangeRequest"""'], {}), "('email_change', 'EmailChangeRequest')\n", (4353, 4391), False, 'from django.apps import apps\n'), ((5133, 5154), 'django.shortcuts.redirect', 'redirect', (['success_url'], {}), '(success_url)\n', (5141, 5154), False, 'from django.shortcuts import render, redirect\n'), ((2096, 2148), 'django.apps.apps.get_model', 'apps.get_model', (['"""email_change"""', '"""EmailChangeRequest"""'], {}), "('email_change', 'EmailChangeRequest')\n", (2110, 2148), False, 'from django.apps import apps\n'), ((2281, 2314), 'email_change.utils.generate_key', 'generate_key', (['request.user', 'email'], {}), '(request.user, email)\n', (2293, 2314), False, 'from email_change.utils import generate_key\n'), ((3752, 3800), 'django.template.loader.render_to_string', 'render_to_string', (['email_message_template_name', 'c'], {}), '(email_message_template_name, c)\n', (3768, 3800), False, 'from django.template.loader import render_to_string\n'), ((3814, 3856), 'django.core.mail.send_mail', 'send_mail', (['subject', 'message', 'None', '[email]'], {}), '(subject, message, None, [email])\n', (3823, 3856), False, 'from django.core.mail import send_mail\n'), ((3900, 3921), 'django.shortcuts.redirect', 'redirect', (['success_url'], {}), '(success_url)\n', (3908, 3921), False, 'from django.shortcuts import render, redirect\n'), ((4606, 4651), 'django.shortcuts.render', 'render', (['request', 'template_name', 'extra_context'], {}), '(request, template_name, extra_context)\n', (4612, 4651), False, 'from django.shortcuts import render, redirect\n'), ((2551, 2582), 'django.apps.apps.get_model', 'apps.get_model', (['"""sites"""', '"""Site"""'], {}), "('sites', 'Site')\n", (2565, 2582), False, 'from django.apps import apps\n'), ((3673, 3721), 'django.template.loader.render_to_string', 'render_to_string', (['email_subject_template_name', 'c'], {}), '(email_subject_template_name, c)\n', (3689, 3721), False, 'from django.template.loader import render_to_string\n')] |
import sqlite3
from typing import Any, Iterable
from paper_trader.utils.dataclasses import DataclassDesc
from .converter import _convert_val_from_db, _convert_val_to_db
class _ClassDesc(DataclassDesc):
@property
def create_table_str(self):
cols = ", ".join(self.fields.keys())
pks = "PRIMARY KEY ({})".format(
", ".join(k for k in self.primary_keys_names)
)
return f"CREATE TABLE IF NOT EXISTS {self.name} ({cols}, {pks})"
@property
def upsert_str(self):
return f'INSERT OR REPLACE INTO {self.name} VALUES ({", ".join("?" for _ in range(len(self.fields)))})'
@property
def delete_str(self):
return f"DELETE FROM {self.name} WHERE " + " AND ".join(
f"{pk}=?" for pk in self.primary_keys_names
)
@property
def get_all_str(self):
return f"SELECT * FROM {self.name}"
def get_fields(self, inst):
return [_convert_val_to_db(getattr(inst, f)) for f in self.fields]
class SqliteDb:
def __init__(self, filename: str):
self._db = sqlite3.connect(filename)
def __enter__(self):
self._db.__enter__()
return self
    # NOTE: __exit__ only commits or rolls back; it does not close the connection, which can be misleading.
def __exit__(self, exc_type, exc_value, traceback):
self._db.__exit__(exc_type, exc_value, traceback)
def close(self):
self._db.close()
self._db = None
def init_table(self, clazz):
desc = _ClassDesc(clazz=clazz)
cursor = self._db.cursor()
cursor.execute(desc.create_table_str)
def upsert(self, obj: Any):
desc = _ClassDesc(inst=obj)
cursor = self._db.cursor()
cursor.execute(desc.upsert_str, desc.get_fields(obj))
def upsert_all(self, objs: Iterable):
desc = None
cursor = self._db.cursor()
for obj in objs:
if desc is None:
desc = _ClassDesc(inst=obj)
cursor.execute(desc.upsert_str, desc.get_fields(obj))
def delete(self, obj):
desc = _ClassDesc(inst=obj)
cursor = self._db.cursor()
cursor.execute(desc.delete_str, desc.get_primary_keys(obj))
def get_all(self, clazz):
desc = _ClassDesc(clazz=clazz)
for vals in self._db.cursor().execute(desc.get_all_str).fetchall():
yield clazz(
*(
_convert_val_from_db(t, v)
for t, v in zip(desc.field_types, vals)
)
)
def commit(self):
self._db.commit()
def rollback(self):
self._db.rollback()
| [
"sqlite3.connect"
] | [((1111, 1136), 'sqlite3.connect', 'sqlite3.connect', (['filename'], {}), '(filename)\n', (1126, 1136), False, 'import sqlite3\n')] |
from django.conf.urls import url
# from .views import hello_word_view
from apps.models_tensorflow2.DIN_CTR.views import hello_word_view
import os
app_name = os.path.dirname(__file__).split('/')[-1].split('\\')[-1]
urlpatterns = [
url(r'^hello_word/$', hello_word_view.as_view(), name='hello_word')
]
| [
"os.path.dirname",
"apps.models_tensorflow2.DIN_CTR.views.hello_word_view.as_view"
] | [((256, 281), 'apps.models_tensorflow2.DIN_CTR.views.hello_word_view.as_view', 'hello_word_view.as_view', ([], {}), '()\n', (279, 281), False, 'from apps.models_tensorflow2.DIN_CTR.views import hello_word_view\n'), ((157, 182), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (172, 182), False, 'import os\n')] |
import sys
from Ziggeo import Ziggeo
if(len(sys.argv) < 4):
print ("Error\n")
print ("Usage: $>python video_get_stats_bulk.py YOUR_API_TOKEN YOUR_PRIVATE_KEY VIDEO_TOKEN1,VIDEO_TOKEN2 \n")
sys.exit()
api_token = sys.argv[1]
private_key = sys.argv[2]
video_token = sys.argv[3]
ziggeo = Ziggeo(api_token, private_key)
bulk_arguments = {}
bulk_arguments["tokens_or_keys"] = video_token
print (ziggeo.videos().stats_bulk(bulk_arguments))
| [
"Ziggeo.Ziggeo",
"sys.exit"
] | [((292, 322), 'Ziggeo.Ziggeo', 'Ziggeo', (['api_token', 'private_key'], {}), '(api_token, private_key)\n', (298, 322), False, 'from Ziggeo import Ziggeo\n'), ((194, 204), 'sys.exit', 'sys.exit', ([], {}), '()\n', (202, 204), False, 'import sys\n')] |
from intelhex import IntelHex
ih = IntelHex() # create empty object
ih.loadhex('rc_calib_tiny85.hex') # load from hex
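# Note: the loop below looks like a no-op, but assigning ih[i] = 255 turns
# addresses that were only implicit padding (IntelHex returns its pad value,
# 0xFF by default, for unset addresses) into explicitly stored bytes, so they
# appear in the written-out hex.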
for i in range(32):
if ih[i]==255:
ih[i]=255
ih.dump()
from cStringIO import StringIO
sio = StringIO()
ih.write_hex_file(sio)
hexstr = sio.getvalue()
sio.close()
print("\n\n\n\n")
print(hexstr)
| [
"cStringIO.StringIO",
"intelhex.IntelHex"
] | [((35, 45), 'intelhex.IntelHex', 'IntelHex', ([], {}), '()\n', (43, 45), False, 'from intelhex import IntelHex\n'), ((258, 268), 'cStringIO.StringIO', 'StringIO', ([], {}), '()\n', (266, 268), False, 'from cStringIO import StringIO\n')] |
#
# Copyright (c) 2018-2020 by <NAME> <<EMAIL>>.
#
# This software is available under the terms of the MIT license. Parts are licensed under
# different terms if stated. The legal terms are attached to the LICENSE file and are
# made available on:
#
# https://opensource.org/licenses/MIT
#
# SPDX-License-Identifier: MIT
#
# Contributors:
# <NAME> - initial implementation
#
import hashlib
import logging
import os
import sys
import tracemalloc
from tempfile import TemporaryDirectory
from unittest import TestCase
from angelos.archive7.streams import SingleStreamManager, VirtualFileObject
class BaseArchiveTestCase(TestCase):
@classmethod
def setUpClass(cls) -> None:
"""Setup test class with a facade and ten contacts."""
tracemalloc.start()
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
cls.secret = os.urandom(32)
@classmethod
def tearDownClass(cls) -> None:
"""Clean up after test suite."""
def setUp(self) -> None:
"""Set up a case with a fresh copy of portfolios and facade"""
self.dir = TemporaryDirectory()
self.home = self.dir.name
def tearDown(self) -> None:
"""Tear down after the test."""
self.dir.cleanup()
class TestStreamBlock(TestCase):
def test_position(self):
self.fail()
def test_load_meta(self):
self.fail()
class TestBaseStream(TestCase):
def test_identity(self):
self.fail()
def test_manager(self):
self.fail()
def test_data(self):
self.fail()
def test_load_meta(self):
self.fail()
def test_meta_unpack(self):
self.fail()
def test_length(self):
self.fail()
def test_changed(self):
self.fail()
def test_save(self):
self.fail()
def test_next(self):
self.fail()
def test_previous(self):
self.fail()
def test_extend(self):
self.fail()
def test_push(self):
self.fail()
def test_pop(self):
self.fail()
def test_truncate(self):
self.fail()
def test_wind(self):
self.fail()
def test_close(self):
self.fail()
class TestInternalStream(TestCase):
def test_close(self):
self.fail()
class TestDataStream(TestCase):
def test_close(self):
self.fail()
class TestVirtualFileObject(TestCase):
def test__close(self):
self.fail()
def test__flush(self):
self.fail()
def test__readinto(self):
self.fail()
def test__seek(self):
self.fail()
def test__truncate(self):
self.fail()
def test__write(self):
self.fail()
class TestRegistry(TestCase):
def test_tree(self):
self.fail()
def test_close(self):
self.fail()
def test__init_tree(self):
self.fail()
def test__checkpoint(self):
self.fail()
class TestStreamRegistry(TestCase):
def test__init_tree(self):
self.fail()
def test_register(self):
self.fail()
def test_unregister(self):
self.fail()
def test_update(self):
self.fail()
def test_search(self):
self.fail()
class TestStreamManager(TestCase):
def test_closed(self):
self.fail()
def test_created(self):
self.fail()
def test_close(self):
self.fail()
def test_save_meta(self):
self.fail()
def test_meta(self):
self.fail()
def test_special_block(self):
self.fail()
def test_new_block(self):
self.fail()
def test_load_block(self):
self.fail()
def test_save_block(self):
self.fail()
def test_special_stream(self):
self.fail()
def test__setup(self):
self.fail()
def test__open(self):
self.fail()
def test__close(self):
self.fail()
def test_recycle(self):
self.fail()
def test_reuse(self):
self.fail()
class TestSingleStreamManager(BaseArchiveTestCase):
def test_recycle(self):
self.fail()
def test_reuse(self):
self.fail()
def test_run(self):
data = bytes(os.urandom(2**20))
mgr = SingleStreamManager(os.path.join(self.home, "test.ar7"), self.secret)
stream = mgr.special_stream(SingleStreamManager.STREAM_DATA)
fileobj = VirtualFileObject(stream, "test", "wb+")
fileobj.write(data)
fileobj.close()
mgr.close()
del mgr
mgr = SingleStreamManager(os.path.join(self.home, "test.ar7"), self.secret)
fileobj = VirtualFileObject(mgr.special_stream(SingleStreamManager.STREAM_DATA), "test")
data2 = fileobj.read()
self.assertEqual(
hashlib.sha1(data).digest(),
hashlib.sha1(data2).digest()
)
fileobj.close()
mgr.close()
class TestFixedMultiStreamManager(TestCase):
def test_recycle(self):
self.fail()
def test_reuse(self):
self.fail()
class TestDynamicMultiStreamManager(TestCase):
def test__close(self):
self.fail()
def test_new_stream(self):
self.fail()
def test_open_stream(self):
self.fail()
def test_close_stream(self):
self.fail()
def test_del_stream(self):
self.fail()
| [
"logging.basicConfig",
"tempfile.TemporaryDirectory",
"tracemalloc.start",
"os.urandom",
"os.path.join",
"angelos.archive7.streams.VirtualFileObject",
"hashlib.sha1"
] | [((760, 779), 'tracemalloc.start', 'tracemalloc.start', ([], {}), '()\n', (777, 779), False, 'import tracemalloc\n'), ((788, 846), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stderr', 'level': 'logging.INFO'}), '(stream=sys.stderr, level=logging.INFO)\n', (807, 846), False, 'import logging\n'), ((869, 883), 'os.urandom', 'os.urandom', (['(32)'], {}), '(32)\n', (879, 883), False, 'import os\n'), ((1100, 1120), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ([], {}), '()\n', (1118, 1120), False, 'from tempfile import TemporaryDirectory\n'), ((4373, 4413), 'angelos.archive7.streams.VirtualFileObject', 'VirtualFileObject', (['stream', '"""test"""', '"""wb+"""'], {}), "(stream, 'test', 'wb+')\n", (4390, 4413), False, 'from angelos.archive7.streams import SingleStreamManager, VirtualFileObject\n'), ((4183, 4202), 'os.urandom', 'os.urandom', (['(2 ** 20)'], {}), '(2 ** 20)\n', (4193, 4202), False, 'import os\n'), ((4236, 4271), 'os.path.join', 'os.path.join', (['self.home', '"""test.ar7"""'], {}), "(self.home, 'test.ar7')\n", (4248, 4271), False, 'import os\n'), ((4537, 4572), 'os.path.join', 'os.path.join', (['self.home', '"""test.ar7"""'], {}), "(self.home, 'test.ar7')\n", (4549, 4572), False, 'import os\n'), ((4755, 4773), 'hashlib.sha1', 'hashlib.sha1', (['data'], {}), '(data)\n', (4767, 4773), False, 'import hashlib\n'), ((4797, 4816), 'hashlib.sha1', 'hashlib.sha1', (['data2'], {}), '(data2)\n', (4809, 4816), False, 'import hashlib\n')] |
from django.contrib import admin
from . import models
class PostAdmin(admin.ModelAdmin):
date_hierarchy = 'pub_date'
list_filter = ('pub_status', )
readonly_fields = ('wppost', )
admin.site.register(models.Post, PostAdmin)
admin.site.register(models.RemoteImage)
| [
"django.contrib.admin.site.register"
] | [((195, 238), 'django.contrib.admin.site.register', 'admin.site.register', (['models.Post', 'PostAdmin'], {}), '(models.Post, PostAdmin)\n', (214, 238), False, 'from django.contrib import admin\n'), ((241, 280), 'django.contrib.admin.site.register', 'admin.site.register', (['models.RemoteImage'], {}), '(models.RemoteImage)\n', (260, 280), False, 'from django.contrib import admin\n')] |
from django.urls import path
from .views import ping, ferry, sailing, route, terminal
urlpatterns = [
path('ping', ping, name="ping"),
path('ferries', ferry.get_all, name="get_all_ferries"),
path('sailings', sailing.get_all, name="get_all_sailings"),
path('all-sailings', sailing.get_really_all, name="get_really_all_sailings"),
path('sailings/route/<int:route_id>', sailing.get_sailing_by_route_id, name="get_sailing_by_route_id"),
path('sailings/<int:sailing>', sailing.get_sailing, name="get_sailing"),
path('sailings/<str:source>', sailing.get_sailing_by_route, name="get_sailing_from"),
path('sailings/<str:source>/<str:destination>', sailing.get_sailing_by_route, name="get_sailing_from_and_to"),
path('routes', route.get_all, name="get_all_routes"),
path('terminals', terminal.get_all, name="get_all_terminals"),
]
| [
"django.urls.path"
] | [((107, 138), 'django.urls.path', 'path', (['"""ping"""', 'ping'], {'name': '"""ping"""'}), "('ping', ping, name='ping')\n", (111, 138), False, 'from django.urls import path\n'), ((144, 198), 'django.urls.path', 'path', (['"""ferries"""', 'ferry.get_all'], {'name': '"""get_all_ferries"""'}), "('ferries', ferry.get_all, name='get_all_ferries')\n", (148, 198), False, 'from django.urls import path\n'), ((204, 262), 'django.urls.path', 'path', (['"""sailings"""', 'sailing.get_all'], {'name': '"""get_all_sailings"""'}), "('sailings', sailing.get_all, name='get_all_sailings')\n", (208, 262), False, 'from django.urls import path\n'), ((268, 344), 'django.urls.path', 'path', (['"""all-sailings"""', 'sailing.get_really_all'], {'name': '"""get_really_all_sailings"""'}), "('all-sailings', sailing.get_really_all, name='get_really_all_sailings')\n", (272, 344), False, 'from django.urls import path\n'), ((350, 457), 'django.urls.path', 'path', (['"""sailings/route/<int:route_id>"""', 'sailing.get_sailing_by_route_id'], {'name': '"""get_sailing_by_route_id"""'}), "('sailings/route/<int:route_id>', sailing.get_sailing_by_route_id, name\n ='get_sailing_by_route_id')\n", (354, 457), False, 'from django.urls import path\n'), ((458, 529), 'django.urls.path', 'path', (['"""sailings/<int:sailing>"""', 'sailing.get_sailing'], {'name': '"""get_sailing"""'}), "('sailings/<int:sailing>', sailing.get_sailing, name='get_sailing')\n", (462, 529), False, 'from django.urls import path\n'), ((535, 624), 'django.urls.path', 'path', (['"""sailings/<str:source>"""', 'sailing.get_sailing_by_route'], {'name': '"""get_sailing_from"""'}), "('sailings/<str:source>', sailing.get_sailing_by_route, name=\n 'get_sailing_from')\n", (539, 624), False, 'from django.urls import path\n'), ((625, 739), 'django.urls.path', 'path', (['"""sailings/<str:source>/<str:destination>"""', 'sailing.get_sailing_by_route'], {'name': '"""get_sailing_from_and_to"""'}), "('sailings/<str:source>/<str:destination>', sailing.\n get_sailing_by_route, name='get_sailing_from_and_to')\n", (629, 739), False, 'from django.urls import path\n'), ((740, 792), 'django.urls.path', 'path', (['"""routes"""', 'route.get_all'], {'name': '"""get_all_routes"""'}), "('routes', route.get_all, name='get_all_routes')\n", (744, 792), False, 'from django.urls import path\n'), ((798, 859), 'django.urls.path', 'path', (['"""terminals"""', 'terminal.get_all'], {'name': '"""get_all_terminals"""'}), "('terminals', terminal.get_all, name='get_all_terminals')\n", (802, 859), False, 'from django.urls import path\n')] |
##############################################################################
#
# Copyright (c) 2001-2012 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
from BTrees import OOBTree
from .common import BTreeTests
from ._test_builder import update_module
class OOBTreeTest(BTreeTests):
def test_byValue(self):
ITEMS = [(y, x) for x, y in enumerate('abcdefghijklmnopqrstuvwxyz')]
tree = self._makeOne(ITEMS)
self.assertEqual(list(tree.byValue(22)),
[(y, x) for x, y in reversed(ITEMS[22:])])
def testRejectDefaultComparisonOnSet(self):
# Check that passing int keys w default comparison fails.
# Only applies to new-style class instances. Old-style
# instances are too hard to introspect.
# This is white box because we know that the check is being
# used in a function that's used in lots of places.
# Otherwise, there are many permutations that would have to be
# checked.
from .._compat import PY2
t = self._makeOne()
class C(object):
pass
with self.assertRaises(TypeError) as raising:
t[C()] = 1
self.assertEqual(raising.exception.args[0], "Object has default comparison")
if PY2: # we only check for __cmp__ on Python2
class With___cmp__(object):
def __cmp__(*args):
return 1
c = With___cmp__()
t[c] = 1
t.clear()
class With___lt__(object):
def __lt__(*args):
return 1
c = With___lt__()
t[c] = 1
t.clear()
def testAcceptDefaultComparisonOnGet(self):
# Issue #42
t = self._makeOne()
class C(object):
pass
self.assertEqual(t.get(C(), 42), 42)
self.assertRaises(KeyError, t.__getitem__, C())
self.assertFalse(C() in t)
def test_None_is_smallest(self):
t = self._makeOne()
for i in range(999): # Make sure we multiple buckets
t[i] = i*i
t[None] = -1
for i in range(-99, 0): # Make sure we multiple buckets
t[i] = i*i
self.assertEqual(list(t), [None] + list(range(-99, 999)))
self.assertEqual(list(t.values()),
[-1] + [i*i for i in range(-99, 999)])
self.assertEqual(t[2], 4)
self.assertEqual(t[-2], 4)
self.assertEqual(t[None], -1)
t[None] = -2
self.assertEqual(t[None], -2)
t2 = t.__class__(t)
del t[None]
self.assertEqual(list(t), list(range(-99, 999)))
if 'Py' in self.__class__.__name__:
return
from BTrees.OOBTree import difference, union, intersection
self.assertEqual(list(difference(t2, t).items()), [(None, -2)])
self.assertEqual(list(union(t, t2)), list(t2))
self.assertEqual(list(intersection(t, t2)), list(t))
def testDeleteNoneKey(self):
# Check that a None key can be deleted in Python 2.
# This doesn't work on Python 3 because None is unorderable,
# so the tree can't be searched. But None also can't be inserted,
# and we don't support migrating Python 2 databases to Python 3.
t = self._makeOne()
bucket_state = ((None, 42),)
tree_state = ((bucket_state,),)
t.__setstate__(tree_state)
self.assertEqual(t[None], 42)
del t[None]
def testUnpickleNoneKey(self):
# All versions (py2 and py3, C and Python) can unpickle
# data that looks like this: {None: 42}, even though None
# is unorderable..
# This pickle was captured in BTree/ZODB3 3.10.7
data = b'ccopy_reg\n__newobj__\np0\n(cBTrees.OOBTree\nOOBTree\np1\ntp2\nRp3\n((((NI42\ntp4\ntp5\ntp6\ntp7\nb.'
import pickle
t = pickle.loads(data)
keys = list(t)
self.assertEqual([None], keys)
def testIdentityTrumpsBrokenComparison(self):
# Identical keys always match, even if their comparison is
# broken. See https://github.com/zopefoundation/BTrees/issues/50
from functools import total_ordering
@total_ordering
class Bad(object):
def __eq__(self, other):
return False
__lt__ = __cmp__ = __eq__
t = self._makeOne()
bad_key = Bad()
t[bad_key] = 42
self.assertIn(bad_key, t)
self.assertEqual(list(t), [bad_key])
del t[bad_key]
self.assertNotIn(bad_key, t)
self.assertEqual(list(t), [])
update_module(globals(), OOBTree, btree_tests_base=OOBTreeTest)
| [
"pickle.loads",
"BTrees.OOBTree.difference",
"BTrees.OOBTree.intersection",
"BTrees.OOBTree.union"
] | [((4347, 4365), 'pickle.loads', 'pickle.loads', (['data'], {}), '(data)\n', (4359, 4365), False, 'import pickle\n'), ((3348, 3360), 'BTrees.OOBTree.union', 'union', (['t', 't2'], {}), '(t, t2)\n', (3353, 3360), False, 'from BTrees.OOBTree import difference, union, intersection\n'), ((3403, 3422), 'BTrees.OOBTree.intersection', 'intersection', (['t', 't2'], {}), '(t, t2)\n', (3415, 3422), False, 'from BTrees.OOBTree import difference, union, intersection\n'), ((3276, 3293), 'BTrees.OOBTree.difference', 'difference', (['t2', 't'], {}), '(t2, t)\n', (3286, 3293), False, 'from BTrees.OOBTree import difference, union, intersection\n')] |
import os
import math
import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
import itertools
import traceback
from .model import Alice, Bob, Eve
from .validation import validate
def train(args, trainloader, valloader, writer, logger, hp, hp_str):
alice = Alice(hp).cuda()
bob = Bob(hp).cuda()
eve = Eve(hp).cuda()
optim_e = torch.optim.Adam(
eve.parameters(),
lr=hp.train.adam.lr)
optim_ab = torch.optim.Adam(
list(alice.parameters()) + list(bob.parameters()),
lr=hp.train.adam.lr)
step = 0
criterion = nn.L1Loss()
try:
alice.train(); bob.train(); eve.train()
for epoch in itertools.count(0):
if epoch % hp.log.validation == 0:
with torch.no_grad():
validate(hp, args, alice, bob, eve, valloader, writer, step)
loader = tqdm.tqdm(trainloader)
for plainE, keyE, plainAB, keyAB in loader:
plainE = plainE.cuda()
keyE = keyE.cuda()
plainAB = plainAB.cuda()
keyAB = keyAB.cuda()
# Eve
optim_e.zero_grad()
cipher = alice(plainE, keyE).detach()
out_e = eve(cipher)
loss_e = criterion(plainE, out_e)
loss_e.backward()
optim_e.step()
loss_e_temp = loss_e.item()
# Alice & Bob
optim_ab.zero_grad()
cipher = alice(plainAB, keyAB)
out_e = eve(cipher)
out_b = bob(cipher, keyAB)
loss_e = criterion(plainAB, out_e)
loss_b = criterion(plainAB, out_b)
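                # Alice & Bob jointly minimise Bob's reconstruction error while
                # pushing Eve's error towards 1 (i.e. towards failing to
                # decrypt); the squared term penalises deviation on either side.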
loss_ab = loss_b + (1. - loss_e).pow(2)
loss_ab.backward()
optim_ab.step()
loss_b = loss_b.item()
loss_ab = loss_ab.item()
# logging
step += 1
tmp = max(loss_ab, loss_b, loss_e_temp)
if tmp > 1e8 or math.isnan(tmp):
logger.error("loss exploded AB %f B %f E %f" % (loss_ab, loss_b, loss_e_temp))
raise Exception("Loss exploded")
writer.log_train(loss_ab, loss_b, loss_e_temp, step)
loader.set_description("AB %.04f B %.04f E %.04f step %d" % (loss_ab, loss_b, loss_e_temp, step))
except Exception as e:
logger.info("Exiting due to exception: %s" % e)
traceback.print_exc()
| [
"torch.nn.L1Loss",
"tqdm.tqdm",
"itertools.count",
"torch.no_grad",
"traceback.print_exc",
"math.isnan"
] | [((593, 604), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (602, 604), True, 'import torch.nn as nn\n'), ((683, 701), 'itertools.count', 'itertools.count', (['(0)'], {}), '(0)\n', (698, 701), False, 'import itertools\n'), ((891, 913), 'tqdm.tqdm', 'tqdm.tqdm', (['trainloader'], {}), '(trainloader)\n', (900, 913), False, 'import tqdm\n'), ((2515, 2536), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2534, 2536), False, 'import traceback\n'), ((771, 786), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (784, 786), False, 'import torch\n'), ((2070, 2085), 'math.isnan', 'math.isnan', (['tmp'], {}), '(tmp)\n', (2080, 2085), False, 'import math\n')] |
import spacy
from spacy.matcher import PhraseMatcher
from app.utils import pdf2text
nlp = spacy.load('en_core_web_sm')
def model_rank(filename, requirements):
text = pdf2text(filename).strip().lower()
words = [nlp(i) for i in requirements]
matcher = PhraseMatcher(nlp.vocab)
matcher.add('requirements', None, *words)
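    # Note: the score below counts every phrase occurrence in the text, not the
    # number of distinct requirements that were matched.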
score = len(matcher(nlp(text)))
return score
| [
"spacy.load",
"app.utils.pdf2text",
"spacy.matcher.PhraseMatcher"
] | [((91, 119), 'spacy.load', 'spacy.load', (['"""en_core_web_sm"""'], {}), "('en_core_web_sm')\n", (101, 119), False, 'import spacy\n'), ((497, 521), 'spacy.matcher.PhraseMatcher', 'PhraseMatcher', (['nlp.vocab'], {}), '(nlp.vocab)\n', (510, 521), False, 'from spacy.matcher import PhraseMatcher\n'), ((404, 422), 'app.utils.pdf2text', 'pdf2text', (['filename'], {}), '(filename)\n', (412, 422), False, 'from app.utils import pdf2text\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 26 08:53:26 2016
@author: tomas
"""
import sys
import json
import numpy as np
from util import MAP, MAP_qbs
if __name__ == '__main__':
fold = int(sys.argv[1])
tmp_file = str(sys.argv[2])
ub = int(sys.argv[3])
with open('data/washington/washington_preprocessed.json', 'r') as f:
json_data = json.load(f)
data = json_data['data']
labels, texts, splits = [], [], []
for datum in data:
labels.append(datum['label'])
texts.append(datum['text'])
splits.append(datum['split'])
X = np.load('tmp/%s_descriptors.npy' % tmp_file)
if tmp_file.find('dct3') > 0:
we = np.load('embeddings/washington_dct3_embeddings.npy')
elif tmp_file.find('phoc') > 0:
we = np.load('embeddings/washington_phoc_embeddings.npy')
we = (we > 0).astype(np.float32)
elif tmp_file.find('ngram') > 0:
we = np.load('embeddings/washington_ngram_embeddings.npy')
elif tmp_file.find('semantic') > 0:
we = np.load('embeddings/washington_semantic_embeddings.npy')
else:
we = None
#only keep train & val splits
db = []
itargets = []
targets = []
qtargets = []
queries = []
used = []
    if we is not None:
for i, (x, w, label, text, split) in enumerate(zip(X, we, labels, texts, splits)):
if split == 'val' or split == 'train':
db.append(x)
itargets.append(label)
targets.append(text)
if label not in used:
queries.append(w)
qtargets.append(label)
used.append(label)
else:
for i, (x, label, text, split) in enumerate(zip(X, labels, texts, splits)):
if split == 'val' or split == 'train':
db.append(x)
itargets.append(label)
targets.append(text)
db = np.array(db)
itargets = np.array(itargets)
targets = np.array(targets)
if ub < 1:
ub = db.shape[0] + 1
#use entire db as query
    if we is not None:
mAP_qbs = MAP_qbs(queries[:ub], qtargets, db, itargets)
else:
mAP_qbs = -1
mAP_qbe = MAP(db[:ub], itargets, db, itargets)
jdata = {}
jdata['MaP_qbe'] = mAP_qbe
jdata['MaP_qbs'] = mAP_qbs
#store results in a json file
with open('tmp/' + tmp_file + '_ws_results.json', 'w') as f:
json.dump(jdata, f)
| [
"util.MAP_qbs",
"json.load",
"numpy.array",
"util.MAP",
"numpy.load",
"json.dump"
] | [((604, 648), 'numpy.load', 'np.load', (["('tmp/%s_descriptors.npy' % tmp_file)"], {}), "('tmp/%s_descriptors.npy' % tmp_file)\n", (611, 648), True, 'import numpy as np\n'), ((1966, 1978), 'numpy.array', 'np.array', (['db'], {}), '(db)\n', (1974, 1978), True, 'import numpy as np\n'), ((1994, 2012), 'numpy.array', 'np.array', (['itargets'], {}), '(itargets)\n', (2002, 2012), True, 'import numpy as np\n'), ((2027, 2044), 'numpy.array', 'np.array', (['targets'], {}), '(targets)\n', (2035, 2044), True, 'import numpy as np\n'), ((2255, 2291), 'util.MAP', 'MAP', (['db[:ub]', 'itargets', 'db', 'itargets'], {}), '(db[:ub], itargets, db, itargets)\n', (2258, 2291), False, 'from util import MAP, MAP_qbs\n'), ((370, 382), 'json.load', 'json.load', (['f'], {}), '(f)\n', (379, 382), False, 'import json\n'), ((701, 753), 'numpy.load', 'np.load', (['"""embeddings/washington_dct3_embeddings.npy"""'], {}), "('embeddings/washington_dct3_embeddings.npy')\n", (708, 753), True, 'import numpy as np\n'), ((2164, 2209), 'util.MAP_qbs', 'MAP_qbs', (['queries[:ub]', 'qtargets', 'db', 'itargets'], {}), '(queries[:ub], qtargets, db, itargets)\n', (2171, 2209), False, 'from util import MAP, MAP_qbs\n'), ((2481, 2500), 'json.dump', 'json.dump', (['jdata', 'f'], {}), '(jdata, f)\n', (2490, 2500), False, 'import json\n'), ((803, 855), 'numpy.load', 'np.load', (['"""embeddings/washington_phoc_embeddings.npy"""'], {}), "('embeddings/washington_phoc_embeddings.npy')\n", (810, 855), True, 'import numpy as np\n'), ((947, 1000), 'numpy.load', 'np.load', (['"""embeddings/washington_ngram_embeddings.npy"""'], {}), "('embeddings/washington_ngram_embeddings.npy')\n", (954, 1000), True, 'import numpy as np\n'), ((1054, 1110), 'numpy.load', 'np.load', (['"""embeddings/washington_semantic_embeddings.npy"""'], {}), "('embeddings/washington_semantic_embeddings.npy')\n", (1061, 1110), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
"""
extract reads from a bam file and a list
write a fasta file
useful benchmark:
https://timoast.github.io/blog/2015-10-12-extractreads/
"""
import pysam
def extract_reads(options):
with open(options.names, "r") as f:
n = f.readlines()
bamfile = pysam.AlignmentFile(options.bam, 'rb')
name_indexed = pysam.IndexedReads(bamfile)
name_indexed.build()
f_out = open(options.out, "w")
for name in n:
name = name.rstrip()
try:
name_indexed.find(name)
except KeyError:
pass
else:
iterator = name_indexed.find(name)
for x in iterator:
f_out.write(f">{x.query_name}_{x.reference_name}_{x.reference_start+1}_{x.cigarstring}\n")
f_out.write(x.query_alignment_sequence + "\n")
f_out.close()
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser(description = "Extract reads by read name from the bam (all hits) and write to fasta")
parser.add_argument("-b", "--bam", help = "bam file", required = True)
parser.add_argument("-n", "--names", help = "list of read names to extract", required = True)
parser.add_argument("-o", "--out", help = "output.fasta", required = True)
options = parser.parse_args()
extract_reads(options) | [
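    # Example invocation (flags are the ones defined above):
    #   python extract_reads.py -b aligned.bam -n read_names.txt -o extracted.fasta
    # Note: pysam.IndexedReads builds an in-memory index of read names, trading
    # memory for fast by-name lookup (see the benchmark link in the docstring).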
"argparse.ArgumentParser",
"pysam.AlignmentFile",
"pysam.IndexedReads"
] | [((295, 333), 'pysam.AlignmentFile', 'pysam.AlignmentFile', (['options.bam', '"""rb"""'], {}), "(options.bam, 'rb')\n", (314, 333), False, 'import pysam\n'), ((353, 380), 'pysam.IndexedReads', 'pysam.IndexedReads', (['bamfile'], {}), '(bamfile)\n', (371, 380), False, 'import pysam\n'), ((967, 1071), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Extract reads by read name from the bam (all hits) and write to fasta"""'}), "(description=\n 'Extract reads by read name from the bam (all hits) and write to fasta')\n", (981, 1071), False, 'from argparse import ArgumentParser\n')] |
from base_config import get_baseconfig_by_epoch
from gsm.gsm_prune_pipeline import gsm_prune_pipeline
def gsm_rc56():
network_type = 'rc56'
dataset_name = 'cifar10'
batch_size = 64
base_log_dir = 'gsm_exps/{}_base_train'.format(network_type)
gsm_log_dir = 'gsm_exps/{}_gsm'.format(network_type)
base_train_config = get_baseconfig_by_epoch(network_type=network_type, dataset_name=dataset_name, dataset_subset='train',
global_batch_size=batch_size, num_node=1, weight_decay=1e-4, optimizer_type='sgd',
momentum=0.9, max_epochs=500, base_lr=0.1, lr_epoch_boundaries=[100, 200, 300, 400],
lr_decay_factor=0.1, linear_final_lr=None, warmup_epochs=5, warmup_method='linear',
warmup_factor=0, ckpt_iter_period=40000, tb_iter_period=100,
output_dir=base_log_dir, tb_dir=base_log_dir, save_weights=None,
val_epoch_period=2)
gsm_config = get_baseconfig_by_epoch(network_type=network_type, dataset_name=dataset_name, dataset_subset='train',
global_batch_size=batch_size, num_node=1, weight_decay=1e-4, optimizer_type='sgd',
momentum=0.98, max_epochs=600, base_lr=5e-3, lr_epoch_boundaries=[400, 500], # Note this line
lr_decay_factor=0.1, linear_final_lr=None, warmup_epochs=5, warmup_method='linear',
warmup_factor=0, ckpt_iter_period=40000, tb_iter_period=100,
output_dir=gsm_log_dir, tb_dir=gsm_log_dir, save_weights=None,
val_epoch_period=2)
gsm_prune_pipeline(init_hdf5=None, base_train_config=base_train_config, gsm_config=gsm_config, nonzero_ratio=0.10)
if __name__ == '__main__':
gsm_rc56() | [
"base_config.get_baseconfig_by_epoch",
"gsm.gsm_prune_pipeline.gsm_prune_pipeline"
] | [((340, 870), 'base_config.get_baseconfig_by_epoch', 'get_baseconfig_by_epoch', ([], {'network_type': 'network_type', 'dataset_name': 'dataset_name', 'dataset_subset': '"""train"""', 'global_batch_size': 'batch_size', 'num_node': '(1)', 'weight_decay': '(0.0001)', 'optimizer_type': '"""sgd"""', 'momentum': '(0.9)', 'max_epochs': '(500)', 'base_lr': '(0.1)', 'lr_epoch_boundaries': '[100, 200, 300, 400]', 'lr_decay_factor': '(0.1)', 'linear_final_lr': 'None', 'warmup_epochs': '(5)', 'warmup_method': '"""linear"""', 'warmup_factor': '(0)', 'ckpt_iter_period': '(40000)', 'tb_iter_period': '(100)', 'output_dir': 'base_log_dir', 'tb_dir': 'base_log_dir', 'save_weights': 'None', 'val_epoch_period': '(2)'}), "(network_type=network_type, dataset_name=\n dataset_name, dataset_subset='train', global_batch_size=batch_size,\n num_node=1, weight_decay=0.0001, optimizer_type='sgd', momentum=0.9,\n max_epochs=500, base_lr=0.1, lr_epoch_boundaries=[100, 200, 300, 400],\n lr_decay_factor=0.1, linear_final_lr=None, warmup_epochs=5,\n warmup_method='linear', warmup_factor=0, ckpt_iter_period=40000,\n tb_iter_period=100, output_dir=base_log_dir, tb_dir=base_log_dir,\n save_weights=None, val_epoch_period=2)\n", (363, 870), False, 'from base_config import get_baseconfig_by_epoch\n'), ((1146, 1667), 'base_config.get_baseconfig_by_epoch', 'get_baseconfig_by_epoch', ([], {'network_type': 'network_type', 'dataset_name': 'dataset_name', 'dataset_subset': '"""train"""', 'global_batch_size': 'batch_size', 'num_node': '(1)', 'weight_decay': '(0.0001)', 'optimizer_type': '"""sgd"""', 'momentum': '(0.98)', 'max_epochs': '(600)', 'base_lr': '(0.005)', 'lr_epoch_boundaries': '[400, 500]', 'lr_decay_factor': '(0.1)', 'linear_final_lr': 'None', 'warmup_epochs': '(5)', 'warmup_method': '"""linear"""', 'warmup_factor': '(0)', 'ckpt_iter_period': '(40000)', 'tb_iter_period': '(100)', 'output_dir': 'gsm_log_dir', 'tb_dir': 'gsm_log_dir', 'save_weights': 'None', 'val_epoch_period': '(2)'}), "(network_type=network_type, dataset_name=\n dataset_name, dataset_subset='train', global_batch_size=batch_size,\n num_node=1, weight_decay=0.0001, optimizer_type='sgd', momentum=0.98,\n max_epochs=600, base_lr=0.005, lr_epoch_boundaries=[400, 500],\n lr_decay_factor=0.1, linear_final_lr=None, warmup_epochs=5,\n warmup_method='linear', warmup_factor=0, ckpt_iter_period=40000,\n tb_iter_period=100, output_dir=gsm_log_dir, tb_dir=gsm_log_dir,\n save_weights=None, val_epoch_period=2)\n", (1169, 1667), False, 'from base_config import get_baseconfig_by_epoch\n'), ((1949, 2066), 'gsm.gsm_prune_pipeline.gsm_prune_pipeline', 'gsm_prune_pipeline', ([], {'init_hdf5': 'None', 'base_train_config': 'base_train_config', 'gsm_config': 'gsm_config', 'nonzero_ratio': '(0.1)'}), '(init_hdf5=None, base_train_config=base_train_config,\n gsm_config=gsm_config, nonzero_ratio=0.1)\n', (1967, 2066), False, 'from gsm.gsm_prune_pipeline import gsm_prune_pipeline\n')] |
# -*- coding: utf-8 -*-
from django import forms
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.forms import SetPasswordForm, PasswordChangeForm, \
AdminPasswordChangeForm as DjagnoAdminPasswordChangeForm, UserCreationForm, \
AuthenticationForm as DjangoAuthenticationForm
from localflavor.pl.forms import PLPESELField
from django.utils.translation import ugettext_lazy as _
from passwords.fields import PasswordField
from sew_django.profiles.models import Profile
class ValidatingSetPasswordForm(SetPasswordForm):
new_password1 = PasswordField(label=_("New password"))
new_password2 = PasswordField(label=_("New password confirm"))
class ValidatingPasswordChangeForm(PasswordChangeForm):
new_password1 = PasswordField(label=_("New password"))
new_password2 = PasswordField(label=_("New password confirmation"))
class AdminPasswordChangeForm(DjagnoAdminPasswordChangeForm):
password1 = PasswordField(label=_("New password"))
password2 = PasswordField(label=_("New password confirm"))
class AuthenticationForm(DjangoAuthenticationForm):
def __init__(self, *args, **kwargs):
super(AuthenticationForm, self).__init__(*args, **kwargs)
self.fields['username'].label = _(u"Numer PESEL lub adres email")
self.error_messages['invalid_login'] = _(u"Wprowadź poprawny numer PESEL lub adres email.")
class PeselForm(forms.ModelForm):
class Meta:
model = Profile
fields = ['pesel',]
class RegisterUserFullForm(forms.ModelForm):
password = PasswordField(label=_("Password"))
password_confirm = PasswordField(label=_("Password confirmation"))
def __init__(self, *args, **kwargs):
super(RegisterUserFullForm, self).__init__(*args, **kwargs)
self.fields['consent_processing_of_personal_data'].is_checkbox = True
self.fields['consent_processing_of_personal_data'].required = True
self.fields['consent_processing_of_personal_data'].initial = False
self.fields['accept_of_sending_data_to_WOSP'].is_checkbox = True
self.fields['accept_of_sending_data_to_WOSP'].required = True
self.fields['accept_of_sending_data_to_WOSP'].initial = False
def clean_password_confirm(self):
password = self.cleaned_data.get('password')
password_confirm = self.cleaned_data.get('password_confirm')
if password != password_confirm:
raise forms.ValidationError(_("Passwords doesn't match."))
return password_confirm
def save(self, commit=True):
profile = super(RegisterUserFullForm, self).save(commit=False)
profile.set_password(self.cleaned_data["password"])
if commit:
profile.save()
return profile
class Meta:
model = Profile
fields = ['pesel','email', 'photo', 'first_name', 'last_name', 'street', 'house', 'flat', 'zip', 'city', 'phone',
                  'workplace_name', 'workplace_address', 'workplace_zip', 'workplace_city', 'password', 'password_confirm',
"consent_processing_of_personal_data", "accept_of_sending_data_to_WOSP"]
class AdminRegisterUserFullForm(RegisterUserFullForm):
#small hack to show those fields
consent_processing_of_personal_data = forms.BooleanField(required=False, initial=True)
accept_of_sending_data_to_WOSP = forms.BooleanField(required=False, initial=True)
def __init__(self, *args, **kwargs):
super(AdminRegisterUserFullForm, self).__init__(*args, **kwargs)
self.fields['consent_processing_of_personal_data'].required = False
self.fields['accept_of_sending_data_to_WOSP'].required = False
def clean(self):
cleaned_data = self.cleaned_data
cleaned_data['consent_processing_of_personal_data'] = True
cleaned_data['accept_of_sending_data_to_WOSP'] = True
return cleaned_data
| [
"django.forms.BooleanField",
"django.utils.translation.ugettext_lazy"
] | [((3316, 3364), 'django.forms.BooleanField', 'forms.BooleanField', ([], {'required': '(False)', 'initial': '(True)'}), '(required=False, initial=True)\n', (3334, 3364), False, 'from django import forms\n'), ((3402, 3450), 'django.forms.BooleanField', 'forms.BooleanField', ([], {'required': '(False)', 'initial': '(True)'}), '(required=False, initial=True)\n', (3420, 3450), False, 'from django import forms\n'), ((1324, 1357), 'django.utils.translation.ugettext_lazy', '_', (['u"""Numer PESEL lub adres email"""'], {}), "(u'Numer PESEL lub adres email')\n", (1325, 1357), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1405, 1457), 'django.utils.translation.ugettext_lazy', '_', (['u"""Wprowadź poprawny numer PESEL lub adres email."""'], {}), "(u'Wprowadź poprawny numer PESEL lub adres email.')\n", (1406, 1457), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((668, 685), 'django.utils.translation.ugettext_lazy', '_', (['"""New password"""'], {}), "('New password')\n", (669, 685), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((727, 752), 'django.utils.translation.ugettext_lazy', '_', (['"""New password confirm"""'], {}), "('New password confirm')\n", (728, 752), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((852, 869), 'django.utils.translation.ugettext_lazy', '_', (['"""New password"""'], {}), "('New password')\n", (853, 869), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((911, 941), 'django.utils.translation.ugettext_lazy', '_', (['"""New password confirmation"""'], {}), "('New password confirmation')\n", (912, 941), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1042, 1059), 'django.utils.translation.ugettext_lazy', '_', (['"""New password"""'], {}), "('New password')\n", (1043, 1059), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1097, 1122), 'django.utils.translation.ugettext_lazy', '_', (['"""New password confirm"""'], {}), "('New password confirm')\n", (1098, 1122), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1642, 1655), 'django.utils.translation.ugettext_lazy', '_', (['"""Password"""'], {}), "('Password')\n", (1643, 1655), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1700, 1726), 'django.utils.translation.ugettext_lazy', '_', (['"""Password confirmation"""'], {}), "('Password confirmation')\n", (1701, 1726), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2521, 2550), 'django.utils.translation.ugettext_lazy', '_', (['"""Passwords doesn\'t match."""'], {}), '("Passwords doesn\'t match.")\n', (2522, 2550), True, 'from django.utils.translation import ugettext_lazy as _\n')] |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
import os
def welcome():
"""
Perform a bunch of sanity tests to make sure the Add-on SDK
environment is sane, and then display a welcome message.
"""
try:
if sys.version_info[0] > 2:
print ("Error: You appear to be using Python %d, but "
"the Add-on SDK only supports the Python 2.x line." %
(sys.version_info[0]))
return
import mozrunner
if 'CUDDLEFISH_ROOT' not in os.environ:
print ("Error: CUDDLEFISH_ROOT environment variable does "
"not exist! It should point to the root of the "
"Add-on SDK repository.")
return
env_root = os.environ['CUDDLEFISH_ROOT']
bin_dir = os.path.join(env_root, 'bin')
python_lib_dir = os.path.join(env_root, 'python-lib')
path = os.environ['PATH'].split(os.path.pathsep)
if bin_dir not in path:
print ("Warning: the Add-on SDK binary directory %s "
"does not appear to be in your PATH. You may "
"not be able to run 'cfx' or other SDK tools." %
bin_dir)
if python_lib_dir not in sys.path:
print ("Warning: the Add-on SDK python-lib directory %s "
"does not appear to be in your sys.path, which "
"is odd because I'm running from it." % python_lib_dir)
if not mozrunner.__path__[0].startswith(env_root):
print ("Warning: your mozrunner package is installed at %s, "
"which does not seem to be located inside the Jetpack "
"SDK. This may cause problems, and you may want to "
"uninstall the other version. See bug 556562 for "
"more information." % mozrunner.__path__[0])
except Exception:
# Apparently we can't get the actual exception object in the
# 'except' clause in a way that's syntax-compatible for both
# Python 2.x and 3.x, so we'll have to use the traceback module.
import traceback
_, e, _ = sys.exc_info()
print ("Verification of Add-on SDK environment failed (%s)." % e)
print ("Your SDK may not work properly.")
return
print ("Welcome to the Add-on SDK. Run 'cfx docs' for assistance.")
if __name__ == '__main__':
welcome()
| [
"sys.exc_info",
"os.path.join"
] | [((972, 1001), 'os.path.join', 'os.path.join', (['env_root', '"""bin"""'], {}), "(env_root, 'bin')\n", (984, 1001), False, 'import os\n'), ((1027, 1063), 'os.path.join', 'os.path.join', (['env_root', '"""python-lib"""'], {}), "(env_root, 'python-lib')\n", (1039, 1063), False, 'import os\n'), ((2331, 2345), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (2343, 2345), False, 'import sys\n')] |
# -*- coding: utf-8 -*-
"""
.. _training-example:
Train Your Own Neural Network Potential
=======================================
This example shows how to use TorchANI train your own neural network potential.
"""
###############################################################################
# To begin with, let's first import the modules we will use:
import torch
import ignite
import torchani
import timeit
import tensorboardX
import os
import ignite.contrib.handlers
###############################################################################
# Now let's setup training hyperparameters. Note that here for our demo purpose
# , we set both the training set and the validation set to the ``ani_gdb_s01.h5`` in
# TorchANI's repository. This allows this program to finish very quickly, because
# that dataset is very small. But this is wrong and should be avoided for any
# serious training. These paths assume the user runs this script under the
# ``examples`` directory of TorchANI's repository. If you download this script,
# you should manually set the path of these files in your system before this
# script can run successfully.
# training and validation set
try:
path = os.path.dirname(os.path.realpath(__file__))
except NameError:
path = os.getcwd()
training_path = os.path.join(path, '../dataset/ani_gdb_s01.h5')
validation_path = os.path.join(path, '../dataset/ani_gdb_s01.h5')
# checkpoint file to save model when validation RMSE improves
model_checkpoint = 'model.pt'
# max epochs to run the training
max_epochs = 20
# Compute training RMSE every this many epochs. Since the training set is
# usually huge and the loss function does not directly give us RMSE, we need to
# check the training RMSE to watch for overfitting.
training_rmse_every = 5
# device to run the training
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# batch size
batch_size = 1024
# log directory for tensorboardX
log = 'runs'
###############################################################################
# Now let's read our constants and self energies from constant files and
# construct AEV computer.
const_file = os.path.join(path, '../torchani/resources/ani-1x_dft_x8ens/rHCNO-5.2R_16-3.5A_a4-8.params') # noqa: E501
sae_file = os.path.join(path, '../torchani/resources/ani-1x_dft_x8ens/sae_linfit.dat') # noqa: E501
consts = torchani.neurochem.Constants(const_file)
aev_computer = torchani.AEVComputer(**consts)
energy_shifter = torchani.neurochem.load_sae(sae_file)
###############################################################################
# Now let's define atomic neural networks. Here in this demo, we use the same
# size of neural network for all atom types, but this is not necessary.
def atomic():
model = torch.nn.Sequential(
torch.nn.Linear(384, 128),
torch.nn.CELU(0.1),
torch.nn.Linear(128, 128),
torch.nn.CELU(0.1),
torch.nn.Linear(128, 64),
torch.nn.CELU(0.1),
torch.nn.Linear(64, 1)
)
return model
nn = torchani.ANIModel([atomic() for _ in range(4)])
print(nn)
###############################################################################
# If checkpoint from previous training exists, then load it.
if os.path.isfile(model_checkpoint):
nn.load_state_dict(torch.load(model_checkpoint))
else:
torch.save(nn.state_dict(), model_checkpoint)
model = torch.nn.Sequential(aev_computer, nn).to(device)
###############################################################################
# Now setup tensorboardX.
writer = tensorboardX.SummaryWriter(log_dir=log)
###############################################################################
# Now load training and validation datasets into memory. Note that we need to
# subtract the self energies of all atoms from the energy of each molecule.
# This keeps the energies in a reasonable range. The second argument
# defines how to convert species, given as a list of strings, to a tensor, that
# is, for all supported chemical symbols, which one corresponds to ``0``, which
# one corresponds to ``1``, etc.
training = torchani.data.BatchedANIDataset(
training_path, consts.species_to_tensor, batch_size, device=device,
transform=[energy_shifter.subtract_from_dataset])
validation = torchani.data.BatchedANIDataset(
validation_path, consts.species_to_tensor, batch_size, device=device,
transform=[energy_shifter.subtract_from_dataset])
###############################################################################
# When iterating the dataset, we will get pairs of input and output
# ``(species_coordinates, properties)``, where ``species_coordinates`` is the
# input and ``properties`` is the output.
#
# ``species_coordinates`` is a list of species-coordinate pairs, with shape
# ``(N, Na)`` and ``(N, Na, 3)``. The reason for getting this type is, when
# loading the dataset and generating minibatches, the whole dataset are
# shuffled and each minibatch contains structures of molecules with a wide
# range of numbers of atoms. Molecules with different numbers of atoms are
# batched together by padding. The way padding works is: adding ghost atoms, with
# species 'X', and do computations as if they were normal atoms. But when
# computing AEVs, atoms with species `X` would be ignored. To avoid computation
# wasting on padding atoms, minibatches are further splitted into chunks. Each
# chunk contains structures of molecules of similar size, which minimize the
# total number of padding atoms required to add. The input list
# ``species_coordinates`` contains chunks of that minibatch we are getting. The
# batching and chunking happens automatically, so the user does not need to
# worry how to construct chunks, but the user need to compute the energies for
# each chunk and concat them into single tensor.
#
# The output, i.e. ``properties`` is a dictionary holding each property. This
# allows us to extend TorchANI in the future to training forces and properties.
#
# We have tools to deal with these data types at :attr:`torchani.ignite` that
# allow us to easily combine the dataset with pytorch ignite. These tools can
# be used as follows:
container = torchani.ignite.Container({'energies': model})
optimizer = torch.optim.Adam(model.parameters())
trainer = ignite.engine.create_supervised_trainer(
container, optimizer, torchani.ignite.MSELoss('energies'))
evaluator = ignite.engine.create_supervised_evaluator(container, metrics={
'RMSE': torchani.ignite.RMSEMetric('energies')
})
###############################################################################
# Let's add a progress bar for the trainer
pbar = ignite.contrib.handlers.ProgressBar()
pbar.attach(trainer)
###############################################################################
# And some event handlers to compute validation and training metrics:
def hartree2kcal(x):
return 627.509 * x
@trainer.on(ignite.engine.Events.EPOCH_STARTED)
def validation_and_checkpoint(trainer):
def evaluate(dataset, name):
evaluator = ignite.engine.create_supervised_evaluator(
container,
metrics={
'RMSE': torchani.ignite.RMSEMetric('energies')
}
)
evaluator.run(dataset)
metrics = evaluator.state.metrics
rmse = hartree2kcal(metrics['RMSE'])
writer.add_scalar(name, rmse, trainer.state.epoch)
# compute validation RMSE
evaluate(validation, 'validation_rmse_vs_epoch')
# compute training RMSE
if trainer.state.epoch % training_rmse_every == 1:
evaluate(training, 'training_rmse_vs_epoch')
# checkpoint model
torch.save(nn.state_dict(), model_checkpoint)
###############################################################################
# Also a handler to log the elapsed time:
start = timeit.default_timer()
@trainer.on(ignite.engine.Events.EPOCH_STARTED)
def log_time(trainer):
elapsed = round(timeit.default_timer() - start, 2)
writer.add_scalar('time_vs_epoch', elapsed, trainer.state.epoch)
###############################################################################
# Also log the loss per iteration:
@trainer.on(ignite.engine.Events.ITERATION_COMPLETED)
def log_loss(trainer):
iteration = trainer.state.iteration
writer.add_scalar('loss_vs_iteration', trainer.state.output, iteration)
###############################################################################
# And finally, we are ready to run:
trainer.run(training, max_epochs)
| [
"torch.nn.Sequential",
"torchani.AEVComputer",
"torch.cuda.is_available",
"torchani.neurochem.Constants",
"tensorboardX.SummaryWriter",
"torch.nn.CELU",
"ignite.contrib.handlers.ProgressBar",
"torchani.ignite.Container",
"os.path.isfile",
"torchani.ignite.MSELoss",
"torchani.ignite.RMSEMetric",
"torchani.data.BatchedANIDataset",
"timeit.default_timer",
"torchani.neurochem.load_sae",
"torch.load",
"os.path.join",
"os.getcwd",
"os.path.realpath",
"torch.nn.Linear"
] | [((1281, 1328), 'os.path.join', 'os.path.join', (['path', '"""../dataset/ani_gdb_s01.h5"""'], {}), "(path, '../dataset/ani_gdb_s01.h5')\n", (1293, 1328), False, 'import os\n'), ((1347, 1394), 'os.path.join', 'os.path.join', (['path', '"""../dataset/ani_gdb_s01.h5"""'], {}), "(path, '../dataset/ani_gdb_s01.h5')\n", (1359, 1394), False, 'import os\n'), ((2132, 2227), 'os.path.join', 'os.path.join', (['path', '"""../torchani/resources/ani-1x_dft_x8ens/rHCNO-5.2R_16-3.5A_a4-8.params"""'], {}), "(path,\n '../torchani/resources/ani-1x_dft_x8ens/rHCNO-5.2R_16-3.5A_a4-8.params')\n", (2144, 2227), False, 'import os\n'), ((2249, 2324), 'os.path.join', 'os.path.join', (['path', '"""../torchani/resources/ani-1x_dft_x8ens/sae_linfit.dat"""'], {}), "(path, '../torchani/resources/ani-1x_dft_x8ens/sae_linfit.dat')\n", (2261, 2324), False, 'import os\n'), ((2348, 2388), 'torchani.neurochem.Constants', 'torchani.neurochem.Constants', (['const_file'], {}), '(const_file)\n', (2376, 2388), False, 'import torchani\n'), ((2404, 2434), 'torchani.AEVComputer', 'torchani.AEVComputer', ([], {}), '(**consts)\n', (2424, 2434), False, 'import torchani\n'), ((2452, 2489), 'torchani.neurochem.load_sae', 'torchani.neurochem.load_sae', (['sae_file'], {}), '(sae_file)\n', (2479, 2489), False, 'import torchani\n'), ((3221, 3253), 'os.path.isfile', 'os.path.isfile', (['model_checkpoint'], {}), '(model_checkpoint)\n', (3235, 3253), False, 'import os\n'), ((3538, 3577), 'tensorboardX.SummaryWriter', 'tensorboardX.SummaryWriter', ([], {'log_dir': 'log'}), '(log_dir=log)\n', (3564, 3577), False, 'import tensorboardX\n'), ((4077, 4235), 'torchani.data.BatchedANIDataset', 'torchani.data.BatchedANIDataset', (['training_path', 'consts.species_to_tensor', 'batch_size'], {'device': 'device', 'transform': '[energy_shifter.subtract_from_dataset]'}), '(training_path, consts.species_to_tensor,\n batch_size, device=device, transform=[energy_shifter.subtract_from_dataset]\n )\n', (4108, 4235), False, 'import torchani\n'), ((4250, 4410), 'torchani.data.BatchedANIDataset', 'torchani.data.BatchedANIDataset', (['validation_path', 'consts.species_to_tensor', 'batch_size'], {'device': 'device', 'transform': '[energy_shifter.subtract_from_dataset]'}), '(validation_path, consts.species_to_tensor,\n batch_size, device=device, transform=[energy_shifter.subtract_from_dataset]\n )\n', (4281, 4410), False, 'import torchani\n'), ((6147, 6193), 'torchani.ignite.Container', 'torchani.ignite.Container', (["{'energies': model}"], {}), "({'energies': model})\n", (6172, 6193), False, 'import torchani\n'), ((6626, 6663), 'ignite.contrib.handlers.ProgressBar', 'ignite.contrib.handlers.ProgressBar', ([], {}), '()\n', (6661, 6663), False, 'import ignite\n'), ((7794, 7816), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (7814, 7816), False, 'import timeit\n'), ((6320, 6355), 'torchani.ignite.MSELoss', 'torchani.ignite.MSELoss', (['"""energies"""'], {}), "('energies')\n", (6343, 6355), False, 'import torchani\n'), ((1196, 1222), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1212, 1222), False, 'import os\n'), ((1253, 1264), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1262, 1264), False, 'import os\n'), ((1821, 1846), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1844, 1846), False, 'import torch\n'), ((2777, 2802), 'torch.nn.Linear', 'torch.nn.Linear', (['(384)', '(128)'], {}), '(384, 128)\n', (2792, 2802), False, 'import torch\n'), ((2812, 2830), 'torch.nn.CELU', 'torch.nn.CELU', 
(['(0.1)'], {}), '(0.1)\n', (2825, 2830), False, 'import torch\n'), ((2840, 2865), 'torch.nn.Linear', 'torch.nn.Linear', (['(128)', '(128)'], {}), '(128, 128)\n', (2855, 2865), False, 'import torch\n'), ((2875, 2893), 'torch.nn.CELU', 'torch.nn.CELU', (['(0.1)'], {}), '(0.1)\n', (2888, 2893), False, 'import torch\n'), ((2903, 2927), 'torch.nn.Linear', 'torch.nn.Linear', (['(128)', '(64)'], {}), '(128, 64)\n', (2918, 2927), False, 'import torch\n'), ((2937, 2955), 'torch.nn.CELU', 'torch.nn.CELU', (['(0.1)'], {}), '(0.1)\n', (2950, 2955), False, 'import torch\n'), ((2965, 2987), 'torch.nn.Linear', 'torch.nn.Linear', (['(64)', '(1)'], {}), '(64, 1)\n', (2980, 2987), False, 'import torch\n'), ((3278, 3306), 'torch.load', 'torch.load', (['model_checkpoint'], {}), '(model_checkpoint)\n', (3288, 3306), False, 'import torch\n'), ((3373, 3410), 'torch.nn.Sequential', 'torch.nn.Sequential', (['aev_computer', 'nn'], {}), '(aev_computer, nn)\n', (3392, 3410), False, 'import torch\n'), ((6448, 6486), 'torchani.ignite.RMSEMetric', 'torchani.ignite.RMSEMetric', (['"""energies"""'], {}), "('energies')\n", (6474, 6486), False, 'import torchani\n'), ((7910, 7932), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (7930, 7932), False, 'import timeit\n'), ((7136, 7174), 'torchani.ignite.RMSEMetric', 'torchani.ignite.RMSEMetric', (['"""energies"""'], {}), "('energies')\n", (7162, 7174), False, 'import torchani\n')] |
import scrapy
from selenium import webdriver
import re
import json
import requests
import os
from kototo.items import KototoItem
import pymysql
class KototoSpider(scrapy.Spider):
name = 'kototo'
start_urls = []
    # the Bilibili uploads page of the uploader to crawl
space_url = 'https://space.bilibili.com/17485141/video'
def __init__(self):
"""
        Constructor: sets up the Selenium driver as a headless browser and
        initializes the URLs to crawl; Bilibili paginates via JS, so the
        list-page URLs have to be built by hand.
"""
super().__init__()
        # set up a headless browser
from selenium.webdriver.chrome.options import Options
chrome_options = Options()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')
self.bro = webdriver.Chrome(chrome_options=chrome_options)
        # initialize the list pages to crawl
self.init_start_urls(self.start_urls, self.space_url)
        # create the output folder on the desktop
self.desktop_path = os.path.join(os.path.expanduser('~'), 'Desktop\\' + self.name + '\\')
if not os.path.exists(self.desktop_path):
os.mkdir(self.desktop_path)
def parse(self, response):
"""
        Parse the list page: extract each video <li>, get the title and the
        detail-page URL, then request the detail page.
:param response:
:return:
"""
li_list = response.xpath('//*[@id="submit-video-list"]/ul[2]/li')
for li in li_list:
print(li.xpath('./a[2]/@title').extract_first())
print(detail_url := 'https://' + li.xpath('./a[2]/@href').extract_first()[2:])
yield scrapy.Request(url=detail_url, callback=self.parse_detail)
def parse_detail(self, response):
"""
        Incremental crawl: parse the real audio/video URLs from the detail
        page and hand them to the pipeline. Deduplication is backed by MySQL.
:param response:
:return:
"""
title = response.xpath('//*[@id="viewbox_report"]/h1/@title').extract_first()
        # strip characters from the title that cannot be used in file names or would break the cmd command
title = title.replace('-', '').replace(' ', '').replace('/', '').replace('|', '')
play_info_list = self.get_play_info(response)
        # incremental crawling via a MySQL unique index; Redis would also work when running on a server
if self.insert_info(title, play_info_list[1]):
video_temp_path = (self.desktop_path + title + '_temp.mp4').replace('-', '')
video_path = self.desktop_path + title + '.mp4'
audio_path = self.desktop_path + title + '.mp3'
item = KototoItem()
item['video_url'] = play_info_list[0]
item['audio_url'] = play_info_list[1]
item['video_path'] = video_path
item['audio_path'] = audio_path
item['video_temp_path'] = video_temp_path
yield item
else:
print(title + ': 已经下载过了!')
def insert_info(self, vtitle, vurl):
"""
        Persist the info of crawled videos in MySQL.
        :param vtitle: video title
        :param vurl: video URL
:return:
"""
with Mysql() as conn:
cursor = conn.cursor(pymysql.cursors.DictCursor)
try:
sql = 'insert into tb_kototo(title,url) values("%s","%s")' % (vtitle, vurl)
res = cursor.execute(sql)
conn.commit()
if res == 1:
return True
else:
return False
except:
return False
def get_play_info(self, resp):
"""
        Parse the detail-page source and extract the real video and audio URLs.
:param resp:
:return:
"""
json_data = json.loads(re.findall('<script>window\.__playinfo__=(.*?)</script>', resp.text)[0])
        # real URLs of the video and audio streams
video_url = json_data['data']['dash']['video'][0]['baseUrl']
audio_url = json_data['data']['dash']['audio'][0]['backupUrl'][0]
return video_url, audio_url
def init_start_urls(self, url_list, person_page):
"""
        Initialize the list pages to crawl. Bilibili paginates with JS, so the
        page URLs cannot be found in the page source and have to be derived
        manually.
:param url_list:
:param person_page:
:return:
"""
mid = re.findall('https://space.bilibili.com/(.*?)/video\w*', person_page)[0]
url = 'https://api.bilibili.com/x/space/arc/search?mid=' + mid + '&ps=30&tid=0&pn=1&keyword=&order=pubdate&jsonp=jsonp'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36',
'Referer': 'https://www.bilibili.com'
}
json_data = requests.get(url=url, headers=headers).json()
total_count = json_data['data']['page']['count']
page_size = json_data['data']['page']['ps']
if total_count <= page_size:
page_count = 1
elif total_count % page_size == 0:
page_count = total_count // page_size
else:
page_count = total_count // page_size + 1
url_template = 'https://space.bilibili.com/' + mid + '/video?tid=0&page=' + '%d' + '&keyword=&order=pubdate'
for i in range(page_count):
page_no = i + 1
url_list.append(url_template % page_no)
def closed(self, spider):
"""
        Close the Selenium window when the spider finishes.
:param spider:
:return:
"""
self.bro.quit()
class Mysql(object):
def __enter__(self):
self.connection = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='<PASSWORD>', database='python')
return self.connection
def __exit__(self, exc_type, exc_val, exc_tb):
self.connection.close()
| [
"selenium.webdriver.chrome.options.Options",
"os.path.exists",
"selenium.webdriver.Chrome",
"pymysql.connect",
"requests.get",
"scrapy.Request",
"kototo.items.KototoItem",
"os.mkdir",
"re.findall",
"os.path.expanduser"
] | [((573, 582), 'selenium.webdriver.chrome.options.Options', 'Options', ([], {}), '()\n', (580, 582), False, 'from selenium.webdriver.chrome.options import Options\n'), ((705, 752), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'chrome_options': 'chrome_options'}), '(chrome_options=chrome_options)\n', (721, 752), False, 'from selenium import webdriver\n'), ((5155, 5259), 'pymysql.connect', 'pymysql.connect', ([], {'host': '"""127.0.0.1"""', 'port': '(3306)', 'user': '"""root"""', 'password': '"""<PASSWORD>"""', 'database': '"""python"""'}), "(host='127.0.0.1', port=3306, user='root', password=\n '<PASSWORD>', database='python')\n", (5170, 5259), False, 'import pymysql\n'), ((896, 919), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (914, 919), False, 'import os\n'), ((968, 1001), 'os.path.exists', 'os.path.exists', (['self.desktop_path'], {}), '(self.desktop_path)\n', (982, 1001), False, 'import os\n'), ((1015, 1042), 'os.mkdir', 'os.mkdir', (['self.desktop_path'], {}), '(self.desktop_path)\n', (1023, 1042), False, 'import os\n'), ((2273, 2285), 'kototo.items.KototoItem', 'KototoItem', ([], {}), '()\n', (2283, 2285), False, 'from kototo.items import KototoItem\n'), ((3879, 3948), 're.findall', 're.findall', (['"""https://space.bilibili.com/(.*?)/video\\\\w*"""', 'person_page'], {}), "('https://space.bilibili.com/(.*?)/video\\\\w*', person_page)\n", (3889, 3948), False, 'import re\n'), ((1455, 1513), 'scrapy.Request', 'scrapy.Request', ([], {'url': 'detail_url', 'callback': 'self.parse_detail'}), '(url=detail_url, callback=self.parse_detail)\n', (1469, 1513), False, 'import scrapy\n'), ((3367, 3436), 're.findall', 're.findall', (['"""<script>window\\\\.__playinfo__=(.*?)</script>"""', 'resp.text'], {}), "('<script>window\\\\.__playinfo__=(.*?)</script>', resp.text)\n", (3377, 3436), False, 'import re\n'), ((4323, 4361), 'requests.get', 'requests.get', ([], {'url': 'url', 'headers': 'headers'}), '(url=url, headers=headers)\n', (4335, 4361), False, 'import requests\n')] |
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Qotto, 2019
from typing import Dict, Any
from asyncio import Event, Future, get_event_loop, AbstractEventLoop
__all__ = [
'DictIsFull',
'DictIsEmpty',
'AsyncCoordinator'
]
class DictIsFull(Exception):
pass
class DictIsEmpty(Exception):
pass
class AsyncCoordinator:
_loop: AbstractEventLoop
_maxsize: int
_getters: Dict[str, Future]
_putters: Dict[str, Future]
_unfinished_tasks: int
_finished: Event
_dict: Dict[str, Any]
def __init__(self, maxsize=0, *, loop=None):
if loop is None:
self._loop = get_event_loop()
else:
self._loop = loop
self._maxsize = maxsize
self._getters: Dict[str, Future] = dict()
self._putters: Dict[str, Future] = dict()
self._unfinished_tasks = 0
self._finished = Event(loop=self._loop)
self._finished.set()
self._dict = dict()
def get_dict_copy(self) -> Dict[str, Any]:
return self._dict.copy()
def _get(self, key: str) -> Any:
value = self._dict[key]
del self._dict[key]
return value
def _put(self, key: str, value: Any) -> None:
self._dict[key] = value
def _wake_up_getter(self, key: str) -> None:
if key in self._getters:
waiter = self._getters[key]
if not waiter.done():
waiter.set_result(None)
def _wake_up_putter(self, key: str) -> None:
if key in self._putters:
waiter = self._putters[key]
if not waiter.done():
waiter.set_result(None)
def __repr__(self) -> str:
return f'<{type(self).__name__} at {id(self):#x} {self._format()}>'
def __str__(self) -> str:
return f'<{type(self).__name__} {self._format()}>'
def _format(self) -> str:
result = f'maxsize={self._maxsize!r}'
if getattr(self, '_dict', None):
result += f' _dict={dict(self._dict)!r}'
if self._getters:
result += f' _getters[{len(self._getters)}]'
if self._putters:
result += f' _putters[{len(self._putters)}]'
if self._unfinished_tasks:
result += f' tasks={self._unfinished_tasks}'
return result
def qsize(self) -> int:
return len(self._dict)
@property
def maxsize(self) -> int:
return self._maxsize
def empty(self) -> bool:
return not self._dict
def full(self) -> bool:
if self._maxsize <= 0:
return False
else:
return self.qsize() >= self._maxsize
async def put(self, key: str, value: Any) -> None:
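        # If the dict is already at maxsize, park a Future in self._putters and
        # wait until a get_nowait() on this key wakes us up before storing.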
while self.full():
putter = self._loop.create_future()
self._putters[key] = putter
try:
await putter
except Exception as err:
print('Put err = ', err)
# Just in case putter is not done yet.
putter.cancel()
try:
# Clean self._putters from canceled putters.
del self._putters[key]
            except KeyError:
# The putter could be removed from self._putters by a
# previous get_nowait call.
pass
if not self.full() and not putter.cancelled():
# We were woken up by get_nowait(), but can't take
# the call. Wake up the next in line.
self._wake_up_putter(key)
raise
return self.put_nowait(key, value)
def put_nowait(self, key: str, value: Any) -> None:
print(f'put no wait {key}, {value}')
if self.full():
raise DictIsFull
self._put(key, value)
self._unfinished_tasks += 1
self._finished.clear()
self._wake_up_getter(key)
async def get(self, key: str) -> Any:
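        # Wait until a value for this key has been put; put_nowait() wakes the
        # parked Future via _wake_up_getter().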
print('dict = ', self._dict)
print('loop = ', self._loop)
        while key not in self._dict:
            print('waiting for key = ', key)
getter = self._loop.create_future()
self._getters[key] = getter
try:
print('Before await')
await getter
print('After await')
except Exception as err:
print('Err = ', err)
# Just in case getter is not done yet.
getter.cancel()
try:
# Clean self._getters from canceled getters.
del self._getters[key]
            except KeyError:
# The getter could be removed from self._getters by a
# previous put_nowait call.
pass
if not self.empty() and not getter.cancelled():
# We were woken up by put_nowait(), but can't take
# the call. Wake up the next in line.
self._wake_up_getter(key)
raise
return self.get_nowait(key)
def get_nowait(self, key: str) -> Any:
if self.empty():
raise DictIsEmpty
item = self._get(key)
self._wake_up_putter(key)
return item
def task_done(self) -> None:
if self._unfinished_tasks <= 0:
raise ValueError('task_done() called too many times')
self._unfinished_tasks -= 1
if self._unfinished_tasks == 0:
self._finished.set()
async def join(self) -> None:
if self._unfinished_tasks > 0:
await self._finished.wait()
| [
"asyncio.get_event_loop",
"asyncio.Event"
] | [((889, 911), 'asyncio.Event', 'Event', ([], {'loop': 'self._loop'}), '(loop=self._loop)\n', (894, 911), False, 'from asyncio import Event, Future, get_event_loop, AbstractEventLoop\n'), ((634, 650), 'asyncio.get_event_loop', 'get_event_loop', ([], {}), '()\n', (648, 650), False, 'from asyncio import Event, Future, get_event_loop, AbstractEventLoop\n')] |
from aiohttp import web, WSMsgType
from aiohttp_security import authorized_userid
from players import AlreadyRegistered
from protocol import handle_command, handle_error, send_command
from commands import ErrorCommand
import json
import logging
from global_defs import global_playground, registry
async def websocket_handler(request):
ws = web.WebSocketResponse()
try:
await ws.prepare(request)
except web.HTTPException as e:
logging.info('Failed to open WebSocket')
else:
user_id = await authorized_userid(request)
logging.info("websocket connection opened with user_id {}".format(user_id))
# if already connected, not permit connection
try:
global_playground.register(user_id)
except AlreadyRegistered:
logging.info("Deliberately closed connection with already connected user_id {}!".format(user_id))
await send_command(ErrorCommand(user_id, msg=f'User id {user_id} already in use'))
await ws.close()
else:
registry.add_socket(user_id, ws)
async for msg in ws:
if msg.type == WSMsgType.TEXT:
await handle_command(json.loads(msg.data), user_id)
elif msg.type == WSMsgType.ERROR:
logging.info('connection closed with exception {} with user_id {}'.format(ws.exception(), user_id))
await handle_error(user_id)
elif msg.type == WSMsgType.BINARY:
logging.info('Received BINARY type message')
elif msg.type == WSMsgType.CLOSE:
logging.info('Received CLOSE type message')
elif msg.type == WSMsgType.CLOSED:
logging.info('Received CLOSED type message')
elif msg.type == WSMsgType.CLOSING:
logging.info('Received CLOSING type message')
elif msg.type == WSMsgType.CONTINUATION:
logging.info('Received CONTINUATION type message')
elif msg.type == WSMsgType.PING:
logging.info('Received PING type message')
elif msg.type == WSMsgType.PONG:
logging.info('Received PONG type message')
logging.info('websocket connection closed with user_id {}'.format(user_id))
await handle_error(user_id)
return ws
| [
"json.loads",
"global_defs.global_playground.register",
"aiohttp_security.authorized_userid",
"protocol.handle_error",
"global_defs.registry.add_socket",
"commands.ErrorCommand",
"logging.info",
"aiohttp.web.WebSocketResponse"
] | [((346, 369), 'aiohttp.web.WebSocketResponse', 'web.WebSocketResponse', ([], {}), '()\n', (367, 369), False, 'from aiohttp import web, WSMsgType\n'), ((456, 496), 'logging.info', 'logging.info', (['"""Failed to open WebSocket"""'], {}), "('Failed to open WebSocket')\n", (468, 496), False, 'import logging\n'), ((531, 557), 'aiohttp_security.authorized_userid', 'authorized_userid', (['request'], {}), '(request)\n', (548, 557), False, 'from aiohttp_security import authorized_userid\n'), ((721, 756), 'global_defs.global_playground.register', 'global_playground.register', (['user_id'], {}), '(user_id)\n', (747, 756), False, 'from global_defs import global_playground, registry\n'), ((1051, 1083), 'global_defs.registry.add_socket', 'registry.add_socket', (['user_id', 'ws'], {}), '(user_id, ws)\n', (1070, 1083), False, 'from global_defs import global_playground, registry\n'), ((2377, 2398), 'protocol.handle_error', 'handle_error', (['user_id'], {}), '(user_id)\n', (2389, 2398), False, 'from protocol import handle_command, handle_error, send_command\n'), ((932, 994), 'commands.ErrorCommand', 'ErrorCommand', (['user_id'], {'msg': 'f"""User id {user_id} already in use"""'}), "(user_id, msg=f'User id {user_id} already in use')\n", (944, 994), False, 'from commands import ErrorCommand\n'), ((1205, 1225), 'json.loads', 'json.loads', (['msg.data'], {}), '(msg.data)\n', (1215, 1225), False, 'import json\n'), ((1432, 1453), 'protocol.handle_error', 'handle_error', (['user_id'], {}), '(user_id)\n', (1444, 1453), False, 'from protocol import handle_command, handle_error, send_command\n'), ((1525, 1569), 'logging.info', 'logging.info', (['"""Received BINARY type message"""'], {}), "('Received BINARY type message')\n", (1537, 1569), False, 'import logging\n'), ((1640, 1683), 'logging.info', 'logging.info', (['"""Received CLOSE type message"""'], {}), "('Received CLOSE type message')\n", (1652, 1683), False, 'import logging\n'), ((1755, 1799), 'logging.info', 'logging.info', (['"""Received CLOSED type message"""'], {}), "('Received CLOSED type message')\n", (1767, 1799), False, 'import logging\n'), ((1872, 1917), 'logging.info', 'logging.info', (['"""Received CLOSING type message"""'], {}), "('Received CLOSING type message')\n", (1884, 1917), False, 'import logging\n'), ((1995, 2045), 'logging.info', 'logging.info', (['"""Received CONTINUATION type message"""'], {}), "('Received CONTINUATION type message')\n", (2007, 2045), False, 'import logging\n'), ((2115, 2157), 'logging.info', 'logging.info', (['"""Received PING type message"""'], {}), "('Received PING type message')\n", (2127, 2157), False, 'import logging\n'), ((2227, 2269), 'logging.info', 'logging.info', (['"""Received PONG type message"""'], {}), "('Received PONG type message')\n", (2239, 2269), False, 'import logging\n')] |
import time
from datetime import timedelta
from http.cookies import SimpleCookie
from unittest.mock import Mock, AsyncMock
import pytest
from fastapi import HTTPException
from fastapi.security import SecurityScopes
from starlette.responses import Response
from fastapi_login import LoginManager
from fastapi_login.exceptions import InvalidCredentialsException
@pytest.mark.asyncio
async def test_token_expiry(clean_manager, default_data):
token = clean_manager.create_access_token(
data=default_data,
expires=timedelta(microseconds=1) # should be invalid instantly
)
time.sleep(1)
with pytest.raises(HTTPException) as exc_info:
await clean_manager.get_current_user(token)
assert exc_info
@pytest.mark.asyncio
@pytest.mark.parametrize("loader", [Mock(), AsyncMock()])
async def test_user_loader(loader, clean_manager, default_data, db):
token = clean_manager.create_access_token(data=default_data)
loader = Mock()
clean_manager.user_loader(loader)
_ = await clean_manager.get_current_user(token)
loader.assert_called()
loader.assert_called_with(default_data['sub'])
@pytest.mark.asyncio
async def test_user_loader_not_set(clean_manager, default_data):
token = clean_manager.create_access_token(data=default_data)
with pytest.raises(Exception) as exc_info:
await clean_manager.get_current_user(token)
assert "Missing user_loader callback" == str(exc_info.value)
@pytest.mark.asyncio
async def test_user_loader_returns_none(clean_manager, invalid_data, load_user_fn):
clean_manager.user_loader(load_user_fn)
token = clean_manager.create_access_token(data={"sub": invalid_data["username"]})
with pytest.raises(HTTPException) as exc_info:
await clean_manager.get_current_user(token)
assert exc_info.value == InvalidCredentialsException
def test_token_from_cookie(clean_manager):
request = Mock(cookies={clean_manager.cookie_name: "test-value"})
token = clean_manager._token_from_cookie(request)
    assert token == "test-value"
def test_token_from_cookie_raises(clean_manager):
request = Mock(cookies={clean_manager.cookie_name: ""})
with pytest.raises(HTTPException) as exc_info:
clean_manager._token_from_cookie(request)
assert exc_info.value == InvalidCredentialsException
def test_token_from_cookie_returns_none_auto_error_off(clean_manager):
clean_manager.auto_error = False
request = Mock(cookies={clean_manager.cookie_name: ""})
token = clean_manager._token_from_cookie(request)
assert token is None
def test_set_cookie(clean_manager, default_data):
token = clean_manager.create_access_token(data=default_data)
response = Response()
clean_manager.set_cookie(response, token)
cookie = SimpleCookie(response.headers['set-cookie'])
cookie_value = cookie.get(clean_manager.cookie_name)
assert cookie_value is not None
assert cookie_value["httponly"] is True
assert cookie_value["samesite"] == "lax"
assert cookie_value.value == token
assert cookie_value.key == clean_manager.cookie_name
def test_config_no_cookie_no_header_raises(secret, token_url):
with pytest.raises(Exception) as exc_info:
LoginManager(secret, token_url, use_cookie=False, use_header=False)
assert "use_cookie and use_header are both False one of them needs to be True" == str(exc_info.value)
def test_has_scopes_true(clean_manager, default_data):
scopes = ["read"]
token = clean_manager.create_access_token(data=default_data, scopes=scopes)
required_scopes = SecurityScopes(scopes=scopes)
assert clean_manager.has_scopes(token, required_scopes)
def test_has_scopes_no_scopes(clean_manager, default_data):
scopes = ["read"]
token = clean_manager.create_access_token(data=default_data)
assert clean_manager.has_scopes(token, SecurityScopes(scopes=scopes)) is False
def test_has_scopes_missing_scopes(clean_manager, default_data):
scopes = ["read"]
token = clean_manager.create_access_token(data=default_data)
required_scopes = scopes + ["write"]
assert clean_manager.has_scopes(token, SecurityScopes(scopes=required_scopes)) is False
| [
"fastapi.security.SecurityScopes",
"unittest.mock.Mock",
"starlette.responses.Response",
"unittest.mock.AsyncMock",
"time.sleep",
"http.cookies.SimpleCookie",
"pytest.raises",
"fastapi_login.LoginManager",
"datetime.timedelta"
] | [((600, 613), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (610, 613), False, 'import time\n'), ((966, 972), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (970, 972), False, 'from unittest.mock import Mock, AsyncMock\n'), ((1917, 1972), 'unittest.mock.Mock', 'Mock', ([], {'cookies': "{clean_manager.cookie_name: 'test-value'}"}), "(cookies={clean_manager.cookie_name: 'test-value'})\n", (1921, 1972), False, 'from unittest.mock import Mock, AsyncMock\n'), ((2126, 2171), 'unittest.mock.Mock', 'Mock', ([], {'cookies': "{clean_manager.cookie_name: ''}"}), "(cookies={clean_manager.cookie_name: ''})\n", (2130, 2171), False, 'from unittest.mock import Mock, AsyncMock\n'), ((2463, 2508), 'unittest.mock.Mock', 'Mock', ([], {'cookies': "{clean_manager.cookie_name: ''}"}), "(cookies={clean_manager.cookie_name: ''})\n", (2467, 2508), False, 'from unittest.mock import Mock, AsyncMock\n'), ((2720, 2730), 'starlette.responses.Response', 'Response', ([], {}), '()\n', (2728, 2730), False, 'from starlette.responses import Response\n'), ((2790, 2834), 'http.cookies.SimpleCookie', 'SimpleCookie', (["response.headers['set-cookie']"], {}), "(response.headers['set-cookie'])\n", (2802, 2834), False, 'from http.cookies import SimpleCookie\n'), ((3589, 3618), 'fastapi.security.SecurityScopes', 'SecurityScopes', ([], {'scopes': 'scopes'}), '(scopes=scopes)\n', (3603, 3618), False, 'from fastapi.security import SecurityScopes\n'), ((623, 651), 'pytest.raises', 'pytest.raises', (['HTTPException'], {}), '(HTTPException)\n', (636, 651), False, 'import pytest\n'), ((797, 803), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (801, 803), False, 'from unittest.mock import Mock, AsyncMock\n'), ((805, 816), 'unittest.mock.AsyncMock', 'AsyncMock', ([], {}), '()\n', (814, 816), False, 'from unittest.mock import Mock, AsyncMock\n'), ((1304, 1328), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (1317, 1328), False, 'import pytest\n'), ((1706, 1734), 'pytest.raises', 'pytest.raises', (['HTTPException'], {}), '(HTTPException)\n', (1719, 1734), False, 'import pytest\n'), ((2181, 2209), 'pytest.raises', 'pytest.raises', (['HTTPException'], {}), '(HTTPException)\n', (2194, 2209), False, 'import pytest\n'), ((3187, 3211), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (3200, 3211), False, 'import pytest\n'), ((3233, 3300), 'fastapi_login.LoginManager', 'LoginManager', (['secret', 'token_url'], {'use_cookie': '(False)', 'use_header': '(False)'}), '(secret, token_url, use_cookie=False, use_header=False)\n', (3245, 3300), False, 'from fastapi_login import LoginManager\n'), ((533, 558), 'datetime.timedelta', 'timedelta', ([], {'microseconds': '(1)'}), '(microseconds=1)\n', (542, 558), False, 'from datetime import timedelta\n'), ((3871, 3900), 'fastapi.security.SecurityScopes', 'SecurityScopes', ([], {'scopes': 'scopes'}), '(scopes=scopes)\n', (3885, 3900), False, 'from fastapi.security import SecurityScopes\n'), ((4149, 4187), 'fastapi.security.SecurityScopes', 'SecurityScopes', ([], {'scopes': 'required_scopes'}), '(scopes=required_scopes)\n', (4163, 4187), False, 'from fastapi.security import SecurityScopes\n')] |
# Copyright 2022 The Blqs Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cirq
import pymore
import pytest
import blqs_cirq as bc
def test_moment():
def fn():
with bc.Moment():
bc.H(0)
bc.CX(1, 2)
q0, q1, q2 = cirq.LineQubit.range(3)
assert bc.build(fn)() == cirq.Circuit(cirq.Moment([cirq.H(q0), cirq.CX(q1, q2)]))
def test_multiple_moments():
def fn():
with bc.Moment():
bc.CX(1, 2)
with bc.Moment():
bc.X(0)
q0, q1, q2 = cirq.LineQubit.range(3)
assert bc.build(fn)() == cirq.Circuit(
[
cirq.Moment([cirq.CX(q1, q2)]),
cirq.Moment([cirq.X(q0)]),
]
)
def test_empty_moment():
def fn():
with bc.Moment():
pass
assert bc.build(fn)() == cirq.Circuit(cirq.Moment([]))
def test_moment_target_overlap():
def fn():
with bc.Moment():
bc.H(0)
bc.CX(0, 1)
with pytest.raises(ValueError, match="Overlapping operations"):
bc.build(fn)()
def fn_repeat():
with bc.Moment():
bc.H(0)
with bc.Repeat(repetitions=10):
bc.CX(0, 1)
with pytest.raises(ValueError, match="Overlapping operations"):
bc.build(fn_repeat)()
def test_moment_repeat():
def fn():
with bc.Moment():
with bc.Repeat(repetitions=10):
bc.H(0)
h = cirq.Circuit([cirq.H(cirq.LineQubit(0))])
assert bc.build(fn)() == cirq.Circuit([cirq.CircuitOperation(h.freeze(), repetitions=10)])
def test_moment_append_extend():
m = bc.Moment()
m.append(bc.H(0))
m.extend([bc.X(1), bc.X(2)])
assert m.statements() == (bc.H(0), bc.X(1), bc.X(2))
def test_moment_context_manager():
with bc.Moment() as m:
bc.H(0)
bc.X(1)
assert m.statements() == (bc.H(0), bc.X(1))
def test_moment_str():
with bc.Moment() as m:
pass
assert str(m) == "with Moment():\n"
with bc.Moment() as m:
bc.H(0)
bc.H(1)
assert str(m) == "with Moment():\n H 0\n H 1"
def test_moment_equality():
m0 = bc.Moment()
with bc.Moment() as m1:
bc.H(0)
with bc.Moment() as m2:
bc.H(1)
with bc.Moment() as m3:
bc.H(0)
bc.H(1)
equals_tester = pymore.EqualsTester()
equals_tester.make_equality_group(lambda: m0)
equals_tester.make_equality_group(lambda: m1)
equals_tester.add_equality_group(m2)
equals_tester.add_equality_group(m3)
| [
"blqs_cirq.build",
"blqs_cirq.Repeat",
"blqs_cirq.CX",
"cirq.H",
"cirq.Moment",
"cirq.LineQubit.range",
"cirq.LineQubit",
"blqs_cirq.X",
"blqs_cirq.Moment",
"pytest.raises",
"cirq.X",
"pymore.EqualsTester",
"blqs_cirq.H",
"cirq.CX"
] | [((772, 795), 'cirq.LineQubit.range', 'cirq.LineQubit.range', (['(3)'], {}), '(3)\n', (792, 795), False, 'import cirq\n'), ((1041, 1064), 'cirq.LineQubit.range', 'cirq.LineQubit.range', (['(3)'], {}), '(3)\n', (1061, 1064), False, 'import cirq\n'), ((2138, 2149), 'blqs_cirq.Moment', 'bc.Moment', ([], {}), '()\n', (2147, 2149), True, 'import blqs_cirq as bc\n'), ((2662, 2673), 'blqs_cirq.Moment', 'bc.Moment', ([], {}), '()\n', (2671, 2673), True, 'import blqs_cirq as bc\n'), ((2843, 2864), 'pymore.EqualsTester', 'pymore.EqualsTester', ([], {}), '()\n', (2862, 2864), False, 'import pymore\n'), ((1491, 1548), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Overlapping operations"""'}), "(ValueError, match='Overlapping operations')\n", (1504, 1548), False, 'import pytest\n'), ((1723, 1780), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Overlapping operations"""'}), "(ValueError, match='Overlapping operations')\n", (1736, 1780), False, 'import pytest\n'), ((2163, 2170), 'blqs_cirq.H', 'bc.H', (['(0)'], {}), '(0)\n', (2167, 2170), True, 'import blqs_cirq as bc\n'), ((2308, 2319), 'blqs_cirq.Moment', 'bc.Moment', ([], {}), '()\n', (2317, 2319), True, 'import blqs_cirq as bc\n'), ((2334, 2341), 'blqs_cirq.H', 'bc.H', (['(0)'], {}), '(0)\n', (2338, 2341), True, 'import blqs_cirq as bc\n'), ((2350, 2357), 'blqs_cirq.X', 'bc.X', (['(1)'], {}), '(1)\n', (2354, 2357), True, 'import blqs_cirq as bc\n'), ((2440, 2451), 'blqs_cirq.Moment', 'bc.Moment', ([], {}), '()\n', (2449, 2451), True, 'import blqs_cirq as bc\n'), ((2521, 2532), 'blqs_cirq.Moment', 'bc.Moment', ([], {}), '()\n', (2530, 2532), True, 'import blqs_cirq as bc\n'), ((2547, 2554), 'blqs_cirq.H', 'bc.H', (['(0)'], {}), '(0)\n', (2551, 2554), True, 'import blqs_cirq as bc\n'), ((2563, 2570), 'blqs_cirq.H', 'bc.H', (['(1)'], {}), '(1)\n', (2567, 2570), True, 'import blqs_cirq as bc\n'), ((2683, 2694), 'blqs_cirq.Moment', 'bc.Moment', ([], {}), '()\n', (2692, 2694), True, 'import blqs_cirq as bc\n'), ((2710, 2717), 'blqs_cirq.H', 'bc.H', (['(0)'], {}), '(0)\n', (2714, 2717), True, 'import blqs_cirq as bc\n'), ((2727, 2738), 'blqs_cirq.Moment', 'bc.Moment', ([], {}), '()\n', (2736, 2738), True, 'import blqs_cirq as bc\n'), ((2754, 2761), 'blqs_cirq.H', 'bc.H', (['(1)'], {}), '(1)\n', (2758, 2761), True, 'import blqs_cirq as bc\n'), ((2771, 2782), 'blqs_cirq.Moment', 'bc.Moment', ([], {}), '()\n', (2780, 2782), True, 'import blqs_cirq as bc\n'), ((2798, 2805), 'blqs_cirq.H', 'bc.H', (['(0)'], {}), '(0)\n', (2802, 2805), True, 'import blqs_cirq as bc\n'), ((2814, 2821), 'blqs_cirq.H', 'bc.H', (['(1)'], {}), '(1)\n', (2818, 2821), True, 'import blqs_cirq as bc\n'), ((697, 708), 'blqs_cirq.Moment', 'bc.Moment', ([], {}), '()\n', (706, 708), True, 'import blqs_cirq as bc\n'), ((722, 729), 'blqs_cirq.H', 'bc.H', (['(0)'], {}), '(0)\n', (726, 729), True, 'import blqs_cirq as bc\n'), ((742, 753), 'blqs_cirq.CX', 'bc.CX', (['(1)', '(2)'], {}), '(1, 2)\n', (747, 753), True, 'import blqs_cirq as bc\n'), ((807, 819), 'blqs_cirq.build', 'bc.build', (['fn'], {}), '(fn)\n', (815, 819), True, 'import blqs_cirq as bc\n'), ((940, 951), 'blqs_cirq.Moment', 'bc.Moment', ([], {}), '()\n', (949, 951), True, 'import blqs_cirq as bc\n'), ((965, 976), 'blqs_cirq.CX', 'bc.CX', (['(1)', '(2)'], {}), '(1, 2)\n', (970, 976), True, 'import blqs_cirq as bc\n'), ((990, 1001), 'blqs_cirq.Moment', 'bc.Moment', ([], {}), '()\n', (999, 1001), True, 'import blqs_cirq as bc\n'), ((1015, 1022), 'blqs_cirq.X', 'bc.X', (['(0)'], {}), '(0)\n', 
(1019, 1022), True, 'import blqs_cirq as bc\n'), ((1076, 1088), 'blqs_cirq.build', 'bc.build', (['fn'], {}), '(fn)\n', (1084, 1088), True, 'import blqs_cirq as bc\n'), ((1271, 1282), 'blqs_cirq.Moment', 'bc.Moment', ([], {}), '()\n', (1280, 1282), True, 'import blqs_cirq as bc\n'), ((1313, 1325), 'blqs_cirq.build', 'bc.build', (['fn'], {}), '(fn)\n', (1321, 1325), True, 'import blqs_cirq as bc\n'), ((1344, 1359), 'cirq.Moment', 'cirq.Moment', (['[]'], {}), '([])\n', (1355, 1359), False, 'import cirq\n'), ((1424, 1435), 'blqs_cirq.Moment', 'bc.Moment', ([], {}), '()\n', (1433, 1435), True, 'import blqs_cirq as bc\n'), ((1449, 1456), 'blqs_cirq.H', 'bc.H', (['(0)'], {}), '(0)\n', (1453, 1456), True, 'import blqs_cirq as bc\n'), ((1469, 1480), 'blqs_cirq.CX', 'bc.CX', (['(0)', '(1)'], {}), '(0, 1)\n', (1474, 1480), True, 'import blqs_cirq as bc\n'), ((1558, 1570), 'blqs_cirq.build', 'bc.build', (['fn'], {}), '(fn)\n', (1566, 1570), True, 'import blqs_cirq as bc\n'), ((1608, 1619), 'blqs_cirq.Moment', 'bc.Moment', ([], {}), '()\n', (1617, 1619), True, 'import blqs_cirq as bc\n'), ((1633, 1640), 'blqs_cirq.H', 'bc.H', (['(0)'], {}), '(0)\n', (1637, 1640), True, 'import blqs_cirq as bc\n'), ((1790, 1809), 'blqs_cirq.build', 'bc.build', (['fn_repeat'], {}), '(fn_repeat)\n', (1798, 1809), True, 'import blqs_cirq as bc\n'), ((1867, 1878), 'blqs_cirq.Moment', 'bc.Moment', ([], {}), '()\n', (1876, 1878), True, 'import blqs_cirq as bc\n'), ((2011, 2023), 'blqs_cirq.build', 'bc.build', (['fn'], {}), '(fn)\n', (2019, 2023), True, 'import blqs_cirq as bc\n'), ((2186, 2193), 'blqs_cirq.X', 'bc.X', (['(1)'], {}), '(1)\n', (2190, 2193), True, 'import blqs_cirq as bc\n'), ((2195, 2202), 'blqs_cirq.X', 'bc.X', (['(2)'], {}), '(2)\n', (2199, 2202), True, 'import blqs_cirq as bc\n'), ((2235, 2242), 'blqs_cirq.H', 'bc.H', (['(0)'], {}), '(0)\n', (2239, 2242), True, 'import blqs_cirq as bc\n'), ((2244, 2251), 'blqs_cirq.X', 'bc.X', (['(1)'], {}), '(1)\n', (2248, 2251), True, 'import blqs_cirq as bc\n'), ((2253, 2260), 'blqs_cirq.X', 'bc.X', (['(2)'], {}), '(2)\n', (2257, 2260), True, 'import blqs_cirq as bc\n'), ((2388, 2395), 'blqs_cirq.H', 'bc.H', (['(0)'], {}), '(0)\n', (2392, 2395), True, 'import blqs_cirq as bc\n'), ((2397, 2404), 'blqs_cirq.X', 'bc.X', (['(1)'], {}), '(1)\n', (2401, 2404), True, 'import blqs_cirq as bc\n'), ((1658, 1683), 'blqs_cirq.Repeat', 'bc.Repeat', ([], {'repetitions': '(10)'}), '(repetitions=10)\n', (1667, 1683), True, 'import blqs_cirq as bc\n'), ((1701, 1712), 'blqs_cirq.CX', 'bc.CX', (['(0)', '(1)'], {}), '(0, 1)\n', (1706, 1712), True, 'import blqs_cirq as bc\n'), ((1897, 1922), 'blqs_cirq.Repeat', 'bc.Repeat', ([], {'repetitions': '(10)'}), '(repetitions=10)\n', (1906, 1922), True, 'import blqs_cirq as bc\n'), ((1940, 1947), 'blqs_cirq.H', 'bc.H', (['(0)'], {}), '(0)\n', (1944, 1947), True, 'import blqs_cirq as bc\n'), ((1978, 1995), 'cirq.LineQubit', 'cirq.LineQubit', (['(0)'], {}), '(0)\n', (1992, 1995), False, 'import cirq\n'), ((851, 861), 'cirq.H', 'cirq.H', (['q0'], {}), '(q0)\n', (857, 861), False, 'import cirq\n'), ((863, 878), 'cirq.CX', 'cirq.CX', (['q1', 'q2'], {}), '(q1, q2)\n', (870, 878), False, 'import cirq\n'), ((1143, 1158), 'cirq.CX', 'cirq.CX', (['q1', 'q2'], {}), '(q1, q2)\n', (1150, 1158), False, 'import cirq\n'), ((1187, 1197), 'cirq.X', 'cirq.X', (['q0'], {}), '(q0)\n', (1193, 1197), False, 'import cirq\n')] |
#!/usr/bin/env python3
from flask import render_template, request, make_response, jsonify
from app import app
from app.mongo import mongodb
from . import admin
@admin.route('/setting', methods=['GET'])
def basicset_index():
collection = app.config.get('BASICSET_COLLECTION')
setting = mongodb[collection].find_one({"usageTag":"blog_setting"})
if setting:
blog_title = setting.get('blog_title')
blog_keywords = setting.get('blog_keywords')
blog_description = setting.get('blog_description')
blog_tag = setting.get('blog_tag')
return render_template('setting/index.html') | [
"flask.render_template",
"app.app.config.get"
] | [((245, 282), 'app.app.config.get', 'app.config.get', (['"""BASICSET_COLLECTION"""'], {}), "('BASICSET_COLLECTION')\n", (259, 282), False, 'from app import app\n'), ((618, 655), 'flask.render_template', 'render_template', (['"""setting/index.html"""'], {}), "('setting/index.html')\n", (633, 655), False, 'from flask import render_template, request, make_response, jsonify\n')] |
from delphin_6_automation.database_interactions import mongo_setup
from delphin_6_automation.database_interactions.auth import auth_dict
from delphin_6_automation.database_interactions.db_templates import sample_entry, delphin_entry
__author__ = "<NAME>"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
# RiBuild Modules
from delphin_6_automation.logging.ribuild_logger import ribuild_logger
# Logger
logger = ribuild_logger(__name__)
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
def get_bare():
ids = delphin_entry.Delphin.objects(sample_data__design_option__name__in=['1d_bare'])
print('IDS', ids.count())
return ids
def check_ids(projects):
    for project in projects:
sample = sample_entry.Sample.objects(delphin_docs=project).first()
print(f"Project: {project.id} with sequence {project.sample_data.get('sequence')} is in sample iteration: {sample.iteration}. Data: {project.sample_data.get('exterior_climate')}")
if __name__ == '__main__':
server = mongo_setup.global_init(auth_dict)
ids = get_bare()
check_ids(ids)
mongo_setup.global_end_ssh(server) | [
"delphin_6_automation.database_interactions.mongo_setup.global_end_ssh",
"delphin_6_automation.database_interactions.mongo_setup.global_init",
"delphin_6_automation.database_interactions.db_templates.delphin_entry.Delphin.objects",
"delphin_6_automation.logging.ribuild_logger.ribuild_logger",
"delphin_6_automation.database_interactions.db_templates.sample_entry.Sample.objects"
] | [((528, 552), 'delphin_6_automation.logging.ribuild_logger.ribuild_logger', 'ribuild_logger', (['__name__'], {}), '(__name__)\n', (542, 552), False, 'from delphin_6_automation.logging.ribuild_logger import ribuild_logger\n'), ((713, 792), 'delphin_6_automation.database_interactions.db_templates.delphin_entry.Delphin.objects', 'delphin_entry.Delphin.objects', ([], {'sample_data__design_option__name__in': "['1d_bare']"}), "(sample_data__design_option__name__in=['1d_bare'])\n", (742, 792), False, 'from delphin_6_automation.database_interactions.db_templates import sample_entry, delphin_entry\n'), ((1199, 1233), 'delphin_6_automation.database_interactions.mongo_setup.global_init', 'mongo_setup.global_init', (['auth_dict'], {}), '(auth_dict)\n', (1222, 1233), False, 'from delphin_6_automation.database_interactions import mongo_setup\n'), ((1280, 1314), 'delphin_6_automation.database_interactions.mongo_setup.global_end_ssh', 'mongo_setup.global_end_ssh', (['server'], {}), '(server)\n', (1306, 1314), False, 'from delphin_6_automation.database_interactions import mongo_setup\n'), ((911, 960), 'delphin_6_automation.database_interactions.db_templates.sample_entry.Sample.objects', 'sample_entry.Sample.objects', ([], {'delphin_docs': 'project'}), '(delphin_docs=project)\n', (938, 960), False, 'from delphin_6_automation.database_interactions.db_templates import sample_entry, delphin_entry\n')] |
from setuptools import setup
setup(
name="mywhopackage",
install_requires=["django==1.0",],
extras_require={"test": ["pytest==2.0",], "docs": ["Sphinx==1.0",],},
)
| [
"setuptools.setup"
] | [((30, 162), 'setuptools.setup', 'setup', ([], {'name': '"""mywhopackage"""', 'install_requires': "['django==1.0']", 'extras_require': "{'test': ['pytest==2.0'], 'docs': ['Sphinx==1.0']}"}), "(name='mywhopackage', install_requires=['django==1.0'], extras_require\n ={'test': ['pytest==2.0'], 'docs': ['Sphinx==1.0']})\n", (35, 162), False, 'from setuptools import setup\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#****************************************************************************************************************************************************
# Copyright 2017 NXP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the NXP. nor the names of
# its contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#****************************************************************************************************************************************************
from typing import Optional
from FslBuildGen import IOUtil
from FslBuildGen.Log import Log
from FslBuildGen.Exceptions import UsageErrorException
from FslBuildGen.BuildExternal.PackageExperimentalRecipe import PackageExperimentalRecipe
from FslBuildGen.BuildExternal.RecipeBuilderSetup import RecipeBuilderSetup
from FslBuildGen.Vars.VariableProcessor import VariableProcessor
from FslBuildGen.Xml.XmlExperimentalRecipe import XmlExperimentalRecipe
class RecipePathBuilder(object):
def __init__(self, log: Log, variableProcessor: VariableProcessor, recipeBuilderSetup: Optional[RecipeBuilderSetup], platformName: str, compilerGeneratorName: str) -> None:
super(RecipePathBuilder, self).__init__()
self.__Log = log # type: Log
self.__VariableProcessor = variableProcessor # type: VariableProcessor
self.IsEnabled = recipeBuilderSetup is not None # type: bool
self.TargetPath = None # Optional[str]
self.DownloadCacheRootPath = None # Optional[str]
self.__TempRootPath = None # Optional[str]
self.__TempPipelineRootPath = None # Optional[str]
self.InstallRootPath = None # Optional[str]
self.ReadonlyCache_DownloadCacheRootPath = None # Optional[str]
if self.IsEnabled and recipeBuilderSetup is not None:
targetPath = recipeBuilderSetup.TargetPath
readonlyCachePath = recipeBuilderSetup.ReadonlyCachePath
if not IOUtil.IsAbsolutePath(targetPath):
raise Exception("Install area path is not absolute: '{0}'".format(targetPath))
if not readonlyCachePath is None and not IOUtil.IsAbsolutePath(readonlyCachePath):
raise Exception("Install area readonly cache path is not absolute: '{0}'".format(readonlyCachePath))
self.TargetPath = targetPath
self.DownloadCacheRootPath = IOUtil.Join(targetPath, ".DownloadCache")
self.__TempRootPath = IOUtil.Join(targetPath, ".Temp")
baseTempDirectory = IOUtil.Join(self.__TempRootPath, "pipeline")
baseTempDirectory = IOUtil.Join(baseTempDirectory, platformName)
self.__TempPipelineRootPath = IOUtil.Join(baseTempDirectory, compilerGeneratorName)
baseInstallDirectory = IOUtil.Join(targetPath, platformName)
self.InstallRootPath = IOUtil.Join(baseInstallDirectory, compilerGeneratorName)
self.ReadonlyCache_DownloadCacheRootPath = None if readonlyCachePath is None else IOUtil.Join(readonlyCachePath, ".DownloadCache")
def GetBuildPath(self, sourceRecipe: PackageExperimentalRecipe) -> str:
if not self.IsEnabled or self.__TempPipelineRootPath is None:
raise Exception("Can not GetBuildPath since the builder functionality has been disabled")
return IOUtil.Join(self.__TempPipelineRootPath, sourceRecipe.Name)
def TryGetInstallPath(self, xmlSourceRecipe: XmlExperimentalRecipe) -> Optional[str]:
if xmlSourceRecipe is None:
return None
elif not xmlSourceRecipe.ExternalInstallDirectory is None:
if not xmlSourceRecipe.Pipeline is None:
self.__Log.DoPrintWarning("SourceRecipe ExternalInstallDirectory overrides Pipeline '{0}'".format(xmlSourceRecipe.Name))
return self.__VariableProcessor.ResolveAbsolutePathWithLeadingEnvironmentVariablePathAsDir(xmlSourceRecipe.ExternalInstallDirectory)
if not self.IsEnabled or self.InstallRootPath is None:
raise Exception("Can not TryGetInstallPath since the builder functionality has been disabled, please enable the builder functionality for this project")
return None if xmlSourceRecipe.Pipeline is None else IOUtil.Join(self.InstallRootPath, xmlSourceRecipe.Name)
| [
"FslBuildGen.IOUtil.IsAbsolutePath",
"FslBuildGen.IOUtil.Join"
] | [((4702, 4761), 'FslBuildGen.IOUtil.Join', 'IOUtil.Join', (['self.__TempPipelineRootPath', 'sourceRecipe.Name'], {}), '(self.__TempPipelineRootPath, sourceRecipe.Name)\n', (4713, 4761), False, 'from FslBuildGen import IOUtil\n'), ((3766, 3807), 'FslBuildGen.IOUtil.Join', 'IOUtil.Join', (['targetPath', '""".DownloadCache"""'], {}), "(targetPath, '.DownloadCache')\n", (3777, 3807), False, 'from FslBuildGen import IOUtil\n'), ((3843, 3875), 'FslBuildGen.IOUtil.Join', 'IOUtil.Join', (['targetPath', '""".Temp"""'], {}), "(targetPath, '.Temp')\n", (3854, 3875), False, 'from FslBuildGen import IOUtil\n'), ((3909, 3953), 'FslBuildGen.IOUtil.Join', 'IOUtil.Join', (['self.__TempRootPath', '"""pipeline"""'], {}), "(self.__TempRootPath, 'pipeline')\n", (3920, 3953), False, 'from FslBuildGen import IOUtil\n'), ((3986, 4030), 'FslBuildGen.IOUtil.Join', 'IOUtil.Join', (['baseTempDirectory', 'platformName'], {}), '(baseTempDirectory, platformName)\n', (3997, 4030), False, 'from FslBuildGen import IOUtil\n'), ((4073, 4126), 'FslBuildGen.IOUtil.Join', 'IOUtil.Join', (['baseTempDirectory', 'compilerGeneratorName'], {}), '(baseTempDirectory, compilerGeneratorName)\n', (4084, 4126), False, 'from FslBuildGen import IOUtil\n'), ((4163, 4200), 'FslBuildGen.IOUtil.Join', 'IOUtil.Join', (['targetPath', 'platformName'], {}), '(targetPath, platformName)\n', (4174, 4200), False, 'from FslBuildGen import IOUtil\n'), ((4236, 4292), 'FslBuildGen.IOUtil.Join', 'IOUtil.Join', (['baseInstallDirectory', 'compilerGeneratorName'], {}), '(baseInstallDirectory, compilerGeneratorName)\n', (4247, 4292), False, 'from FslBuildGen import IOUtil\n'), ((5607, 5662), 'FslBuildGen.IOUtil.Join', 'IOUtil.Join', (['self.InstallRootPath', 'xmlSourceRecipe.Name'], {}), '(self.InstallRootPath, xmlSourceRecipe.Name)\n', (5618, 5662), False, 'from FslBuildGen import IOUtil\n'), ((3341, 3374), 'FslBuildGen.IOUtil.IsAbsolutePath', 'IOUtil.IsAbsolutePath', (['targetPath'], {}), '(targetPath)\n', (3362, 3374), False, 'from FslBuildGen import IOUtil\n'), ((4388, 4436), 'FslBuildGen.IOUtil.Join', 'IOUtil.Join', (['readonlyCachePath', '""".DownloadCache"""'], {}), "(readonlyCachePath, '.DownloadCache')\n", (4399, 4436), False, 'from FslBuildGen import IOUtil\n'), ((3524, 3564), 'FslBuildGen.IOUtil.IsAbsolutePath', 'IOUtil.IsAbsolutePath', (['readonlyCachePath'], {}), '(readonlyCachePath)\n', (3545, 3564), False, 'from FslBuildGen import IOUtil\n')] |
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from webdriver_manager.chrome import ChromeDriverManager
import time
from data import accepting_cookies, conduit_login, create_new_article, conduit_registration
def test_modify_article():
browser_options = Options()
browser_options.headless = True
browser = webdriver.Chrome(ChromeDriverManager().install(), options=browser_options)
URL = "http://conduitapp.progmasters.hu:1667/#/"
browser.get(URL)
browser.implicitly_wait(10)
    # Preconditions:
accepting_cookies(browser)
conduit_login(browser)
time.sleep(2)
create_new_article(browser)
    # ~ ~ ~ ~ ~ TC-06: MODIFY AN EXISTING BLOG POST ~ ~ ~ ~ ~ #
    edit_article_link = browser.find_element_by_css_selector("a[href='#/editor/new-title']")
edit_article_link.click()
article_body_edited = browser.find_element_by_xpath('//textarea[@placeholder="Write your article (in markdown)"]')
article_body_edited.clear()
article_body_edited.send_keys("New body comes here.")
publish_article_button = browser.find_element_by_css_selector("button[type='submit']")
publish_article_button.click()
time.sleep(5)
    # Assert: the blog post body has changed
article_body_refilled = browser.find_element_by_css_selector('div[class="col-xs-12"] div p')
assert article_body_refilled.text == "New body comes here."
# print("The article's body has been changed.")
browser.quit()
| [
"data.create_new_article",
"selenium.webdriver.chrome.options.Options",
"data.conduit_login",
"data.accepting_cookies",
"time.sleep",
"webdriver_manager.chrome.ChromeDriverManager"
] | [((297, 306), 'selenium.webdriver.chrome.options.Options', 'Options', ([], {}), '()\n', (304, 306), False, 'from selenium.webdriver.chrome.options import Options\n'), ((564, 590), 'data.accepting_cookies', 'accepting_cookies', (['browser'], {}), '(browser)\n', (581, 590), False, 'from data import accepting_cookies, conduit_login, create_new_article, conduit_registration\n'), ((595, 617), 'data.conduit_login', 'conduit_login', (['browser'], {}), '(browser)\n', (608, 617), False, 'from data import accepting_cookies, conduit_login, create_new_article, conduit_registration\n'), ((622, 635), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (632, 635), False, 'import time\n'), ((640, 667), 'data.create_new_article', 'create_new_article', (['browser'], {}), '(browser)\n', (658, 667), False, 'from data import accepting_cookies, conduit_login, create_new_article, conduit_registration\n'), ((1195, 1208), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1205, 1208), False, 'import time\n'), ((374, 395), 'webdriver_manager.chrome.ChromeDriverManager', 'ChromeDriverManager', ([], {}), '()\n', (393, 395), False, 'from webdriver_manager.chrome import ChromeDriverManager\n')] |
# -*- coding: utf-8 -*-
import os
import re
from setuptools import setup
def get_readme(readme):
"""Get long description."""
# try:
# from pypandoc import convert_file, convert_text
# return convert_file(readme, 'rst')
# except ImportError as e:
# with open(readme) as f:
# return f.read()
with open(readme) as f:
return f.read()
def get_version(package):
"""Return package version as listed in `__version__` in `init.py`."""
f = open(os.path.join(package, '__init__.py')).read()
return re.search("__version__ = ['\"]([^'\"]+)['\"]", f).group(1)
setup(
name='ipython-agnoster',
version=get_version('ipster'),
description='A hipster theme for the IPython REPL',
long_description=get_readme('README.rst'),
author='<NAME>',
url='https://github.com/i5ar/ipster',
license='MIT',
keywords="ipython agnoster",
packages=['ipster'],
# Convert Markdown to reStructuredtext
entry_points={
'console_scripts': ['convert-rst=ipster.command_line:main'],
},
# Use Environment Markers to install IPython for Python 2
install_requires=[
'ipython>=5.0,<6.0;python_version<="2.7"',
'ipython>=6.0;python_version>="3.3"',
],
classifiers=[
'Development Status :: 1 - Planning',
'Environment :: Console',
'Framework :: IPython',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Text Editors',
],
zip_safe=False,
test_suite='tests',
)
| [
"os.path.join",
"re.search"
] | [((563, 612), 're.search', 're.search', (['"""__version__ = [\'"]([^\'"]+)[\'"]"""', 'f'], {}), '(\'__version__ = [\\\'"]([^\\\'"]+)[\\\'"]\', f)\n', (572, 612), False, 'import re\n'), ((507, 543), 'os.path.join', 'os.path.join', (['package', '"""__init__.py"""'], {}), "(package, '__init__.py')\n", (519, 543), False, 'import os\n')] |
from google.cloud import firestore
# Add a new document
db = firestore.Client.from_service_account_json("onetrustkey.json")
col_ref = db.collection(u'onetrust')
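# A minimal, hypothetical example of actually adding a document to the
# collection (not part of the original snippet; the field name is illustrative):
col_ref.add({u'name': u'example'})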
| [
"google.cloud.firestore.Client.from_service_account_json"
] | [((62, 124), 'google.cloud.firestore.Client.from_service_account_json', 'firestore.Client.from_service_account_json', (['"""onetrustkey.json"""'], {}), "('onetrustkey.json')\n", (104, 124), False, 'from google.cloud import firestore\n')] |
import argparse
import os
import os.path
import sys
from PyQt5 import QtChart, QtCore, QtGui, QtSerialPort, QtWidgets
from PyQt5.QtCore import Qt
class SerialPlotter(QtWidgets.QMainWindow):
def __init__(self, port, baud=115200, samples=255, min_value=0, max_value=1023, **kwargs):
super(SerialPlotter, self).__init__(**kwargs)
self._serial_port = port
self._serial_port_baud = baud
self.samples = samples
self.series = []
self.data = []
# set up chart
self.setWindowTitle('Serial Plotter')
self.setContentsMargins(0, 0, 0, 0)
self.chart = QtChart.QChart()
self.chart.setTheme(QtChart.QChart.ChartThemeDark)
# remove the annoying white border
self.chart.layout().setContentsMargins(0, 0, 0, 0)
self.chart.setBackgroundRoundness(0)
# set up chart view
self.chart_view = QtChart.QChartView(self.chart)
self.chart_view.setRenderHint(QtGui.QPainter.Antialiasing)
self.chart_view.setMinimumSize(800, 600)
# set up axis
self.x_axis = QtChart.QValueAxis()
self.x_axis.setRange(0, self.samples)
self.x_axis.setTitleText('Samples')
self.x_axis.setLabelFormat('%i')
self.y_axis = QtChart.QValueAxis()
self.y_axis.setRange(min_value, max_value)
self.y_axis.setTitleText('Values')
self.chart.addAxis(self.x_axis, Qt.AlignBottom)
self.chart.addAxis(self.y_axis, Qt.AlignLeft)
self.setCentralWidget(self.chart_view)
# Setup the serial port
self.serial = QtSerialPort.QSerialPort(
self._serial_port,
baudRate=self._serial_port_baud,
readyRead=self.on_serial_ready_read,
)
if not self.serial.open(QtCore.QIODevice.ReadWrite):
print('can\'t open serial port')
sys.exit(1)
self.show()
def add_series(self, name=None):
# add a series
series = QtChart.QLineSeries()
self.chart.addSeries(series)
series.attachAxis(self.x_axis)
series.attachAxis(self.y_axis)
series.setUseOpenGL(True)
if name:
series.setName(name)
self.series.append(series)
def append_data(self, new_data, auto_update=True):
num_elements = len(new_data)
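        # If this sample carries more channels than currently tracked, create the
        # missing channel buffers, zero-padded to the existing history length.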
if len(self.data) < num_elements:
if len(self.data) >= 1:
# we need to pad the newly added lines
padded = [0] * len(self.data[0])
for i in range(num_elements - len(self.data)):
self.data.append(padded.copy())
else:
for i in range(num_elements):
self.data.append([])
samples_current = len(self.data[0])
for i in range(num_elements):
if samples_current >= self.samples:
self.data[i] = self.data[i][1:] + [new_data[i]]
else:
self.data[i].append(new_data[i])
if auto_update:
self.update_plot()
@QtCore.pyqtSlot()
def on_serial_ready_read(self):
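        # Each incoming serial line is expected to hold whitespace-separated numeric
        # values, one per channel (parsed as floats if the line contains a '.').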
while self.serial.canReadLine():
line = self.serial.readLine().data().decode().strip()
data = line.split()
if '.' in line:
data = list([float(i) for i in data])
else:
data = list([int(i) for i in data])
if data:
self.append_data(data)
@QtCore.pyqtSlot()
def update_plot(self):
if not self.data:
return
# add all series not yet present
for i in range(len(self.series), len(self.data)):
self.add_series(name='Ch %d' % i)
# update the series data
for i in range(len(self.series)):
self.series[i].replace([QtCore.QPoint(j, self.data[i][j]) for j in range(len(self.data[i]))])
def check_serial_port(port):
port = os.path.abspath(port)
if not os.path.exists(port):
raise argparse.ArgumentTypeError('%s does not exist' % port)
if not os.access(port, os.R_OK + os.W_OK):
raise argparse.ArgumentTypeError('%s is not read/write able' % port)
return port
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('port', type=check_serial_port, help='The serial port to listen to for data to plot')
parser.add_argument('-b', '--baud', type=int, default=115200,
help='The baud rate for the serial port, defaults to 115200')
parser.add_argument('-s', '--samples', type=int, default=256,
help='The number of samples to buffer and display, defaults to 256')
parser.add_argument('--min', type=int, default=0, help='The minium value to display, defaults to 0')
parser.add_argument('--max', type=int, default=1023, help='The maximum value to display, defaults to 1023')
args = parser.parse_args()
app = QtWidgets.QApplication(sys.argv)
w = SerialPlotter(args.port, baud=args.baud, samples=args.samples, min_value=args.min, max_value=args.max)
sys.exit(app.exec_())
| [
"os.path.exists",
"sys.exit",
"argparse.ArgumentParser",
"PyQt5.QtCore.QPoint",
"os.access",
"PyQt5.QtCore.pyqtSlot",
"PyQt5.QtSerialPort.QSerialPort",
"argparse.ArgumentTypeError",
"PyQt5.QtChart.QValueAxis",
"PyQt5.QtChart.QChartView",
"PyQt5.QtWidgets.QApplication",
"PyQt5.QtChart.QChart",
"os.path.abspath",
"PyQt5.QtChart.QLineSeries"
] | [((3067, 3084), 'PyQt5.QtCore.pyqtSlot', 'QtCore.pyqtSlot', ([], {}), '()\n', (3082, 3084), False, 'from PyQt5 import QtChart, QtCore, QtGui, QtSerialPort, QtWidgets\n'), ((3479, 3496), 'PyQt5.QtCore.pyqtSlot', 'QtCore.pyqtSlot', ([], {}), '()\n', (3494, 3496), False, 'from PyQt5 import QtChart, QtCore, QtGui, QtSerialPort, QtWidgets\n'), ((3938, 3959), 'os.path.abspath', 'os.path.abspath', (['port'], {}), '(port)\n', (3953, 3959), False, 'import os\n'), ((4244, 4269), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4267, 4269), False, 'import argparse\n'), ((4950, 4982), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (4972, 4982), False, 'from PyQt5 import QtChart, QtCore, QtGui, QtSerialPort, QtWidgets\n'), ((629, 645), 'PyQt5.QtChart.QChart', 'QtChart.QChart', ([], {}), '()\n', (643, 645), False, 'from PyQt5 import QtChart, QtCore, QtGui, QtSerialPort, QtWidgets\n'), ((907, 937), 'PyQt5.QtChart.QChartView', 'QtChart.QChartView', (['self.chart'], {}), '(self.chart)\n', (925, 937), False, 'from PyQt5 import QtChart, QtCore, QtGui, QtSerialPort, QtWidgets\n'), ((1099, 1119), 'PyQt5.QtChart.QValueAxis', 'QtChart.QValueAxis', ([], {}), '()\n', (1117, 1119), False, 'from PyQt5 import QtChart, QtCore, QtGui, QtSerialPort, QtWidgets\n'), ((1273, 1293), 'PyQt5.QtChart.QValueAxis', 'QtChart.QValueAxis', ([], {}), '()\n', (1291, 1293), False, 'from PyQt5 import QtChart, QtCore, QtGui, QtSerialPort, QtWidgets\n'), ((1601, 1718), 'PyQt5.QtSerialPort.QSerialPort', 'QtSerialPort.QSerialPort', (['self._serial_port'], {'baudRate': 'self._serial_port_baud', 'readyRead': 'self.on_serial_ready_read'}), '(self._serial_port, baudRate=self._serial_port_baud,\n readyRead=self.on_serial_ready_read)\n', (1625, 1718), False, 'from PyQt5 import QtChart, QtCore, QtGui, QtSerialPort, QtWidgets\n'), ((1991, 2012), 'PyQt5.QtChart.QLineSeries', 'QtChart.QLineSeries', ([], {}), '()\n', (2010, 2012), False, 'from PyQt5 import QtChart, QtCore, QtGui, QtSerialPort, QtWidgets\n'), ((3971, 3991), 'os.path.exists', 'os.path.exists', (['port'], {}), '(port)\n', (3985, 3991), False, 'import os\n'), ((4007, 4061), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (["('%s does not exist' % port)"], {}), "('%s does not exist' % port)\n", (4033, 4061), False, 'import argparse\n'), ((4073, 4107), 'os.access', 'os.access', (['port', '(os.R_OK + os.W_OK)'], {}), '(port, os.R_OK + os.W_OK)\n', (4082, 4107), False, 'import os\n'), ((4123, 4185), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (["('%s is not read/write able' % port)"], {}), "('%s is not read/write able' % port)\n", (4149, 4185), False, 'import argparse\n'), ((1880, 1891), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1888, 1891), False, 'import sys\n'), ((3826, 3859), 'PyQt5.QtCore.QPoint', 'QtCore.QPoint', (['j', 'self.data[i][j]'], {}), '(j, self.data[i][j])\n', (3839, 3859), False, 'from PyQt5 import QtChart, QtCore, QtGui, QtSerialPort, QtWidgets\n')] |
# Copyright (c) 2011 <NAME>
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
from screenplain.richstring import parse_emphasis
class Screenplay(object):
def __init__(self, title_page=None, paragraphs=None):
"""
Create a Screenplay object.
`title_page` is a dictionary mapping string keys to strings.
`paragraphs` is a sequence of paragraph objects.
"""
# Key/value pairs for title page
if title_page is None:
self.title_page = {}
else:
self.title_page = title_page
# The paragraphs of the actual script
if paragraphs is None:
self.paragraphs = []
else:
self.paragraphs = paragraphs
def get_rich_attribute(self, name, default=[]):
"""Get an attribute from the title page parsed into a RichString.
Returns a list of RichString objects.
E.g. `screenplay.get_rich_attribute('Title')`
"""
if name in self.title_page:
return [parse_emphasis(line) for line in self.title_page[name]]
else:
return default
def append(self, paragraph):
"""Append a paragraph to this screenplay."""
self.paragraphs.append(paragraph)
def __iter__(self):
"""Get an iterator over the paragraphs of this screenplay."""
return iter(self.paragraphs)
class Slug(object):
def __init__(self, line, scene_number=None):
"""Creates a scene heading (slug).
The line parameter is a RichString with the slugline.
The scene_number parameter is an optional RichString.
"""
self.line = line
self.scene_number = scene_number
self.synopsis = None
@property
def lines(self):
return [self.line]
def set_synopsis(self, text):
self.synopsis = text
class Section(object):
"""A section heading."""
def __init__(self, text, level, synopsis=None):
self.text = text
self.level = level
self.synopsis = synopsis
def set_synopsis(self, text):
self.synopsis = text
def __repr__(self):
return 'Section(%r, %r, %r)' % (self.text, self.level, self.synopsis)
def __eq__(self, other):
return (
self.text == other.text and
self.level == other.level and
self.synopsis == other.synopsis
)
class Dialog(object):
def __init__(self, character, lines=None):
self.character = character
self.blocks = [] # list of tuples of (is_parenthetical, text)
if lines:
self._parse(lines)
def _parse(self, lines):
inside_parenthesis = False
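        # Illustrative: lines ['(whispering)', 'Hello.'] become blocks
        # [(True, '(whispering)'), (False, 'Hello.')]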
for line in lines:
if line.startswith('('):
inside_parenthesis = True
self.blocks.append((inside_parenthesis, line))
if line.endswith(')'):
inside_parenthesis = False
def add_line(self, line):
parenthetical = line.startswith('(')
self.blocks.append((parenthetical, line))
class DualDialog(object):
def __init__(self, left_dialog, right_dialog):
self.left = left_dialog
self.right = right_dialog
class Action(object):
def __init__(self, lines, centered=False):
self.lines = lines
self.centered = centered
class Transition(object):
def __init__(self, line):
self.line = line
@property
def lines(self):
return [self.line]
class PageBreak(object):
pass
| [
"screenplain.richstring.parse_emphasis"
] | [((1063, 1083), 'screenplain.richstring.parse_emphasis', 'parse_emphasis', (['line'], {}), '(line)\n', (1077, 1083), False, 'from screenplain.richstring import parse_emphasis\n')] |
#!/usr/bin/env python
# <NAME>
# Functions for shuffling and merging sequence samples (DataFrames) into a long sequence
# Used for fake genome generation from positive and negative BGC samples
import pandas as pd
import numpy as np
from sklearn.model_selection import KFold
def merge_samples(samples_series, idx, shuffle=True):
"""
    Merge a Series of DataFrames into a single DataFrame, keeping only the DataFrames selected by idx.
    The order of the selected DataFrames can be shuffled before merging.
    Used to generate an artificial genome (long DataFrame) from a series of samples (short DataFrames).
    :param samples_series: Series of DataFrames
    :param idx: Array of indexes used to select DataFrames for merging
    :param shuffle: Whether to shuffle the order of the selected DataFrames before merging
    :return: Subset of the given Series selected by idx, shuffled if requested and merged into a single DataFrame
"""
if shuffle:
np.random.shuffle(idx)
return pd.concat(list(samples_series[idx]))
def merged_split(samples_list, splitter, shuffle_train=True, shuffle_test=True, split_params=None):
"""
Create generator of random train and test splits, where each train and test split
is a single DataFrame created from shuffled and merged samples using merge_samples function.
Will generate given number of (train, test) splits based on splitter argument.
:param samples_list: list of DataFrames to repeatedly split and merge
    :param splitter: Number of KFold splits, or a splitter object whose split(samples) method will be used
:param shuffle_train: Whether to shuffle the samples in the train split before merging
:param shuffle_test: Whether to shuffle the samples in the test split before merging
:param split_params: Additional arguments to pass to the splitter split function
:return: Generator of (train, test) splits for given list of samples, where each train and test split
is a single DataFrame created from shuffled and merged samples using merge_samples function.
"""
if split_params is None:
split_params = {}
if isinstance(splitter, int):
splitter = KFold(n_splits=splitter, shuffle=True)
indexable_X = pd.Series(samples_list)
for trainidx, testidx in splitter.split(indexable_X, **split_params):
train_X = merge_samples(indexable_X, trainidx, shuffle=shuffle_train)
test_X = merge_samples(indexable_X, testidx, shuffle=shuffle_test)
yield train_X, test_X
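# Illustrative usage (assumes `samples` is a list of per-sample DataFrames):
#   for train_df, test_df in merged_split(samples, splitter=5):
#       ...  # train on train_df, evaluate on test_df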
| [
"pandas.Series",
"sklearn.model_selection.KFold",
"numpy.random.shuffle"
] | [((2150, 2173), 'pandas.Series', 'pd.Series', (['samples_list'], {}), '(samples_list)\n', (2159, 2173), True, 'import pandas as pd\n'), ((884, 906), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (901, 906), True, 'import numpy as np\n'), ((2093, 2131), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'splitter', 'shuffle': '(True)'}), '(n_splits=splitter, shuffle=True)\n', (2098, 2131), False, 'from sklearn.model_selection import KFold\n')] |