code (string, 20 to 1.05M chars) | apis (sequence) | extract_api (string, 75 to 5.24M chars)
---|---|---|
#
# Backtesting Algo Strategies based on
# Logistic Regression with scikit-learn
#
# <NAME>
# ODSC London 2016
# The Python Quants GmbH
#
import numpy as np
import pandas as pd
import seaborn as sns; sns.set()
from pandas_datareader import data as web
from sklearn import linear_model
class ScikitBacktest(object):
def __init__(self, sym):
self.lags = 5
self.symbol = sym
self.get_data()
self.lm = linear_model.LogisticRegression(C=1e3)
def get_data(self):
d = web.DataReader(self.symbol, data_source='yahoo')['Adj Close']
d = pd.DataFrame(d)
d.columns = [self.symbol]
d['returns'] = np.log(d / d.shift(1))
self.data = d
def select_data(self, start, end):
d = self.data[(self.data.index >= start) & (self.data.index <= end)].copy()
return d
def get_matrix(self, start, end):
d = self.select_data(start, end)
m = np.zeros((self.lags+1, len(d)-self.lags))
for i in range(self.lags+1):
if i == self.lags:
m[i] = d.returns.values[i:]
else:
m[i] = d.returns.values[i:i-self.lags]
self.matrix = m
def fit_model(self, start, end):
self.get_matrix(start, end)
self.lm.fit(self.matrix[:self.lags].T, np.sign(self.matrix[self.lags]))
def predict_moves(self, start, end):
self.get_matrix(start, end)
pred = self.lm.predict(self.matrix[:self.lags].T)
return pred
def run_strategy(self, start_tr, end_tr, start_te, end_te, lags):
self.lags = lags
self.fit_model(start_tr, end_tr)
pred = self.predict_moves(start_te, end_te)
d = self.select_data(start_te, end_te)
d['pred'] = 0.0
d.loc[d.index[self.lags:], 'pred'] = pred
d['strategy'] = d.pred * d.returns
title = '%s to %s for %d lags' % (start_te, end_te, self.lags)
d[['returns', 'strategy']].iloc[self.lags:].cumsum().apply(np.exp).plot(title=title) | [
"seaborn.set",
"pandas_datareader.data.DataReader",
"sklearn.linear_model.LogisticRegression",
"numpy.sign",
"pandas.DataFrame"
] | [((202, 211), 'seaborn.set', 'sns.set', ([], {}), '()\n', (209, 211), True, 'import seaborn as sns\n'), ((437, 478), 'sklearn.linear_model.LogisticRegression', 'linear_model.LogisticRegression', ([], {'C': '(1000.0)'}), '(C=1000.0)\n', (468, 478), False, 'from sklearn import linear_model\n'), ((586, 601), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (598, 601), True, 'import pandas as pd\n'), ((512, 560), 'pandas_datareader.data.DataReader', 'web.DataReader', (['self.symbol'], {'data_source': '"""yahoo"""'}), "(self.symbol, data_source='yahoo')\n", (526, 560), True, 'from pandas_datareader import data as web\n'), ((1306, 1337), 'numpy.sign', 'np.sign', (['self.matrix[self.lags]'], {}), '(self.matrix[self.lags])\n', (1313, 1337), True, 'import numpy as np\n')] |
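A minimal usage sketch of the ScikitBacktest class above; the ticker symbol, date windows, and lag count are illustrative assumptions, and get_data() only works while the Yahoo data source behind pandas_datareader is reachable.
bt = ScikitBacktest('AAPL')  # hypothetical symbol; loads adjusted closes and computes log returns
bt.run_strategy('2014-01-01', '2015-12-31', '2016-01-01', '2016-06-30', lags=5)  # fit on the first window, plot returns vs. strategy on the second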
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
print(np.arange(0, (2* np.pi), ((2* np.pi)/1000), dtype= float))
x = np.arange(0,2* np.pi, .01)
plt.xlim(0,2* np.pi)
plt.ylim(-1, 10)
y1 = 5.5* np.cos(2*x) + 5.5
y2 = 0.02 * np.exp(x)
y3 = (0.25* x**2 + (.1* np.sin(10*x)))
plt.plot(x,y1)
plt.plot(x,y2)
plt.plot(x,y3)
plt.xlabel("Time in Astro119")
plt.ylabel("Measure of Awesomeness")
plt.show()
# In[2]:
import numpy as np
import matplotlib.pyplot as plt
print(np.arange(0, (2* np.pi), ((2* np.pi)/1000), dtype= float))
x = np.arange(0,2* np.pi, .01)
plt.xlim(0,2* np.pi)
plt.ylim(-1, 10)
y1 = 5.5* np.cos(2*x) + 5.5
y2 = 0.02 * np.exp(x)
y3 = (0.25* x**2 + (.1* np.sin(10*x)))
plt.plot(x,y1)
plt.plot(x,y2)
plt.plot(x,y3)
plt.xlabel("Time in Astro119")
plt.ylabel("Measure of Awesomeness")
plt.show()
# In[ ]:
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.exp",
"numpy.cos",
"numpy.sin",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((173, 202), 'numpy.arange', 'np.arange', (['(0)', '(2 * np.pi)', '(0.01)'], {}), '(0, 2 * np.pi, 0.01)\n', (182, 202), True, 'import numpy as np\n'), ((200, 222), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(2 * np.pi)'], {}), '(0, 2 * np.pi)\n', (208, 222), True, 'import matplotlib.pyplot as plt\n'), ((221, 237), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1)', '(10)'], {}), '(-1, 10)\n', (229, 237), True, 'import matplotlib.pyplot as plt\n'), ((327, 342), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y1'], {}), '(x, y1)\n', (335, 342), True, 'import matplotlib.pyplot as plt\n'), ((342, 357), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y2'], {}), '(x, y2)\n', (350, 357), True, 'import matplotlib.pyplot as plt\n'), ((357, 372), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y3'], {}), '(x, y3)\n', (365, 372), True, 'import matplotlib.pyplot as plt\n'), ((372, 402), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time in Astro119"""'], {}), "('Time in Astro119')\n", (382, 402), True, 'import matplotlib.pyplot as plt\n'), ((403, 439), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Measure of Awesomeness"""'], {}), "('Measure of Awesomeness')\n", (413, 439), True, 'import matplotlib.pyplot as plt\n'), ((440, 450), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (448, 450), True, 'import matplotlib.pyplot as plt\n'), ((587, 616), 'numpy.arange', 'np.arange', (['(0)', '(2 * np.pi)', '(0.01)'], {}), '(0, 2 * np.pi, 0.01)\n', (596, 616), True, 'import numpy as np\n'), ((614, 636), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(2 * np.pi)'], {}), '(0, 2 * np.pi)\n', (622, 636), True, 'import matplotlib.pyplot as plt\n'), ((635, 651), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1)', '(10)'], {}), '(-1, 10)\n', (643, 651), True, 'import matplotlib.pyplot as plt\n'), ((741, 756), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y1'], {}), '(x, y1)\n', (749, 756), True, 'import matplotlib.pyplot as plt\n'), ((756, 771), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y2'], {}), '(x, y2)\n', (764, 771), True, 'import matplotlib.pyplot as plt\n'), ((771, 786), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y3'], {}), '(x, y3)\n', (779, 786), True, 'import matplotlib.pyplot as plt\n'), ((786, 816), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time in Astro119"""'], {}), "('Time in Astro119')\n", (796, 816), True, 'import matplotlib.pyplot as plt\n'), ((817, 853), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Measure of Awesomeness"""'], {}), "('Measure of Awesomeness')\n", (827, 853), True, 'import matplotlib.pyplot as plt\n'), ((854, 864), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (862, 864), True, 'import matplotlib.pyplot as plt\n'), ((108, 162), 'numpy.arange', 'np.arange', (['(0)', '(2 * np.pi)', '(2 * np.pi / 1000)'], {'dtype': 'float'}), '(0, 2 * np.pi, 2 * np.pi / 1000, dtype=float)\n', (117, 162), True, 'import numpy as np\n'), ((278, 287), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (284, 287), True, 'import numpy as np\n'), ((522, 576), 'numpy.arange', 'np.arange', (['(0)', '(2 * np.pi)', '(2 * np.pi / 1000)'], {'dtype': 'float'}), '(0, 2 * np.pi, 2 * np.pi / 1000, dtype=float)\n', (531, 576), True, 'import numpy as np\n'), ((692, 701), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (698, 701), True, 'import numpy as np\n'), ((248, 261), 'numpy.cos', 'np.cos', (['(2 * x)'], {}), '(2 * x)\n', (254, 261), True, 'import numpy as np\n'), ((312, 326), 'numpy.sin', 'np.sin', (['(10 * x)'], {}), '(10 * x)\n', (318, 326), True, 'import numpy as np\n'), ((662, 675), 
'numpy.cos', 'np.cos', (['(2 * x)'], {}), '(2 * x)\n', (668, 675), True, 'import numpy as np\n'), ((726, 740), 'numpy.sin', 'np.sin', (['(10 * x)'], {}), '(10 * x)\n', (732, 740), True, 'import numpy as np\n')] |
# pylint: disable=missing-docstring, invalid-name, import-error
import numpy as np
import pandas as pd
from mltils.preprocessing.encoders import InfrequentValueEncoder
from mltils.utils.test_utils import _test_immutability
def test_infrequent_value_encoder_1():
ive = InfrequentValueEncoder()
assert ive is not None
def test_infrequent_value_encoder_2():
df = pd.DataFrame({'A': ['a', 'a', 'b', 'b', 'c']})
ive = InfrequentValueEncoder(thrshld=1, str_rpl='ifq')
encoded = ive.fit_transform(df)
expected = pd.DataFrame({'A': ['a', 'a', 'b', 'b', 'ifq']})
assert expected.equals(encoded)
def test_infrequent_value_encoder_3():
df = pd.DataFrame({'A': ['a', 'a', 'b', 'b', 'c']})
ive = InfrequentValueEncoder(thrshld=0, str_rpl='ifq')
encoded = ive.fit_transform(df)
expected = pd.DataFrame({'A': ['a', 'a', 'b', 'b', 'c']})
assert expected.equals(encoded)
def test_infrequent_value_encoder_4():
df = pd.DataFrame({'A': ['a', 'a', 'b', 'b', 'c'],
'B': [1, 1, 1, 2, 3]})
ive = InfrequentValueEncoder(thrshld=2, str_rpl='ifq', num_rpl=-1)
encoded = ive.fit_transform(df)
expected = pd.DataFrame({'A': ['ifq', 'ifq', 'ifq', 'ifq', 'ifq'],
'B': [1, 1, 1, -1, -1]})
assert expected.equals(encoded)
def test_infrequent_value_encoder_5():
tr_df = pd.DataFrame({'A': ['a', 'a', 'b', 'b', 'c']})
ive = InfrequentValueEncoder(thrshld=1, str_rpl='ifq')
ive.fit(tr_df)
te_df = pd.DataFrame({'A': ['c', 'd', 'e', 'a', 'b']})
encoded = ive.transform(te_df)
expected = pd.DataFrame({'A': ['ifq', 'ifq', 'ifq', 'a', 'b']})
assert expected.equals(encoded)
def test_infrequent_value_encoder_6():
tr_df = pd.DataFrame({'A': ['a', 'a', 'b', 'b', 'c', np.nan]})
ive = InfrequentValueEncoder(thrshld=1, str_rpl='ifq')
ive.fit(tr_df)
te_df = pd.DataFrame({'A': [np.nan, 'c', 'd', 'e', 'a', 'b']})
encoded = ive.transform(te_df)
expected = pd.DataFrame({'A': [np.nan, 'ifq', 'ifq', 'ifq', 'a', 'b']})
assert expected.equals(encoded)
def test_infrequent_value_encoder_7():
df = pd.DataFrame({'A': [1, 2, 3, np.nan, 4, np.nan]})
encoded = InfrequentValueEncoder(thrshld=1, num_rpl=-1).fit_transform(df)
expected = pd.DataFrame({'A': [-1, -1, -1, np.nan, -1, np.nan]})
assert expected.equals(encoded)
def test_infrequent_value_encoder_8():
_test_immutability(encoder=InfrequentValueEncoder())
| [
"pandas.DataFrame",
"mltils.preprocessing.encoders.InfrequentValueEncoder"
] | [((275, 299), 'mltils.preprocessing.encoders.InfrequentValueEncoder', 'InfrequentValueEncoder', ([], {}), '()\n', (297, 299), False, 'from mltils.preprocessing.encoders import InfrequentValueEncoder\n'), ((377, 423), 'pandas.DataFrame', 'pd.DataFrame', (["{'A': ['a', 'a', 'b', 'b', 'c']}"], {}), "({'A': ['a', 'a', 'b', 'b', 'c']})\n", (389, 423), True, 'import pandas as pd\n'), ((434, 482), 'mltils.preprocessing.encoders.InfrequentValueEncoder', 'InfrequentValueEncoder', ([], {'thrshld': '(1)', 'str_rpl': '"""ifq"""'}), "(thrshld=1, str_rpl='ifq')\n", (456, 482), False, 'from mltils.preprocessing.encoders import InfrequentValueEncoder\n'), ((534, 582), 'pandas.DataFrame', 'pd.DataFrame', (["{'A': ['a', 'a', 'b', 'b', 'ifq']}"], {}), "({'A': ['a', 'a', 'b', 'b', 'ifq']})\n", (546, 582), True, 'import pandas as pd\n'), ((669, 715), 'pandas.DataFrame', 'pd.DataFrame', (["{'A': ['a', 'a', 'b', 'b', 'c']}"], {}), "({'A': ['a', 'a', 'b', 'b', 'c']})\n", (681, 715), True, 'import pandas as pd\n'), ((726, 774), 'mltils.preprocessing.encoders.InfrequentValueEncoder', 'InfrequentValueEncoder', ([], {'thrshld': '(0)', 'str_rpl': '"""ifq"""'}), "(thrshld=0, str_rpl='ifq')\n", (748, 774), False, 'from mltils.preprocessing.encoders import InfrequentValueEncoder\n'), ((826, 872), 'pandas.DataFrame', 'pd.DataFrame', (["{'A': ['a', 'a', 'b', 'b', 'c']}"], {}), "({'A': ['a', 'a', 'b', 'b', 'c']})\n", (838, 872), True, 'import pandas as pd\n'), ((959, 1027), 'pandas.DataFrame', 'pd.DataFrame', (["{'A': ['a', 'a', 'b', 'b', 'c'], 'B': [1, 1, 1, 2, 3]}"], {}), "({'A': ['a', 'a', 'b', 'b', 'c'], 'B': [1, 1, 1, 2, 3]})\n", (971, 1027), True, 'import pandas as pd\n'), ((1061, 1121), 'mltils.preprocessing.encoders.InfrequentValueEncoder', 'InfrequentValueEncoder', ([], {'thrshld': '(2)', 'str_rpl': '"""ifq"""', 'num_rpl': '(-1)'}), "(thrshld=2, str_rpl='ifq', num_rpl=-1)\n", (1083, 1121), False, 'from mltils.preprocessing.encoders import InfrequentValueEncoder\n'), ((1173, 1258), 'pandas.DataFrame', 'pd.DataFrame', (["{'A': ['ifq', 'ifq', 'ifq', 'ifq', 'ifq'], 'B': [1, 1, 1, -1, -1]}"], {}), "({'A': ['ifq', 'ifq', 'ifq', 'ifq', 'ifq'], 'B': [1, 1, 1, -1, -1]}\n )\n", (1185, 1258), True, 'import pandas as pd\n'), ((1372, 1418), 'pandas.DataFrame', 'pd.DataFrame', (["{'A': ['a', 'a', 'b', 'b', 'c']}"], {}), "({'A': ['a', 'a', 'b', 'b', 'c']})\n", (1384, 1418), True, 'import pandas as pd\n'), ((1429, 1477), 'mltils.preprocessing.encoders.InfrequentValueEncoder', 'InfrequentValueEncoder', ([], {'thrshld': '(1)', 'str_rpl': '"""ifq"""'}), "(thrshld=1, str_rpl='ifq')\n", (1451, 1477), False, 'from mltils.preprocessing.encoders import InfrequentValueEncoder\n'), ((1509, 1555), 'pandas.DataFrame', 'pd.DataFrame', (["{'A': ['c', 'd', 'e', 'a', 'b']}"], {}), "({'A': ['c', 'd', 'e', 'a', 'b']})\n", (1521, 1555), True, 'import pandas as pd\n'), ((1606, 1658), 'pandas.DataFrame', 'pd.DataFrame', (["{'A': ['ifq', 'ifq', 'ifq', 'a', 'b']}"], {}), "({'A': ['ifq', 'ifq', 'ifq', 'a', 'b']})\n", (1618, 1658), True, 'import pandas as pd\n'), ((1748, 1802), 'pandas.DataFrame', 'pd.DataFrame', (["{'A': ['a', 'a', 'b', 'b', 'c', np.nan]}"], {}), "({'A': ['a', 'a', 'b', 'b', 'c', np.nan]})\n", (1760, 1802), True, 'import pandas as pd\n'), ((1813, 1861), 'mltils.preprocessing.encoders.InfrequentValueEncoder', 'InfrequentValueEncoder', ([], {'thrshld': '(1)', 'str_rpl': '"""ifq"""'}), "(thrshld=1, str_rpl='ifq')\n", (1835, 1861), False, 'from mltils.preprocessing.encoders import InfrequentValueEncoder\n'), ((1893, 1947), 
'pandas.DataFrame', 'pd.DataFrame', (["{'A': [np.nan, 'c', 'd', 'e', 'a', 'b']}"], {}), "({'A': [np.nan, 'c', 'd', 'e', 'a', 'b']})\n", (1905, 1947), True, 'import pandas as pd\n'), ((1998, 2058), 'pandas.DataFrame', 'pd.DataFrame', (["{'A': [np.nan, 'ifq', 'ifq', 'ifq', 'a', 'b']}"], {}), "({'A': [np.nan, 'ifq', 'ifq', 'ifq', 'a', 'b']})\n", (2010, 2058), True, 'import pandas as pd\n'), ((2145, 2194), 'pandas.DataFrame', 'pd.DataFrame', (["{'A': [1, 2, 3, np.nan, 4, np.nan]}"], {}), "({'A': [1, 2, 3, np.nan, 4, np.nan]})\n", (2157, 2194), True, 'import pandas as pd\n'), ((2288, 2341), 'pandas.DataFrame', 'pd.DataFrame', (["{'A': [-1, -1, -1, np.nan, -1, np.nan]}"], {}), "({'A': [-1, -1, -1, np.nan, -1, np.nan]})\n", (2300, 2341), True, 'import pandas as pd\n'), ((2209, 2254), 'mltils.preprocessing.encoders.InfrequentValueEncoder', 'InfrequentValueEncoder', ([], {'thrshld': '(1)', 'num_rpl': '(-1)'}), '(thrshld=1, num_rpl=-1)\n', (2231, 2254), False, 'from mltils.preprocessing.encoders import InfrequentValueEncoder\n'), ((2450, 2474), 'mltils.preprocessing.encoders.InfrequentValueEncoder', 'InfrequentValueEncoder', ([], {}), '()\n', (2472, 2474), False, 'from mltils.preprocessing.encoders import InfrequentValueEncoder\n')] |
import cairo
import pango
import pangocairo
def draw_text(surface, context, text, font="sans 14", position=None,
color=None,
box_width=None,
alignment=pango.ALIGN_CENTER,
line_spacing=None, letter_spacing=None,
extra_kerning=None):
if color is None:
color = (0.0, 0.0, 0.0)
context.set_source_rgb(*color)
pc = pangocairo.CairoContext(context)
layout = pc.create_layout()
layout.set_text(text)
layout.set_font_description(pango.FontDescription(font))
if box_width: layout.set_width(box_width)
layout.set_alignment(alignment)
if line_spacing: layout.set_spacing(line_spacing)
alist = pango.AttrList()
if letter_spacing:
alist.insert(pango.AttrLetterSpacing(letter_spacing, 0, len(text)))
if extra_kerning:
for pos, kern in extra_kerning.iteritems():
alist.insert(pango.AttrLetterSpacing(kern, pos, pos + 1))
layout.set_attributes(alist)
if position is None:
width, height = surface.get_width(), surface.get_height()
w, h = layout.get_pixel_size()
position = (width/2.0 - w/2.0, height/2.0 - h/2.0)
context.move_to(*position)
pc.show_layout(layout)
width, height = 600, 240  # canvas size in pixels (example values)
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
context = cairo.Context(surface)
draw_text(surface, context, 'Hello world!',
font="sans 52", color=(.25,.28,.33),
letter_spacing=-6000,
extra_kerning={0:-9000, 1:-1000, 6:6000, 7:-15000, 8:5000,
9:-7000})
surface.write_to_png("hello.png")
| [
"cairo.ImageSurface",
"cairo.Context",
"pango.AttrLetterSpacing",
"pango.FontDescription",
"pango.AttrList",
"pangocairo.CairoContext"
] | [((1353, 1407), 'cairo.ImageSurface', 'cairo.ImageSurface', (['cairo.FORMAT_ARGB32', 'width', 'height'], {}), '(cairo.FORMAT_ARGB32, width, height)\n', (1371, 1407), False, 'import cairo\n'), ((1418, 1440), 'cairo.Context', 'cairo.Context', (['surface'], {}), '(surface)\n', (1431, 1440), False, 'import cairo\n'), ((431, 463), 'pangocairo.CairoContext', 'pangocairo.CairoContext', (['context'], {}), '(context)\n', (454, 463), False, 'import pangocairo\n'), ((754, 770), 'pango.AttrList', 'pango.AttrList', ([], {}), '()\n', (768, 770), False, 'import pango\n'), ((566, 593), 'pango.FontDescription', 'pango.FontDescription', (['font'], {}), '(font)\n', (587, 593), False, 'import pango\n'), ((990, 1033), 'pango.AttrLetterSpacing', 'pango.AttrLetterSpacing', (['kern', 'pos', '(pos + 1)'], {}), '(kern, pos, pos + 1)\n', (1013, 1033), False, 'import pango\n')] |
import pytest
def test_main():
from flogging.flogging import setup as setup_logging
setup_logging(level="info", structured=False)
| [
"flogging.flogging.setup"
] | [((95, 140), 'flogging.flogging.setup', 'setup_logging', ([], {'level': '"""info"""', 'structured': '(False)'}), "(level='info', structured=False)\n", (108, 140), True, 'from flogging.flogging import setup as setup_logging\n')] |
from DealUUIDGenerator import DealUUIDGenerator
class TestClass(object):
def test_singleExchangeConstantProfit(self):
dealUUIDGenerator = DealUUIDGenerator()
id1 = dealUUIDGenerator.getUUID(timestamp=1, volBTC=1, nodesStr="bitstamp,coinbasepro", profitPerc=0.5)
id2 = dealUUIDGenerator.getUUID(timestamp=2, volBTC=1, nodesStr="bitstamp,coinbasepro", profitPerc=0.5)
id3 = dealUUIDGenerator.getUUID(timestamp=7, volBTC=1, nodesStr="bitstamp,coinbasepro", profitPerc=0.5)
id4 = dealUUIDGenerator.getUUID(timestamp=12.1, volBTC=1, nodesStr="bitstamp,coinbasepro", profitPerc=0.5)
id5 = dealUUIDGenerator.getUUID(timestamp=12.2, volBTC=1, nodesStr="bitstamp,coinbasepro", profitPerc=0.5)
id6 = dealUUIDGenerator.getUUID(timestamp=12.3, volBTC=1, nodesStr="bitstamp,coinbasepro", profitPerc=0.51)
assert id1 == id2 == id3 != id4 == id5 != id6
assert id5[:-2] == id6[:-2]
def test_variedExchangesConstantProfit(self):
dealUUIDGenerator = DealUUIDGenerator()
id1 = dealUUIDGenerator.getUUID(timestamp=1, volBTC=1, nodesStr="bitstamp,coinbasepro", profitPerc=0.5)
id2 = dealUUIDGenerator.getUUID(timestamp=2, volBTC=1, nodesStr="coinbasepro-BTC,coinbasepro-EUR,kraken-EUR,kraken-BTC,coinbasepro-BTC", profitPerc=0.5)
id3 = dealUUIDGenerator.getUUID(timestamp=7, volBTC=1, nodesStr="coinbasepro-BTC,coinbasepro-EUR,kraken-EUR,kraken-BTC,coinbasepro-BTC", profitPerc=0.5)
id4 = dealUUIDGenerator.getUUID(timestamp=12.1, volBTC=1, nodesStr="bitstamp,coinbasepro", profitPerc=0.5)
id5 = dealUUIDGenerator.getUUID(timestamp=12.2, volBTC=1, nodesStr="coinbasepro-BTC,coinbasepro-EUR,kraken-EUR,kraken-BTC,coinbasepro-BTC", profitPerc=0.51)
assert id1 != id2 == id3 != id4 != id5
assert id4[:-2] != id5[:-2]
| [
"DealUUIDGenerator.DealUUIDGenerator"
] | [((153, 172), 'DealUUIDGenerator.DealUUIDGenerator', 'DealUUIDGenerator', ([], {}), '()\n', (170, 172), False, 'from DealUUIDGenerator import DealUUIDGenerator\n'), ((1024, 1043), 'DealUUIDGenerator.DealUUIDGenerator', 'DealUUIDGenerator', ([], {}), '()\n', (1041, 1043), False, 'from DealUUIDGenerator import DealUUIDGenerator\n')] |
from apscheduler.schedulers.background import BackgroundScheduler
DEFAULT_CONFIG = {
'apscheduler.jobstores.default': {
'type': 'mongodb'
},
'apscheduler.executors.default': {
'class': 'apscheduler.executors.pool:ThreadPoolExecutor',
'max_workers': 5
},
'apscheduler.job_defaults.coalesce': True,
'apscheduler.job_defaults.max_instances': '3',
'apscheduler.job_defaults.replace_existing': True,
'apscheduler.timezone': 'America/Santo_Domingo'
}
def configure_scheduler(config, adapter):
scheduler_config = {}
scheduler_config.update(DEFAULT_CONFIG)
scheduler_config.update(config.get('scheduler', {}))
adapter.scheduler = BackgroundScheduler(scheduler_config)
return adapter.scheduler
| [
"apscheduler.schedulers.background.BackgroundScheduler"
] | [((703, 740), 'apscheduler.schedulers.background.BackgroundScheduler', 'BackgroundScheduler', (['scheduler_config'], {}), '(scheduler_config)\n', (722, 740), False, 'from apscheduler.schedulers.background import BackgroundScheduler\n')] |
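A small usage sketch of configure_scheduler; the adapter is a stand-in namespace object, the job is hypothetical, and the default MongoDB job store above needs pymongo plus a reachable MongoDB instance before the scheduler can actually run jobs.
from types import SimpleNamespace

def heartbeat():
    print('tick')  # hypothetical periodic job

adapter = SimpleNamespace(scheduler=None)       # stand-in for the real adapter object
scheduler = configure_scheduler({}, adapter)      # empty overrides fall back to DEFAULT_CONFIG
scheduler.add_job(heartbeat, 'interval', minutes=5)
scheduler.start()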
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 5 13:34:37 2020
@author: tapiwamaruni
"""
# ARTIFICIAL NEURAL NETWORKS
# Initial loads always first
from __future__ import print_function
import plaidml.keras
plaidml.keras.install_backend()
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Import keras and other packages
import tensorflow as tf
import keras
from keras.models import Sequential
from keras.layers import Dense
from sklearn.preprocessing import StandardScaler
# ===========
# Globals
sc = StandardScaler()
columns = ['CreditScore', 'Geography', 'Gender', 'Age', 'Tenure', 'Balance', 'NumOfProducts', 'HasCrCard', 'IsActiveMember', 'EstimatedSalary']
dummies = ['Geography', 'Gender']
final_cols = []
# ===========
def prepareDataset(dataset2):
print('==================================')
print('Preparing dataset...')
print(f'Total Records: {len(dataset2.values)}')
print(f'Info:')
print(dataset2.info())
print('==================================')
# Get dependent variables
y = dataset2['Exited'].values
# Get independent features
# RowNumber CustomerId Surname CreditScore Geography Gender Age Tenure Balance NumOfProducts HasCrCard IsActiveMember EstimatedSalary Exited
dataset = dataset2[columns]
# Handle categorical variables
dataset = pd.get_dummies(dataset, columns=dummies, drop_first=True)
# Get list of final columns
final_cols = dataset.columns.values.tolist()
# Get final list
X = dataset.values
return X, y, final_cols
def getPredictionSet(data_dict):
return pd.DataFrame(data_dict)
def featureScale(X_train, X_test):
print('Scaling features in the data')
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
return X_train, X_test
def buildArtificialNeuralNet():
print('Building ANN structure')
# Initialize the ANN
classifier = Sequential()
# Step 1 - Add input layer and first hidden layer. [activation function - Rectifier function]
'''
activation - activation function used by the layer
units - (Average of # of inputs and # of outputs, so (11 + 1)/2 = 6)
kernel_initializer - How the layer is initialized
input_dim - Number of inputs - tuned to the # of independent variables we extracted
'''
classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu', input_dim = 11))
# Step 2 - Adding the second hidden layer
classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu'))
# Adding the output layer
'''
-->> If you need more outputs -> Use Softmax -> used when you have more than 2 categories. Then the units will also need to be increased via OneHotEncoder
'''
classifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))
# Compile the ANN
'''
optimizer - Stochastic optimizer called adam
loss - Use the logarithmic loss for classification
- Binary - binary_crossentropy
- Non-binary - categorical_crossentropy
metrics - criteria you use to evaluate/improve your model
'''
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
return classifier
def buildConfusionMatrix(y_test, y_pred):
print('Building confusion matrix to check the accuracy of the model')
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
return cm
# ===========
# ===========
# ===========
file_name = 'Churn_Modelling.csv'
# Part 1 - Data preprocessing
print('..... START: Building Model for churn classification .......')
print(f'Processing file: {file_name}')
# Importing the dataset
dataset2 = pd.read_csv(file_name)
X, y, final_cols = prepareDataset(dataset2)
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2)
# Feature Scaling
X_train, X_test = featureScale(X_train, X_test)
# ===========
# Part 2 - Model fitting with an ANN
classifier = buildArtificialNeuralNet()
# Fitting classifier to the Training set
classifier.fit(X_train, y_train, batch_size=10, epochs=30)
print(f'..... Classifer Built from file: {file_name}. Starting Predictions .....')
# Predicting the Test set results
# Convert probabilities to True or False
y_pred = classifier.predict(X_test)
y_pred = (y_pred > 0.5)
# Making the Confusion Matrix
cm = buildConfusionMatrix(y_test, y_pred)
print('..... TEST: Predicting sample .......')
d1 = {'CreditScore': [600],
'Age': [40],
'Tenure': [3],
'Balance': [60000],
'NumOfProducts': [2],
'HasCrCard': [1],
'IsActiveMember': [1],
'EstimatedSalary': [50000],
'Geography_Germany': [0],
'Geography_Spain': [0],
'Gender_Male': [1]
}
d1x = pd.DataFrame(d1).values
# print(d1x)
X1 = sc.transform(d1x)
# print(X1)
y1 = classifier.predict(X1)
y1x = (y1 > 0.5)
print(f'Prediction: {"{:.3%}".format(y1[0,0])} chance to exit. Will Exit={y1x[0,0]}')
print('..... END: Building Model for churn classification .......')
# ===========
# ===========
# ===========
| [
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"pandas.get_dummies",
"sklearn.preprocessing.StandardScaler",
"keras.models.Sequential",
"keras.layers.Dense",
"pandas.DataFrame",
"sklearn.metrics.confusion_matrix"
] | [((583, 599), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (597, 599), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3917, 3939), 'pandas.read_csv', 'pd.read_csv', (['file_name'], {}), '(file_name)\n', (3928, 3939), True, 'import pandas as pd\n'), ((4132, 4169), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)'}), '(X, y, test_size=0.2)\n', (4148, 4169), False, 'from sklearn.model_selection import train_test_split\n'), ((1410, 1467), 'pandas.get_dummies', 'pd.get_dummies', (['dataset'], {'columns': 'dummies', 'drop_first': '(True)'}), '(dataset, columns=dummies, drop_first=True)\n', (1424, 1467), True, 'import pandas as pd\n'), ((1682, 1705), 'pandas.DataFrame', 'pd.DataFrame', (['data_dict'], {}), '(data_dict)\n', (1694, 1705), True, 'import pandas as pd\n'), ((2031, 2043), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2041, 2043), False, 'from keras.models import Sequential\n'), ((3611, 3643), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (3627, 3643), False, 'from sklearn.metrics import confusion_matrix\n'), ((5096, 5112), 'pandas.DataFrame', 'pd.DataFrame', (['d1'], {}), '(d1)\n', (5108, 5112), True, 'import pandas as pd\n'), ((2446, 2523), 'keras.layers.Dense', 'Dense', ([], {'units': '(6)', 'kernel_initializer': '"""uniform"""', 'activation': '"""relu"""', 'input_dim': '(11)'}), "(units=6, kernel_initializer='uniform', activation='relu', input_dim=11)\n", (2451, 2523), False, 'from keras.layers import Dense\n'), ((2603, 2666), 'keras.layers.Dense', 'Dense', ([], {'units': '(6)', 'kernel_initializer': '"""uniform"""', 'activation': '"""relu"""'}), "(units=6, kernel_initializer='uniform', activation='relu')\n", (2608, 2666), False, 'from keras.layers import Dense\n'), ((2907, 2973), 'keras.layers.Dense', 'Dense', ([], {'units': '(1)', 'kernel_initializer': '"""uniform"""', 'activation': '"""sigmoid"""'}), "(units=1, kernel_initializer='uniform', activation='sigmoid')\n", (2912, 2973), False, 'from keras.layers import Dense\n')] |
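As a follow-up to the confusion-matrix step in the script above, a short sketch of how the test-set accuracy can be read off cm; it reuses the cm variable already computed there.
accuracy = (cm[0, 0] + cm[1, 1]) / float(cm.sum())  # (true negatives + true positives) / all test samples
print('Test accuracy: {:.2%}'.format(accuracy))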
"""The WaveBlocks Project
Use a symbolic exact formula for computing the inner product
between two semi-classical wavepackets. The formula is
constructed explicitly for the inhomogeneous case.
@author: <NAME>
@copyright: Copyright (C) 2013 <NAME>
@license: Modified BSD License
"""
from numpy import array, squeeze, conjugate, sqrt, ones, zeros, complexfloating, arange, isnan
from scipy import exp
from scipy.misc import factorial
from scipy.special import binom
from WaveBlocksND.InnerProduct import InnerProductException
from WaveBlocksND.Quadrature import Quadrature
__all__ = ["SymbolicIntegral"]
class SymbolicIntegral(Quadrature):
r"""
"""
def __init__(self, doraise=False, *unused, **kunused):
r"""Use a symbolic exact formula for computing the inner product
between two semi-classical wavepackets. The formula is
constructed explicitly for the inhomogeneous case.
:param doraise: Raise an :py:class:`InnerProductException` exception
in case the symbolic formula will fail due to an
inherent mathematical singularity. Default is ``False``.
"""
self._doraise = doraise
# Drop any other argument, we do not need a qr instance.
def __str__(self):
return "Inhomogeneous inner product <k|l> computed using a symbolic exact formula."
def get_description(self):
r"""Return a description of this integral object.
A description is a ``dict`` containing all key-value pairs
necessary to reconstruct the current instance. A description
never contains any data.
"""
d = {}
d["type"] = "SymbolicIntegral"
return d
def initialize_packet(self, pacbra, packet=None):
r"""Provide the wavepacket parts of the inner product to evaluate.
Since the formula is for the inhomogeneous case explicitly, different
wavepackets can be used for the 'bra' as well as the 'ket' part.
:param pacbra: The packet that is used for the 'bra' part.
:param packet: The packet that is used for the 'ket' part.
:raises: :py:class:`ValueError` if the dimension of :math:`\Psi` is not 1.
"""
# Allow omitting the ket if it is the same as the bra
if packet is None:
packet = pacbra
if not pacbra.get_dimension() == 1:
raise ValueError("The 'SymbolicIntegral' applies in the 1D case only.")
self._pacbra = pacbra
self._packet = packet
def initialize_operator(self, operator=None, matrix=False, eval_at_once=False):
r"""Provide the operator part of the inner product to evaluate.
This function initializes the operator used for quadratures
and for building matrices.
.. note:: The symbolic solution can not handle operators at all.
:param operator: The operator of the inner product.
If ``None`` a suitable identity is used.
:param matrix: Set this to ``True`` (Default is ``False``) in case
we want to compute the matrix elements.
For nasty technical reasons we can not yet unify
the operator call syntax.
:param eval_at_once: Flag to tell whether the operator supports the ``entry=(r,c)`` call syntax.
Since we do not support operators at all, it has no effect.
:type eval_at_once: Boolean, default is ``False``.
"""
# An operator of None is interpreted as the identity transformation
if operator is None:
self._operator = lambda nodes, dummy, entry=None: ones((1, nodes.shape[1])) if entry[0] == entry[1] else zeros((1, nodes.shape[1]))
else:
raise ValueError("The 'SymbolicIntegral' can not handle operators.")
def prepare(self, rows, cols):
r"""Precompute some values needed for evaluating the integral
:math:`\langle \Phi_i | \Phi^\prime_j \rangle` or the corresponding
matrix over the basis functions of :math:`\Phi_i` and :math:`\Phi^\prime_j`.
Note that this function does nothing in the current implementation.
:param rows: A list of all :math:`i` with :math:`0 \leq i \leq N`
selecting the :math:`\Phi_i` for which we precompute values.
:param cols: A list of all :math:`j` with :math:`0 \leq j \leq N`
selecting the :math:`\Phi^\prime_j` for which we precompute values.
"""
pass
def _evaluate_hermite(self, N, x):
r"""Evaluate the first `N` Hermite polynomials at once by exploiting
the recursion relation.
:param N: The maximal order :math:`N` for which we evaluate :math:`H_N`.
:param x: The argument :math:`x` of the Hermite polynomial :math:`H_n(x)`.
:return: A list :math:`[H_0, H_1, \ldots, H_N]` of all Hermite polynomials
up to order :math:`N` inclusive.
"""
H = {}
H[-1] = 0.0
H[0] = 1.0
for n in range(N + 1):
H[n + 1] = 2.0 * x * H[n] - 2.0 * n * H[n - 1]
H.pop(-1)
return H
def exact_result_ground(self, Pibra, Piket, eps):
r"""Compute the overlap integral :math:`\langle \phi_0 | \phi_0 \rangle` of
the groundstate :math:`\phi_0` by using the symbolic formula:
.. math::
\langle \phi_0 | \phi_0 \rangle =
\sqrt{\frac{-2 i}{Q_2 \overline{P_1} - P_2 \overline{Q_1}}} \cdot
\exp \Biggl(
\frac{i}{2 \varepsilon^2}
\frac{Q_2 \overline{Q_1} \left(p_2-p_1\right)^2 + P_2 \overline{P_1} \left(q_2-q_1\right)^2}
{\left(Q_2 \overline{P_1} - P_2 \overline{Q_1}\right)}
\\
-\frac{i}{\varepsilon^2}
\frac{\left(q_2-q_1\right) \left( Q_2 \overline{P_1} p_2 - P_2 \overline{Q_1} p_1\right)}
{\left(Q_2 \overline{P_1} - P_2 \overline{Q_1}\right)}
\Biggr)
Note that this is an internal method and usually there is no
reason to call it from outside.
:param Pibra: The parameter set :math:`\Pi = \{q_1,p_1,Q_1,P_1\}` of the bra :math:`\langle \phi_0 |`.
:param Piket: The parameter set :math:`\Pi^\prime = \{q_2,p_2,Q_2,P_2\}` of the ket :math:`| \phi_0 \rangle`.
:param eps: The semi-classical scaling parameter :math:`\varepsilon`.
:return: The value of the integral :math:`\langle \phi_0 | \phi_0 \rangle`.
"""
q1, p1, Q1, P1 = Pibra
q2, p2, Q2, P2 = Piket
hbar = eps**2
X = Q2 * conjugate(P1) - P2 * conjugate(Q1)
I = sqrt(-2.0j / X) * exp( 1.0j / (2*hbar) * (Q2*conjugate(Q1)*(p2 - p1)**2 + P2*conjugate(P1)*(q2 - q1)**2) / X
-1.0j / hbar * ((q2 - q1)*(Q2*conjugate(P1)*p2 - P2*conjugate(Q1)*p1)) / X
)
return I
def exact_result_higher(self, Pibra, Piket, eps, k, l):
r"""Compute the overlap integral :math:`\langle \phi_k | \phi_l \rangle` of
two states :math:`\phi_k` and :math:`\phi_l` by using the symbolic formula:
.. math::
\langle \phi_k | \phi_l \rangle =
\frac{1}{\sqrt{k!l!}} 2^{-\frac{k+l}{2}} \langle \phi_0 | \phi_0 \rangle \cdot
\left(i \overline{ P_1} Q_2 - i \overline{Q_1} P_2\right)^{-\frac{k+l}{2}} \cdot \\
\sum_{j=0}^{\min\left(k,l\right)}
\Biggl(
\binom{k}{j} \binom{l}{j} j! 4^j
\left(i Q_2 P_1 - i Q_1 P_2\right)^{\frac{k-j}{2}}
\left(i \overline{Q_2 P_1} - i\overline{Q_1 P_2}\right)^{\frac{l-j}{2}}
\\
\cdot H_{k-j}\left(-\frac{1}{\varepsilon}
\frac{Q_2\left(p_1-p_2\right)-P_2\left(q_1-q_2\right)}
{\sqrt{Q_2 P_1 - Q_1 P_2}\sqrt{\overline{P_1}Q_2-\overline{Q_1} P_2}}\right)
\\
\cdot H_{l-j}\left(\frac{1}{\varepsilon}
\frac{\overline{ P_1}\left(q_1-q_2\right)-\overline{Q_1}\left(p_1-p_2\right)}
{\sqrt{\overline{Q_2 P_1}-\overline{Q_1 P_2}}\sqrt{\overline{ P_1}Q_2-\overline{Q_1} P_2}}\right)
\Biggr)
Note that this is an internal method and usually there is no
reason to call it from outside.
:param Pibra: The parameter set :math:`\Pi = \{q_1,p_1,Q_1,P_1\}` of the bra :math:`\langle \phi_k |`.
:param Piket: The parameter set :math:`\Pi^\prime = \{q_2,p_2,Q_2,P_2\}` of the ket :math:`| \phi_l \rangle`.
:param eps: The semi-classical scaling parameter :math:`\varepsilon`.
:param k: Index :math:`k` of the wavepacket basis function :math:`\phi_k`.
:param l: Index :math:`l` of the wavepacket basis function :math:`\phi_l`.
:return: The value of the integral :math:`\langle \phi_k | \phi_l \rangle`.
"""
# < phi_k[Pi1] | phi_l[Pi2] >
q1, p1, Q1, P1 = Pibra
q2, p2, Q2, P2 = Piket
# If both parameter sets are identical, we are back in the
# homogeneous case where the phi are orthonormal.
# This early return just serves to avoid introducing NaN
# elements in argk and argl further below. It is allowed to
# compare floats on exact equality because the inhomogeneous
# formula works even if the floats differ only by tiny amounts.
if q1 == q2 and p1 == p2 and Q1 == Q2 and P1 == P2:
return 1.0 if k == l else 0.0
# TODO: Note that the formula can still fail if Q1 = Q2 and P1 = P2
# but q1 \neq q2 and p1 \neq p2.
pf = (self._f[(k, l)] * 2**(-(k + l) / 2.0) * self._I0 *
(1.0j * conjugate(P1) * Q2 - 1.0j * conjugate(Q1) * P2)**(-(k + l) / 2.0))
S = 0.0j
for j in range(min(k, l) + 1):
S = S + (self._bk[k, j] * self._bl[l, j] * self._jf[j] *
self._pfk[k - j] * self._pfl[l - j] * self._Hk[k - j] * self._Hl[l - j])
Ikl = pf * S
return squeeze(Ikl)
def _cache_factors(self, Pibra, Piket, Kbra, Kket, eps):
r"""Cache some summands to speed up the computation of the sum.
:param Pibra: The parameter set :math:`\Pi` of the bra :math:`\langle \Phi |`.
:param Piket: The parameter set :math:`\Pi^\prime` of the ket :math:`| \Phi^\prime \rangle`.
:param Kbra: The basis shape :math:`\mathfrak{K}` of the bra :math:`\langle \Phi |`.
:type Kbra: A :py:class:`BasisShape` instance.
:param Kket: The basis shape :math:`\mathfrak{K}^\prime` of the ket :math:`| \Phi^\prime \rangle`.
:type Kket: A :py:class:`BasisShape` instance.
:param eps: The semi-classical scaling parameter :math:`\varepsilon`.
"""
q1, p1, Q1, P1 = Pibra
q2, p2, Q2, P2 = Piket
# If both parameter sets are identical, we are back in the homogeneous case.
if q1 == q2 and p1 == p2 and Q1 == Q2 and P1 == P2:
self._Hk = None
self._Hl = None
# We have k in [0, 1, ..., K-1] where |K| is the basis size
# hence K-1 is the maximal index.
K = Kbra.get_basis_size()
L = Kket.get_basis_size()
mikl = min(K, L)
makl = max(K, L)
# Factorials
f = factorial(arange(makl))
self._f = 1.0 / sqrt(f[:K].reshape(-1, 1) * f[:L].reshape(1, -1))
# These prefactors depend only on j
self._jf = f[:mikl] * 4**arange(mikl)
# Binomials depend on k or l and j
ik = arange(K).reshape(-1, 1)
il = arange(L).reshape(-1, 1)
ij = arange(mikl).reshape(1, -1)
self._bk = binom(ik, ij)
self._bl = binom(il, ij)
# Note: formula currently fails for non-inhomogeneous case
# because of divisions by zero in the two args below.
argk = ((1.0j*Q2*(p1-p2) - 1.0j*P2*(q1-q2)) /
(sqrt(1.0j*Q2*P1 - 1.0j*Q1*P2) *
sqrt(1.0j*conjugate(P1)*Q2 - 1.0j*conjugate(Q1)*P2)))
argl = ((1.0j*conjugate(P1)*(q1-q2) - 1.0j*conjugate(Q1)*(p1-p2)) /
(sqrt(1.0j*conjugate(Q2*P1) - 1.0j*conjugate(Q1*P2)) *
sqrt(1.0j*conjugate(P1)*Q2 - 1.0j*conjugate(Q1)*P2)))
# TODO: Better test for failure?
if self._doraise and (isnan(squeeze(argk)) or isnan(squeeze(argl))):
raise InnerProductException("Symbolic formula failed due to Q_k = Q_l and P_k = P_l.")
# The parameter j varies in the range [0, 1, ..., min(K-1,L-1)]
# hence we have that k-j can be in [K-1, K-2, ..., K-1-min(K-1,L-1)]
# and similar for l-j we have [L-1, L-2, ..., L-1-min(K-1,L-1)]
# where both K-1-min(K-1,L-1) and L-1-min(K-1,L-1) are non-negative.
self._Hk = self._evaluate_hermite(K - 1, -1.0 / eps * argk)
self._Hl = self._evaluate_hermite(L - 1, 1.0 / eps * argl)
self._pfk = ((1.0j*Q2*P1 - 1.0j*Q1*P2) ** (ik / 2.0)).reshape(K)
self._pfl = ((1.0j*conjugate(Q2*P1) - 1.0j*conjugate(Q1*P2)) ** (il / 2.0)).reshape(L)
# And the groundstate value
self._I0 = self.exact_result_ground(Pibra, Piket, eps)
def perform_quadrature(self, row, col):
r"""Evaluates the integral :math:`\langle \Phi_i | \Phi^\prime_j \rangle`
by an exact symbolic formula.
:param row: The index :math:`i` of the component :math:`\Phi_i` of :math:`\Psi`.
:param row: The index :math:`j` of the component :math:`\Phi^\prime_j` of :math:`\Psi^\prime`.
:return: A single complex floating point number.
"""
eps = self._packet.get_eps()
Pibra = self._pacbra.get_parameters(component=row)
Piket = self._packet.get_parameters(component=col)
cbra = self._pacbra.get_coefficient_vector(component=row)
cket = self._packet.get_coefficient_vector(component=col)
Kbra = self._pacbra.get_basis_shapes(component=row)
Kket = self._packet.get_basis_shapes(component=col)
self._cache_factors(Pibra[:4], Piket[:4], Kbra, Kket, eps)
result = array([[0.0j]], dtype=complexfloating)
for r in Kbra:
for c in Kket:
cr = cbra[Kbra[r], 0]
cc = cket[Kket[c], 0]
i = self.exact_result_higher(Pibra[:4], Piket[:4], eps, r[0], c[0])
result = result + conjugate(cr) * cc * i
phase = exp(1.0j / eps**2 * (Piket[4] - conjugate(Pibra[4])))
return phase * result
def perform_build_matrix(self, row, col):
r"""Computes the matrix elements :math:`\langle\Phi_i |\Phi^\prime_j\rangle`
by an exact symbolic formula.
:param row: The index :math:`i` of the component :math:`\Phi_i` of :math:`\Psi`.
:param row: The index :math:`j` of the component :math:`\Phi^\prime_j` of :math:`\Psi^\prime`.
:return: A complex valued matrix of shape :math:`|\mathfrak{K}_i| \times |\mathfrak{K}^\prime_j|`.
"""
eps = self._packet.get_eps()
Pibra = self._pacbra.get_parameters(component=row)
Piket = self._packet.get_parameters(component=col)
Kbra = self._pacbra.get_basis_shapes(component=row)
Kket = self._packet.get_basis_shapes(component=col)
self._cache_factors(Pibra[:4], Piket[:4], Kbra, Kket, eps)
M = zeros((Kbra.get_basis_size(), Kket.get_basis_size()), dtype=complexfloating)
for r in Kbra:
for c in Kket:
M[Kbra[r], Kket[c]] = self.exact_result_higher(Pibra[:4], Piket[:4], eps, r[0], c[0])
phase = exp(1.0j / eps**2 * (Piket[4] - conjugate(Pibra[4])))
return phase * M
| [
"numpy.sqrt",
"numpy.ones",
"scipy.special.binom",
"numpy.conjugate",
"numpy.squeeze",
"numpy.array",
"numpy.zeros",
"WaveBlocksND.InnerProduct.InnerProductException",
"numpy.arange"
] | [((10174, 10186), 'numpy.squeeze', 'squeeze', (['Ikl'], {}), '(Ikl)\n', (10181, 10186), False, 'from numpy import array, squeeze, conjugate, sqrt, ones, zeros, complexfloating, arange, isnan\n'), ((11808, 11821), 'scipy.special.binom', 'binom', (['ik', 'ij'], {}), '(ik, ij)\n', (11813, 11821), False, 'from scipy.special import binom\n'), ((11841, 11854), 'scipy.special.binom', 'binom', (['il', 'ij'], {}), '(il, ij)\n', (11846, 11854), False, 'from scipy.special import binom\n'), ((14228, 14266), 'numpy.array', 'array', (['[[0.0j]]'], {'dtype': 'complexfloating'}), '([[0.0j]], dtype=complexfloating)\n', (14233, 14266), False, 'from numpy import array, squeeze, conjugate, sqrt, ones, zeros, complexfloating, arange, isnan\n'), ((6715, 6730), 'numpy.sqrt', 'sqrt', (['(-2.0j / X)'], {}), '(-2.0j / X)\n', (6719, 6730), False, 'from numpy import array, squeeze, conjugate, sqrt, ones, zeros, complexfloating, arange, isnan\n'), ((11449, 11461), 'numpy.arange', 'arange', (['makl'], {}), '(makl)\n', (11455, 11461), False, 'from numpy import array, squeeze, conjugate, sqrt, ones, zeros, complexfloating, arange, isnan\n'), ((12521, 12606), 'WaveBlocksND.InnerProduct.InnerProductException', 'InnerProductException', (['"""Symbolic formula failed due to Q_k = Q_l and P_k = P_l."""'], {}), "('Symbolic formula failed due to Q_k = Q_l and P_k = P_l.'\n )\n", (12542, 12606), False, 'from WaveBlocksND.InnerProduct import InnerProductException\n'), ((6668, 6681), 'numpy.conjugate', 'conjugate', (['P1'], {}), '(P1)\n', (6677, 6681), False, 'from numpy import array, squeeze, conjugate, sqrt, ones, zeros, complexfloating, arange, isnan\n'), ((6689, 6702), 'numpy.conjugate', 'conjugate', (['Q1'], {}), '(Q1)\n', (6698, 6702), False, 'from numpy import array, squeeze, conjugate, sqrt, ones, zeros, complexfloating, arange, isnan\n'), ((11615, 11627), 'numpy.arange', 'arange', (['mikl'], {}), '(mikl)\n', (11621, 11627), False, 'from numpy import array, squeeze, conjugate, sqrt, ones, zeros, complexfloating, arange, isnan\n'), ((11685, 11694), 'numpy.arange', 'arange', (['K'], {}), '(K)\n', (11691, 11694), False, 'from numpy import array, squeeze, conjugate, sqrt, ones, zeros, complexfloating, arange, isnan\n'), ((11723, 11732), 'numpy.arange', 'arange', (['L'], {}), '(L)\n', (11729, 11732), False, 'from numpy import array, squeeze, conjugate, sqrt, ones, zeros, complexfloating, arange, isnan\n'), ((11761, 11773), 'numpy.arange', 'arange', (['mikl'], {}), '(mikl)\n', (11767, 11773), False, 'from numpy import array, squeeze, conjugate, sqrt, ones, zeros, complexfloating, arange, isnan\n'), ((12062, 12099), 'numpy.sqrt', 'sqrt', (['(1.0j * Q2 * P1 - 1.0j * Q1 * P2)'], {}), '(1.0j * Q2 * P1 - 1.0j * Q1 * P2)\n', (12066, 12099), False, 'from numpy import array, squeeze, conjugate, sqrt, ones, zeros, complexfloating, arange, isnan\n'), ((3683, 3708), 'numpy.ones', 'ones', (['(1, nodes.shape[1])'], {}), '((1, nodes.shape[1]))\n', (3687, 3708), False, 'from numpy import array, squeeze, conjugate, sqrt, ones, zeros, complexfloating, arange, isnan\n'), ((3738, 3764), 'numpy.zeros', 'zeros', (['(1, nodes.shape[1])'], {}), '((1, nodes.shape[1]))\n', (3743, 3764), False, 'from numpy import array, squeeze, conjugate, sqrt, ones, zeros, complexfloating, arange, isnan\n'), ((12462, 12475), 'numpy.squeeze', 'squeeze', (['argk'], {}), '(argk)\n', (12469, 12475), False, 'from numpy import array, squeeze, conjugate, sqrt, ones, zeros, complexfloating, arange, isnan\n'), ((12486, 12499), 'numpy.squeeze', 'squeeze', (['argl'], {}), 
'(argl)\n', (12493, 12499), False, 'from numpy import array, squeeze, conjugate, sqrt, ones, zeros, complexfloating, arange, isnan\n'), ((14584, 14603), 'numpy.conjugate', 'conjugate', (['Pibra[4]'], {}), '(Pibra[4])\n', (14593, 14603), False, 'from numpy import array, squeeze, conjugate, sqrt, ones, zeros, complexfloating, arange, isnan\n'), ((15755, 15774), 'numpy.conjugate', 'conjugate', (['Pibra[4]'], {}), '(Pibra[4])\n', (15764, 15774), False, 'from numpy import array, squeeze, conjugate, sqrt, ones, zeros, complexfloating, arange, isnan\n'), ((12188, 12201), 'numpy.conjugate', 'conjugate', (['P1'], {}), '(P1)\n', (12197, 12201), False, 'from numpy import array, squeeze, conjugate, sqrt, ones, zeros, complexfloating, arange, isnan\n'), ((12217, 12230), 'numpy.conjugate', 'conjugate', (['Q1'], {}), '(Q1)\n', (12226, 12230), False, 'from numpy import array, squeeze, conjugate, sqrt, ones, zeros, complexfloating, arange, isnan\n'), ((9850, 9863), 'numpy.conjugate', 'conjugate', (['P1'], {}), '(P1)\n', (9859, 9863), False, 'from numpy import array, squeeze, conjugate, sqrt, ones, zeros, complexfloating, arange, isnan\n'), ((9878, 9891), 'numpy.conjugate', 'conjugate', (['Q1'], {}), '(Q1)\n', (9887, 9891), False, 'from numpy import array, squeeze, conjugate, sqrt, ones, zeros, complexfloating, arange, isnan\n'), ((12269, 12287), 'numpy.conjugate', 'conjugate', (['(Q2 * P1)'], {}), '(Q2 * P1)\n', (12278, 12287), False, 'from numpy import array, squeeze, conjugate, sqrt, ones, zeros, complexfloating, arange, isnan\n'), ((12293, 12311), 'numpy.conjugate', 'conjugate', (['(Q1 * P2)'], {}), '(Q1 * P2)\n', (12302, 12311), False, 'from numpy import array, squeeze, conjugate, sqrt, ones, zeros, complexfloating, arange, isnan\n'), ((13138, 13156), 'numpy.conjugate', 'conjugate', (['(Q2 * P1)'], {}), '(Q2 * P1)\n', (13147, 13156), False, 'from numpy import array, squeeze, conjugate, sqrt, ones, zeros, complexfloating, arange, isnan\n'), ((13162, 13180), 'numpy.conjugate', 'conjugate', (['(Q1 * P2)'], {}), '(Q1 * P2)\n', (13171, 13180), False, 'from numpy import array, squeeze, conjugate, sqrt, ones, zeros, complexfloating, arange, isnan\n'), ((14512, 14525), 'numpy.conjugate', 'conjugate', (['cr'], {}), '(cr)\n', (14521, 14525), False, 'from numpy import array, squeeze, conjugate, sqrt, ones, zeros, complexfloating, arange, isnan\n'), ((12121, 12134), 'numpy.conjugate', 'conjugate', (['P1'], {}), '(P1)\n', (12130, 12134), False, 'from numpy import array, squeeze, conjugate, sqrt, ones, zeros, complexfloating, arange, isnan\n'), ((12145, 12158), 'numpy.conjugate', 'conjugate', (['Q1'], {}), '(Q1)\n', (12154, 12158), False, 'from numpy import array, squeeze, conjugate, sqrt, ones, zeros, complexfloating, arange, isnan\n'), ((12340, 12353), 'numpy.conjugate', 'conjugate', (['P1'], {}), '(P1)\n', (12349, 12353), False, 'from numpy import array, squeeze, conjugate, sqrt, ones, zeros, complexfloating, arange, isnan\n'), ((12364, 12377), 'numpy.conjugate', 'conjugate', (['Q1'], {}), '(Q1)\n', (12373, 12377), False, 'from numpy import array, squeeze, conjugate, sqrt, ones, zeros, complexfloating, arange, isnan\n'), ((6760, 6773), 'numpy.conjugate', 'conjugate', (['Q1'], {}), '(Q1)\n', (6769, 6773), False, 'from numpy import array, squeeze, conjugate, sqrt, ones, zeros, complexfloating, arange, isnan\n'), ((6792, 6805), 'numpy.conjugate', 'conjugate', (['P1'], {}), '(P1)\n', (6801, 6805), False, 'from numpy import array, squeeze, conjugate, sqrt, ones, zeros, complexfloating, arange, isnan\n'), ((6892, 
6905), 'numpy.conjugate', 'conjugate', (['P1'], {}), '(P1)\n', (6901, 6905), False, 'from numpy import array, squeeze, conjugate, sqrt, ones, zeros, complexfloating, arange, isnan\n'), ((6914, 6927), 'numpy.conjugate', 'conjugate', (['Q1'], {}), '(Q1)\n', (6923, 6927), False, 'from numpy import array, squeeze, conjugate, sqrt, ones, zeros, complexfloating, arange, isnan\n')] |
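To illustrate the orthonormality early-return discussed inside exact_result_higher, a hedged sanity check with identical bra and ket parameter sets; the values (q, p, Q, P) = (0, 0, 1, i) and eps = 0.1 are arbitrary but valid for a standard Gaussian packet.
si = SymbolicIntegral()
Pi = (0.0, 0.0, 1.0, 1.0j)                        # (q, p, Q, P) used for both bra and ket
print(si.exact_result_higher(Pi, Pi, 0.1, 2, 2))  # identical sets, k == l  -> 1.0
print(si.exact_result_higher(Pi, Pi, 0.1, 2, 3))  # identical sets, k != l  -> 0.0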
from pyjuxa import db
def test_connect():
Session = db.connect()
_ = Session()
| [
"pyjuxa.db.connect"
] | [((58, 70), 'pyjuxa.db.connect', 'db.connect', ([], {}), '()\n', (68, 70), False, 'from pyjuxa import db\n')] |
from rps.sequence_problems.transcribing_dna_into_rna import transcribe
def test_transcribe():
"""
Checks if transcription properly replaces T with U
"""
sequence = ["GATGGAACTTGACTACGTAAATT"]
expected_rna = "GAUGGAACUUGACUACGUAAAUU"
transcribed = transcribe(sequence)
assert transcribed == expected_rna
| [
"rps.sequence_problems.transcribing_dna_into_rna.transcribe"
] | [((273, 293), 'rps.sequence_problems.transcribing_dna_into_rna.transcribe', 'transcribe', (['sequence'], {}), '(sequence)\n', (283, 293), False, 'from rps.sequence_problems.transcribing_dna_into_rna import transcribe\n')] |
# Generated by Django 3.2.4 on 2021-06-13 18:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cards', '0010_auto_20210610_0154'),
]
operations = [
migrations.AlterField(
model_name='cardinfo',
name='name',
field=models.CharField(help_text="Displayed card's name.", max_length=50),
),
migrations.AddConstraint(
model_name='cardinfo',
constraint=models.UniqueConstraint(fields=('name',), name='unique_CardInfo_name'),
),
]
| [
"django.db.models.UniqueConstraint",
"django.db.models.CharField"
] | [((333, 400), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Displayed card\'s name."""', 'max_length': '(50)'}), '(help_text="Displayed card\'s name.", max_length=50)\n', (349, 400), False, 'from django.db import migrations, models\n'), ((505, 575), 'django.db.models.UniqueConstraint', 'models.UniqueConstraint', ([], {'fields': "('name',)", 'name': '"""unique_CardInfo_name"""'}), "(fields=('name',), name='unique_CardInfo_name')\n", (528, 575), False, 'from django.db import migrations, models\n')] |
# SPDX-FileCopyrightText: 2020 2020
#
# SPDX-License-Identifier: Apache-2.0
"""
REST Builder.
"""
from __future__ import absolute_import
import collections
from splunktaucclib.rest_handler.schema import RestSchema
from .builder import (
RestBuilder,
RestBuilderError,
)
__all__ = [
"RestBuilder",
"RestBuilderError",
"RestHandlerClass",
"build",
]
__version__ = "0.0.0"
RestHandlerClass = collections.namedtuple("RestHandlerClass", ("module", "name"),)
def build(schema, handler, output_path, j2_env, post_process=None, *args, **kwargs):
"""
Build REST for Add-on.
:param schema: REST schema.
:type schema: RestSchema
:param handler: REST handler class import path:
``module.sub_module.RestHandlerClass``.
The HandlerClass must be subclass of
splunktaucclib.rest_handler.admin_external.AdminExternalHandler.
:type handler: str
:param output_path: path for output.
:param post_process:
:param args: args for post_process.
:param kwargs: kwargs for post_process.
:return:
"""
def _parse_handler(handler_path):
parts = handler_path.split(".")
if len(parts) <= 1:
raise RestBuilderError(
"Invalid handler specified. "
'It should be in form "module.sub_module.RestHandlerClass".'
)
return RestHandlerClass(module=".".join(parts[:-1]), name=parts[-1],)
builder_obj = RestBuilder(schema, _parse_handler(handler), output_path)
builder_obj.build()
if post_process is not None:
post_process(builder_obj, schema, *args, **kwargs)
return builder_obj
| [
"collections.namedtuple"
] | [((421, 483), 'collections.namedtuple', 'collections.namedtuple', (['"""RestHandlerClass"""', "('module', 'name')"], {}), "('RestHandlerClass', ('module', 'name'))\n", (443, 483), False, 'import collections\n')] |
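A hypothetical invocation of build(); the schema object, handler import path, output directory, and Jinja2 environment are placeholders for whatever the calling add-on build tooling supplies.
from jinja2 import Environment, FileSystemLoader

j2_env = Environment(loader=FileSystemLoader('templates'))    # hypothetical template directory
handler_path = 'my_ta.rest_handlers.MyAddonHandler'          # must look like "module.sub_module.RestHandlerClass"
builder = build(schema, handler_path, 'output/my_ta', j2_env)  # schema: a RestSchema loaded elsewhere (assumed)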
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import pandas as pd
def filter_data(candles, table, symbol=None, start_date=None, end_date=None):
mask = (candles['table'] == table)
if symbol:
mask = mask & (candles['symbol'] == symbol)
if start_date and end_date:
mask = mask & (candles['date'] >= start_date) & (candles['date'] < end_date)
candles_to_show = candles[mask]
return candles_to_show
def show_candle(candles, size=(1400, 800)):
layout = go.Layout(
autosize=True,
width=size[0],
height=size[1],
xaxis=go.layout.XAxis(linecolor='black',
linewidth=1,
mirror=True),
xaxis2=go.layout.XAxis(linecolor='black',
linewidth=1,
mirror=True),
yaxis=go.layout.YAxis(linecolor='black',
linewidth=1,
mirror=True,
domain=[0, 0.2]),
yaxis2=go.layout.YAxis(linecolor='black',
linewidth=1,
mirror=True,
domain=[0.3, 1]),
)
fig = make_subplots(rows=2, cols=1, shared_xaxes=True)
fig.update_layout(layout, xaxis2_rangeslider_visible=False)
fig.add_trace(go.Candlestick(x=candles['date'],
open=candles['open'],
high=candles['high'],
low=candles['low'],
close=candles['close']),
row=2, col=1)
fig.update_yaxes(fixedrange=False)
# fig.update_xaxes(rangebreaks=[dict(values=compute_datetime_to_hide(start_date, end_date))])
return fig
def add_indicator(data: pd.DataFrame, fig: go.Figure, name: str, color: str = 'rgba(46, 134, 193, 0.5)') -> go.Figure:
width = 2
fig.add_trace(go.Scatter(x=data['date'],
y=data[name],
mode='lines',
name=name,
line=dict(color=color, width=width)
),
row=2, col=1
)
return fig
| [
"plotly.graph_objects.layout.XAxis",
"plotly.graph_objects.Candlestick",
"plotly.subplots.make_subplots",
"plotly.graph_objects.layout.YAxis"
] | [((1265, 1313), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': '(2)', 'cols': '(1)', 'shared_xaxes': '(True)'}), '(rows=2, cols=1, shared_xaxes=True)\n', (1278, 1313), False, 'from plotly.subplots import make_subplots\n'), ((1396, 1522), 'plotly.graph_objects.Candlestick', 'go.Candlestick', ([], {'x': "candles['date']", 'open': "candles['open']", 'high': "candles['high']", 'low': "candles['low']", 'close': "candles['close']"}), "(x=candles['date'], open=candles['open'], high=candles['high'\n ], low=candles['low'], close=candles['close'])\n", (1410, 1522), True, 'import plotly.graph_objects as go\n'), ((616, 676), 'plotly.graph_objects.layout.XAxis', 'go.layout.XAxis', ([], {'linecolor': '"""black"""', 'linewidth': '(1)', 'mirror': '(True)'}), "(linecolor='black', linewidth=1, mirror=True)\n", (631, 676), True, 'import plotly.graph_objects as go\n'), ((753, 813), 'plotly.graph_objects.layout.XAxis', 'go.layout.XAxis', ([], {'linecolor': '"""black"""', 'linewidth': '(1)', 'mirror': '(True)'}), "(linecolor='black', linewidth=1, mirror=True)\n", (768, 813), True, 'import plotly.graph_objects as go\n'), ((891, 968), 'plotly.graph_objects.layout.YAxis', 'go.layout.YAxis', ([], {'linecolor': '"""black"""', 'linewidth': '(1)', 'mirror': '(True)', 'domain': '[0, 0.2]'}), "(linecolor='black', linewidth=1, mirror=True, domain=[0, 0.2])\n", (906, 968), True, 'import plotly.graph_objects as go\n'), ((1075, 1152), 'plotly.graph_objects.layout.YAxis', 'go.layout.YAxis', ([], {'linecolor': '"""black"""', 'linewidth': '(1)', 'mirror': '(True)', 'domain': '[0.3, 1]'}), "(linecolor='black', linewidth=1, mirror=True, domain=[0.3, 1])\n", (1090, 1152), True, 'import plotly.graph_objects as go\n')] |
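A small end-to-end sketch tying the three helpers above together; the candles CSV, its table/symbol values, and the sma20 indicator column are assumptions about the caller's data, not something these functions define.
candles = pd.read_csv('candles.csv', parse_dates=['date'])  # assumed columns: table, symbol, date, open, high, low, close, sma20
view = filter_data(candles, table='daily', symbol='BTCUSDT', start_date='2021-01-01', end_date='2021-02-01')
fig = show_candle(view)
fig = add_indicator(view, fig, 'sma20')  # overlays the assumed sma20 column on the price panel
fig.show()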
import django.conf
import importlib
from voxel_globe.vip.celery import app
class IngestClass(object):
def __init__(self, ingest_data, description=''):
self.ingest=ingest_data
self.description=description
#key: [Friendly name, moduleName]
#Module name should not include tasks, but it is assume that tasks.ingest_data is used
#I'm sure this will be updated at a later time to have api data in the module rather than here
#SENSOR_TYPES = {'arducopter':'Arducopter',
# 'jpg_exif':'JPEG with EXIF tags'}
#to be used in conjunction with importlib
def preload_tasks():
''' Load up app.tasks
app.tasks isn't populated until all the .tasks modules are loaded. Short
of finding the RIGHT way to load all the tasks specified in Django, I
just load them all myself here.
I APPARENTLY shouldn't be doing this... Investigate new registry method
Maybe ONLY load voxel_globe.ingest.*.tasks??? I wouldn't mind that much'''
for tasks in django.conf.settings.INSTALLED_APPS:
try:
importlib.import_module(tasks+'.tasks')
except (ImportError):
pass
class GetTypes(object):
def __init__(self, typetype):
self.typetype = typetype
self.types = {}
def get_types(self):
if not self.types:
preload_tasks()
for _, task in app.tasks.iteritems():
try:
if getattr(task, self.typetype+'_ingest'):
self.types[task.dbname] = IngestClass(task, task.description)
except AttributeError:
pass
return self.types
PayloadTypes = GetTypes('payload')
MetadataTypes = GetTypes('metadata')
ControlPointTypes = GetTypes('controlpoint')
| [
"voxel_globe.vip.celery.app.tasks.iteritems",
"importlib.import_module"
] | [((1040, 1081), 'importlib.import_module', 'importlib.import_module', (["(tasks + '.tasks')"], {}), "(tasks + '.tasks')\n", (1063, 1081), False, 'import importlib\n'), ((1313, 1334), 'voxel_globe.vip.celery.app.tasks.iteritems', 'app.tasks.iteritems', ([], {}), '()\n', (1332, 1334), False, 'from voxel_globe.vip.celery import app\n')] |
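The registry scan above uses the Python 2 dict.iteritems(); a rough Python 3 sketch of the same filtering over Celery's task registry is shown below. The "<typetype>_ingest" flag, dbname and description attributes are assumptions carried over from the tasks this module expects.

def collect_ingest_tasks(celery_app, typetype):
    # Return {dbname: task} for every registered task flagged as a
    # "<typetype>_ingest" task (e.g. payload_ingest), mirroring GetTypes above.
    found = {}
    for name, task in celery_app.tasks.items():  # Python 3 spelling of iteritems()
        if getattr(task, typetype + '_ingest', False):
            found[task.dbname] = task
    return found
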
from typing import Dict, List, Callable, Tuple
from enum import Enum
import math
import random
import pygame
from pygame.locals import Rect
from pygame.sprite import Sprite
from stage import Stage
from screen import Screen, BaseScreen
from sprites import HoverRect, PressRect, SimpleSprite, TextSprite, make_outline_splites, load_animation_sprite
from sprites import RichSprite, layout_rects
from component import CounterBtn
from component import make_counter_btn
from component import ValuesGroup
from game_config import GameConfig
class StageSelectScreen2(BaseScreen):
def __init__(self, game_config: GameConfig, gamesetting):
super().__init__()
        # pull the information we need out of game_config
self.stages = game_config.stages
self.gamesetting = gamesetting
self.load_images(game_config)
self.load_sounds(game_config)
pygame.display.set_caption("ステージセレクト")
self.hover_rects = {}
self.press_rects = {}
self.selected_stage = None
self.stock = 3
self.font_size_stage_title = 50
self.font_stage_title = pygame.font.SysFont(None, self.font_size_stage_title)
self.font_size_stock = 80
self.font_stock = pygame.font.SysFont(None, self.font_size_stock)
self.font_size = 40
self.font = pygame.font.SysFont(None, self.font_size)
self.stock_counter = None
self.init()
    def _split_area(self):
        """ Decide the display areas and keep them as Rects.
"""
rect = self.display.get_rect()
        # split the screen into areas
self.stage_view_rect = Rect(
rect.x,
rect.y,
int(rect.width * 0.4),
int(rect.height * 0.75)
)
self.stage_select_rect = Rect(
self.stage_view_rect.right,
self.stage_view_rect.y,
rect.width - self.stage_view_rect.width,
self.stage_view_rect.height
)
self.stock_rect = Rect(
self.stage_view_rect.x,
self.stage_view_rect.bottom,
rect.width,
rect.height - self.stage_view_rect.height
)
self.area = [self.stage_view_rect, self.stage_select_rect, self.stock_rect]
    def _set_stage_thumbnail_sprites(self):
        """ Lay the thumbnails out as square tiles over self.stage_select_rect
"""
cols = 3
rows = math.ceil(len(self.stages) / cols)
padding = 10
tile_margin = 10
rect = self.stage_select_rect
rect = Rect(rect.x + padding, rect.y + padding, rect.w - padding * 2, rect.h - padding * 2)
tile_width = min((rect.w - (cols - 1) * tile_margin) // cols, (rect.h - (rows - 1) * tile_margin) // rows)
tile_height = tile_width
left = rect.x + (rect.w - (tile_width * cols + tile_margin * (cols - 1))) // 2
top = rect.y + (rect.h - (tile_height * rows + tile_margin * (rows - 1))) // 2
thumbnail_sprites = []
stages = list(self.stages.values())
for i in range(rows):
for j in range(cols):
tmp = i * rows + j
if tmp >= len(stages):
break
stage = stages[tmp]
x = left + j * (tile_width + tile_margin)
y = top + i * (tile_height + tile_margin)
thumbnail_rect = stage.thumbnail_rect(width=tile_width, height=tile_height)
image = stage.image.subsurface(thumbnail_rect)
sprite = SimpleSprite(Rect(x, y, tile_width, tile_height), image)
self._add_hover_rect(sprite, self._visible_outlines, self._invisible_outlines)
self._add_press_rect(sprite, self._select_stage, stage)
thumbnail_sprites.append(sprite)
self.thumbnail_sprites = thumbnail_sprites
self.front_sprites.add(self.thumbnail_sprites)
    def _set_stock_counter(self):
        """Place the stock counter
"""
counter_btn = CounterBtn(
x=self.stock_rect.centerx,
y=self.stock_rect.centery,
min_=1,
max_=5,
font=self.font_stock,
front_group=self.front_sprites,
back_group=self.middle_sprites,
color=(0, 0, 0),
bgcolor=None,
sound=self.click_sound,
)
self.stock_counter = counter_btn
rect = counter_btn.rect
stock_label = TextSprite(
x=rect.right + 5,
y=rect.top,
text="stock",
font=self.font_stock,
color=(0, 0, 0),
align="left",
vertical_align="top",
)
self.middle_sprites.add(stock_label)
    def _set_back_btn(self):
        """Place the back button
"""
rect = self.display.get_rect()
back_btn = TextSprite(
x=rect.x + 5,
y=rect.y + 5,
text="<back",
font=self.font,
color=(0, 0, 0),
align="left",
vertical_align="top",
)
self.front_sprites.add(back_btn)
self._add_hover_rect(back_btn, self._visible_outlines, self._invisible_outlines)
self._add_press_rect(back_btn, self._go_back_character_select, None)
    def _set_next_btn(self):
        """Place the next button
"""
rect = self.display.get_rect()
next_btn = TextSprite(
x=rect.right - 5,
y=rect.y + 5,
text="next>",
font=self.font,
color=(0, 0, 0),
align="right",
vertical_align="top",
)
self.front_sprites.add(next_btn)
self._add_hover_rect(next_btn, self._visible_outlines, self._invisible_outlines)
self._add_press_rect(next_btn, self._go_to_game, None)
    def _get_stage_big_thumbnails(self):
        """ Build the dict of images sized for display in self.stage_view_rect
"""
padding = 10
rect = self.stage_view_rect
w = rect
image_rect = Rect(rect.x + padding, rect.y + padding, rect.w - padding * 2, rect.h - padding * 2)
text_rect = Rect(
image_rect.x,
image_rect.bottom - self.font_size_stage_title - 5,
image_rect.w,
self.font_size_stage_title + 5,
)
self.stage_view_sprites = {}
self.stage_name_sprites = {}
for id_, stage in self.stages.items():
thumbnail_rect = stage.thumbnail_rect(width=image_rect.w, height=image_rect.h)
image = stage.image.subsurface(thumbnail_rect)
stage_view_sprite = SimpleSprite(image_rect, image)
stage_name_sprite = TextSprite(
x=text_rect.centerx,
y=text_rect.centery,
text=stage.name,
font=self.font_stage_title,
color=(0, 0, 0),
bgcolor=(255, 255, 255),
align="center",
vertical_align="middle"
)
self.stage_view_sprites[stage] = stage_view_sprite
self.stage_name_sprites[stage] = stage_name_sprite
        # pre-select the default stage (the first one)
self._update_stage(list(self.stages.values())[0])
    def _load_background(self):
        """Load the background image (adding it to self.background_sprites)
"""
bg_image = self.bg_image
bg_sprite = SimpleSprite(bg_image.get_rect(), bg_image)
self.background_sprites.add(bg_sprite)
    def load_sounds(self, game_config: GameConfig):
        """Load the sounds
"""
self.bgm_sound = game_config.sounds["menu"]
self.click_sound = game_config.sounds["click"]
self.bgm_sound.set_volume(0.3)
self.stage_sounds = {}
    def load_images(self, game_config: GameConfig):
        """Load the Surfaces
"""
self.outline_image = game_config.components["outline"]
self.bg_image = game_config.components["background"]
    def init(self):
        """Initialization: empty the sprite groups to be drawn, then lay everything out again
"""
self.empty_all_sprites()
self._split_area()
self._set_stage_thumbnail_sprites()
self._set_stock_counter()
self._set_back_btn()
self._set_next_btn()
self._get_stage_big_thumbnails()
self._load_background()
def _add_hover_rect(self, sprite: Sprite, enter_fnc: Callable, exit_fnc: Callable):
rect = sprite.rect
hover_rect = HoverRect(rect, enter_fnc, exit_fnc)
outline_sprites = make_outline_splites(rect, self.outline_image, border_width=3)
self.hover_rects[hover_rect] = outline_sprites
    def _visible_outlines(self, hover_rect: HoverRect):
        """ Make the OutlineSprite tied to hover_rect visible (add it to self.middle_sprites)
"""
outline_sprites = self.hover_rects[hover_rect]
self.middle_sprites.add(outline_sprites)
    def _invisible_outlines(self, hover_rect: HoverRect):
        """ Make the OutlineSprite tied to hover_rect invisible (remove it from self.middle_sprites)
"""
outline_sprites = self.hover_rects[hover_rect]
self.middle_sprites.remove(outline_sprites)
    def _go_to_game(self, *args):
        """Move on to the game screen
"""
print("go to game screen")
print("stage: {}, stock: {}".format(self.selected_stage.name, self.stock))
self.next_screen = Screen.GAME
self.run = False
    def _go_back_character_select(self, *args):
        """Go back to the character select screen
"""
print("go back to character select screen")
self.next_screen = Screen.CHARACTER_SELECT
self.run = False
    def _add_press_rect(self, sprite: Sprite, fnc: Callable, value):
        """ Register the press_rect in self.press_rects
"""
rect = sprite.rect
press_rect = PressRect(rect, fnc)
self.press_rects[press_rect] = value
    def _update_stage(self, new_stage: Stage):
        """ Replace self.selected_stage with new_stage
"""
if new_stage is not self.selected_stage:
if self.selected_stage is not None:
                # deselect the previously selected stage
stage_name_sprite = self.stage_name_sprites[self.selected_stage]
if self.front_sprites.has(stage_name_sprite):
self.front_sprites.remove(stage_name_sprite)
stage_view_sprite = self.stage_view_sprites[self.selected_stage]
if self.middle_sprites.has(stage_view_sprite):
self.middle_sprites.remove(stage_view_sprite)
            # switch to the newly selected stage
self.selected_stage = new_stage
stage_name_sprite = self.stage_name_sprites[self.selected_stage]
if not self.front_sprites.has(stage_name_sprite):
self.front_sprites.add(stage_name_sprite)
stage_view_sprite = self.stage_view_sprites[self.selected_stage]
if not self.middle_sprites.has(stage_view_sprite):
self.middle_sprites.add(stage_view_sprite)
    def _select_stage(self, press_rect: PressRect):
        """ Callback invoked by a press_rect; updates self.selected_stage
"""
self.click_sound.play()
stage = self.press_rects[press_rect]
self._update_stage(stage)
def main(self):
self.bgm_sound.play(loops=-1)
while self.run:
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.next_screen = Screen.QUIT
self.run = False
            # update the hover_rects
for hover_rect in self.hover_rects:
hover_rect.update()
            # update the press_rects
for press_rect in self.press_rects:
press_rect.update()
            # update the components
self.stock_counter.update()
self.stock = self.stock_counter.count
self.update()
self.draw()
            # # draw the press_rect areas (debug)
# for press_rect in self.press_rects:
# pygame.draw.rect(self.display, (0, 0, 255), press_rect.rect, width=2)
pygame.display.update()
self.bgm_sound.stop()
self.click_sound.stop()
class StageSelectScreen(BaseScreen):
def __init__(self, game_config: GameConfig, gamesetting):
super().__init__()
        # pull the information we need out of game_config
self.stages = game_config.stages
self.gamesetting = gamesetting
self.load_images(game_config)
self.load_sounds(game_config)
pygame.display.set_caption("ステージセレクト")
self.selected_stage = None
self.stock = 3
self.btn_font = pygame.font.SysFont(None, 30)
self.stock_font = pygame.font.SysFont(None, 60)
self.init()
def init(self):
self._split_area()
self._set_bg()
self._set_next_back_btn()
self._set_stages()
self._set_stage_view_thumbnails()
self._set_stock_btn()
self._select_stage(list(self.stages.values())[0])
    def load_sounds(self, game_config: GameConfig):
        """Load the sounds
"""
self.bgm_sound = game_config.sounds["menu"]
self.click_sound = game_config.sounds["click"]
self.stage_sounds = {}
    def load_images(self, game_config: GameConfig):
        """Load the Surfaces
"""
self.outline_image = game_config.components["outline"]
self.bg_image = game_config.components["background"]
    def _split_area(self):
        """ Decide the display areas and keep them as Rects.
"""
rect = self.display.get_rect()
        # split the screen into areas
self.stage_view_rect = Rect(
rect.x,
rect.y,
int(rect.width * 0.4),
int(rect.height * 0.75)
)
self.stage_select_rect = Rect(
self.stage_view_rect.right,
self.stage_view_rect.y,
rect.width - self.stage_view_rect.width,
self.stage_view_rect.height
)
self.stock_rect = Rect(
self.stage_view_rect.x,
self.stage_view_rect.bottom,
rect.width,
rect.height - self.stage_view_rect.height
)
self.area = [self.stage_view_rect, self.stage_select_rect, self.stock_rect]
def _set_bg(self):
bg_sprite = SimpleSprite(self.display.get_rect(), self.bg_image)
self.background_sprites.add(bg_sprite)
    def _set_next_back_btn(self):
        """Place the next and back buttons
"""
rect = self.display.get_rect()
tuples = [
("next", "right", rect.w - 5, 5, Screen.GAME),
("back", "left", 5, 5, Screen.CHARACTER_SELECT)
]
for text, align, x, y, next_screen in tuples:
btn_surface = self.btn_font.render(text, True, (0, 0, 0))
btn_sprite = RichSprite(
x=x,
y=y,
align=align,
vertical_align="top",
image=btn_surface,
press_fnc=self._go_to_screen,
press_fnc_args=(next_screen,)
)
self.hoverable(btn_sprite, self.outline_image, border_width=3)
self.front_sprites.add(btn_sprite)
    def _go_to_screen(self, next_screen: Screen):
        """Transition to the given screen
"""
print("go to", next_screen)
print("stage: {}, stock: {}".format(self.selected_stage, self.stock_btn.get_value()))
        # store the selected stage and stock count in gamesetting
if self.gamesetting is not None:
self.gamesetting.stage = self.selected_stage
self.gamesetting.stock = self.stock_btn.get_value()
self.next_screen = next_screen
self.run = False
    def _select_stage(self, stage: Stage):
        """Handle a stage being selected
"""
if self.selected_stage is not None:
            # remove the previously selected sprite from its groups
pre_view_sprite = self.stage_view_sprites[self.selected_stage]
pre_view_sprite.remove(pre_view_sprite.groups())
        # update the selected stage
self.selected_stage = stage
new_view_sprite = self.stage_view_sprites[self.selected_stage]
self.middle_sprites.add(new_view_sprite)
    def _set_stages(self):
        """Tile the stage thumbnails on the right-hand side of the screen
"""
base_rect = self.stage_select_rect
rects = layout_rects(base_rect=base_rect, item_width=50, item_height=50, padding=30)
stages = list(self.stages.values()) * 6
for rect, stage in zip(rects, stages):
image = stage.thumbnail_image(*rect.size)
sprite = RichSprite(
x=rect.centerx,
y=rect.centery,
image=image,
press_fnc=self._select_stage,
press_fnc_args=(stage,)
)
self.hoverable(sprite, self.outline_image)
self.middle_sprites.add(sprite)
    def _set_stage_view_thumbnails(self):
        """Show the selected stage's thumbnail on the left-hand side of the screen
"""
rect = self.stage_view_rect
w = h = min(int(rect.w * 0.7), int(rect.h * 0.7))
rect = layout_rects(self.stage_view_rect, cols=1, rows=1, item_width=w, item_height=h)[0]
        # build the dict of images for the left-hand side
self.stage_view_sprites = {
stage: RichSprite(*rect.center, image=stage.thumbnail_image(rect.w, rect.h))
for stage in self.stages.values()
}
def _set_stock_btn(self):
rect = self.display.get_rect()
x = rect.centerx
y = rect.h * 2 // 3
rect = Rect(0, 0, 300, 50)
rect.center = (x, y)
self.stock_btn = ValuesGroup(rect, list(range(1, 6)), color=(0, 0, 0), bg_color=(255, 255, 255), defalut_i=2)
self.middle_sprites.add(self.stock_btn)
# counter_btn, get_count_fnc = make_counter_btn(x, y, self.stock_font, min_=1, max_=5)
# self.get_count_fnc = get_count_fnc
# self.front_sprites.add(counter_btn)
# print("set counter")
def main(self):
while self.run:
self.get_events()
self.clock.tick(self.fps)
self.update()
self.draw()
# print(self.front_sprites)
pygame.display.update()
def get_sample_stages(json_path="./jsons/stage.json"):
    """ Load the sample stages from json
"""
import glob, json
stages = {}
with open(json_path, "r") as f:
json_data = json.load(f)
stages = {key: Stage(key, dic["name"], pygame.image.load(dic["path"])) for key, dic in json_data.items()}
return stages
if __name__ == "__main__":
pygame.init()
pygame.display.set_mode((500, 500))
game_config = GameConfig("./jsons/config.json")
# exit()
# stages = get_sample_stages(json_path="./jsons/stages2.json")
gamesetting = None
stage_select_screen = StageSelectScreen(game_config, gamesetting)
stage_select_screen.main() | [
"pygame.locals.Rect",
"pygame.init",
"sprites.make_outline_splites",
"pygame.event.get",
"pygame.display.set_mode",
"sprites.SimpleSprite",
"sprites.PressRect",
"game_config.GameConfig",
"component.CounterBtn",
"sprites.TextSprite",
"pygame.image.load",
"sprites.HoverRect",
"pygame.display.set_caption",
"json.load",
"sprites.RichSprite",
"pygame.display.update",
"pygame.font.SysFont",
"sprites.layout_rects"
] | [((18394, 18407), 'pygame.init', 'pygame.init', ([], {}), '()\n', (18405, 18407), False, 'import pygame\n'), ((18412, 18447), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(500, 500)'], {}), '((500, 500))\n', (18435, 18447), False, 'import pygame\n'), ((18467, 18500), 'game_config.GameConfig', 'GameConfig', (['"""./jsons/config.json"""'], {}), "('./jsons/config.json')\n", (18477, 18500), False, 'from game_config import GameConfig\n'), ((863, 901), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""ステージセレクト"""'], {}), "('ステージセレクト')\n", (889, 901), False, 'import pygame\n'), ((1093, 1146), 'pygame.font.SysFont', 'pygame.font.SysFont', (['None', 'self.font_size_stage_title'], {}), '(None, self.font_size_stage_title)\n', (1112, 1146), False, 'import pygame\n'), ((1207, 1254), 'pygame.font.SysFont', 'pygame.font.SysFont', (['None', 'self.font_size_stock'], {}), '(None, self.font_size_stock)\n', (1226, 1254), False, 'import pygame\n'), ((1303, 1344), 'pygame.font.SysFont', 'pygame.font.SysFont', (['None', 'self.font_size'], {}), '(None, self.font_size)\n', (1322, 1344), False, 'import pygame\n'), ((1723, 1854), 'pygame.locals.Rect', 'Rect', (['self.stage_view_rect.right', 'self.stage_view_rect.y', '(rect.width - self.stage_view_rect.width)', 'self.stage_view_rect.height'], {}), '(self.stage_view_rect.right, self.stage_view_rect.y, rect.width - self.\n stage_view_rect.width, self.stage_view_rect.height)\n', (1727, 1854), False, 'from pygame.locals import Rect\n'), ((1934, 2051), 'pygame.locals.Rect', 'Rect', (['self.stage_view_rect.x', 'self.stage_view_rect.bottom', 'rect.width', '(rect.height - self.stage_view_rect.height)'], {}), '(self.stage_view_rect.x, self.stage_view_rect.bottom, rect.width, rect.\n height - self.stage_view_rect.height)\n', (1938, 2051), False, 'from pygame.locals import Rect\n'), ((2468, 2557), 'pygame.locals.Rect', 'Rect', (['(rect.x + padding)', '(rect.y + padding)', '(rect.w - padding * 2)', '(rect.h - padding * 2)'], {}), '(rect.x + padding, rect.y + padding, rect.w - padding * 2, rect.h - \n padding * 2)\n', (2472, 2557), False, 'from pygame.locals import Rect\n'), ((3946, 4180), 'component.CounterBtn', 'CounterBtn', ([], {'x': 'self.stock_rect.centerx', 'y': 'self.stock_rect.centery', 'min_': '(1)', 'max_': '(5)', 'font': 'self.font_stock', 'front_group': 'self.front_sprites', 'back_group': 'self.middle_sprites', 'color': '(0, 0, 0)', 'bgcolor': 'None', 'sound': 'self.click_sound'}), '(x=self.stock_rect.centerx, y=self.stock_rect.centery, min_=1,\n max_=5, font=self.font_stock, front_group=self.front_sprites,\n back_group=self.middle_sprites, color=(0, 0, 0), bgcolor=None, sound=\n self.click_sound)\n', (3956, 4180), False, 'from component import CounterBtn\n'), ((4394, 4527), 'sprites.TextSprite', 'TextSprite', ([], {'x': '(rect.right + 5)', 'y': 'rect.top', 'text': '"""stock"""', 'font': 'self.font_stock', 'color': '(0, 0, 0)', 'align': '"""left"""', 'vertical_align': '"""top"""'}), "(x=rect.right + 5, y=rect.top, text='stock', font=self.font_stock,\n color=(0, 0, 0), align='left', vertical_align='top')\n", (4404, 4527), False, 'from sprites import HoverRect, PressRect, SimpleSprite, TextSprite, make_outline_splites, load_animation_sprite\n'), ((4783, 4909), 'sprites.TextSprite', 'TextSprite', ([], {'x': '(rect.x + 5)', 'y': '(rect.y + 5)', 'text': '"""<back"""', 'font': 'self.font', 'color': '(0, 0, 0)', 'align': '"""left"""', 'vertical_align': '"""top"""'}), "(x=rect.x + 5, y=rect.y + 5, text='<back', font=self.font, color=\n (0, 0, 
0), align='left', vertical_align='top')\n", (4793, 4909), False, 'from sprites import HoverRect, PressRect, SimpleSprite, TextSprite, make_outline_splites, load_animation_sprite\n'), ((5339, 5469), 'sprites.TextSprite', 'TextSprite', ([], {'x': '(rect.right - 5)', 'y': '(rect.y + 5)', 'text': '"""next>"""', 'font': 'self.font', 'color': '(0, 0, 0)', 'align': '"""right"""', 'vertical_align': '"""top"""'}), "(x=rect.right - 5, y=rect.y + 5, text='next>', font=self.font,\n color=(0, 0, 0), align='right', vertical_align='top')\n", (5349, 5469), False, 'from sprites import HoverRect, PressRect, SimpleSprite, TextSprite, make_outline_splites, load_animation_sprite\n'), ((5967, 6056), 'pygame.locals.Rect', 'Rect', (['(rect.x + padding)', '(rect.y + padding)', '(rect.w - padding * 2)', '(rect.h - padding * 2)'], {}), '(rect.x + padding, rect.y + padding, rect.w - padding * 2, rect.h - \n padding * 2)\n', (5971, 6056), False, 'from pygame.locals import Rect\n'), ((6072, 6192), 'pygame.locals.Rect', 'Rect', (['image_rect.x', '(image_rect.bottom - self.font_size_stage_title - 5)', 'image_rect.w', '(self.font_size_stage_title + 5)'], {}), '(image_rect.x, image_rect.bottom - self.font_size_stage_title - 5,\n image_rect.w, self.font_size_stage_title + 5)\n', (6076, 6192), False, 'from pygame.locals import Rect\n'), ((8363, 8399), 'sprites.HoverRect', 'HoverRect', (['rect', 'enter_fnc', 'exit_fnc'], {}), '(rect, enter_fnc, exit_fnc)\n', (8372, 8399), False, 'from sprites import HoverRect, PressRect, SimpleSprite, TextSprite, make_outline_splites, load_animation_sprite\n'), ((8426, 8488), 'sprites.make_outline_splites', 'make_outline_splites', (['rect', 'self.outline_image'], {'border_width': '(3)'}), '(rect, self.outline_image, border_width=3)\n', (8446, 8488), False, 'from sprites import HoverRect, PressRect, SimpleSprite, TextSprite, make_outline_splites, load_animation_sprite\n'), ((9707, 9727), 'sprites.PressRect', 'PressRect', (['rect', 'fnc'], {}), '(rect, fnc)\n', (9716, 9727), False, 'from sprites import HoverRect, PressRect, SimpleSprite, TextSprite, make_outline_splites, load_animation_sprite\n'), ((12414, 12452), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""ステージセレクト"""'], {}), "('ステージセレクト')\n", (12440, 12452), False, 'import pygame\n'), ((12537, 12566), 'pygame.font.SysFont', 'pygame.font.SysFont', (['None', '(30)'], {}), '(None, 30)\n', (12556, 12566), False, 'import pygame\n'), ((12593, 12622), 'pygame.font.SysFont', 'pygame.font.SysFont', (['None', '(60)'], {}), '(None, 60)\n', (12612, 12622), False, 'import pygame\n'), ((13679, 13810), 'pygame.locals.Rect', 'Rect', (['self.stage_view_rect.right', 'self.stage_view_rect.y', '(rect.width - self.stage_view_rect.width)', 'self.stage_view_rect.height'], {}), '(self.stage_view_rect.right, self.stage_view_rect.y, rect.width - self.\n stage_view_rect.width, self.stage_view_rect.height)\n', (13683, 13810), False, 'from pygame.locals import Rect\n'), ((13890, 14007), 'pygame.locals.Rect', 'Rect', (['self.stage_view_rect.x', 'self.stage_view_rect.bottom', 'rect.width', '(rect.height - self.stage_view_rect.height)'], {}), '(self.stage_view_rect.x, self.stage_view_rect.bottom, rect.width, rect.\n height - self.stage_view_rect.height)\n', (13894, 14007), False, 'from pygame.locals import Rect\n'), ((16170, 16246), 'sprites.layout_rects', 'layout_rects', ([], {'base_rect': 'base_rect', 'item_width': '(50)', 'item_height': '(50)', 'padding': '(30)'}), '(base_rect=base_rect, item_width=50, item_height=50, padding=30)\n', (16182, 16246), 
False, 'from sprites import RichSprite, layout_rects\n'), ((17350, 17369), 'pygame.locals.Rect', 'Rect', (['(0)', '(0)', '(300)', '(50)'], {}), '(0, 0, 300, 50)\n', (17354, 17369), False, 'from pygame.locals import Rect\n'), ((18214, 18226), 'json.load', 'json.load', (['f'], {}), '(f)\n', (18223, 18226), False, 'import glob, json\n'), ((6551, 6582), 'sprites.SimpleSprite', 'SimpleSprite', (['image_rect', 'image'], {}), '(image_rect, image)\n', (6563, 6582), False, 'from sprites import HoverRect, PressRect, SimpleSprite, TextSprite, make_outline_splites, load_animation_sprite\n'), ((6615, 6805), 'sprites.TextSprite', 'TextSprite', ([], {'x': 'text_rect.centerx', 'y': 'text_rect.centery', 'text': 'stage.name', 'font': 'self.font_stage_title', 'color': '(0, 0, 0)', 'bgcolor': '(255, 255, 255)', 'align': '"""center"""', 'vertical_align': '"""middle"""'}), "(x=text_rect.centerx, y=text_rect.centery, text=stage.name, font=\n self.font_stage_title, color=(0, 0, 0), bgcolor=(255, 255, 255), align=\n 'center', vertical_align='middle')\n", (6625, 6805), False, 'from sprites import HoverRect, PressRect, SimpleSprite, TextSprite, make_outline_splites, load_animation_sprite\n'), ((11245, 11263), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (11261, 11263), False, 'import pygame\n'), ((12001, 12024), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (12022, 12024), False, 'import pygame\n'), ((14700, 14839), 'sprites.RichSprite', 'RichSprite', ([], {'x': 'x', 'y': 'y', 'align': 'align', 'vertical_align': '"""top"""', 'image': 'btn_surface', 'press_fnc': 'self._go_to_screen', 'press_fnc_args': '(next_screen,)'}), "(x=x, y=y, align=align, vertical_align='top', image=btn_surface,\n press_fnc=self._go_to_screen, press_fnc_args=(next_screen,))\n", (14710, 14839), False, 'from sprites import RichSprite, layout_rects\n'), ((16417, 16532), 'sprites.RichSprite', 'RichSprite', ([], {'x': 'rect.centerx', 'y': 'rect.centery', 'image': 'image', 'press_fnc': 'self._select_stage', 'press_fnc_args': '(stage,)'}), '(x=rect.centerx, y=rect.centery, image=image, press_fnc=self.\n _select_stage, press_fnc_args=(stage,))\n', (16427, 16532), False, 'from sprites import RichSprite, layout_rects\n'), ((16925, 17004), 'sprites.layout_rects', 'layout_rects', (['self.stage_view_rect'], {'cols': '(1)', 'rows': '(1)', 'item_width': 'w', 'item_height': 'h'}), '(self.stage_view_rect, cols=1, rows=1, item_width=w, item_height=h)\n', (16937, 17004), False, 'from sprites import RichSprite, layout_rects\n'), ((17997, 18020), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (18018, 18020), False, 'import pygame\n'), ((18275, 18305), 'pygame.image.load', 'pygame.image.load', (["dic['path']"], {}), "(dic['path'])\n", (18292, 18305), False, 'import pygame\n'), ((3486, 3521), 'pygame.locals.Rect', 'Rect', (['x', 'y', 'tile_width', 'tile_height'], {}), '(x, y, tile_width, tile_height)\n', (3490, 3521), False, 'from pygame.locals import Rect\n')] |
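The thumbnail grid arithmetic in _set_stage_thumbnail_sprites above can be read in isolation; the helper below is only a sketch of that centred square-tile layout (it is not part of the original module and assumes pygame is available).

import pygame

def square_tile_rects(base, cols, rows, padding=10, margin=10):
    # Shrink the base rect by the padding, pick the largest square side that
    # still fits a cols x rows grid, then centre the grid in the padded area.
    inner = pygame.Rect(base.x + padding, base.y + padding,
                       base.w - 2 * padding, base.h - 2 * padding)
    side = min((inner.w - (cols - 1) * margin) // cols,
               (inner.h - (rows - 1) * margin) // rows)
    left = inner.x + (inner.w - (side * cols + margin * (cols - 1))) // 2
    top = inner.y + (inner.h - (side * rows + margin * (rows - 1))) // 2
    return [pygame.Rect(left + c * (side + margin), top + r * (side + margin), side, side)
            for r in range(rows) for c in range(cols)]
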
from django.test import RequestFactory
from olympia.accounts.templatetags import jinja_helpers
def test_login_link():
request = RequestFactory().get('/en-US/firefox/addons')
assert jinja_helpers.login_link({'request': request}) == (
'http://testserver/api/v5/accounts/login/start/'
'?to=%2Fen-US%2Ffirefox%2Faddons'
)
request = RequestFactory().get('/en-US/firefox/addons?blah=1')
assert jinja_helpers.login_link({'request': request}) == (
'http://testserver/api/v5/accounts/login/start/'
'?to=%2Fen-US%2Ffirefox%2Faddons%3Fblah%3D1'
)
request = RequestFactory().get('/en-US/firefox/addons?blah=1&bâr=2')
assert jinja_helpers.login_link({'request': request}) == (
'http://testserver/api/v5/accounts/login/start/'
'?to=%2Fen-US%2Ffirefox%2Faddons%3Fblah%3D1%26b%25C3%25A2r%3D2'
)
| [
"django.test.RequestFactory",
"olympia.accounts.templatetags.jinja_helpers.login_link"
] | [((192, 238), 'olympia.accounts.templatetags.jinja_helpers.login_link', 'jinja_helpers.login_link', (["{'request': request}"], {}), "({'request': request})\n", (216, 238), False, 'from olympia.accounts.templatetags import jinja_helpers\n'), ((428, 474), 'olympia.accounts.templatetags.jinja_helpers.login_link', 'jinja_helpers.login_link', (["{'request': request}"], {}), "({'request': request})\n", (452, 474), False, 'from olympia.accounts.templatetags import jinja_helpers\n'), ((681, 727), 'olympia.accounts.templatetags.jinja_helpers.login_link', 'jinja_helpers.login_link', (["{'request': request}"], {}), "({'request': request})\n", (705, 727), False, 'from olympia.accounts.templatetags import jinja_helpers\n'), ((135, 151), 'django.test.RequestFactory', 'RequestFactory', ([], {}), '()\n', (149, 151), False, 'from django.test import RequestFactory\n'), ((364, 380), 'django.test.RequestFactory', 'RequestFactory', ([], {}), '()\n', (378, 380), False, 'from django.test import RequestFactory\n'), ((611, 627), 'django.test.RequestFactory', 'RequestFactory', ([], {}), '()\n', (625, 627), False, 'from django.test import RequestFactory\n')] |
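The expected strings in the assertions above are just the request path (including its query string) percent-encoded into a 'to' parameter. A standard-library sketch of that encoding, not olympia's actual implementation:

from urllib.parse import urlencode

def build_login_link(login_start_url, path_with_query):
    # urlencode() percent-encodes '/', '?' and '=' inside the value, which
    # yields the '?to=%2Fen-US%2F...' form the test expects.
    return login_start_url + '?' + urlencode({'to': path_with_query})

build_login_link('http://testserver/api/v5/accounts/login/start/',
                 '/en-US/firefox/addons?blah=1')
# -> 'http://testserver/api/v5/accounts/login/start/?to=%2Fen-US%2Ffirefox%2Faddons%3Fblah%3D1'
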
from django.contrib import admin
# Register your models here.
from .models import Dmis_countries,Dmis_disasters, Dmis_field_report, Dmis_numericalreport, Dmis_response_tools, surge_heops_deployments
admin.site.register(Dmis_countries)
admin.site.register(Dmis_disasters)
admin.site.register(Dmis_field_report)
admin.site.register(Dmis_numericalreport)
admin.site.register(Dmis_response_tools)
admin.site.register(surge_heops_deployments) | [
"django.contrib.admin.site.register"
] | [((203, 238), 'django.contrib.admin.site.register', 'admin.site.register', (['Dmis_countries'], {}), '(Dmis_countries)\n', (222, 238), False, 'from django.contrib import admin\n'), ((239, 274), 'django.contrib.admin.site.register', 'admin.site.register', (['Dmis_disasters'], {}), '(Dmis_disasters)\n', (258, 274), False, 'from django.contrib import admin\n'), ((275, 313), 'django.contrib.admin.site.register', 'admin.site.register', (['Dmis_field_report'], {}), '(Dmis_field_report)\n', (294, 313), False, 'from django.contrib import admin\n'), ((314, 355), 'django.contrib.admin.site.register', 'admin.site.register', (['Dmis_numericalreport'], {}), '(Dmis_numericalreport)\n', (333, 355), False, 'from django.contrib import admin\n'), ((356, 396), 'django.contrib.admin.site.register', 'admin.site.register', (['Dmis_response_tools'], {}), '(Dmis_response_tools)\n', (375, 396), False, 'from django.contrib import admin\n'), ((397, 441), 'django.contrib.admin.site.register', 'admin.site.register', (['surge_heops_deployments'], {}), '(surge_heops_deployments)\n', (416, 441), False, 'from django.contrib import admin\n')] |
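admin.site.register also accepts an iterable of models, so the six calls above could be collapsed into a single call; a sketch using the same imports as the admin module above:

admin.site.register([
    Dmis_countries, Dmis_disasters, Dmis_field_report,
    Dmis_numericalreport, Dmis_response_tools, surge_heops_deployments,
])
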
"""Delete table_name unique constraint in mysql
Revision ID: 18532d70ab98
Revises: 3fbbc6e8d654
Create Date: 2020-09-25 10:56:13.711182
"""
# revision identifiers, used by Alembic.
revision = "18532d70ab98"
down_revision = "3fbbc6e8d654"
from alembic import op
from sqlalchemy.dialects.mysql.base import MySQLDialect
from sqlalchemy.engine.reflection import Inspector
from rabbitai.utils.core import generic_find_uq_constraint_name
def upgrade():
bind = op.get_bind()
# Uniqueness constraint if present only exists in MySQL.
if isinstance(bind.dialect, MySQLDialect):
constraint_name = generic_find_uq_constraint_name(
"tables", {"table_name"}, Inspector.from_engine(bind)
)
if constraint_name:
op.drop_constraint(constraint_name, "tables", type_="unique")
def downgrade():
pass
| [
"alembic.op.drop_constraint",
"alembic.op.get_bind",
"sqlalchemy.engine.reflection.Inspector.from_engine"
] | [((465, 478), 'alembic.op.get_bind', 'op.get_bind', ([], {}), '()\n', (476, 478), False, 'from alembic import op\n'), ((685, 712), 'sqlalchemy.engine.reflection.Inspector.from_engine', 'Inspector.from_engine', (['bind'], {}), '(bind)\n', (706, 712), False, 'from sqlalchemy.engine.reflection import Inspector\n'), ((764, 825), 'alembic.op.drop_constraint', 'op.drop_constraint', (['constraint_name', '"""tables"""'], {'type_': '"""unique"""'}), "(constraint_name, 'tables', type_='unique')\n", (782, 825), False, 'from alembic import op\n')] |
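downgrade() above is a no-op. If the constraint ever had to be restored, an Alembic sketch reusing the migration's imports would look like the following; the constraint name is an assumption, since the original name was whatever generic_find_uq_constraint_name discovered at upgrade time.

def downgrade_restoring_constraint():
    bind = op.get_bind()
    # Recreate the uniqueness constraint only where it existed (MySQL).
    if isinstance(bind.dialect, MySQLDialect):
        op.create_unique_constraint("uq_tables_table_name", "tables", ["table_name"])
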
#!/usr/bin/env python3
# Standard libraries
# Third-party libraries
import matplotlib.pyplot as plt
import numpy as np
import rospy
# Local libraries
from pycopter import quadlog
from pycopter import animation as ani
class SimNQuads():
def __init__(self, quads, fc, time, ndrones=3, alt_d=0.2, frames=200):
self.test = False
self.ndrones = ndrones
self.xlim = 20
self.ylim = 20
# Extract quadcopters from list
self.quads = quads
self.lw = 3.0
self.fc = fc
# Simulation parameters
self.it = 0
# Data log
self.qlogs = []
for _ in range(ndrones):
self.qlogs.append(quadlog.quadlog(time))
self.Ed_log = np.zeros((time.size, 4))
# Plots
self.quadcolor = ["r", "g", "b", "c", "m", "y", "k", "r", "b"]
plt.close("all")
plt.ion()
self.fig = plt.figure(0)
self.axis3d = self.fig.add_subplot(111, projection='3d')
self.init_area = 5
self.s = 2
self.alt_d = alt_d
self.counter_reach_alt = 0
self.frames = frames
def get_errors(self, errors):
errors_array = np.array(errors[:-1])
errors_array = errors_array.reshape([self.ndrones, self.ndrones])
errors_list = [errors_array[0,1], errors_array[0,4], errors_array[2,3]]
# Append the system error at the end of the list
errors_list.append(errors[-1])
return errors_list
def new_iteration(self, t, dt, U=None, errors=None):
# Simulation
X = np.array([])
V = np.array([])
for i in range(self.ndrones):
X = np.append(X, self.quads[i].xyz[0:2])
V = np.append(V, self.quads[i].v_ned[0:2])
if t<5:
U = []
for i in range(self.ndrones):
U.append(0)
U.append(0)
print('No U Present')
for i in range(self.ndrones):
# print("z: ", self.quads[i].xyz[2], "des_z: ", self.alt_d)
if self.quads[i].xyz[2] < -self.alt_d and self.counter_reach_alt < self.ndrones*5:
# print("Reached Altitude")
self.counter_reach_alt+=1
if self.test:
self.quads[i].set_a_2D_alt_lya(U[2*i:2*i+2], -self.alt_d)
else:
self.quads[i].set_v_2D_alt_lya(U[2*i:2*i+2], -self.alt_d)
self.quads[i].step(dt)
# Animation
if self.it%self.frames == 0:
plt.figure(0)
self.axis3d.cla()
for i in range(self.ndrones):
ani.draw3d(self.axis3d, self.quads[i].xyz,
self.quads[i].Rot_bn(), self.quadcolor[i])
self.axis3d.set_xlim(-self.xlim, self.xlim)
self.axis3d.set_ylim(-self.ylim, self.ylim)
self.axis3d.set_zlim(0, 10)
self.axis3d.set_xlabel('South [m]', fontsize = 'xx-large')
self.axis3d.set_ylabel('East [m]', fontsize = 'xx-large')
self.axis3d.set_zlabel('Up [m]', fontsize = 'xx-large')
self.axis3d.set_title("3D Map", fontsize = 'xx-large')
plt.pause(0.001)
plt.draw()
plt.figure(1)
plt.clf()
ani.draw2d(1, X, self.fc, self.quadcolor, self.ndrones)
if self.ndrones == 3:
ani.draw_edges(1, X, self.fc, -1)
plt.xlabel('South [m]')
plt.ylabel('West [m]')
plt.title('2D Map')
plt.xlim(-self.xlim, self.xlim)
plt.ylim(-self.ylim, self.ylim)
# plt.xlim(-self.s*self.init_area, self.s*self.init_area)
# plt.ylim(-self.s*self.init_area, self.s*self.init_area)
plt.grid()
plt.pause(0.001)
plt.draw()
# Log
for i in range(self.ndrones):
self.qlogs[i].xyz_h[self.it, :] = self.quads[i].xyz
self.qlogs[i].att_h[self.it, :] = self.quads[i].att
self.qlogs[i].w_h[self.it, :] = self.quads[i].w
self.qlogs[i].v_ned_h[self.it, :] = self.quads[i].v_ned
if errors:
self.Ed_log[self.it,:] = self.get_errors(errors)
else:
self.Ed_log[self.it, :] = [0] * 4
self.it+=1
# Stop if crash
if (self.quads[0].crashed==1 or self.quads[1].crashed==1 or self.quads[2].crashed==1):
return -1
return (X, V)
def final_plots(self, time, it):
plt.figure(1)
plt.title("2D Position [m]", fontsize = 'xx-large')
for i in range(self.ndrones):
plt.plot(self.qlogs[i].xyz_h[:, 0], self.qlogs[i].xyz_h[:, 1],
label="q{}".format(i+1), color=self.quadcolor[i], linewidth=self.lw)
plt.xlabel("East", fontsize = 'xx-large')
plt.ylabel("South", fontsize = 'xx-large')
plt.tick_params(labelsize = 'xx-large')
plt.legend( fontsize = 'xx-large')
plt.figure(2)
plt.title("Yaw", fontsize = 'xx-large')
for i in range(self.ndrones):
plt.plot(time[0:it], self.qlogs[i].att_h[:, 2][0:it], label="yaw q{}".format(i+1), linewidth=self.lw)
plt.xlabel("Time [s]", fontsize = 'xx-large')
plt.ylabel("Yaw [rad]", fontsize = 'xx-large' )
plt.tick_params(labelsize = 'xx-large')
plt.grid()
plt.legend(fontsize = 'xx-large')
plt.figure(3)
plt.title("Altitude", fontsize = 'xx-large')
for i in range(self.ndrones):
plt.plot(time[0:it], -self.qlogs[i].xyz_h[:, 2][0:it],
label="$q_{}$".format(i+1), linewidth=self.lw)
plt.xlabel("Time [s]", fontsize = 'xx-large')
plt.ylabel("Altitude [m]", fontsize = 'xx-large')
plt.tick_params(labelsize = 'xx-large')
plt.grid()
plt.legend(fontsize = 'xx-large')
plt.figure(4)
plt.title("Formation error", fontsize = 'xx-large')
plt.plot(time[0:it], self.Ed_log[:, 0][0:it], label="$e_{12}$", linewidth=self.lw)
plt.plot(time[0:it], self.Ed_log[:, 1][0:it], label="$e_{15}$", linewidth=self.lw)
plt.plot(time[0:it], self.Ed_log[:, 2][0:it], label="$e_{34}$", linewidth=self.lw)
plt.plot(time[0:it], self.Ed_log[:, 3][0:it], label="$e_{T}$", linewidth=self.lw)
plt.xlabel("Time [s]", fontsize = 'xx-large')
plt.ylabel("Formation distance error [m]", fontsize = 'xx-large')
plt.tick_params(labelsize = 'xx-large')
plt.grid()
plt.legend(fontsize = 'xx-large')
try:
plt.pause(0)
except:
pass
return
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"numpy.array",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"pycopter.animation.draw_edges",
"pycopter.quadlog.quadlog",
"matplotlib.pyplot.ylim",
"pycopter.animation.draw2d",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.clf",
"numpy.append",
"numpy.zeros",
"matplotlib.pyplot.figure"
] | [((734, 758), 'numpy.zeros', 'np.zeros', (['(time.size, 4)'], {}), '((time.size, 4))\n', (742, 758), True, 'import numpy as np\n'), ((855, 871), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (864, 871), True, 'import matplotlib.pyplot as plt\n'), ((880, 889), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (887, 889), True, 'import matplotlib.pyplot as plt\n'), ((909, 922), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (919, 922), True, 'import matplotlib.pyplot as plt\n'), ((1195, 1216), 'numpy.array', 'np.array', (['errors[:-1]'], {}), '(errors[:-1])\n', (1203, 1216), True, 'import numpy as np\n'), ((1586, 1598), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1594, 1598), True, 'import numpy as np\n'), ((1611, 1623), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1619, 1623), True, 'import numpy as np\n'), ((4529, 4542), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (4539, 4542), True, 'import matplotlib.pyplot as plt\n'), ((4551, 4600), 'matplotlib.pyplot.title', 'plt.title', (['"""2D Position [m]"""'], {'fontsize': '"""xx-large"""'}), "('2D Position [m]', fontsize='xx-large')\n", (4560, 4600), True, 'import matplotlib.pyplot as plt\n'), ((4814, 4853), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""East"""'], {'fontsize': '"""xx-large"""'}), "('East', fontsize='xx-large')\n", (4824, 4853), True, 'import matplotlib.pyplot as plt\n'), ((4864, 4904), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""South"""'], {'fontsize': '"""xx-large"""'}), "('South', fontsize='xx-large')\n", (4874, 4904), True, 'import matplotlib.pyplot as plt\n'), ((4915, 4952), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': '"""xx-large"""'}), "(labelsize='xx-large')\n", (4930, 4952), True, 'import matplotlib.pyplot as plt\n'), ((4963, 4994), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '"""xx-large"""'}), "(fontsize='xx-large')\n", (4973, 4994), True, 'import matplotlib.pyplot as plt\n'), ((5007, 5020), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (5017, 5020), True, 'import matplotlib.pyplot as plt\n'), ((5029, 5066), 'matplotlib.pyplot.title', 'plt.title', (['"""Yaw"""'], {'fontsize': '"""xx-large"""'}), "('Yaw', fontsize='xx-large')\n", (5038, 5066), True, 'import matplotlib.pyplot as plt\n'), ((5230, 5273), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [s]"""'], {'fontsize': '"""xx-large"""'}), "('Time [s]', fontsize='xx-large')\n", (5240, 5273), True, 'import matplotlib.pyplot as plt\n'), ((5284, 5328), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Yaw [rad]"""'], {'fontsize': '"""xx-large"""'}), "('Yaw [rad]', fontsize='xx-large')\n", (5294, 5328), True, 'import matplotlib.pyplot as plt\n'), ((5340, 5377), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': '"""xx-large"""'}), "(labelsize='xx-large')\n", (5355, 5377), True, 'import matplotlib.pyplot as plt\n'), ((5389, 5399), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (5397, 5399), True, 'import matplotlib.pyplot as plt\n'), ((5408, 5439), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '"""xx-large"""'}), "(fontsize='xx-large')\n", (5418, 5439), True, 'import matplotlib.pyplot as plt\n'), ((5450, 5463), 'matplotlib.pyplot.figure', 'plt.figure', (['(3)'], {}), '(3)\n', (5460, 5463), True, 'import matplotlib.pyplot as plt\n'), ((5472, 5514), 'matplotlib.pyplot.title', 'plt.title', (['"""Altitude"""'], {'fontsize': '"""xx-large"""'}), "('Altitude', 
fontsize='xx-large')\n", (5481, 5514), True, 'import matplotlib.pyplot as plt\n'), ((5699, 5742), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [s]"""'], {'fontsize': '"""xx-large"""'}), "('Time [s]', fontsize='xx-large')\n", (5709, 5742), True, 'import matplotlib.pyplot as plt\n'), ((5753, 5800), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Altitude [m]"""'], {'fontsize': '"""xx-large"""'}), "('Altitude [m]', fontsize='xx-large')\n", (5763, 5800), True, 'import matplotlib.pyplot as plt\n'), ((5811, 5848), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': '"""xx-large"""'}), "(labelsize='xx-large')\n", (5826, 5848), True, 'import matplotlib.pyplot as plt\n'), ((5860, 5870), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (5868, 5870), True, 'import matplotlib.pyplot as plt\n'), ((5879, 5910), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '"""xx-large"""'}), "(fontsize='xx-large')\n", (5889, 5910), True, 'import matplotlib.pyplot as plt\n'), ((5922, 5935), 'matplotlib.pyplot.figure', 'plt.figure', (['(4)'], {}), '(4)\n', (5932, 5935), True, 'import matplotlib.pyplot as plt\n'), ((5944, 5993), 'matplotlib.pyplot.title', 'plt.title', (['"""Formation error"""'], {'fontsize': '"""xx-large"""'}), "('Formation error', fontsize='xx-large')\n", (5953, 5993), True, 'import matplotlib.pyplot as plt\n'), ((6005, 6092), 'matplotlib.pyplot.plot', 'plt.plot', (['time[0:it]', 'self.Ed_log[:, 0][0:it]'], {'label': '"""$e_{12}$"""', 'linewidth': 'self.lw'}), "(time[0:it], self.Ed_log[:, 0][0:it], label='$e_{12}$', linewidth=\n self.lw)\n", (6013, 6092), True, 'import matplotlib.pyplot as plt\n'), ((6096, 6183), 'matplotlib.pyplot.plot', 'plt.plot', (['time[0:it]', 'self.Ed_log[:, 1][0:it]'], {'label': '"""$e_{15}$"""', 'linewidth': 'self.lw'}), "(time[0:it], self.Ed_log[:, 1][0:it], label='$e_{15}$', linewidth=\n self.lw)\n", (6104, 6183), True, 'import matplotlib.pyplot as plt\n'), ((6187, 6274), 'matplotlib.pyplot.plot', 'plt.plot', (['time[0:it]', 'self.Ed_log[:, 2][0:it]'], {'label': '"""$e_{34}$"""', 'linewidth': 'self.lw'}), "(time[0:it], self.Ed_log[:, 2][0:it], label='$e_{34}$', linewidth=\n self.lw)\n", (6195, 6274), True, 'import matplotlib.pyplot as plt\n'), ((6278, 6364), 'matplotlib.pyplot.plot', 'plt.plot', (['time[0:it]', 'self.Ed_log[:, 3][0:it]'], {'label': '"""$e_{T}$"""', 'linewidth': 'self.lw'}), "(time[0:it], self.Ed_log[:, 3][0:it], label='$e_{T}$', linewidth=\n self.lw)\n", (6286, 6364), True, 'import matplotlib.pyplot as plt\n'), ((6368, 6411), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [s]"""'], {'fontsize': '"""xx-large"""'}), "('Time [s]', fontsize='xx-large')\n", (6378, 6411), True, 'import matplotlib.pyplot as plt\n'), ((6422, 6485), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Formation distance error [m]"""'], {'fontsize': '"""xx-large"""'}), "('Formation distance error [m]', fontsize='xx-large')\n", (6432, 6485), True, 'import matplotlib.pyplot as plt\n'), ((6496, 6533), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': '"""xx-large"""'}), "(labelsize='xx-large')\n", (6511, 6533), True, 'import matplotlib.pyplot as plt\n'), ((6545, 6555), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (6553, 6555), True, 'import matplotlib.pyplot as plt\n'), ((6564, 6595), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '"""xx-large"""'}), "(fontsize='xx-large')\n", (6574, 6595), True, 'import matplotlib.pyplot as plt\n'), ((1678, 1714), 'numpy.append', 'np.append', (['X', 
'self.quads[i].xyz[0:2]'], {}), '(X, self.quads[i].xyz[0:2])\n', (1687, 1714), True, 'import numpy as np\n'), ((1731, 1769), 'numpy.append', 'np.append', (['V', 'self.quads[i].v_ned[0:2]'], {}), '(V, self.quads[i].v_ned[0:2])\n', (1740, 1769), True, 'import numpy as np\n'), ((2528, 2541), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (2538, 2541), True, 'import matplotlib.pyplot as plt\n'), ((3183, 3199), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (3192, 3199), True, 'import matplotlib.pyplot as plt\n'), ((3212, 3222), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (3220, 3222), True, 'import matplotlib.pyplot as plt\n'), ((3248, 3261), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (3258, 3261), True, 'import matplotlib.pyplot as plt\n'), ((3274, 3283), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3281, 3283), True, 'import matplotlib.pyplot as plt\n'), ((3296, 3351), 'pycopter.animation.draw2d', 'ani.draw2d', (['(1)', 'X', 'self.fc', 'self.quadcolor', 'self.ndrones'], {}), '(1, X, self.fc, self.quadcolor, self.ndrones)\n', (3306, 3351), True, 'from pycopter import animation as ani\n'), ((3448, 3471), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""South [m]"""'], {}), "('South [m]')\n", (3458, 3471), True, 'import matplotlib.pyplot as plt\n'), ((3484, 3506), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""West [m]"""'], {}), "('West [m]')\n", (3494, 3506), True, 'import matplotlib.pyplot as plt\n'), ((3519, 3538), 'matplotlib.pyplot.title', 'plt.title', (['"""2D Map"""'], {}), "('2D Map')\n", (3528, 3538), True, 'import matplotlib.pyplot as plt\n'), ((3551, 3582), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-self.xlim)', 'self.xlim'], {}), '(-self.xlim, self.xlim)\n', (3559, 3582), True, 'import matplotlib.pyplot as plt\n'), ((3595, 3626), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-self.ylim)', 'self.ylim'], {}), '(-self.ylim, self.ylim)\n', (3603, 3626), True, 'import matplotlib.pyplot as plt\n'), ((3779, 3789), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3787, 3789), True, 'import matplotlib.pyplot as plt\n'), ((3802, 3818), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (3811, 3818), True, 'import matplotlib.pyplot as plt\n'), ((3831, 3841), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (3839, 3841), True, 'import matplotlib.pyplot as plt\n'), ((6624, 6636), 'matplotlib.pyplot.pause', 'plt.pause', (['(0)'], {}), '(0)\n', (6633, 6636), True, 'import matplotlib.pyplot as plt\n'), ((689, 710), 'pycopter.quadlog.quadlog', 'quadlog.quadlog', (['time'], {}), '(time)\n', (704, 710), False, 'from pycopter import quadlog\n'), ((3402, 3435), 'pycopter.animation.draw_edges', 'ani.draw_edges', (['(1)', 'X', 'self.fc', '(-1)'], {}), '(1, X, self.fc, -1)\n', (3416, 3435), True, 'from pycopter import animation as ani\n')] |
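The errors list consumed by get_errors and logged into Ed_log above is produced outside this class; a typical inter-agent distance error would be computed roughly as below (a sketch: positions is an (n, 2) array of planar positions and d_des the desired separation).

import numpy as np

def formation_distance_error(positions, i, j, d_des):
    # e_ij = ||p_i - p_j|| - d_des, the kind of quantity plotted as e_12, e_15, e_34.
    return np.linalg.norm(positions[i] - positions[j]) - d_des
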
import os
cache_dir = os.path.join(os.path.expanduser('~'), '.kdezero')
from kdezero.utils.calculation_graph import get_dot_graph
from kdezero.utils.calculation_graph import plot_dot_graph
from kdezero.utils.download_file import show_progress
from kdezero.utils.download_file import get_file
from kdezero.utils.numpy_utility import sum_to
from kdezero.utils.numpy_utility import reshape_sum_backward
from kdezero.utils.numpy_utility import logsumexp
from kdezero.utils.utils import get_deconv_outsize
from kdezero.utils.utils import get_conv_outsize
from kdezero.utils.utils import pair
| [
"os.path.expanduser"
] | [((36, 59), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (54, 59), False, 'import os\n')] |
import datetime, logging, webbrowser
logger = logging.Logger('catch_all')
from importer import importLines
Lines = importLines("list1")
saveStateFile = open('E:\Programming\eclipseWorkspace\Planner\saveState.txt', encoding="utf8")
try:
skipLines= int(saveStateFile.readline())
saveStateFile.close()
successfulLoad = False
except:
print("couldn't open saveState.txt")
skipLines=0
if skipLines==0:
successfulLoad = True
responseLines = []
print("menu, skip forward, ")
currentLineNumber = 0
for line in Lines:
if successfulLoad == False:
skipLines-=1
currentLineNumber+=1
        if skipLines <= 0: successfulLoad = True
continue
currentLineNumber+=1
lineInQuestion = line.strip()
if lineInQuestion == "":
continue
if lineInQuestion != lineInQuestion.capitalize() and len(lineInQuestion)<50:
lineInQuestion = lineInQuestion.capitalize()
reply = input("Ln {}. {}\n".format(currentLineNumber, lineInQuestion))
if reply == "":
continue
reggie = [0]
# RegEx Logic # if string ends with .com
reggie = reply.split()
if reggie[0] == "skip": # try except bracket
skipLines = int(reggie[1])
successfulLoad = False
continue
if reply == "break":
saveSaveState = open('E:\Programming\eclipseWorkspace\Planner\saveState.txt', 'w')
saveSaveState.writelines(str(currentLineNumber))
saveSaveState.close()
break
if reply == "sort":
print("to", end = "")
destination = input()
destination = "E:\Programming\eclipseWorkspace\Planner\list" + destination +".txt"
try:
with open(destination, 'a') as sortFile:
sortFile.write(lineInQuestion + "\n")
except Exception as e:
logger.error('Entry not saved correctly: '+ str(e))
continue
if reply == "end":
saveSaveState = open('E:\Programming\eclipseWorkspace\Planner\saveState.txt', 'w')
saveSaveState.writelines("0")
saveSaveState.close()
break
if reply =="help":
# Code goes here
print("Reply to Lines")
print("Enter end to reset and exit, type break to save progress")
print("type nothing to skip a line, type skip 5 to skip ahead 5 lines, type skip forward to specify")
continue
if reply =="search":
search_name = input("Enter search term : ")
search_name = search_name.replace(" ", "+")
        complete_url = "https://www.google.com/search?q=" + search_name  # a scheme is needed for webbrowser to treat this as a URL
webbrowser.open_new_tab(complete_url)
continue
if reply =="delete or insert or create or update":
# Code goes here
continue
responseLines = responseLines + [reply]
replyFile = open('E:\Programming\eclipseWorkspace\Planner\list2.txt', 'a')
if len(responseLines) != 0:
replyFile.writelines(datetime.date.today().isoformat() + "\n")
for linesHow in responseLines:
replyFile.writelines(linesHow)
replyFile.writelines("\n")
replyFile.close() | [
"logging.Logger",
"webbrowser.open_new_tab",
"datetime.date.today",
"importer.importLines"
] | [((48, 75), 'logging.Logger', 'logging.Logger', (['"""catch_all"""'], {}), "('catch_all')\n", (62, 75), False, 'import datetime, logging, webbrowser\n'), ((123, 143), 'importer.importLines', 'importLines', (['"""list1"""'], {}), "('list1')\n", (134, 143), False, 'from importer import importLines\n'), ((2674, 2711), 'webbrowser.open_new_tab', 'webbrowser.open_new_tab', (['complete_url'], {}), '(complete_url)\n', (2697, 2711), False, 'import datetime, logging, webbrowser\n'), ((3018, 3039), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (3037, 3039), False, 'import datetime, logging, webbrowser\n')] |
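A small sketch of how the repeated hard-coded open() calls on saveState.txt above could be wrapped; raw strings keep the Windows path free of stray escape sequences, and the directory is the one the script already assumes.

from pathlib import Path

PLANNER_DIR = Path(r"E:\Programming\eclipseWorkspace\Planner")

def load_save_state(default=0):
    # Return the saved line number, or `default` if the file is missing or garbled.
    try:
        return int((PLANNER_DIR / "saveState.txt").read_text(encoding="utf8"))
    except (OSError, ValueError):
        return default

def store_save_state(line_number):
    (PLANNER_DIR / "saveState.txt").write_text(str(line_number), encoding="utf8")
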
# Generated by Django 4.0.3 on 2022-04-13 09:11
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('transaction', '0004_alter_expense_created_at'),
]
operations = [
migrations.CreateModel(
name='IncomeCategory',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256)),
('created_at', models.DateTimeField(auto_now_add=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'Income Categories',
'ordering': ('-created_at',),
},
),
migrations.CreateModel(
name='Income',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=1024)),
('amount', models.DecimalField(decimal_places=2, max_digits=5)),
('description', models.TextField(blank=True, null=True)),
('date', models.DateField(default=django.utils.timezone.localtime)),
('created_at', models.DateTimeField(auto_now_add=True)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='transaction.incomecategory')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-date',),
},
),
]
| [
"django.db.models.DateField",
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.BigAutoField",
"django.db.models.DateTimeField",
"django.db.models.DecimalField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField"
] | [((256, 313), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (287, 313), False, 'from django.db import migrations, models\n'), ((510, 606), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (529, 606), False, 'from django.db import migrations, models\n'), ((630, 662), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)'}), '(max_length=256)\n', (646, 662), False, 'from django.db import migrations, models\n'), ((696, 735), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (716, 735), False, 'from django.db import migrations, models\n'), ((763, 859), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (780, 859), False, 'from django.db import migrations, models\n'), ((1129, 1225), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1148, 1225), False, 'from django.db import migrations, models\n'), ((1249, 1282), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1024)'}), '(max_length=1024)\n', (1265, 1282), False, 'from django.db import migrations, models\n'), ((1312, 1363), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(2)', 'max_digits': '(5)'}), '(decimal_places=2, max_digits=5)\n', (1331, 1363), False, 'from django.db import migrations, models\n'), ((1398, 1437), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1414, 1437), False, 'from django.db import migrations, models\n'), ((1465, 1522), 'django.db.models.DateField', 'models.DateField', ([], {'default': 'django.utils.timezone.localtime'}), '(default=django.utils.timezone.localtime)\n', (1481, 1522), False, 'from django.db import migrations, models\n'), ((1556, 1595), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1576, 1595), False, 'from django.db import migrations, models\n'), ((1627, 1727), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""transaction.incomecategory"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'transaction.incomecategory')\n", (1644, 1727), False, 'from django.db import migrations, models\n'), ((1750, 1846), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (1767, 1846), False, 'from django.db import migrations, models\n')] |
#!/usr/bin/env python3
import sys
from pathlib import Path
import unittest
from collections import OrderedDict
from tempfile import TemporaryDirectory
from shutil import rmtree
thisFile = Path(__file__).absolute()
thisDir = thisFile.parent.absolute()
repoMainDir = thisDir.parent.absolute()
sys.path.append(str(repoMainDir))
dict = OrderedDict
from prebuilder.tearers.FHSTearer import FHSTearer
from prebuilder.core.Package import VersionedPackageRef
from prebuilder.webServices import detectService, _detectService
from prebuilder.webServices.services import *
@unittest.skip
class TestTearer(unittest.TestCase):
def setUp(self):
self.testPkgDirO = TemporaryDirectory(suffix=None, prefix=None, dir=None)
self.testPkgDir = Path(self.testPkgDirO.__enter__())
def tearDown(self):
self.testPkgDirO.__exit__(None, None, None)
def testTearer(self):
testFileVirtualPath = Path("/usr/share/locale/cmn")
testFilePath = nestPath(self.testPkgDir, testFileVirtualPath)
testFilePath.parent.mkdir(parents=True, exist_ok=True)
testFilePath.write_text("")
#print(list(self.testPkgDir.glob("**/*")))
res = FHSTearer(self.testPkgDir)
self.assertEqual(res['data'], [testFileVirtualPath])
class TestWebServicesDetectors(unittest.TestCase):
def testDetectors(self):
table = (
("https://github.com/rpm-software-management/rpm", (GitHubService, {"owner": "rpm-software-management", "repo": "rpm"})),
("https://gitlab.com/dslackw/colored", (GitLabService, {"serviceBase": "gitlab.com", "repoPath": "dslackw/colored"})),
("https://gitlab.gnome.org/GNOME/gimp", (GitLabService, {"serviceBase": "gitlab.gnome.org", "repoPath": "GNOME/gimp"})),
("https://gitlab.freedesktop.org/mesa/mesa", (GitLabService, {"serviceBase": "gitlab.freedesktop.org", "repoPath": "mesa/mesa"})),
("https://salsa.debian.org/pkg-rpm-team/rpm", (GitLabService, {"serviceBase": "salsa.debian.org", "repoPath": "pkg-rpm-team/rpm"})),
("https://git.launchpad.net/~pythoneers/+git/distlib", (LaunchpadService, {"owner": "pythoneers", 'project': None, "repo": "distlib", "vcs": "git"})), # todo
("https://bitbucket.org/ruamel/yaml", (BitBucketService, {"owner": "ruamel", "repo": "yaml"})),
)
for (uri, (correctService, correctArgs)) in table:
with self.subTest(uri=uri, correctService=correctService, correctArgs=correctArgs):
detectOther = False
with self.subTest(func="_detectService"):
serviceClass, ctorArgs = _detectService(uri)
self.assertTrue(issubclass(serviceClass, correctService))
self.assertEqual(ctorArgs, correctArgs)
detectOther = True
if detectOther:
with self.subTest(func="detectService"):
res = detectService(uri)
self.assertIsInstance(res, correctService)
if __name__ == '__main__':
unittest.main()
| [
"tempfile.TemporaryDirectory",
"pathlib.Path",
"prebuilder.tearers.FHSTearer.FHSTearer",
"prebuilder.webServices.detectService",
"prebuilder.webServices._detectService",
"unittest.main"
] | [((2778, 2793), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2791, 2793), False, 'import unittest\n'), ((190, 204), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (194, 204), False, 'from pathlib import Path\n'), ((661, 715), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ([], {'suffix': 'None', 'prefix': 'None', 'dir': 'None'}), '(suffix=None, prefix=None, dir=None)\n', (679, 715), False, 'from tempfile import TemporaryDirectory\n'), ((889, 918), 'pathlib.Path', 'Path', (['"""/usr/share/locale/cmn"""'], {}), "('/usr/share/locale/cmn')\n", (893, 918), False, 'from pathlib import Path\n'), ((1126, 1152), 'prebuilder.tearers.FHSTearer.FHSTearer', 'FHSTearer', (['self.testPkgDir'], {}), '(self.testPkgDir)\n', (1135, 1152), False, 'from prebuilder.tearers.FHSTearer import FHSTearer\n'), ((2444, 2463), 'prebuilder.webServices._detectService', '_detectService', (['uri'], {}), '(uri)\n', (2458, 2463), False, 'from prebuilder.webServices import detectService, _detectService\n'), ((2679, 2697), 'prebuilder.webServices.detectService', 'detectService', (['uri'], {}), '(uri)\n', (2692, 2697), False, 'from prebuilder.webServices import detectService, _detectService\n')] |
from sklearn import tree
from sklearn import svm
from sklearn import neighbors
from sklearn import discriminant_analysis
from sklearn import linear_model
dt = tree.DecisionTreeClassifier()
# CHALLENGE - create 3 more classifiers...
# 1
lsvc = svm.LinearSVC()
# 2
kn = neighbors.KNeighborsClassifier(3)
# 3
svc = svm.SVC()
classifiers = [ dt, lsvc, kn, svc ]
# [height, weight, shoe_size]
X = [[181, 80, 44], [177, 70, 43], [160, 60, 38], [154, 54, 37], [166, 65, 40],
[190, 90, 47], [175, 64, 39],
[177, 70, 40], [159, 55, 37], [171, 75, 42], [181, 85, 43]]
male = 'male'
female = 'female'
Y = [male, male, female, female, male, male, female, female,
female, male, male]
# CHALLENGE - ...and train them on our data
for clf in classifiers:
clf = clf.fit(X, Y)
prediction = clf.predict([[190, 70, 43]])
print("%s %s" % (clf, prediction))
# CHALLENGE compare their results and print the best one! | [
"sklearn.tree.DecisionTreeClassifier",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.svm.LinearSVC",
"sklearn.svm.SVC"
] | [((161, 190), 'sklearn.tree.DecisionTreeClassifier', 'tree.DecisionTreeClassifier', ([], {}), '()\n', (188, 190), False, 'from sklearn import tree\n'), ((246, 261), 'sklearn.svm.LinearSVC', 'svm.LinearSVC', ([], {}), '()\n', (259, 261), False, 'from sklearn import svm\n'), ((272, 305), 'sklearn.neighbors.KNeighborsClassifier', 'neighbors.KNeighborsClassifier', (['(3)'], {}), '(3)\n', (302, 305), False, 'from sklearn import neighbors\n'), ((317, 326), 'sklearn.svm.SVC', 'svm.SVC', ([], {}), '()\n', (324, 326), False, 'from sklearn import svm\n')] |
import itertools
import random
from functools import lru_cache
import torch
from torch.nn import functional as F
from torchvision.models import resnet18, resnet50, resnet101, resnext50_32x4d, mobilenet_v2
from efficientnet_pytorch import EfficientNet
from nn_utils.game_constants import constants
from nn_utils.models import TransformerModel
from torchvision.models.resnet import _resnet, BasicBlock
import pytorch_lightning as pl
from pytorch_lightning.metrics.functional import accuracy
import numpy as np
from utils import input_to_tensors, output_to_numpy
scaling_constant = 50.0
def check(x):
assert not torch.isnan(x).any()
arch_dict = dict(resnet18=resnet18, resnet50=resnet50, resnet101=resnet101, resnext50_32x4d=resnext50_32x4d,
mobilenet_v2=mobilenet_v2, TransformerModel=TransformerModel)
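# Backbone registry: get_arch() resolves a config name to one of the torchvision models above,
# the custom TransformerModel, a small "resnet9" built via _resnet, or an EfficientNet
# (constructed through efficientnet_pytorch in get_efficient_net).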
def get_efficient_net(name, num_classes, **arch_params):
return EfficientNet.from_name(name, num_classes=num_classes, **arch_params)
def get_arch(name, num_classes, **arch_params):
if 'efficientnet' in name.lower():
return get_efficient_net(name, num_classes=num_classes, **arch_params)
elif name == 'resnet9':
return _resnet('resnet', BasicBlock, pretrained=False, progress=None,
num_classes=num_classes, layers=[1, 1, 1, 1])
else:
return arch_dict[name](num_classes=num_classes, **arch_params)
def get_optimizer(parameters, name, **optimizer_params):
opt_dict = dict(Adam=torch.optim.Adam,
AdamW=torch.optim.AdamW,
SGD=torch.optim.SGD,
RMSProp=torch.optim.RMSprop)
return opt_dict[name](parameters, **optimizer_params)
class CardGameModel(pl.LightningModule):
def __init__(self, scheduler_params, optimizer_params, game):
super().__init__()
self.optimizer_params = optimizer_params
self.scheduler_params = scheduler_params
self.backbone = None
self.value_scaling_constant = constants[game]["value_scaling_constant"]
self.status_rows = constants[game]["status_rows"]
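    # Suit-permutation augmentation: the 32 card columns form four 8-column suit blocks.
    # _permutation_for_i relabels the suits with one of the 4! = 24 permutations (the column at
    # offset 4 inside each block stays put) and lru_cache memoises the resulting index arrays.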
@staticmethod
@lru_cache(maxsize=32)
def _permutation_for_i(random_integer):
all_suit_permutations = list(itertools.permutations([0, 1, 2, 3]))
fixed_suit_permuation = all_suit_permutations[random_integer]
permutation = []
for i in range(32):
if i % 8 == 4:
permutation.append(i)
else:
suit = i // 8
new_suit = fixed_suit_permuation[suit]
permutation.append(i % 8 + 8 * new_suit)
idx = np.empty_like(permutation)
idx[permutation] = np.arange(len(permutation))
return tuple(idx)
def _get_random_permutation(self):
random_integer = random.randint(0, 23)
return CardGameModel._permutation_for_i(random_integer)
def training_epoch_end(self, training_step_outputs):
losses = [it['loss'] for it in training_step_outputs]
self._metrics["train_loss"] = sum(losses) / len(losses)
self.log('train_loss', self._metrics["train_loss"], prog_bar=False)
def validation_epoch_end(self, validation_step_outputs):
metrics_to_update = dict(
zip(self.validation_metrics, zip(*validation_step_outputs)))
for key, value in metrics_to_update.items():
metrics_to_update[key] = sum(value) / len(value)
self.log(key, metrics_to_update[key], prog_bar=True)
self._metrics.update(metrics_to_update)
@property
def metrics(self):
return self._metrics
def configure_optimizers(self):
optimizer = get_optimizer(self.parameters(), **self.optimizer_params)
scheduler = torch.optim.lr_scheduler.StepLR(
optimizer, **self.scheduler_params)
return [optimizer], [scheduler]
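# PolicyModel maps the encoded game state to 32 move logits and is trained with batch-mean KL
# divergence against the target move distribution, using the suit-permutation augmentation above.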
class PolicyModel(CardGameModel):
def __init__(self, *, arch_params, **kwargs):
super().__init__(**kwargs)
self.save_hyperparameters()
self.backbone = get_arch(**arch_params, num_classes=32)
self.convolutional = 'transformer' not in arch_params.name.lower()
self.validation_metrics = ["val_acc", "val_kl_loss"]
self.train_metrics = ["train_loss"]
self._metrics = {
m: 0.0 for m in self.train_metrics + self.validation_metrics}
def forward(self, x):
x = x - 0.025
if self.convolutional:
x = x[:, None, ...].repeat([1, 3, 1, 1])
policy_logits = self.backbone(x)
return policy_logits
@input_to_tensors
@output_to_numpy
def get_policy(self, x):
was_singleton = False
if x.ndim == 2:
was_singleton = True
x = x[None, ...]
if torch.cuda.is_available():
self.cuda()
x = x.cuda()
else:
self.to('cpu')
self.eval()
with torch.no_grad():
policy_logits = self(x)
if was_singleton:
policy_logits = policy_logits[0]
return policy_logits
def _apply_augmentation(self, inputs, masks, probs):
perm = np.array(self._get_random_permutation())
inputs[:, self.status_rows:, :] = inputs[:, self.status_rows:, perm]
return inputs, masks[:, perm], probs[:, perm]
def training_step(self, batch, batch_idx):
# inputs, masks, true_policy_probs, true_state_values = batch
inputs, masks, true_policy_probs = self._apply_augmentation(*batch)
predicted_policy_logits = self(inputs)
policy_loss = F.kl_div(torch.log_softmax(predicted_policy_logits, dim=1), true_policy_probs,
reduction='batchmean')
return policy_loss
def validation_step(self, batch, batch_idx):
inputs, masks, true_policy_probs = batch
predicted_policy_logits = self(inputs)
policy_loss = F.kl_div(torch.log_softmax(predicted_policy_logits, dim=1), true_policy_probs,
reduction='batchmean')
pred_ys = torch.argmax(predicted_policy_logits +
1000 * (masks - 1), dim=1)
true_ys = torch.argmax(true_policy_probs + 1000 * (masks - 1), dim=1)
acc = accuracy(pred_ys, true_ys)
# Calling self.log will surface up scalars for you in TensorBoard
return acc, policy_loss
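# ValueModel predicts a 3-component value head; targets are divided by value_scaling_constant and
# fitted with either an L2 or a SmoothL1 loss.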
class ValueModel(CardGameModel):
def __init__(self, *, arch_params, loss_function, **kwargs):
super().__init__(**kwargs)
self.save_hyperparameters()
self.loss_function = loss_function
self.backbone = get_arch(**arch_params, num_classes=3)
self.convolutional = 'transformer' not in arch_params.name.lower()
self.validation_metrics = ["val_loss", "val_l1_scaled"]
self.train_metrics = ["train_loss"]
self._metrics = {
m: 0.0 for m in self.train_metrics + self.validation_metrics}
def forward(self, x):
x = x - 0.035
if self.convolutional:
x = x[:, None, ...].repeat([1, 3, 1, 1])
values = self.backbone(x)
return values
@input_to_tensors
@output_to_numpy
def get_value(self, x):
was_singleton = False
if x.ndim == 2:
was_singleton = True
x = x[None, ...]
if torch.cuda.is_available():
self.cuda()
x = x.cuda()
else:
self.to('cpu')
self.eval()
with torch.no_grad():
values = self(x) * self.value_scaling_constant
if was_singleton:
values = values[0]
return values
def _apply_augmentation(self, states, values):
perm = np.array(self._get_random_permutation())
states[:, self.status_rows:, :] = states[:, self.status_rows:, perm]
return states, values
def training_step(self, batch, batch_idx):
states, values = self._apply_augmentation(*batch)
predicted_values = self(states)
loss = self.loss_fn(predicted_values, values)
return loss
def loss_fn(self, predicted_values, true_values):
if self.loss_function == "L2":
return ((predicted_values - true_values / self.value_scaling_constant) ** 2).mean()
elif self.loss_function == 'SmoothL1':
return F.smooth_l1_loss(predicted_values, true_values / self.value_scaling_constant)
def validation_step(self, batch, batch_idx):
states, values = batch
predicted_values = self(states)
loss = self.loss_fn(predicted_values, values)
value_l1_loss = (self.value_scaling_constant *
predicted_values - values).abs().mean()
# Calling self.log will surface up scalars for you in TensorBoard
return loss, value_l1_loss
| [
"pytorch_lightning.metrics.functional.accuracy",
"efficientnet_pytorch.EfficientNet.from_name",
"torch.log_softmax",
"torch.optim.lr_scheduler.StepLR",
"torch.nn.functional.smooth_l1_loss",
"torch.no_grad",
"torchvision.models.resnet._resnet",
"torch.cuda.is_available",
"numpy.empty_like",
"functools.lru_cache",
"itertools.permutations",
"torch.isnan",
"random.randint",
"torch.argmax"
] | [((902, 970), 'efficientnet_pytorch.EfficientNet.from_name', 'EfficientNet.from_name', (['name'], {'num_classes': 'num_classes'}), '(name, num_classes=num_classes, **arch_params)\n', (924, 970), False, 'from efficientnet_pytorch import EfficientNet\n'), ((2118, 2139), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(32)'}), '(maxsize=32)\n', (2127, 2139), False, 'from functools import lru_cache\n'), ((2622, 2648), 'numpy.empty_like', 'np.empty_like', (['permutation'], {}), '(permutation)\n', (2635, 2648), True, 'import numpy as np\n'), ((2795, 2816), 'random.randint', 'random.randint', (['(0)', '(23)'], {}), '(0, 23)\n', (2809, 2816), False, 'import random\n'), ((3739, 3806), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['optimizer'], {}), '(optimizer, **self.scheduler_params)\n', (3770, 3806), False, 'import torch\n'), ((4765, 4790), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4788, 4790), False, 'import torch\n'), ((6062, 6127), 'torch.argmax', 'torch.argmax', (['(predicted_policy_logits + 1000 * (masks - 1))'], {'dim': '(1)'}), '(predicted_policy_logits + 1000 * (masks - 1), dim=1)\n', (6074, 6127), False, 'import torch\n'), ((6177, 6236), 'torch.argmax', 'torch.argmax', (['(true_policy_probs + 1000 * (masks - 1))'], {'dim': '(1)'}), '(true_policy_probs + 1000 * (masks - 1), dim=1)\n', (6189, 6236), False, 'import torch\n'), ((6251, 6277), 'pytorch_lightning.metrics.functional.accuracy', 'accuracy', (['pred_ys', 'true_ys'], {}), '(pred_ys, true_ys)\n', (6259, 6277), False, 'from pytorch_lightning.metrics.functional import accuracy\n'), ((7334, 7359), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7357, 7359), False, 'import torch\n'), ((1182, 1295), 'torchvision.models.resnet._resnet', '_resnet', (['"""resnet"""', 'BasicBlock'], {'pretrained': '(False)', 'progress': 'None', 'num_classes': 'num_classes', 'layers': '[1, 1, 1, 1]'}), "('resnet', BasicBlock, pretrained=False, progress=None, num_classes=\n num_classes, layers=[1, 1, 1, 1])\n", (1189, 1295), False, 'from torchvision.models.resnet import _resnet, BasicBlock\n'), ((2221, 2257), 'itertools.permutations', 'itertools.permutations', (['[0, 1, 2, 3]'], {}), '([0, 1, 2, 3])\n', (2243, 2257), False, 'import itertools\n'), ((4916, 4931), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4929, 4931), False, 'import torch\n'), ((5589, 5638), 'torch.log_softmax', 'torch.log_softmax', (['predicted_policy_logits'], {'dim': '(1)'}), '(predicted_policy_logits, dim=1)\n', (5606, 5638), False, 'import torch\n'), ((5919, 5968), 'torch.log_softmax', 'torch.log_softmax', (['predicted_policy_logits'], {'dim': '(1)'}), '(predicted_policy_logits, dim=1)\n', (5936, 5968), False, 'import torch\n'), ((7485, 7500), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7498, 7500), False, 'import torch\n'), ((621, 635), 'torch.isnan', 'torch.isnan', (['x'], {}), '(x)\n', (632, 635), False, 'import torch\n'), ((8334, 8411), 'torch.nn.functional.smooth_l1_loss', 'F.smooth_l1_loss', (['predicted_values', '(true_values / self.value_scaling_constant)'], {}), '(predicted_values, true_values / self.value_scaling_constant)\n', (8350, 8411), True, 'from torch.nn import functional as F\n')] |
import numpy as np
import scipy.linalg as la
import scipy.sparse as sp
import scipy.sparse.linalg as spla
import matplotlib.pyplot as plt
np.set_printoptions(linewidth=130)
from fsmpfuncs import *
from ibmfuncs import *
from gridfuncs import *
from case import *
R1 = weight (dx, dy)[0].diagonal()
Mh1 = mass_hat (dx, dy, dxp, dyp)[0].diagonal()
q01 = np.loadtxt('0-ComputeBaseflow/baseflow.txt', unpack=True)
qu01, qv01 = q01[:n*(m-1)].reshape((n, m-1)), q01[n*(m-1):n*(m-1)+n*m].reshape((n, m))
print(dt, 1/iRe, dxmin**2/iRe, dxmin)
nP = n
n = nP*3
height = (y[-1]-y[0])
y = np.concatenate([y, y[1:] + height, y[1:]+ 2*height])
dy = np.diff(y)
yp = 0.5*(y[1:] + y[:-1])
dyp = np.diff(yp)
yu = yp
yv = y[:-1]
pshape = (n, m)
ushape = (n, m-1)
vshape = (n, m)
xi = np.concatenate([xi, xi, xi])
eta = np.concatenate([eta, eta+height, eta+2*height])
lP = l
l = xi.size
ds = np.concatenate([ds, ds, ds])
uB = np.zeros_like(xi)
vB = np.zeros_like(xi)
qu0 = np.vstack([qu01, qu01, qu01])
qv0 = np.vstack([qv01, qv01, qv01])
q0 = np.concatenate([qu0.ravel(), qv0.ravel()])
uE = np.concatenate([uE, uE, uE])
uW = np.concatenate([uW, uW, uW])
vE = np.concatenate([vE, vE, vE])
vW = np.concatenate([vW, vW, vW])
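# Assemble the discrete operators on the vertically tripled domain (three stacked copies of the
# body): G is the pressure gradient, E interpolates velocities onto the immersed-boundary points,
# L is the scaled Laplacian, and Q = [G, E^T] collects the pressure and boundary-force constraint columns.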
G, DuW, DuE = gradient(dxp, dyp)
R, iR = weight (dx, dy)
Mh, iMh = mass_hat (dx, dy, dxp, dyp)
Lh, Lux0, Lux1, Lvx0, Lvx1, = laplacian_hat(dx, dy, dxp, dyp)
Eh = interpolation_hat(xi, eta, ds, xu, yu, xv, yv, dx, dy, dxp, dyp)
Hh = regularization_hat(xi, eta, ds, xu, yu, xv, yv, dx, dy, dxp, dyp)
E = Eh.dot(iR)
H = Mh.dot(Hh)
L = Mh.dot(Lh.dot(iR))
M = Mh.dot(iR)
iM = R.dot(iMh)
EET = E.dot(E.T)
EH = E.dot(H).tocsc()
iEH = spla.factorized(EH)
iML = iM.dot(L)
Q = sp.hstack([G, E.T])
u0 = iR.dot(q0)[:n*(m-1)].reshape((n,m-1))
plt.figure(figsize=(12,6))
X, Y = np.meshgrid(x, y)
plt.pcolormesh(xu, yu, u0, shading='gouraud');
plt.plot(xi[:lP], eta[:lP], 'ro-');
plt.plot(xi[lP:2*lP], eta[lP:2*lP], 'ro-');
plt.plot(xi[2*lP:3*lP], eta[2*lP:3*lP], 'ro-');
plt.plot([x[0], x[-1]], [y[0]+height, y[0]+height])
plt.axis('equal')
plt.xlim(-1, 2)
#plt.ylim(0.2, 0.6)
plt.savefig('foo.png')
# Linearization
from linpfuncs import *
U0 = iR.dot(q0)
from linpfuncs import *
Nh = linearize_advection_hat (dx, dy, dxp, dyp, U0, uW, uE, vW, vE, test=True)
N = Mh.dot(Nh.dot(iR))
A = (M/dt - 0.5*iRe*L).tocsc()
B = (M/dt + 0.5*iRe*L).tocsr()
# B q^(n+1) = A q^n + Mh f
# Mh is also useful for the inner product
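# Block (saddle-point) operators: MM = [[A, Q], [Q^T, 0]], while CC and KK carry the linearised
# advection with Adams-Bashforth-style weights (-(B - 1.5*N) and -0.5*N respectively).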
dempty = sp.coo_matrix((Q.shape[1], Q.shape[1]))
oempty = sp.coo_matrix(Q.shape)
MM = sp.bmat([[A, Q],[Q.T, dempty]]).tocsr()
CC = -sp.bmat([[B-1.5*N, oempty], [oempty.T, dempty]]).tocsr()
KK = -sp.bmat([[0.5*N, oempty], [oempty.T, dempty]]).tocsr()
su = nP*(m-1)
svp = nP*m
sxe = lP
idx1 = np.r_[ : su,
3*su : 3*su+svp,
3*su + 3*svp : 3*su+3*svp + svp,
3*su + 6*svp : 3*su+6*svp + sxe,
3*su + 6*svp + 3*sxe : 3*su+6*svp + 3*sxe + sxe]
idx2 = np.r_[su : 2*su,
3*su + svp : 3*su+2*svp,
3*su + 3*svp + svp : 3*su+3*svp + 2*svp,
3*su + 6*svp + sxe : 3*su+6*svp + 2*sxe,
3*su + 6*svp + 3*sxe + sxe : 3*su+6*svp + 3*sxe + 2*sxe]
idx3 = np.r_[2*su : 3*su,
3*su + 2*svp : 3*su+3*svp,
3*su + 3*svp + 2*svp : 3*su+3*svp + 3*svp,
3*su + 6*svp + 2*sxe : 3*su+6*svp + 3*sxe,
3*su + 6*svp + 3*sxe + 2*sxe : 3*su+6*svp + 3*sxe + 3*sxe]
perm = np.r_[idx1, idx2, idx3]
P = sp.eye(len(perm)).tocsr()[perm,:]
#o = np.zeros_like(perm)
#o [su+2*svp+2*sxe+1]=1
#print(P.T.dot(o)[su+1])
MM = P.dot(MM.dot(P.T))
CC = P.dot(CC.dot(P.T))
KK = P.dot(KK.dot(P.T))
IAn = sp.eye(su+2*svp+2*sxe)
ZAn = sp.coo_matrix((su+2*svp+2*sxe,su+2*svp+2*sxe))
S1 = sp.bmat([[IAn, ZAn, ZAn]])
S2 = sp.bmat([[ZAn, IAn, ZAn]])
S3 = sp.bmat([[ZAn, ZAn, IAn]])
M11 = S1.dot(MM.dot(S1.T))
M12 = S1.dot(MM.dot(S2.T))
M13 = S1.dot(MM.dot(S3.T))
M21 = S2.dot(MM.dot(S1.T))
M22 = S2.dot(MM.dot(S2.T))
M23 = S2.dot(MM.dot(S3.T))
M31 = S3.dot(MM.dot(S1.T))
M32 = S3.dot(MM.dot(S2.T))
M33 = S3.dot(MM.dot(S3.T))
C11 = S1.dot(CC.dot(S1.T))
C12 = S1.dot(CC.dot(S2.T))
C13 = S1.dot(CC.dot(S3.T))
C21 = S2.dot(CC.dot(S1.T))
C22 = S2.dot(CC.dot(S2.T))
C23 = S2.dot(CC.dot(S3.T))
C31 = S3.dot(CC.dot(S1.T))
C32 = S3.dot(CC.dot(S2.T))
C33 = S3.dot(CC.dot(S3.T))
K11 = S1.dot(KK.dot(S1.T))
K12 = S1.dot(KK.dot(S2.T))
K13 = S1.dot(KK.dot(S3.T))
K21 = S2.dot(KK.dot(S1.T))
K22 = S2.dot(KK.dot(S2.T))
K23 = S2.dot(KK.dot(S3.T))
K31 = S3.dot(KK.dot(S1.T))
K32 = S3.dot(KK.dot(S2.T))
K33 = S3.dot(KK.dot(S3.T))
IIn = sp.bmat([[sp.eye(su+svp), sp.coo_matrix((su+svp, svp+2*sxe))]])
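# Consistency checks: the norms printed below compare the three copy-blocks (M11 vs M22 vs M33,
# M12 vs M23 vs M31, ...); values near zero mean the permuted system is block-circulant across the copies.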
print(spla.norm(M11 - M22), spla.norm(M11 - M33), spla.norm(M22 - M33))
print(spla.norm(M12 - M23), spla.norm(M12 - M31), spla.norm(M12 - M31))
print(spla.norm(M13 - M21), spla.norm(M13 - M32), spla.norm(M21 - M32))
print(spla.norm(C11 - C22), spla.norm(C11 - C33), spla.norm(C22 - C33))
print(spla.norm(C12 - C23), spla.norm(C12 - C31), spla.norm(C12 - C31))
print(spla.norm(C13 - C21), spla.norm(C13 - C32), spla.norm(C21 - C32))
print(spla.norm(K11 - K22), spla.norm(K11 - K33), spla.norm(K22 - K33))
print(spla.norm(K12 - K23), spla.norm(K12 - K31), spla.norm(K12 - K31))
print(spla.norm(K13 - K21), spla.norm(K13 - K32), spla.norm(K21 - K32))
Mk = np.zeros(M11.shape[0])
Mk[nP*(m-1) + nP*m : nP*(m-1) + 2*nP*m]=1.0
tmp = (M11+M12+M13).dot(Mk)
print(la.norm(tmp))
from PetscBinaryIO import PetscBinaryIO
dest = '1-ExtractMatrices/'
PetscBinaryIO().writeMatSciPy(open(dest+'M1.dat','w'), M11)
PetscBinaryIO().writeMatSciPy(open(dest+'M2.dat','w'), M12)
PetscBinaryIO().writeMatSciPy(open(dest+'M3.dat','w'), M13)
PetscBinaryIO().writeMatSciPy(open(dest+'C1.dat','w'), C11)
PetscBinaryIO().writeMatSciPy(open(dest+'C2.dat','w'), C12)
PetscBinaryIO().writeMatSciPy(open(dest+'C3.dat','w'), C13)
PetscBinaryIO().writeMatSciPy(open(dest+'K1.dat','w'), K11)
PetscBinaryIO().writeMatSciPy(open(dest+'K2.dat','w'), K12)
PetscBinaryIO().writeMatSciPy(open(dest+'K3.dat','w'), K13)
PetscBinaryIO().writeMatSciPy(open(dest+'I.dat','w'), IIn)
PetscBinaryIO().writeVec(open(dest+'Mk.dat','w'), Mk)
PetscBinaryIO().writeVec(open(dest+'Rh12.dat','w'), np.sqrt(R1))
PetscBinaryIO().writeVec(open(dest+'Mh12.dat','w'), np.sqrt(Mh1))
| [
"numpy.sqrt",
"matplotlib.pyplot.pcolormesh",
"scipy.sparse.linalg.factorized",
"scipy.sparse.linalg.norm",
"scipy.sparse.eye",
"matplotlib.pyplot.plot",
"numpy.diff",
"PetscBinaryIO.PetscBinaryIO",
"numpy.vstack",
"numpy.concatenate",
"scipy.sparse.coo_matrix",
"matplotlib.pyplot.axis",
"numpy.meshgrid",
"scipy.sparse.bmat",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlim",
"numpy.set_printoptions",
"matplotlib.pyplot.figure",
"numpy.zeros",
"scipy.sparse.hstack",
"scipy.linalg.norm",
"numpy.loadtxt",
"numpy.zeros_like"
] | [((139, 173), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': '(130)'}), '(linewidth=130)\n', (158, 173), True, 'import numpy as np\n'), ((357, 414), 'numpy.loadtxt', 'np.loadtxt', (['"""0-ComputeBaseflow/baseflow.txt"""'], {'unpack': '(True)'}), "('0-ComputeBaseflow/baseflow.txt', unpack=True)\n", (367, 414), True, 'import numpy as np\n'), ((585, 640), 'numpy.concatenate', 'np.concatenate', (['[y, y[1:] + height, y[1:] + 2 * height]'], {}), '([y, y[1:] + height, y[1:] + 2 * height])\n', (599, 640), True, 'import numpy as np\n'), ((643, 653), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (650, 653), True, 'import numpy as np\n'), ((686, 697), 'numpy.diff', 'np.diff', (['yp'], {}), '(yp)\n', (693, 697), True, 'import numpy as np\n'), ((773, 801), 'numpy.concatenate', 'np.concatenate', (['[xi, xi, xi]'], {}), '([xi, xi, xi])\n', (787, 801), True, 'import numpy as np\n'), ((808, 861), 'numpy.concatenate', 'np.concatenate', (['[eta, eta + height, eta + 2 * height]'], {}), '([eta, eta + height, eta + 2 * height])\n', (822, 861), True, 'import numpy as np\n'), ((880, 908), 'numpy.concatenate', 'np.concatenate', (['[ds, ds, ds]'], {}), '([ds, ds, ds])\n', (894, 908), True, 'import numpy as np\n'), ((916, 933), 'numpy.zeros_like', 'np.zeros_like', (['xi'], {}), '(xi)\n', (929, 933), True, 'import numpy as np\n'), ((939, 956), 'numpy.zeros_like', 'np.zeros_like', (['xi'], {}), '(xi)\n', (952, 956), True, 'import numpy as np\n'), ((964, 993), 'numpy.vstack', 'np.vstack', (['[qu01, qu01, qu01]'], {}), '([qu01, qu01, qu01])\n', (973, 993), True, 'import numpy as np\n'), ((1000, 1029), 'numpy.vstack', 'np.vstack', (['[qv01, qv01, qv01]'], {}), '([qv01, qv01, qv01])\n', (1009, 1029), True, 'import numpy as np\n'), ((1084, 1112), 'numpy.concatenate', 'np.concatenate', (['[uE, uE, uE]'], {}), '([uE, uE, uE])\n', (1098, 1112), True, 'import numpy as np\n'), ((1118, 1146), 'numpy.concatenate', 'np.concatenate', (['[uW, uW, uW]'], {}), '([uW, uW, uW])\n', (1132, 1146), True, 'import numpy as np\n'), ((1152, 1180), 'numpy.concatenate', 'np.concatenate', (['[vE, vE, vE]'], {}), '([vE, vE, vE])\n', (1166, 1180), True, 'import numpy as np\n'), ((1186, 1214), 'numpy.concatenate', 'np.concatenate', (['[vW, vW, vW]'], {}), '([vW, vW, vW])\n', (1200, 1214), True, 'import numpy as np\n'), ((1647, 1666), 'scipy.sparse.linalg.factorized', 'spla.factorized', (['EH'], {}), '(EH)\n', (1662, 1666), True, 'import scipy.sparse.linalg as spla\n'), ((1688, 1707), 'scipy.sparse.hstack', 'sp.hstack', (['[G, E.T]'], {}), '([G, E.T])\n', (1697, 1707), True, 'import scipy.sparse as sp\n'), ((1753, 1780), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (1763, 1780), True, 'import matplotlib.pyplot as plt\n'), ((1789, 1806), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (1800, 1806), True, 'import numpy as np\n'), ((1807, 1852), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['xu', 'yu', 'u0'], {'shading': '"""gouraud"""'}), "(xu, yu, u0, shading='gouraud')\n", (1821, 1852), True, 'import matplotlib.pyplot as plt\n'), ((1854, 1888), 'matplotlib.pyplot.plot', 'plt.plot', (['xi[:lP]', 'eta[:lP]', '"""ro-"""'], {}), "(xi[:lP], eta[:lP], 'ro-')\n", (1862, 1888), True, 'import matplotlib.pyplot as plt\n'), ((1890, 1936), 'matplotlib.pyplot.plot', 'plt.plot', (['xi[lP:2 * lP]', 'eta[lP:2 * lP]', '"""ro-"""'], {}), "(xi[lP:2 * lP], eta[lP:2 * lP], 'ro-')\n", (1898, 1936), True, 'import matplotlib.pyplot as plt\n'), ((1934, 1988), 
'matplotlib.pyplot.plot', 'plt.plot', (['xi[2 * lP:3 * lP]', 'eta[2 * lP:3 * lP]', '"""ro-"""'], {}), "(xi[2 * lP:3 * lP], eta[2 * lP:3 * lP], 'ro-')\n", (1942, 1988), True, 'import matplotlib.pyplot as plt\n'), ((1982, 2037), 'matplotlib.pyplot.plot', 'plt.plot', (['[x[0], x[-1]]', '[y[0] + height, y[0] + height]'], {}), '([x[0], x[-1]], [y[0] + height, y[0] + height])\n', (1990, 2037), True, 'import matplotlib.pyplot as plt\n'), ((2034, 2051), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (2042, 2051), True, 'import matplotlib.pyplot as plt\n'), ((2052, 2067), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-1)', '(2)'], {}), '(-1, 2)\n', (2060, 2067), True, 'import matplotlib.pyplot as plt\n'), ((2089, 2111), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""foo.png"""'], {}), "('foo.png')\n", (2100, 2111), True, 'import matplotlib.pyplot as plt\n'), ((2441, 2480), 'scipy.sparse.coo_matrix', 'sp.coo_matrix', (['(Q.shape[1], Q.shape[1])'], {}), '((Q.shape[1], Q.shape[1]))\n', (2454, 2480), True, 'import scipy.sparse as sp\n'), ((2491, 2513), 'scipy.sparse.coo_matrix', 'sp.coo_matrix', (['Q.shape'], {}), '(Q.shape)\n', (2504, 2513), True, 'import scipy.sparse as sp\n'), ((3852, 3882), 'scipy.sparse.eye', 'sp.eye', (['(su + 2 * svp + 2 * sxe)'], {}), '(su + 2 * svp + 2 * sxe)\n', (3858, 3882), True, 'import scipy.sparse as sp\n'), ((3881, 3944), 'scipy.sparse.coo_matrix', 'sp.coo_matrix', (['(su + 2 * svp + 2 * sxe, su + 2 * svp + 2 * sxe)'], {}), '((su + 2 * svp + 2 * sxe, su + 2 * svp + 2 * sxe))\n', (3894, 3944), True, 'import scipy.sparse as sp\n'), ((3935, 3961), 'scipy.sparse.bmat', 'sp.bmat', (['[[IAn, ZAn, ZAn]]'], {}), '([[IAn, ZAn, ZAn]])\n', (3942, 3961), True, 'import scipy.sparse as sp\n'), ((3967, 3993), 'scipy.sparse.bmat', 'sp.bmat', (['[[ZAn, IAn, ZAn]]'], {}), '([[ZAn, IAn, ZAn]])\n', (3974, 3993), True, 'import scipy.sparse as sp\n'), ((3999, 4025), 'scipy.sparse.bmat', 'sp.bmat', (['[[ZAn, ZAn, IAn]]'], {}), '([[ZAn, ZAn, IAn]])\n', (4006, 4025), True, 'import scipy.sparse as sp\n'), ((5492, 5514), 'numpy.zeros', 'np.zeros', (['M11.shape[0]'], {}), '(M11.shape[0])\n', (5500, 5514), True, 'import numpy as np\n'), ((4842, 4862), 'scipy.sparse.linalg.norm', 'spla.norm', (['(M11 - M22)'], {}), '(M11 - M22)\n', (4851, 4862), True, 'import scipy.sparse.linalg as spla\n'), ((4864, 4884), 'scipy.sparse.linalg.norm', 'spla.norm', (['(M11 - M33)'], {}), '(M11 - M33)\n', (4873, 4884), True, 'import scipy.sparse.linalg as spla\n'), ((4886, 4906), 'scipy.sparse.linalg.norm', 'spla.norm', (['(M22 - M33)'], {}), '(M22 - M33)\n', (4895, 4906), True, 'import scipy.sparse.linalg as spla\n'), ((4914, 4934), 'scipy.sparse.linalg.norm', 'spla.norm', (['(M12 - M23)'], {}), '(M12 - M23)\n', (4923, 4934), True, 'import scipy.sparse.linalg as spla\n'), ((4936, 4956), 'scipy.sparse.linalg.norm', 'spla.norm', (['(M12 - M31)'], {}), '(M12 - M31)\n', (4945, 4956), True, 'import scipy.sparse.linalg as spla\n'), ((4958, 4978), 'scipy.sparse.linalg.norm', 'spla.norm', (['(M12 - M31)'], {}), '(M12 - M31)\n', (4967, 4978), True, 'import scipy.sparse.linalg as spla\n'), ((4986, 5006), 'scipy.sparse.linalg.norm', 'spla.norm', (['(M13 - M21)'], {}), '(M13 - M21)\n', (4995, 5006), True, 'import scipy.sparse.linalg as spla\n'), ((5008, 5028), 'scipy.sparse.linalg.norm', 'spla.norm', (['(M13 - M32)'], {}), '(M13 - M32)\n', (5017, 5028), True, 'import scipy.sparse.linalg as spla\n'), ((5030, 5050), 'scipy.sparse.linalg.norm', 'spla.norm', (['(M21 - M32)'], {}), '(M21 - M32)\n', (5039, 
5050), True, 'import scipy.sparse.linalg as spla\n'), ((5059, 5079), 'scipy.sparse.linalg.norm', 'spla.norm', (['(C11 - C22)'], {}), '(C11 - C22)\n', (5068, 5079), True, 'import scipy.sparse.linalg as spla\n'), ((5081, 5101), 'scipy.sparse.linalg.norm', 'spla.norm', (['(C11 - C33)'], {}), '(C11 - C33)\n', (5090, 5101), True, 'import scipy.sparse.linalg as spla\n'), ((5103, 5123), 'scipy.sparse.linalg.norm', 'spla.norm', (['(C22 - C33)'], {}), '(C22 - C33)\n', (5112, 5123), True, 'import scipy.sparse.linalg as spla\n'), ((5131, 5151), 'scipy.sparse.linalg.norm', 'spla.norm', (['(C12 - C23)'], {}), '(C12 - C23)\n', (5140, 5151), True, 'import scipy.sparse.linalg as spla\n'), ((5153, 5173), 'scipy.sparse.linalg.norm', 'spla.norm', (['(C12 - C31)'], {}), '(C12 - C31)\n', (5162, 5173), True, 'import scipy.sparse.linalg as spla\n'), ((5175, 5195), 'scipy.sparse.linalg.norm', 'spla.norm', (['(C12 - C31)'], {}), '(C12 - C31)\n', (5184, 5195), True, 'import scipy.sparse.linalg as spla\n'), ((5203, 5223), 'scipy.sparse.linalg.norm', 'spla.norm', (['(C13 - C21)'], {}), '(C13 - C21)\n', (5212, 5223), True, 'import scipy.sparse.linalg as spla\n'), ((5225, 5245), 'scipy.sparse.linalg.norm', 'spla.norm', (['(C13 - C32)'], {}), '(C13 - C32)\n', (5234, 5245), True, 'import scipy.sparse.linalg as spla\n'), ((5247, 5267), 'scipy.sparse.linalg.norm', 'spla.norm', (['(C21 - C32)'], {}), '(C21 - C32)\n', (5256, 5267), True, 'import scipy.sparse.linalg as spla\n'), ((5276, 5296), 'scipy.sparse.linalg.norm', 'spla.norm', (['(K11 - K22)'], {}), '(K11 - K22)\n', (5285, 5296), True, 'import scipy.sparse.linalg as spla\n'), ((5298, 5318), 'scipy.sparse.linalg.norm', 'spla.norm', (['(K11 - K33)'], {}), '(K11 - K33)\n', (5307, 5318), True, 'import scipy.sparse.linalg as spla\n'), ((5320, 5340), 'scipy.sparse.linalg.norm', 'spla.norm', (['(K22 - K33)'], {}), '(K22 - K33)\n', (5329, 5340), True, 'import scipy.sparse.linalg as spla\n'), ((5348, 5368), 'scipy.sparse.linalg.norm', 'spla.norm', (['(K12 - K23)'], {}), '(K12 - K23)\n', (5357, 5368), True, 'import scipy.sparse.linalg as spla\n'), ((5370, 5390), 'scipy.sparse.linalg.norm', 'spla.norm', (['(K12 - K31)'], {}), '(K12 - K31)\n', (5379, 5390), True, 'import scipy.sparse.linalg as spla\n'), ((5392, 5412), 'scipy.sparse.linalg.norm', 'spla.norm', (['(K12 - K31)'], {}), '(K12 - K31)\n', (5401, 5412), True, 'import scipy.sparse.linalg as spla\n'), ((5420, 5440), 'scipy.sparse.linalg.norm', 'spla.norm', (['(K13 - K21)'], {}), '(K13 - K21)\n', (5429, 5440), True, 'import scipy.sparse.linalg as spla\n'), ((5442, 5462), 'scipy.sparse.linalg.norm', 'spla.norm', (['(K13 - K32)'], {}), '(K13 - K32)\n', (5451, 5462), True, 'import scipy.sparse.linalg as spla\n'), ((5464, 5484), 'scipy.sparse.linalg.norm', 'spla.norm', (['(K21 - K32)'], {}), '(K21 - K32)\n', (5473, 5484), True, 'import scipy.sparse.linalg as spla\n'), ((5593, 5605), 'scipy.linalg.norm', 'la.norm', (['tmp'], {}), '(tmp)\n', (5600, 5605), True, 'import scipy.linalg as la\n'), ((6384, 6395), 'numpy.sqrt', 'np.sqrt', (['R1'], {}), '(R1)\n', (6391, 6395), True, 'import numpy as np\n'), ((6449, 6461), 'numpy.sqrt', 'np.sqrt', (['Mh1'], {}), '(Mh1)\n', (6456, 6461), True, 'import numpy as np\n'), ((2519, 2551), 'scipy.sparse.bmat', 'sp.bmat', (['[[A, Q], [Q.T, dempty]]'], {}), '([[A, Q], [Q.T, dempty]])\n', (2526, 2551), True, 'import scipy.sparse as sp\n'), ((5679, 5694), 'PetscBinaryIO.PetscBinaryIO', 'PetscBinaryIO', ([], {}), '()\n', (5692, 5694), False, 'from PetscBinaryIO import PetscBinaryIO\n'), ((5739, 5754), 
'PetscBinaryIO.PetscBinaryIO', 'PetscBinaryIO', ([], {}), '()\n', (5752, 5754), False, 'from PetscBinaryIO import PetscBinaryIO\n'), ((5799, 5814), 'PetscBinaryIO.PetscBinaryIO', 'PetscBinaryIO', ([], {}), '()\n', (5812, 5814), False, 'from PetscBinaryIO import PetscBinaryIO\n'), ((5859, 5874), 'PetscBinaryIO.PetscBinaryIO', 'PetscBinaryIO', ([], {}), '()\n', (5872, 5874), False, 'from PetscBinaryIO import PetscBinaryIO\n'), ((5919, 5934), 'PetscBinaryIO.PetscBinaryIO', 'PetscBinaryIO', ([], {}), '()\n', (5932, 5934), False, 'from PetscBinaryIO import PetscBinaryIO\n'), ((5979, 5994), 'PetscBinaryIO.PetscBinaryIO', 'PetscBinaryIO', ([], {}), '()\n', (5992, 5994), False, 'from PetscBinaryIO import PetscBinaryIO\n'), ((6039, 6054), 'PetscBinaryIO.PetscBinaryIO', 'PetscBinaryIO', ([], {}), '()\n', (6052, 6054), False, 'from PetscBinaryIO import PetscBinaryIO\n'), ((6099, 6114), 'PetscBinaryIO.PetscBinaryIO', 'PetscBinaryIO', ([], {}), '()\n', (6112, 6114), False, 'from PetscBinaryIO import PetscBinaryIO\n'), ((6159, 6174), 'PetscBinaryIO.PetscBinaryIO', 'PetscBinaryIO', ([], {}), '()\n', (6172, 6174), False, 'from PetscBinaryIO import PetscBinaryIO\n'), ((6219, 6234), 'PetscBinaryIO.PetscBinaryIO', 'PetscBinaryIO', ([], {}), '()\n', (6232, 6234), False, 'from PetscBinaryIO import PetscBinaryIO\n'), ((6278, 6293), 'PetscBinaryIO.PetscBinaryIO', 'PetscBinaryIO', ([], {}), '()\n', (6291, 6293), False, 'from PetscBinaryIO import PetscBinaryIO\n'), ((6332, 6347), 'PetscBinaryIO.PetscBinaryIO', 'PetscBinaryIO', ([], {}), '()\n', (6345, 6347), False, 'from PetscBinaryIO import PetscBinaryIO\n'), ((6397, 6412), 'PetscBinaryIO.PetscBinaryIO', 'PetscBinaryIO', ([], {}), '()\n', (6410, 6412), False, 'from PetscBinaryIO import PetscBinaryIO\n'), ((2565, 2617), 'scipy.sparse.bmat', 'sp.bmat', (['[[B - 1.5 * N, oempty], [oempty.T, dempty]]'], {}), '([[B - 1.5 * N, oempty], [oempty.T, dempty]])\n', (2572, 2617), True, 'import scipy.sparse as sp\n'), ((2628, 2676), 'scipy.sparse.bmat', 'sp.bmat', (['[[0.5 * N, oempty], [oempty.T, dempty]]'], {}), '([[0.5 * N, oempty], [oempty.T, dempty]])\n', (2635, 2676), True, 'import scipy.sparse as sp\n'), ((4781, 4797), 'scipy.sparse.eye', 'sp.eye', (['(su + svp)'], {}), '(su + svp)\n', (4787, 4797), True, 'import scipy.sparse as sp\n'), ((4797, 4837), 'scipy.sparse.coo_matrix', 'sp.coo_matrix', (['(su + svp, svp + 2 * sxe)'], {}), '((su + svp, svp + 2 * sxe))\n', (4810, 4837), True, 'import scipy.sparse as sp\n')] |
from ray.rllib.algorithms.algorithm import ( # noqa
Algorithm,
COMMON_CONFIG,
with_common_config,
)
from ray.rllib.utils.deprecation import deprecation_warning
deprecation_warning(old="rllib.agents::Trainer", new="rllib.algorithms::Algorithm")
# Alias.
Trainer = Algorithm
| [
"ray.rllib.utils.deprecation.deprecation_warning"
] | [((174, 262), 'ray.rllib.utils.deprecation.deprecation_warning', 'deprecation_warning', ([], {'old': '"""rllib.agents::Trainer"""', 'new': '"""rllib.algorithms::Algorithm"""'}), "(old='rllib.agents::Trainer', new=\n 'rllib.algorithms::Algorithm')\n", (193, 262), False, 'from ray.rllib.utils.deprecation import deprecation_warning\n')] |
"""Test log_negativity."""
import numpy as np
from toqito.state_props import log_negativity
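# The 4x4 matrix used in these tests is the density matrix of the Bell state (|00> + |11>)/sqrt(2),
# whose logarithmic negativity is exactly 1.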
def test_log_negativity_rho():
"""Test for log_negativity on rho."""
test_input_mat = np.array(
[[1 / 2, 0, 0, 1 / 2], [0, 0, 0, 0], [0, 0, 0, 0], [1 / 2, 0, 0, 1 / 2]]
)
np.testing.assert_equal(np.isclose(log_negativity(test_input_mat), 1), True)
def test_log_negativity_rho_dim_int():
"""Test for log_negativity on rho."""
test_input_mat = np.array(
[[1 / 2, 0, 0, 1 / 2], [0, 0, 0, 0], [0, 0, 0, 0], [1 / 2, 0, 0, 1 / 2]]
)
np.testing.assert_equal(np.isclose(log_negativity(test_input_mat, 2), 1), True)
def test_log_negativity_invalid_rho_dim_int():
"""Invalid dim parameters."""
with np.testing.assert_raises(ValueError):
test_input_mat = np.array(
[[1 / 2, 0, 0, 1 / 2], [0, 0, 0, 0], [0, 0, 0, 0], [1 / 2, 0, 0, 1 / 2]]
)
log_negativity(test_input_mat, 5)
def test_log_negativity_invalid_rho_dim_vec():
"""Invalid dim parameters."""
with np.testing.assert_raises(ValueError):
test_input_mat = np.array(
[[1 / 2, 0, 0, 1 / 2], [0, 0, 0, 0], [0, 0, 0, 0], [1 / 2, 0, 0, 1 / 2]]
)
log_negativity(test_input_mat, [2, 5])
if __name__ == "__main__":
np.testing.run_module_suite()
| [
"numpy.array",
"toqito.state_props.log_negativity",
"numpy.testing.assert_raises",
"numpy.testing.run_module_suite"
] | [((189, 275), 'numpy.array', 'np.array', (['[[1 / 2, 0, 0, 1 / 2], [0, 0, 0, 0], [0, 0, 0, 0], [1 / 2, 0, 0, 1 / 2]]'], {}), '([[1 / 2, 0, 0, 1 / 2], [0, 0, 0, 0], [0, 0, 0, 0], [1 / 2, 0, 0, 1 /\n 2]])\n', (197, 275), True, 'import numpy as np\n'), ((471, 557), 'numpy.array', 'np.array', (['[[1 / 2, 0, 0, 1 / 2], [0, 0, 0, 0], [0, 0, 0, 0], [1 / 2, 0, 0, 1 / 2]]'], {}), '([[1 / 2, 0, 0, 1 / 2], [0, 0, 0, 0], [0, 0, 0, 0], [1 / 2, 0, 0, 1 /\n 2]])\n', (479, 557), True, 'import numpy as np\n'), ((1294, 1323), 'numpy.testing.run_module_suite', 'np.testing.run_module_suite', ([], {}), '()\n', (1321, 1323), True, 'import numpy as np\n'), ((744, 780), 'numpy.testing.assert_raises', 'np.testing.assert_raises', (['ValueError'], {}), '(ValueError)\n', (768, 780), True, 'import numpy as np\n'), ((807, 893), 'numpy.array', 'np.array', (['[[1 / 2, 0, 0, 1 / 2], [0, 0, 0, 0], [0, 0, 0, 0], [1 / 2, 0, 0, 1 / 2]]'], {}), '([[1 / 2, 0, 0, 1 / 2], [0, 0, 0, 0], [0, 0, 0, 0], [1 / 2, 0, 0, 1 /\n 2]])\n', (815, 893), True, 'import numpy as np\n'), ((920, 953), 'toqito.state_props.log_negativity', 'log_negativity', (['test_input_mat', '(5)'], {}), '(test_input_mat, 5)\n', (934, 953), False, 'from toqito.state_props import log_negativity\n'), ((1046, 1082), 'numpy.testing.assert_raises', 'np.testing.assert_raises', (['ValueError'], {}), '(ValueError)\n', (1070, 1082), True, 'import numpy as np\n'), ((1109, 1195), 'numpy.array', 'np.array', (['[[1 / 2, 0, 0, 1 / 2], [0, 0, 0, 0], [0, 0, 0, 0], [1 / 2, 0, 0, 1 / 2]]'], {}), '([[1 / 2, 0, 0, 1 / 2], [0, 0, 0, 0], [0, 0, 0, 0], [1 / 2, 0, 0, 1 /\n 2]])\n', (1117, 1195), True, 'import numpy as np\n'), ((1222, 1260), 'toqito.state_props.log_negativity', 'log_negativity', (['test_input_mat', '[2, 5]'], {}), '(test_input_mat, [2, 5])\n', (1236, 1260), False, 'from toqito.state_props import log_negativity\n'), ((325, 355), 'toqito.state_props.log_negativity', 'log_negativity', (['test_input_mat'], {}), '(test_input_mat)\n', (339, 355), False, 'from toqito.state_props import log_negativity\n'), ((607, 640), 'toqito.state_props.log_negativity', 'log_negativity', (['test_input_mat', '(2)'], {}), '(test_input_mat, 2)\n', (621, 640), False, 'from toqito.state_props import log_negativity\n')] |
from setuptools import setup, find_packages
SRC_DIR = 'src'
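# get_version() prepends src/ to sys.path so the package __version__ can be imported straight from
# the source tree without installing it first.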
def get_version():
import sys
sys.path[:0] = [SRC_DIR]
return __import__('dancing_bear').__version__
setup(
name='dancing-bear',
version=get_version(),
description='Dancing Bear',
author='mogproject',
author_email='<EMAIL>',
license='Apache 2.0 License',
url='https://github.com/mogproject/dancing-bear',
install_requires=[
'six',
'python-dateutil',
'pytz',
'tzlocal',
'argparse',
'pyserial',
'mog-commons >= 0.1.3',
],
tests_require=[
'unittest2',
],
package_dir={'': SRC_DIR},
packages=find_packages(SRC_DIR),
include_package_data=True,
test_suite='tests',
entry_points="""
[console_scripts]
dancing-bear = dancing_bear.dancing_bear:main
""",
)
| [
"setuptools.find_packages"
] | [((682, 704), 'setuptools.find_packages', 'find_packages', (['SRC_DIR'], {}), '(SRC_DIR)\n', (695, 704), False, 'from setuptools import setup, find_packages\n')] |
import numpy as np
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras import layers
from tensorflow.keras import activations
def do_pad(pad_size, input_tensor, name_prefix):
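    # ONNX-style explicit pads are applied as a ZeroPadding2D layer, since the conv/pool layers in
    # this module are always created with padding='valid'.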
if sum(pad_size) > 0:
input_tensor = layers.ZeroPadding2D(padding=pad_size, name=name_prefix + '_pad')(input_tensor)
return input_tensor
def get_conv1d(conv_parameters, is_depthwise, use_bias, input_tensor):
raise NotImplementedError('TBA')
def get_conv2d(conv_parameters, is_depthwise, use_bias, input_tensor):
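    # Convolution weights arrive in ONNX/PyTorch (out, in, kH, kW) order and are transposed to the
    # Keras layouts: (kH, kW, in, out) for Conv2D and (kH, kW, channels, 1) for DepthwiseConv2D
    # (a depth multiplier of 1 is assumed).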
weights = [conv_parameters['weights'][0].transpose(2, 3, 0, 1) if is_depthwise else
conv_parameters['weights'][0].transpose(2, 3, 1, 0)]
if use_bias:
weights.append(conv_parameters['weights'][1])
if is_depthwise:
input_tensor = layers.DepthwiseConv2D(kernel_size=conv_parameters['kernel_shape'],
strides=conv_parameters['strides'],
padding='valid',
use_bias=use_bias,
dilation_rate=conv_parameters['dilations'],
activation=None,
name=conv_parameters['name'],
weights=weights)(input_tensor)
else:
out_channels = weights[0].shape[-1]
input_tensor = layers.Conv2D(filters=out_channels,
kernel_size=conv_parameters['kernel_shape'],
strides=conv_parameters['strides'],
padding='valid',
use_bias=use_bias,
dilation_rate=conv_parameters['dilations'],
activation=None,
name=conv_parameters['name'],
weights=weights)(input_tensor)
return input_tensor
def get_conv3d(conv_parameters, is_depthwise, use_bias, input_tensor):
raise NotImplementedError('TBA')
def get_conv(op_params, op_name, input_tensor):
use_bias = len(op_params['weights']) > 1
is_depthwise = op_params['group'] > 1
input_tensor = do_pad(op_params['pads'], input_tensor, op_name)
# op_name = conv_parameters['op_type'].capitalize()
weights_size = len(op_params['weights'][0].shape)
if weights_size == 3:
# op_name += '1D'
input_tensor = get_conv1d(op_params, is_depthwise, use_bias, input_tensor)
elif weights_size == 4:
# op_name += '2D'
input_tensor = get_conv2d(op_params, is_depthwise, use_bias, input_tensor)
elif weights_size == 5:
# op_name += '3D'
input_tensor = get_conv3d(op_params, is_depthwise, use_bias, input_tensor)
return input_tensor
def get_dense(op_params, op_name, input_tensor):
weights = [w.transpose() for w in op_params['weights']]
use_bias = len(weights) > 1
out_features = weights[0].shape[1]
input_tensor = layers.Dense(units=out_features,
activation=None,
use_bias=use_bias,
name=op_name,
weights=weights,
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None)(input_tensor)
return input_tensor
def get_flatten(op_params, op_name, input_tensor):
if input_tensor.shape.rank > 2:
        input_tensor = layers.Flatten(name=op_name)(input_tensor)  # keras.layers.Flatten takes no axis argument
return input_tensor
def construct_maxpool(op_params, op_name, input_tensor):
if isinstance(op_params['padding'], tuple):
if sum(op_params['padding']) > 0:
input_tensor = layers.ZeroPadding2D(padding=op_params['padding'], name=op_name + '_pad')(input_tensor)
elif op_params['padding'] > 0:
input_tensor = layers.ZeroPadding2D(padding=op_params['padding'], name=op_name + '_pad')(input_tensor)
    # NOTE: this legacy helper is not registered in tf_layers_constructors (get_maxpool below is
    # used instead); it previously built the pooling layer without applying it, so apply it here.
    input_tensor = layers.MaxPool2D(pool_size=op_params.kernel_size,
                                     strides=op_params.stride, padding='valid')(input_tensor)
return input_tensor
def get_global_avg_pool(op_params, op_name, input_tensor):
return layers.GlobalAvgPool2D(name=op_name)(input_tensor)
def get_maxpool(op_params, op_name, input_tensor):
input_tensor = do_pad(op_params['pads'], input_tensor, op_name)
input_tensor = layers.MaxPool2D(pool_size=op_params['kernel_shape'],
padding='valid',
strides=op_params['strides'])(input_tensor)
return input_tensor
def get_identity(torch_op, op_name, input_tensor):
return tf.identity(input_tensor)
def get_add(inp1, inp2):
x = layers.Add()([inp1, inp2])
return x
def get_concat(inp1, inp2, axis=-1):
x = layers.Concatenate(axis=axis)([inp1, inp2])
return x
def get_activation(inp, layer_properties):
activation_name = layer_properties['op_type']
kwargs = {}
values = layer_properties.get('values', None)
if values is not None:
kwargs = {'max_value': values[-1]}
if activation_name == 'clip':
activation_name = 'relu'
inp = getattr(activations, activation_name)(inp, **kwargs)
return inp
def get_upsample(op_params, op_name, input_tensor):
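    # Scale factors >= 1 are handled by UpSampling2D (which expects integer factors); down-scaling
    # falls back to tf.image.resize with an explicitly computed output size.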
if np.all(op_params['scale'] >= 1):
input_tensor = layers.UpSampling2D(size=op_params['scale'], name=op_name,
interpolation=op_params['mode'])(input_tensor)
else:
size = (int(input_tensor.shape[1] * op_params['scale'][0]),
int(input_tensor.shape[2] * op_params['scale'][1]))
input_tensor = tf.image.resize(images=input_tensor,
size=size,
method=op_params['mode'],
name=op_name)
return input_tensor
tf_layers_constructors = {'conv': get_conv,
                          'gemm': get_dense,
'flatten': get_flatten,
'identity': get_identity,
'globalaveragepool': get_global_avg_pool,
'maxpool': get_maxpool,
'add': get_add,
'concat': get_concat,
'upsample': get_upsample,
}
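# Registry mapping the parsed (lower-cased) op_type to a constructor; ops that are not listed here
# (relu, sigmoid, clip, ...) fall through to get_activation() inside construct_tf_model.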
def construct_layers(layers_parameters, connections, inputs):
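    # Placeholder: not referenced by construct_tf_model below.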
return inputs
def construct_tf_model(parsed_model):
model_layers, model_connections = parsed_model.nodes, parsed_model.adj_list
input_name, input_size = list(parsed_model.model_input.items())[0]
output_names = list(parsed_model.model_output.keys())
# TODO: find out how to deal with N dimension because for keras Input it is redundant
input_size = [dim for dim in input_size if dim > 1]
# to HWC
input_size = input_size[1:] + input_size[:1]
model_inputs = layers.Input(shape=input_size, name=input_name)
model_outputs = list()
x = model_inputs
last_node_output = model_layers[0]['output']
skip_connections = dict()
for layer in model_layers:
layer_name = layer['name']
layer_id = layer['node_id']
if not np.any(np.isin(last_node_output, layer['input'])):
if layer_id in skip_connections.keys():
skip = x
skip_connections[model_connections[layer_id - 1][0]] = skip
x = skip_connections.pop(layer_id)
if len(layer['input']) > 1:
x = tf_layers_constructors[layer['op_type']](x, skip_connections.pop(layer_id))
else:
if layer['op_type'] in tf_layers_constructors:
x = tf_layers_constructors[layer['op_type']](op_params=layer, op_name=layer_name, input_tensor=x)
else:
x = get_activation(x, layer)
if len(model_connections[layer_id]) > 1:
for connect in model_connections[layer_id][1:]:
skip = x
skip_connections[connect] = skip
if layer['output'][0] in output_names:
model_outputs.append(x)
x = skip_connections.get(layer_id + 1, None)
last_node_output = layer['output']
model = Model(inputs=model_inputs, outputs=model_outputs)
return model
def to_tflite(tf_model_name, tflite_optimization=None):
converter = tf.lite.TFLiteConverter.from_saved_model(tf_model_name)
if tflite_optimization is not None:
if 'optimizations' in tflite_optimization:
converter.optimizations = tflite_optimization['optimizations']
print("TFLite optimizations: ", converter.optimizations)
if 'target_spec.supported_types' in tflite_optimization:
converter.target_spec.supported_types = tflite_optimization['target_spec.supported_types']
print("TFLite target_spec.supported_types: ", converter.target_spec.supported_types)
if 'representative_dataset' in tflite_optimization:
converter.representative_dataset = tflite_optimization['representative_dataset']
print("Assigned representative_dataset")
tflite_model = converter.convert()
# Save the model.
tflite_model_name = tf_model_name.rpartition('.')[0] + '.tflite'
with open(tflite_model_name, 'wb') as f:
f.write(tflite_model)
| [
"tensorflow.keras.layers.Input",
"tensorflow.lite.TFLiteConverter.from_saved_model",
"tensorflow.keras.layers.Concatenate",
"tensorflow.keras.layers.Conv2D",
"tensorflow.image.resize",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.UpSampling2D",
"tensorflow.keras.layers.GlobalAvgPool2D",
"tensorflow.keras.layers.Add",
"numpy.isin",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.ZeroPadding2D",
"tensorflow.keras.Model",
"tensorflow.identity",
"numpy.all",
"tensorflow.keras.layers.DepthwiseConv2D",
"tensorflow.keras.layers.MaxPool2D"
] | [((4280, 4376), 'tensorflow.keras.layers.MaxPool2D', 'layers.MaxPool2D', ([], {'pool_size': 'op_params.kernel_size', 'strides': 'op_params.stride', 'padding': '"""valid"""'}), "(pool_size=op_params.kernel_size, strides=op_params.stride,\n padding='valid')\n", (4296, 4376), False, 'from tensorflow.keras import layers\n'), ((4935, 4960), 'tensorflow.identity', 'tf.identity', (['input_tensor'], {}), '(input_tensor)\n', (4946, 4960), True, 'import tensorflow as tf\n'), ((5577, 5608), 'numpy.all', 'np.all', (["(op_params['scale'] >= 1)"], {}), "(op_params['scale'] >= 1)\n", (5583, 5608), True, 'import numpy as np\n'), ((7220, 7267), 'tensorflow.keras.layers.Input', 'layers.Input', ([], {'shape': 'input_size', 'name': 'input_name'}), '(shape=input_size, name=input_name)\n', (7232, 7267), False, 'from tensorflow.keras import layers\n'), ((8523, 8572), 'tensorflow.keras.Model', 'Model', ([], {'inputs': 'model_inputs', 'outputs': 'model_outputs'}), '(inputs=model_inputs, outputs=model_outputs)\n', (8528, 8572), False, 'from tensorflow.keras import Model\n'), ((8664, 8719), 'tensorflow.lite.TFLiteConverter.from_saved_model', 'tf.lite.TFLiteConverter.from_saved_model', (['tf_model_name'], {}), '(tf_model_name)\n', (8704, 8719), True, 'import tensorflow as tf\n'), ((3132, 3366), 'tensorflow.keras.layers.Dense', 'layers.Dense', ([], {'units': 'out_features', 'activation': 'None', 'use_bias': 'use_bias', 'name': 'op_name', 'weights': 'weights', 'kernel_regularizer': 'None', 'bias_regularizer': 'None', 'activity_regularizer': 'None', 'kernel_constraint': 'None', 'bias_constraint': 'None'}), '(units=out_features, activation=None, use_bias=use_bias, name=\n op_name, weights=weights, kernel_regularizer=None, bias_regularizer=\n None, activity_regularizer=None, kernel_constraint=None,\n bias_constraint=None)\n', (3144, 3366), False, 'from tensorflow.keras import layers\n'), ((4469, 4505), 'tensorflow.keras.layers.GlobalAvgPool2D', 'layers.GlobalAvgPool2D', ([], {'name': 'op_name'}), '(name=op_name)\n', (4491, 4505), False, 'from tensorflow.keras import layers\n'), ((4660, 4764), 'tensorflow.keras.layers.MaxPool2D', 'layers.MaxPool2D', ([], {'pool_size': "op_params['kernel_shape']", 'padding': '"""valid"""', 'strides': "op_params['strides']"}), "(pool_size=op_params['kernel_shape'], padding='valid',\n strides=op_params['strides'])\n", (4676, 4764), False, 'from tensorflow.keras import layers\n'), ((4996, 5008), 'tensorflow.keras.layers.Add', 'layers.Add', ([], {}), '()\n', (5006, 5008), False, 'from tensorflow.keras import layers\n'), ((5083, 5112), 'tensorflow.keras.layers.Concatenate', 'layers.Concatenate', ([], {'axis': 'axis'}), '(axis=axis)\n', (5101, 5112), False, 'from tensorflow.keras import layers\n'), ((5951, 6042), 'tensorflow.image.resize', 'tf.image.resize', ([], {'images': 'input_tensor', 'size': 'size', 'method': "op_params['mode']", 'name': 'op_name'}), "(images=input_tensor, size=size, method=op_params['mode'],\n name=op_name)\n", (5966, 6042), True, 'import tensorflow as tf\n'), ((256, 321), 'tensorflow.keras.layers.ZeroPadding2D', 'layers.ZeroPadding2D', ([], {'padding': 'pad_size', 'name': "(name_prefix + '_pad')"}), "(padding=pad_size, name=name_prefix + '_pad')\n", (276, 321), False, 'from tensorflow.keras import layers\n'), ((815, 1076), 'tensorflow.keras.layers.DepthwiseConv2D', 'layers.DepthwiseConv2D', ([], {'kernel_size': "conv_parameters['kernel_shape']", 'strides': "conv_parameters['strides']", 'padding': '"""valid"""', 'use_bias': 'use_bias', 'dilation_rate': 
"conv_parameters['dilations']", 'activation': 'None', 'name': "conv_parameters['name']", 'weights': 'weights'}), "(kernel_size=conv_parameters['kernel_shape'], strides\n =conv_parameters['strides'], padding='valid', use_bias=use_bias,\n dilation_rate=conv_parameters['dilations'], activation=None, name=\n conv_parameters['name'], weights=weights)\n", (837, 1076), False, 'from tensorflow.keras import layers\n'), ((1476, 1749), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': 'out_channels', 'kernel_size': "conv_parameters['kernel_shape']", 'strides': "conv_parameters['strides']", 'padding': '"""valid"""', 'use_bias': 'use_bias', 'dilation_rate': "conv_parameters['dilations']", 'activation': 'None', 'name': "conv_parameters['name']", 'weights': 'weights'}), "(filters=out_channels, kernel_size=conv_parameters[\n 'kernel_shape'], strides=conv_parameters['strides'], padding='valid',\n use_bias=use_bias, dilation_rate=conv_parameters['dilations'],\n activation=None, name=conv_parameters['name'], weights=weights)\n", (1489, 1749), False, 'from tensorflow.keras import layers\n'), ((3791, 3827), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {'axis': '(1)', 'name': 'op_name'}), '(axis=1, name=op_name)\n', (3805, 3827), False, 'from tensorflow.keras import layers\n'), ((5633, 5729), 'tensorflow.keras.layers.UpSampling2D', 'layers.UpSampling2D', ([], {'size': "op_params['scale']", 'name': 'op_name', 'interpolation': "op_params['mode']"}), "(size=op_params['scale'], name=op_name, interpolation=\n op_params['mode'])\n", (5652, 5729), False, 'from tensorflow.keras import layers\n'), ((4042, 4115), 'tensorflow.keras.layers.ZeroPadding2D', 'layers.ZeroPadding2D', ([], {'padding': "op_params['padding']", 'name': "(op_name + '_pad')"}), "(padding=op_params['padding'], name=op_name + '_pad')\n", (4062, 4115), False, 'from tensorflow.keras import layers\n'), ((4188, 4261), 'tensorflow.keras.layers.ZeroPadding2D', 'layers.ZeroPadding2D', ([], {'padding': "op_params['padding']", 'name': "(op_name + '_pad')"}), "(padding=op_params['padding'], name=op_name + '_pad')\n", (4208, 4261), False, 'from tensorflow.keras import layers\n'), ((7519, 7560), 'numpy.isin', 'np.isin', (['last_node_output', "layer['input']"], {}), "(last_node_output, layer['input'])\n", (7526, 7560), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import datetime as dt
from flask_login import UserMixin  # the flask.ext.* import namespace was removed in Flask 1.0
from octopus.extensions import bcrypt
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import backref
from octopus.database import (
Column,
db,
Model,
ReferenceCol,
relationship,
SurrogatePK
)
# ___________ user section __________
class Role(SurrogatePK, Model):
__tablename__ = 'roles'
name = Column(db.String(80), unique=True, nullable=False)
user_id = ReferenceCol('users', nullable=True)
user = relationship('User', backref='roles')
def __init__(self, name, **kwargs):
db.Model.__init__(self, name=name, **kwargs)
def __repr__(self):
return '<Role({name})>'.format(name=self.name)
class User(UserMixin, SurrogatePK, Model):
__tablename__ = 'users'
username = Column(db.String(80), unique=True, nullable=False, index=True)
email = Column(db.String(80), unique=True, nullable=False)
password = Column(db.String(128), nullable=True)
created_at = Column(db.DateTime, nullable=False, default=dt.datetime.utcnow)
first_name = Column(db.String(30), nullable=True)
last_name = Column(db.String(30), nullable=True)
active = Column(db.Boolean(), default=False)
is_admin = Column(db.Boolean(), default=False)
contract = Column(db.String(20), default='None', nullable=True)
# ref to case staff table
user_cases = db.relationship('CaseStaffMap',
cascade="all, delete-orphan",
backref='users')
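    # Users and cases are linked through the CaseStaffMap association object (which also stores
    # the primary/secondary flags); association_proxy exposes the related Case rows directly.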
    cases = association_proxy('user_cases', 'case')  # CaseStaffMap defines .case (singular); 'cases' would raise AttributeError
# two types of task refs
created_tasks = relationship('Task',
backref='creator',
lazy='dynamic')
tasks = relationship('Task', secondary="task_user_map",
backref=db.backref('users', lazy='dynamic'))
# project refs
projects = relationship('Project', secondary="project_user_map",
backref=db.backref('users', lazy='dynamic'))
def __init__(self, username, email, password=None, **kwargs):
db.Model.__init__(self, username=username, email=email, **kwargs)
if password:
self.set_password(password)
else:
self.password = None
def set_password(self, password):
self.password = bcrypt.generate_password_hash(password)
def check_password(self, value):
return bcrypt.check_password_hash(self.password, value)
@property
def is_permanent(self):
return True if self.contract == 'permanent' else False
@property
def is_contractor(self):
return True if self.contract == 'contractor' else False
@property
def is_manager(self):
return True if self.contract == 'manager' else False
@property
def full_name(self):
if self.first_name and self.last_name:
return "{0} {1}".format(self.first_name, self.last_name)
else:
return self.username
def __repr__(self):
return '<User({username!r})>'.format(username=self.username)
# ____________case section_____________
class CaseType(SurrogatePK, Model):
__tablename__ = 'case_types'
code = Column(db.String(15), unique=True, nullable=False, index=True)
description = Column(db.String(80), nullable=False, index=True)
def __init__(self, name, **kwargs):
db.Model.__init__(self, name=name, **kwargs)
def __repr__(self):
return '<CaseType (code={code}, description={desc})>'.format(
code=self.code, desc=self.description)
class CaseStatus(SurrogatePK, Model):
__tablename__ = 'case_statuses'
type = Column(db.String(32), unique=True, nullable=True, index=True)
def __init__(self, name, **kwargs):
db.Model.__init__(self, name=name, **kwargs)
def __repr__(self):
return '<CaseStatus (type={type}>'.format(
type=self.type)
class Region(SurrogatePK, Model):
__tablename__ = 'regions'
code = Column(db.String(4), unique=True, nullable=False)
name = Column(db.String(80), unique=True, nullable=False)
address = Column(db.String(120), unique=False, nullable=False)
city = Column(db.String(80), unique=False, nullable=False)
state = Column(db.String(2), unique=False, nullable=False)
zip = Column(db.String(15), unique=True, nullable=False)
phone = Column(db.String(15), unique=True, nullable=False)
def __init__(self, **kwargs):
db.Model.__init__(self, **kwargs)
def __repr__(self):
return '<Region(code={code})>'.format(code=self.code)
class Tag(SurrogatePK, Model):
__tablename__ = 'tags'
kind = Column(db.Text(), nullable=False, index=True)
tag = Column(db.Text(), nullable=False, index=True)
def __init__(self, **kwargs):
db.Model.__init__(self, **kwargs)
def __repr__(self):
return '<Tag(id={id}, kind={kind}, tag={tag})>'.format(
tag=self.tag, id=self.id, kind=self.kind)
case_tag_map = db.Table('case_tag_map',
db.Column('tag_id', db.Integer,
db.ForeignKey('tags.id'), index=True),
db.Column('case_id', db.Integer,
db.ForeignKey('cases.id'), index=True))
class CaseStaffMap(SurrogatePK, Model):
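  # association object linking users to cases, with primary/secondary assignment flags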
__tablename__ = 'case_staff_map'
user_id = db.Column(db.Integer, db.ForeignKey('users.id'), index=True)
case_id = db.Column(db.Integer, db.ForeignKey('cases.id'), index=True)
primary = db.Column(db.Boolean, default=False)
secondary = db.Column(db.Boolean, default=False)
case = db.relationship('Case',
backref=backref("user_cases",
cascade="all, delete-orphan"))
user = db.relationship('User')
def __init__(self, user=None, case=None, primary=False,
secondary=False, **kwargs):
db.Model.__init__(self, **kwargs)
self.user = user
self.case = case
self.primary = primary
self.secondary = secondary
def __repr__(self):
return '<CaseStaffMap(id={id}, user_id={user_id}, ' \
'case_id={case_id}, ' \
'primary={primary}, sec={secondary})>' \
.format(id=self.id, user_id=self.user_id, case_id=self.case_id,
primary=self.primary, secondary=self.secondary)
class CaseFile(SurrogatePK, Model):
__tablename__ = 'case_files'
kind = db.Column(db.Text(), unique=False)
name = db.Column(db.Text(), unique=False)
path = db.Column(db.Text(), unique=False)
attributes = db.Column(db.Text(), unique=False, nullable=True)
case_id = ReferenceCol('cases', nullable=False)
def __init__(self, *args, **kwargs):
db.Model.__init__(self, *args, **kwargs)
def __repr__(self):
return '<CaseFile(id={id}, kind={kind}, name={name})>'.format(
id=self.id, kind=self.kind, name=self.name)
class Case(SurrogatePK, Model):
__tablename__ = 'cases'
crd_number = Column(db.Text(), unique=False, nullable=False, index=True)
case_name = Column(db.Text(), unique=False, nullable=True, index=True)
case_desc = Column(db.Text(), unique=False, nullable=True)
start_date = Column(db.Date(), unique=False, nullable=False, index=True)
end_date = Column(db.Date(), unique=False, nullable=True, index=True)
case_type_id = ReferenceCol('case_types', nullable=False)
case_type = relationship('CaseType', backref='case_types')
case_status_id = ReferenceCol('case_statuses', nullable=True,
colname='case_status_id', parent_table='cases')
case_status = relationship('CaseStatus', backref='case_statuses')
region_id = ReferenceCol('regions', nullable=False)
region = relationship('Region', backref='regions')
users = association_proxy('user_cases', 'user')
mars_risk_score = Column(db.Integer, unique=False, nullable=True)
qau_risk_score = Column(db.Integer, unique=False, nullable=True)
examiner_risk_score = Column(db.Integer, unique=False, nullable=True)
tags = relationship('Tag', secondary=case_tag_map,
backref=db.backref('cases', lazy='dynamic'))
files = relationship('CaseFile', backref='case_files')
tasks = relationship('Task', backref='case_tasks')
def __init__(self, *args, **kwargs):
db.Model.__init__(self, *args, **kwargs)
def get_tags(self, kind=None):
"""
Get a case's tags, class must be initialized first
:param kind: risk | non_qau_staff | None
:return: list of unicode tags according to the specified kind
"""
if kind:
return [i.tag for i in self.tags if i.kind == kind]
else:
return [i.tag for i in self.tags]
def __repr__(self):
return '<Case(id={id}, case_name={case_name}, )>'.format(
id=self.id, case_name=self.case_name)
# _________ task section ______________
task_user_map = db.Table('task_user_map',
db.Column('user_id', db.Integer,
db.ForeignKey('users.id'),
index=True),
db.Column('task_id', db.Integer,
db.ForeignKey('tasks.id'),
index=True))
class Task(SurrogatePK, Model):
__tablename__ = 'tasks'
task_name = Column(db.Text(), unique=False, nullable=True, index=True)
task_desc = Column(db.Text(), unique=False, nullable=True)
start_date = Column(db.Date(), unique=False, nullable=False, index=True)
end_date = Column(db.Date(), unique=False, nullable=True, index=True)
# one-to-many user to tasks
creator_id = db.Column('creator_id', db.Integer, db.ForeignKey('users.id'))
# ref to optional associated case
case_id = db.Column('case_id', db.Integer,
db.ForeignKey('cases.id'), nullable=True)
# many-to-many users to tasks
assignees = relationship('User', secondary=task_user_map,
backref=db.backref('user_tasks', lazy='dynamic'))
# ref to optional associated project
project_id = db.Column('project_id', db.Integer,
db.ForeignKey('projects.id'), nullable=True)
def __init__(self, *args, **kwargs):
db.Model.__init__(self, *args, **kwargs)
def __repr__(self):
return '<Task(id={id}, task_name={task_name}, )>'.format(
id=self.id, task_name=self.task_name)
# ________project section___________
project_user_map = db.Table('project_user_map',
db.Column('user_id', db.Integer,
db.ForeignKey('users.id'),
index=True),
db.Column('project_id', db.Integer,
db.ForeignKey('projects.id'),
index=True))
class Project(SurrogatePK, Model):
__tablename__ = 'projects'
project_name = Column(db.Text(), unique=False, nullable=True, index=True)
project_desc = Column(db.Text(), unique=False, nullable=True)
start_date = Column(db.Date(), unique=False, nullable=False, index=True)
end_date = Column(db.Date(), unique=False, nullable=True, index=True)
members = relationship('User', secondary=project_user_map,
backref=db.backref('user_projects', lazy='dynamic'))
tasks = relationship('Task', backref='project_tasks')
def __init__(self, *args, **kwargs):
db.Model.__init__(self, *args, **kwargs)
def __repr__(self):
return '<Project(id={id}, project_name={project_name}, )>'.format(
id=self.id, project_name=self.project_name) | [
"sqlalchemy.ext.associationproxy.association_proxy",
"octopus.database.Column",
"octopus.database.relationship",
"octopus.database.db.Column",
"octopus.database.db.Boolean",
"octopus.extensions.bcrypt.check_password_hash",
"sqlalchemy.orm.backref",
"octopus.database.db.relationship",
"octopus.database.db.Model.__init__",
"octopus.extensions.bcrypt.generate_password_hash",
"octopus.database.db.ForeignKey",
"octopus.database.db.Date",
"octopus.database.db.Text",
"octopus.database.ReferenceCol",
"octopus.database.db.backref",
"octopus.database.db.String"
] | [((496, 532), 'octopus.database.ReferenceCol', 'ReferenceCol', (['"""users"""'], {'nullable': '(True)'}), "('users', nullable=True)\n", (508, 532), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((542, 579), 'octopus.database.relationship', 'relationship', (['"""User"""'], {'backref': '"""roles"""'}), "('User', backref='roles')\n", (554, 579), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((1016, 1079), 'octopus.database.Column', 'Column', (['db.DateTime'], {'nullable': '(False)', 'default': 'dt.datetime.utcnow'}), '(db.DateTime, nullable=False, default=dt.datetime.utcnow)\n', (1022, 1079), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((1389, 1467), 'octopus.database.db.relationship', 'db.relationship', (['"""CaseStaffMap"""'], {'cascade': '"""all, delete-orphan"""', 'backref': '"""users"""'}), "('CaseStaffMap', cascade='all, delete-orphan', backref='users')\n", (1404, 1467), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((1540, 1580), 'sqlalchemy.ext.associationproxy.association_proxy', 'association_proxy', (['"""user_cases"""', '"""cases"""'], {}), "('user_cases', 'cases')\n", (1557, 1580), False, 'from sqlalchemy.ext.associationproxy import association_proxy\n'), ((1627, 1682), 'octopus.database.relationship', 'relationship', (['"""Task"""'], {'backref': '"""creator"""', 'lazy': '"""dynamic"""'}), "('Task', backref='creator', lazy='dynamic')\n", (1639, 1682), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((5347, 5383), 'octopus.database.db.Column', 'db.Column', (['db.Boolean'], {'default': '(False)'}), '(db.Boolean, default=False)\n', (5356, 5383), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((5398, 5434), 'octopus.database.db.Column', 'db.Column', (['db.Boolean'], {'default': '(False)'}), '(db.Boolean, default=False)\n', (5407, 5434), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((5605, 5628), 'octopus.database.db.relationship', 'db.relationship', (['"""User"""'], {}), "('User')\n", (5620, 5628), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((6447, 6484), 'octopus.database.ReferenceCol', 'ReferenceCol', (['"""cases"""'], {'nullable': '(False)'}), "('cases', nullable=False)\n", (6459, 6484), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((7144, 7186), 'octopus.database.ReferenceCol', 'ReferenceCol', (['"""case_types"""'], {'nullable': '(False)'}), "('case_types', nullable=False)\n", (7156, 7186), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((7201, 7247), 'octopus.database.relationship', 'relationship', (['"""CaseType"""'], {'backref': '"""case_types"""'}), "('CaseType', backref='case_types')\n", (7213, 7247), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((7268, 7364), 'octopus.database.ReferenceCol', 'ReferenceCol', (['"""case_statuses"""'], {'nullable': '(True)', 'colname': '"""case_status_id"""', 'parent_table': '"""cases"""'}), "('case_statuses', nullable=True, colname='case_status_id',\n parent_table='cases')\n", (7280, 7364), False, 'from octopus.database import Column, db, 
Model, ReferenceCol, relationship, SurrogatePK\n'), ((7409, 7460), 'octopus.database.relationship', 'relationship', (['"""CaseStatus"""'], {'backref': '"""case_statuses"""'}), "('CaseStatus', backref='case_statuses')\n", (7421, 7460), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((7476, 7515), 'octopus.database.ReferenceCol', 'ReferenceCol', (['"""regions"""'], {'nullable': '(False)'}), "('regions', nullable=False)\n", (7488, 7515), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((7527, 7568), 'octopus.database.relationship', 'relationship', (['"""Region"""'], {'backref': '"""regions"""'}), "('Region', backref='regions')\n", (7539, 7568), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((7580, 7619), 'sqlalchemy.ext.associationproxy.association_proxy', 'association_proxy', (['"""user_cases"""', '"""user"""'], {}), "('user_cases', 'user')\n", (7597, 7619), False, 'from sqlalchemy.ext.associationproxy import association_proxy\n'), ((7641, 7688), 'octopus.database.Column', 'Column', (['db.Integer'], {'unique': '(False)', 'nullable': '(True)'}), '(db.Integer, unique=False, nullable=True)\n', (7647, 7688), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((7708, 7755), 'octopus.database.Column', 'Column', (['db.Integer'], {'unique': '(False)', 'nullable': '(True)'}), '(db.Integer, unique=False, nullable=True)\n', (7714, 7755), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((7780, 7827), 'octopus.database.Column', 'Column', (['db.Integer'], {'unique': '(False)', 'nullable': '(True)'}), '(db.Integer, unique=False, nullable=True)\n', (7786, 7827), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((7959, 8005), 'octopus.database.relationship', 'relationship', (['"""CaseFile"""'], {'backref': '"""case_files"""'}), "('CaseFile', backref='case_files')\n", (7971, 8005), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((8017, 8059), 'octopus.database.relationship', 'relationship', (['"""Task"""'], {'backref': '"""case_tasks"""'}), "('Task', backref='case_tasks')\n", (8029, 8059), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((11129, 11174), 'octopus.database.relationship', 'relationship', (['"""Task"""'], {'backref': '"""project_tasks"""'}), "('Task', backref='project_tasks')\n", (11141, 11174), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((440, 453), 'octopus.database.db.String', 'db.String', (['(80)'], {}), '(80)\n', (449, 453), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((623, 667), 'octopus.database.db.Model.__init__', 'db.Model.__init__', (['self'], {'name': 'name'}), '(self, name=name, **kwargs)\n', (640, 667), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((833, 846), 'octopus.database.db.String', 'db.String', (['(80)'], {}), '(80)\n', (842, 846), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((906, 919), 'octopus.database.db.String', 'db.String', (['(80)'], {}), '(80)\n', (915, 919), False, 'from octopus.database import Column, db, Model, ReferenceCol, 
relationship, SurrogatePK\n'), ((970, 984), 'octopus.database.db.String', 'db.String', (['(128)'], {}), '(128)\n', (979, 984), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((1102, 1115), 'octopus.database.db.String', 'db.String', (['(30)'], {}), '(30)\n', (1111, 1115), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((1153, 1166), 'octopus.database.db.String', 'db.String', (['(30)'], {}), '(30)\n', (1162, 1166), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((1201, 1213), 'octopus.database.db.Boolean', 'db.Boolean', ([], {}), '()\n', (1211, 1213), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((1250, 1262), 'octopus.database.db.Boolean', 'db.Boolean', ([], {}), '()\n', (1260, 1262), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((1299, 1312), 'octopus.database.db.String', 'db.String', (['(20)'], {}), '(20)\n', (1308, 1312), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((2097, 2162), 'octopus.database.db.Model.__init__', 'db.Model.__init__', (['self'], {'username': 'username', 'email': 'email'}), '(self, username=username, email=email, **kwargs)\n', (2114, 2162), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((2308, 2347), 'octopus.extensions.bcrypt.generate_password_hash', 'bcrypt.generate_password_hash', (['password'], {}), '(password)\n', (2337, 2347), False, 'from octopus.extensions import bcrypt\n'), ((2395, 2443), 'octopus.extensions.bcrypt.check_password_hash', 'bcrypt.check_password_hash', (['self.password', 'value'], {}), '(self.password, value)\n', (2421, 2443), False, 'from octopus.extensions import bcrypt\n'), ((3130, 3143), 'octopus.database.db.String', 'db.String', (['(15)'], {}), '(15)\n', (3139, 3143), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((3209, 3222), 'octopus.database.db.String', 'db.String', (['(80)'], {}), '(80)\n', (3218, 3222), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((3295, 3339), 'octopus.database.db.Model.__init__', 'db.Model.__init__', (['self'], {'name': 'name'}), '(self, name=name, **kwargs)\n', (3312, 3339), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((3564, 3577), 'octopus.database.db.String', 'db.String', (['(32)'], {}), '(32)\n', (3573, 3577), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((3662, 3706), 'octopus.database.db.Model.__init__', 'db.Model.__init__', (['self'], {'name': 'name'}), '(self, name=name, **kwargs)\n', (3679, 3706), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((3879, 3891), 'octopus.database.db.String', 'db.String', (['(4)'], {}), '(4)\n', (3888, 3891), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((3938, 3951), 'octopus.database.db.String', 'db.String', (['(80)'], {}), '(80)\n', (3947, 3951), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((4001, 4015), 'octopus.database.db.String', 'db.String', (['(120)'], {}), '(120)\n', (4010, 4015), False, 'from 
octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((4063, 4076), 'octopus.database.db.String', 'db.String', (['(80)'], {}), '(80)\n', (4072, 4076), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((4125, 4137), 'octopus.database.db.String', 'db.String', (['(2)'], {}), '(2)\n', (4134, 4137), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((4184, 4197), 'octopus.database.db.String', 'db.String', (['(15)'], {}), '(15)\n', (4193, 4197), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((4245, 4258), 'octopus.database.db.String', 'db.String', (['(15)'], {}), '(15)\n', (4254, 4258), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((4326, 4359), 'octopus.database.db.Model.__init__', 'db.Model.__init__', (['self'], {}), '(self, **kwargs)\n', (4343, 4359), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((4515, 4524), 'octopus.database.db.Text', 'db.Text', ([], {}), '()\n', (4522, 4524), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((4569, 4578), 'octopus.database.db.Text', 'db.Text', ([], {}), '()\n', (4576, 4578), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((4645, 4678), 'octopus.database.db.Model.__init__', 'db.Model.__init__', (['self'], {}), '(self, **kwargs)\n', (4662, 4678), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((4942, 4966), 'octopus.database.db.ForeignKey', 'db.ForeignKey', (['"""tags.id"""'], {}), "('tags.id')\n", (4955, 4966), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((5072, 5097), 'octopus.database.db.ForeignKey', 'db.ForeignKey', (['"""cases.id"""'], {}), "('cases.id')\n", (5085, 5097), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((5223, 5248), 'octopus.database.db.ForeignKey', 'db.ForeignKey', (['"""users.id"""'], {}), "('users.id')\n", (5236, 5248), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((5296, 5321), 'octopus.database.db.ForeignKey', 'db.ForeignKey', (['"""cases.id"""'], {}), "('cases.id')\n", (5309, 5321), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((5735, 5768), 'octopus.database.db.Model.__init__', 'db.Model.__init__', (['self'], {}), '(self, **kwargs)\n', (5752, 5768), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((6257, 6266), 'octopus.database.db.Text', 'db.Text', ([], {}), '()\n', (6264, 6266), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((6301, 6310), 'octopus.database.db.Text', 'db.Text', ([], {}), '()\n', (6308, 6310), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((6345, 6354), 'octopus.database.db.Text', 'db.Text', ([], {}), '()\n', (6352, 6354), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((6395, 6404), 'octopus.database.db.Text', 'db.Text', ([], {}), '()\n', (6402, 6404), False, 'from octopus.database import Column, db, Model, ReferenceCol, 
relationship, SurrogatePK\n'), ((6529, 6569), 'octopus.database.db.Model.__init__', 'db.Model.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (6546, 6569), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((6792, 6801), 'octopus.database.db.Text', 'db.Text', ([], {}), '()\n', (6799, 6801), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((6866, 6875), 'octopus.database.db.Text', 'db.Text', ([], {}), '()\n', (6873, 6875), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((6939, 6948), 'octopus.database.db.Text', 'db.Text', ([], {}), '()\n', (6946, 6948), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((7001, 7010), 'octopus.database.db.Date', 'db.Date', ([], {}), '()\n', (7008, 7010), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((7074, 7083), 'octopus.database.db.Date', 'db.Date', ([], {}), '()\n', (7081, 7083), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((8104, 8144), 'octopus.database.db.Model.__init__', 'db.Model.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (8121, 8144), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((8791, 8816), 'octopus.database.db.ForeignKey', 'db.ForeignKey', (['"""users.id"""'], {}), "('users.id')\n", (8804, 8816), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((8959, 8984), 'octopus.database.db.ForeignKey', 'db.ForeignKey', (['"""tasks.id"""'], {}), "('tasks.id')\n", (8972, 8984), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((9115, 9124), 'octopus.database.db.Text', 'db.Text', ([], {}), '()\n', (9122, 9124), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((9188, 9197), 'octopus.database.db.Text', 'db.Text', ([], {}), '()\n', (9195, 9197), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((9250, 9259), 'octopus.database.db.Date', 'db.Date', ([], {}), '()\n', (9257, 9259), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((9323, 9332), 'octopus.database.db.Date', 'db.Date', ([], {}), '()\n', (9330, 9332), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((9457, 9482), 'octopus.database.db.ForeignKey', 'db.ForeignKey', (['"""users.id"""'], {}), "('users.id')\n", (9470, 9482), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((9588, 9613), 'octopus.database.db.ForeignKey', 'db.ForeignKey', (['"""cases.id"""'], {}), "('cases.id')\n", (9601, 9613), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((9916, 9944), 'octopus.database.db.ForeignKey', 'db.ForeignKey', (['"""projects.id"""'], {}), "('projects.id')\n", (9929, 9944), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((10005, 10045), 'octopus.database.db.Model.__init__', 'db.Model.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (10022, 10045), False, 'from octopus.database import Column, db, Model, ReferenceCol, 
relationship, SurrogatePK\n'), ((10362, 10387), 'octopus.database.db.ForeignKey', 'db.ForeignKey', (['"""users.id"""'], {}), "('users.id')\n", (10375, 10387), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((10542, 10570), 'octopus.database.db.ForeignKey', 'db.ForeignKey', (['"""projects.id"""'], {}), "('projects.id')\n", (10555, 10570), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((10713, 10722), 'octopus.database.db.Text', 'db.Text', ([], {}), '()\n', (10720, 10722), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((10789, 10798), 'octopus.database.db.Text', 'db.Text', ([], {}), '()\n', (10796, 10798), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((10851, 10860), 'octopus.database.db.Date', 'db.Date', ([], {}), '()\n', (10858, 10860), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((10924, 10933), 'octopus.database.db.Date', 'db.Date', ([], {}), '()\n', (10931, 10933), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((11219, 11259), 'octopus.database.db.Model.__init__', 'db.Model.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (11236, 11259), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((1835, 1870), 'octopus.database.db.backref', 'db.backref', (['"""users"""'], {'lazy': '"""dynamic"""'}), "('users', lazy='dynamic')\n", (1845, 1870), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((1991, 2026), 'octopus.database.db.backref', 'db.backref', (['"""users"""'], {'lazy': '"""dynamic"""'}), "('users', lazy='dynamic')\n", (2001, 2026), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((5502, 5553), 'sqlalchemy.orm.backref', 'backref', (['"""user_cases"""'], {'cascade': '"""all, delete-orphan"""'}), "('user_cases', cascade='all, delete-orphan')\n", (5509, 5553), False, 'from sqlalchemy.orm import backref\n'), ((7911, 7946), 'octopus.database.db.backref', 'db.backref', (['"""cases"""'], {'lazy': '"""dynamic"""'}), "('cases', lazy='dynamic')\n", (7921, 7946), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((9758, 9798), 'octopus.database.db.backref', 'db.backref', (['"""user_tasks"""'], {'lazy': '"""dynamic"""'}), "('user_tasks', lazy='dynamic')\n", (9768, 9798), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n'), ((11073, 11116), 'octopus.database.db.backref', 'db.backref', (['"""user_projects"""'], {'lazy': '"""dynamic"""'}), "('user_projects', lazy='dynamic')\n", (11083, 11116), False, 'from octopus.database import Column, db, Model, ReferenceCol, relationship, SurrogatePK\n')] |
#imports several modules, submodules and functions
from tkinter import *
from tkinter import messagebox
from time import sleep
from pygame import mixer
from random import randint
from ast import literal_eval
#creates the listening test class
class ListenApp(Frame):
#initiates the class's initial properties
def __init__(self,master):
super(ListenApp,self).__init__(master)
self.master.protocol("WM_DELETE_WINDOW", self.confirmclosure)
self.master.iconbitmap('aclogo.ico')
mixer.init() #initiates the mixer submodule from pygame
self.pack()
self.tries = []
self.used = []
self.resultlab = []
self.play = True
self.max = 3
self.attempts = 0
self.correct = False
self.right = 0
self.creation()
#confirms whether the program will be closed when the top-right close button is clicked
def confirmclosure(self):
if messagebox.askokcancel("Quitting the program","Are you sure you want to close this test? Data entered will NEITHER be saved NOR be considered in the final assessment results!"):
self.master.destroy()
#creates the actual window and widgets for the listening test
def creation(self):
self.lab1 = Label(self, text="Play the recording, then discern the text being said.")
self.lab1.pack()
self.lab2 = Label(self, text="Attempt count: {0} of {1}".format(self.attempts,self.max))
self.lab2.pack()
self.bot1 = Button(self, text="Play Now", command=self.playaudio)
self.bot1.pack()
self.widge = Entry(self, width=40)
self.widge.pack()
self.bot2 = Button(self, text="Enter", command=self.correction)
self.bot2.pack()
self.lab3 = Label(self, text="")
self.lab3.pack()
    #chooses which audio file to play
def playaudio(self):
if self.play and self.attempts < self.max:
self.lab3.configure(text="")
self.pick = randint(1,22)
if self.pick not in self.used:
self.used.append(self.pick)
            else:
                #already-used clip: pick again via the recursive call and return so
                #the newly chosen clip is not loaded and played a second time here
                self.playaudio()
                return
self.play = False
self.audio = str(self.pick)+".ogg"
mixer.music.load(self.audio)
mixer.music.play()
#checks to see if the response is correct, and then, if all the attempts are exhausted, the end results are compiled
def correction(self):
if self.attempts < self.max:
self.attempts += 1
self.play = True
self.check = self.widge.get().lower().strip()
self.dicto = dict([('audio',self.pick),('text',self.check)])
self.tries.append(self.dicto)
with open("Audios.txt",mode='r') as reader:
for line in reader:
if str(line) == str(self.dicto)+"\n":
self.correct = True
self.lab2.configure(text="Attempt count: {0} of {1}".format(self.attempts,self.max))
if self.correct:
self.lab3.configure(text="That is correct.")
self.right += 1
else:
self.lab3.configure(text="That is incorrect.")
self.widge.delete(0, 'end')
self.correct = False
if self.attempts == self.max:
self.end()
#compiles the end results into a list of strings
def end(self):
Label(self,text="").pack()
Label(self,text="Total correct attempts: {0} of {1}".format(self.right,self.attempts)).pack()
Label(self,text="").pack()
self.iter = 0
for info in self.tries:
with open("Audios.txt",mode='r') as reader:
for line in reader:
store = literal_eval(line)
if store["audio"] == info["audio"]:
self.iter += 1
if store["text"] == info["text"]:
axe = "Your response was correct"
else:
axe = "Your response was incorrect"
self.resultlab.append("Attempt {0}\nYour Answer: {1}\nThe Correct Answer: {2}\n{3}\n".format(self.iter, info["text"], store["text"],axe))
Label(self,text=self.resultlab[self.iter-1]).pack()
self.finish = Button(self,text="Quit",command=self.kill)
self.finish.pack()
#ends the program
def kill(self):
self.master.destroy()
#takes the data in resultlab and formats them into one string
def file(self):
self.endreturn = ""
for i in self.resultlab:
self.endreturn += i
return self.endreturn
#loads the Listening test function (NO WEBCAM)
def Listen():
root = Tk()
root.title("Listening Test (NO WEBCAM)")
root.geometry("500x500")
listen = ListenApp(root)
root.mainloop()
box = listen.file()
return box
#if the file itself is run from the main console
if __name__ == "__main__":
Listen()
| [
"pygame.mixer.init",
"tkinter.messagebox.askokcancel",
"ast.literal_eval",
"pygame.mixer.music.load",
"pygame.mixer.music.play",
"random.randint"
] | [((529, 541), 'pygame.mixer.init', 'mixer.init', ([], {}), '()\n', (539, 541), False, 'from pygame import mixer\n'), ((976, 1162), 'tkinter.messagebox.askokcancel', 'messagebox.askokcancel', (['"""Quitting the program"""', '"""Are you sure you want to close this test? Data entered will NEITHER be saved NOR be considered in the final assessment results!"""'], {}), "('Quitting the program',\n 'Are you sure you want to close this test? Data entered will NEITHER be saved NOR be considered in the final assessment results!'\n )\n", (998, 1162), False, 'from tkinter import messagebox\n'), ((2310, 2338), 'pygame.mixer.music.load', 'mixer.music.load', (['self.audio'], {}), '(self.audio)\n', (2326, 2338), False, 'from pygame import mixer\n'), ((2348, 2366), 'pygame.mixer.music.play', 'mixer.music.play', ([], {}), '()\n', (2364, 2366), False, 'from pygame import mixer\n'), ((2070, 2084), 'random.randint', 'randint', (['(1)', '(22)'], {}), '(1, 22)\n', (2077, 2084), False, 'from random import randint\n'), ((3869, 3887), 'ast.literal_eval', 'literal_eval', (['line'], {}), '(line)\n', (3881, 3887), False, 'from ast import literal_eval\n')] |
import os
from unittest.mock import MagicMock
from unittest.mock import patch
from cauldron import environ
from cauldron.test import support
from pytest import mark
CONFIG_VALUES = {
'recent_paths': [
environ.paths.resources('examples', 'hello_cauldron'),
environ.paths.resources('examples', 'hello_text'),
environ.paths.resources('examples', 'does_not_exist'),
],
'folder_aliases': {}
}
@patch('cauldron.environ.configs')
def test_list_all(configs: MagicMock):
"""Should list all known projects."""
configs.fetch.side_effect = CONFIG_VALUES.get
response = support.run_command('list all')
assert response.success, 'Expect command to succeed.'
assert support.has_success_code(response, 'FOUND')
examples_directory = environ.paths.resources('examples')
data = response.data
assert {examples_directory} == set(data['spec_groups'].keys()), """
Expect only a single spec group because we've only given
the discovery function a single project to work from in the
examples directory.
"""
children = os.listdir(examples_directory)
assert len(children) == len(data['spec_groups'][examples_directory]), """
Expect each folder in the examples directory to be a project
that should be represented within the spec group.
"""
assert len(children) == len(data['specs']), """
Expect each folder in the examples directory to be a project
that should be represented by a spec.
"""
@patch('cauldron.environ.configs')
def test_list_recent(configs: MagicMock):
"""Should list recent existing projects."""
configs.fetch.side_effect = CONFIG_VALUES.get
response = support.run_command('list recent')
assert response.success, 'Expect command to succeed.'
assert support.has_success_code(response, 'PROJECT_HISTORY')
examples_directory = environ.paths.resources('examples')
data = response.data
names = {'hello_cauldron', 'hello_text'}
assert names == {p['name'] for p in data['projects']}, """
Expect one entry for each of the projects listed in the configs
that exist. The third non-existent project should be ignored from
the returned results.
"""
@patch('cauldron.environ.configs')
def test_list_recent_none_available(configs: MagicMock):
"""Should list no recent projects when none are available."""
configs.fetch.side_effect = {}.get
response = support.run_command('list recent')
assert response.success, 'Expect command to succeed.'
assert support.has_success_code(response, 'PROJECT_HISTORY')
assert response.messages[0].message == 'No recent projects found.'
ERASE_SCENARIOS = [
{'args': '', 'code': 'NO_IDENTIFIER_SET', 'success': False},
{'args': 'a', 'code': 'NO_MATCH_FOUND', 'success': False},
{'args': 'hello_text', 'code': 'USER_ABORTED', 'success': True},
{'args': 'hello_text', 'code': 'REMOVED', 'success': True, 'input': 'yes'},
{'args': 'hello_text --yes', 'code': 'REMOVED', 'success': True},
]
@mark.parametrize('scenario', ERASE_SCENARIOS)
@patch('cauldron.cli.commands.listing._remover.input')
@patch('cauldron.environ.configs')
def test_list_erase(
configs: MagicMock,
remover_input: MagicMock,
scenario: dict
):
"""Should execute remove action according to specified scenario."""
configs.fetch.side_effect = CONFIG_VALUES.get
remover_input.return_value = scenario.get('input', '')
response = support.run_command('list erase {}'.format(scenario['args']))
if scenario['success']:
assert response.success, 'Expect command to succeed.'
assert support.has_success_code(response, scenario['code'])
else:
assert response.failed, 'Expect command to fail.'
assert support.has_error_code(response, scenario['code'])
assert configs.put.called == (scenario['code'] == 'REMOVED')
AUTO_COMPLETE_SCENARIOS = [
{'args': '', 'expected': {'all', 'erase', 'recent'}},
{'args': 'r', 'expected': {'recent'}},
{'args': 'recent ', 'expected': set()},
]
@mark.parametrize('scenario', AUTO_COMPLETE_SCENARIOS)
def test_autocomplete(scenario):
"""Should return expected autocompletes based on scenario."""
result = support.autocomplete('list {}'.format(scenario['args']))
assert scenario['expected'] == set(result)
| [
"os.listdir",
"cauldron.environ.paths.resources",
"cauldron.test.support.run_command",
"cauldron.test.support.has_success_code",
"pytest.mark.parametrize",
"cauldron.test.support.has_error_code",
"unittest.mock.patch"
] | [((429, 462), 'unittest.mock.patch', 'patch', (['"""cauldron.environ.configs"""'], {}), "('cauldron.environ.configs')\n", (434, 462), False, 'from unittest.mock import patch\n'), ((1533, 1566), 'unittest.mock.patch', 'patch', (['"""cauldron.environ.configs"""'], {}), "('cauldron.environ.configs')\n", (1538, 1566), False, 'from unittest.mock import patch\n'), ((2267, 2300), 'unittest.mock.patch', 'patch', (['"""cauldron.environ.configs"""'], {}), "('cauldron.environ.configs')\n", (2272, 2300), False, 'from unittest.mock import patch\n'), ((3082, 3127), 'pytest.mark.parametrize', 'mark.parametrize', (['"""scenario"""', 'ERASE_SCENARIOS'], {}), "('scenario', ERASE_SCENARIOS)\n", (3098, 3127), False, 'from pytest import mark\n'), ((3129, 3182), 'unittest.mock.patch', 'patch', (['"""cauldron.cli.commands.listing._remover.input"""'], {}), "('cauldron.cli.commands.listing._remover.input')\n", (3134, 3182), False, 'from unittest.mock import patch\n'), ((3184, 3217), 'unittest.mock.patch', 'patch', (['"""cauldron.environ.configs"""'], {}), "('cauldron.environ.configs')\n", (3189, 3217), False, 'from unittest.mock import patch\n'), ((4125, 4178), 'pytest.mark.parametrize', 'mark.parametrize', (['"""scenario"""', 'AUTO_COMPLETE_SCENARIOS'], {}), "('scenario', AUTO_COMPLETE_SCENARIOS)\n", (4141, 4178), False, 'from pytest import mark\n'), ((609, 640), 'cauldron.test.support.run_command', 'support.run_command', (['"""list all"""'], {}), "('list all')\n", (628, 640), False, 'from cauldron.test import support\n'), ((711, 754), 'cauldron.test.support.has_success_code', 'support.has_success_code', (['response', '"""FOUND"""'], {}), "(response, 'FOUND')\n", (735, 754), False, 'from cauldron.test import support\n'), ((781, 816), 'cauldron.environ.paths.resources', 'environ.paths.resources', (['"""examples"""'], {}), "('examples')\n", (804, 816), False, 'from cauldron import environ\n'), ((1103, 1133), 'os.listdir', 'os.listdir', (['examples_directory'], {}), '(examples_directory)\n', (1113, 1133), False, 'import os\n'), ((1722, 1756), 'cauldron.test.support.run_command', 'support.run_command', (['"""list recent"""'], {}), "('list recent')\n", (1741, 1756), False, 'from cauldron.test import support\n'), ((1827, 1880), 'cauldron.test.support.has_success_code', 'support.has_success_code', (['response', '"""PROJECT_HISTORY"""'], {}), "(response, 'PROJECT_HISTORY')\n", (1851, 1880), False, 'from cauldron.test import support\n'), ((1907, 1942), 'cauldron.environ.paths.resources', 'environ.paths.resources', (['"""examples"""'], {}), "('examples')\n", (1930, 1942), False, 'from cauldron import environ\n'), ((2478, 2512), 'cauldron.test.support.run_command', 'support.run_command', (['"""list recent"""'], {}), "('list recent')\n", (2497, 2512), False, 'from cauldron.test import support\n'), ((2583, 2636), 'cauldron.test.support.has_success_code', 'support.has_success_code', (['response', '"""PROJECT_HISTORY"""'], {}), "(response, 'PROJECT_HISTORY')\n", (2607, 2636), False, 'from cauldron.test import support\n'), ((215, 268), 'cauldron.environ.paths.resources', 'environ.paths.resources', (['"""examples"""', '"""hello_cauldron"""'], {}), "('examples', 'hello_cauldron')\n", (238, 268), False, 'from cauldron import environ\n'), ((278, 327), 'cauldron.environ.paths.resources', 'environ.paths.resources', (['"""examples"""', '"""hello_text"""'], {}), "('examples', 'hello_text')\n", (301, 327), False, 'from cauldron import environ\n'), ((337, 390), 'cauldron.environ.paths.resources', 'environ.paths.resources', 
(['"""examples"""', '"""does_not_exist"""'], {}), "('examples', 'does_not_exist')\n", (360, 390), False, 'from cauldron import environ\n'), ((3692, 3744), 'cauldron.test.support.has_success_code', 'support.has_success_code', (['response', "scenario['code']"], {}), "(response, scenario['code'])\n", (3716, 3744), False, 'from cauldron.test import support\n'), ((3828, 3878), 'cauldron.test.support.has_error_code', 'support.has_error_code', (['response', "scenario['code']"], {}), "(response, scenario['code'])\n", (3850, 3878), False, 'from cauldron.test import support\n')] |
# PyAutomate
# Copyright (c) <NAME>
# Licensed under CC Attribution
#Import time for time.wait
import time
#Import sys for sys.argv
import sys
while True:
#Figure out if any Args have been given
if not len(sys.argv) >= 3:
#If not, print Usage
print("PyAutomate 0.1")
print("Usage: python pyautomate.py [time] [false/true] [path]")
print("")
print("Time betwenn executions - [time] - In Seconds")
print("Repeat or not? - [false/true] - HAS to be true or false")
print("File to run - [path] - Full Directory")
raise SystemExit
    #Run the parser if 3 or more args are given
if len(sys.argv) >= 3:
#Get the Time to wait and convert to float
timetowait = float(sys.argv[1])
#If repeat is false, Run after X amount of time
if sys.argv[2] == "false":
time.sleep(timetowait)
exec(open(str(sys.argv[3])).read())
raise SystemExit
#If repeat is true, Run every X amount of time
if sys.argv[2] == "true":
while True:
time.sleep(timetowait)
exec(open(str(sys.argv[3])).read())
| [
"time.sleep"
] | [((879, 901), 'time.sleep', 'time.sleep', (['timetowait'], {}), '(timetowait)\n', (889, 901), False, 'import time\n'), ((1109, 1131), 'time.sleep', 'time.sleep', (['timetowait'], {}), '(timetowait)\n', (1119, 1131), False, 'import time\n')] |
# Test Case service functions
import pytest
import datetime
from core import CaseService
from core.CaseService import Authorizer
from data import Case
from data.CaseRepository import CaseRepository
@pytest.fixture(scope="class")
def case_service():
case_repo = CaseRepository()
autz = Authorizer()
return CaseService.CaseService(case_repo, autz)
class TestCaseService(object):
def test_create(self, case_service: CaseService.CaseService):
case = Case(1, "testCase", "testDescription")
new_id = case_service.save(case)
print(f"{case}")
assert new_id > 0
assert new_id == case.case_id
def test_edit(self, case_service: CaseService.CaseService):
case = Case(1, "testCase", "testDescription")
new_id = case_service.save(case)
assert case.name == "testCase"
case.name = "new_name"
case_service.save(case)
case_to_test = case_service.find_by_id(new_id)
assert case_to_test == case
assert case_to_test.name == "new_name"
assert case_to_test.updatedOn is not None
assert datetime.datetime.now() - case_to_test.updatedOn < datetime.timedelta(minutes=1)
def test_delete(self, case_service: CaseService.CaseService):
case = Case(1, "testCase", "testDescription")
new_id = case_service.save(case)
assert case.deleted is False
assert case.deletedOn is None
case_service.archive(case)
case_to_test = case_service.find_by_id(new_id)
assert case_to_test == case
assert case_to_test.deleted is True
assert case_to_test.deletedOn is not None
assert datetime.datetime.now() - case_to_test.deletedOn < datetime.timedelta(minutes=1)
| [
"data.CaseRepository.CaseRepository",
"core.CaseService.Authorizer",
"datetime.datetime.now",
"data.Case",
"pytest.fixture",
"datetime.timedelta",
"core.CaseService.CaseService"
] | [((202, 231), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""class"""'}), "(scope='class')\n", (216, 231), False, 'import pytest\n'), ((268, 284), 'data.CaseRepository.CaseRepository', 'CaseRepository', ([], {}), '()\n', (282, 284), False, 'from data.CaseRepository import CaseRepository\n'), ((296, 308), 'core.CaseService.Authorizer', 'Authorizer', ([], {}), '()\n', (306, 308), False, 'from core.CaseService import Authorizer\n'), ((320, 360), 'core.CaseService.CaseService', 'CaseService.CaseService', (['case_repo', 'autz'], {}), '(case_repo, autz)\n', (343, 360), False, 'from core import CaseService\n'), ((475, 513), 'data.Case', 'Case', (['(1)', '"""testCase"""', '"""testDescription"""'], {}), "(1, 'testCase', 'testDescription')\n", (479, 513), False, 'from data import Case\n'), ((724, 762), 'data.Case', 'Case', (['(1)', '"""testCase"""', '"""testDescription"""'], {}), "(1, 'testCase', 'testDescription')\n", (728, 762), False, 'from data import Case\n'), ((1274, 1312), 'data.Case', 'Case', (['(1)', '"""testCase"""', '"""testDescription"""'], {}), "(1, 'testCase', 'testDescription')\n", (1278, 1312), False, 'from data import Case\n'), ((1162, 1191), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', (1180, 1191), False, 'import datetime\n'), ((1717, 1746), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', (1735, 1746), False, 'import datetime\n'), ((1111, 1134), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1132, 1134), False, 'import datetime\n'), ((1666, 1689), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1687, 1689), False, 'import datetime\n')] |
import numpy as np
import cv2
import torch
from torch.utils.data import Dataset
from utils.jpeg import JPEG
import random
from preprocess_jpeg import load_file
from utils.djmd import *
class DCTDataset(Dataset):
def __init__(self, filename="dataset", q=50) -> None:
super().__init__()
self.data = load_file("data", filename)
del self.data["y"]
self.q = q
self.jpeg = JPEG(q, False)
self.x = []
self.y = []
self.quan()
def quan(self):
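        # quantize every block of each sample (the flag idx > 3 is passed through to
        # jpeg.quanti), keep the raw blocks as targets, store the normalization
        # constants, then shift-and-normalize both inputs and targets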
for i in self.data['x']:
self.x.append([self.jpeg.quanti(item, idx>3) for idx, item in enumerate(i)])
self.y.append(i)
SHIFT_X, SCALE_X = get_shift_scale_maxmin(self.x)
with open(f"./weights/normalize_q{self.q}.data", "w") as file:
file.write(f"{SHIFT_X},{SCALE_X}")
self.x = shift_and_normalize(np.array(self.x), SHIFT_X, SCALE_X)
self.y = shift_and_normalize(np.array(self.y), SHIFT_Y, SCALE_Y)
def __getitem__(self, index):
# x = y = self.data["x"][index]
# q_arr = [self.jpeg.quanti(item, idx>3) for idx, item in enumerate(x)]
# x = np.array(q_arr)
return self.x[index], self.y[index]
def __len__(self):
return len(self.x)
| [
"numpy.array",
"utils.jpeg.JPEG",
"preprocess_jpeg.load_file"
] | [((318, 345), 'preprocess_jpeg.load_file', 'load_file', (['"""data"""', 'filename'], {}), "('data', filename)\n", (327, 345), False, 'from preprocess_jpeg import load_file\n'), ((412, 426), 'utils.jpeg.JPEG', 'JPEG', (['q', '(False)'], {}), '(q, False)\n', (416, 426), False, 'from utils.jpeg import JPEG\n'), ((880, 896), 'numpy.array', 'np.array', (['self.x'], {}), '(self.x)\n', (888, 896), True, 'import numpy as np\n'), ((953, 969), 'numpy.array', 'np.array', (['self.y'], {}), '(self.y)\n', (961, 969), True, 'import numpy as np\n')] |
import os
import sys
import shutil
import tempfile
from launcher import schema
from launcher.vendor import yaml
self = sys.modules[__name__]
def setup():
self.root = tempfile.mkdtemp()
self.config = {
"schema": "avalon-core:config-1.0",
"apps": [
{
"name": "maya2016"
},
{
"name": "python",
"args": [
"-u",
"-c",
"print('Something nice')"
]
}
],
"tasks": [
{
"label": "animation",
"name": "animation"
}
],
"template": {
"publish": "{projectpath}/publish",
"work": "{projectpath}/work"
}
}
self.inventory = {
"schema": "avalon-core:inventory-1.0",
"assets": {
"Batman": None,
"Tarantula": None
},
"film": {
"1000": {
"edit_in": 1000,
"edit_out": 1143
},
"1200": {
"edit_in": 1000,
"edit_out": 1081
},
"2000": None,
"2100": None,
"2400": None
}
}
self.application = {
"schema": "avalon-core:application-1.0",
"label": "Autodesk Maya 2016x64",
"description": "",
"application_dir": "maya",
"executable": "maya2016",
"default_dirs": [
"scenes",
"data",
],
"environment": {
"MAYA_DISABLE_CLIC_IPM": "Yes",
"PYTHONPATH": [
"{PYBLISH_MAYA}/pyblish_maya/pythonpath",
"{AVALON_CORE}/avalon/maya/pythonpath",
"{PYTHONPATH}"
]
},
"arguments": [
"-proj",
"{AVALON_WORKDIR}"
],
"copy": {
"{AVALON_CORE}/res/workspace.mel":
"{AVALON_WORKDIR}/workspace.mel"
}
}
os.environ["PATH"] += os.pathsep.join([
os.environ["PATH"],
self.root
])
with open(os.path.join(self.root, "python.yml"), "w") as f:
yaml.dump({
"executable": "python",
"application_dir": "python",
"label": "Python 2.7"
}, f)
def teardown():
shutil.rmtree(self.root)
def test_config():
schema.validate(self.config, "config")
def test_inventory():
schema.validate(self.inventory, "inventory")
def test_application():
schema.validate(self.application, "application")
| [
"launcher.vendor.yaml.dump",
"os.pathsep.join",
"os.path.join",
"tempfile.mkdtemp",
"shutil.rmtree",
"launcher.schema.validate"
] | [((174, 192), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (190, 192), False, 'import tempfile\n'), ((2094, 2142), 'os.pathsep.join', 'os.pathsep.join', (["[os.environ['PATH'], self.root]"], {}), "([os.environ['PATH'], self.root])\n", (2109, 2142), False, 'import os\n'), ((2397, 2421), 'shutil.rmtree', 'shutil.rmtree', (['self.root'], {}), '(self.root)\n', (2410, 2421), False, 'import shutil\n'), ((2447, 2485), 'launcher.schema.validate', 'schema.validate', (['self.config', '"""config"""'], {}), "(self.config, 'config')\n", (2462, 2485), False, 'from launcher import schema\n'), ((2514, 2558), 'launcher.schema.validate', 'schema.validate', (['self.inventory', '"""inventory"""'], {}), "(self.inventory, 'inventory')\n", (2529, 2558), False, 'from launcher import schema\n'), ((2589, 2637), 'launcher.schema.validate', 'schema.validate', (['self.application', '"""application"""'], {}), "(self.application, 'application')\n", (2604, 2637), False, 'from launcher import schema\n'), ((2238, 2332), 'launcher.vendor.yaml.dump', 'yaml.dump', (["{'executable': 'python', 'application_dir': 'python', 'label': 'Python 2.7'}", 'f'], {}), "({'executable': 'python', 'application_dir': 'python', 'label':\n 'Python 2.7'}, f)\n", (2247, 2332), False, 'from launcher.vendor import yaml\n'), ((2180, 2217), 'os.path.join', 'os.path.join', (['self.root', '"""python.yml"""'], {}), "(self.root, 'python.yml')\n", (2192, 2217), False, 'import os\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
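    # wraps a feature-extracting `core` module (expected to emit 4 features per sample)
    # with a three-layer fully-connected head ending in a softmax over `out_nodes` classes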
def __init__(self, core, out_nodes):
super(Model, self).__init__()
self.core = core
self.fc1 = nn.Linear(4, 24)
self.fc2 = nn.Linear(24, 24)
self.fc3 = nn.Linear(24, out_nodes)
def forward(self, x):
x = self.core(x)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
x = F.softmax(x, dim=1)
return x
| [
"torch.nn.functional.softmax",
"torch.nn.functional.relu",
"torch.nn.Linear"
] | [((217, 233), 'torch.nn.Linear', 'nn.Linear', (['(4)', '(24)'], {}), '(4, 24)\n', (226, 233), True, 'import torch.nn as nn\n'), ((253, 270), 'torch.nn.Linear', 'nn.Linear', (['(24)', '(24)'], {}), '(24, 24)\n', (262, 270), True, 'import torch.nn as nn\n'), ((290, 314), 'torch.nn.Linear', 'nn.Linear', (['(24)', 'out_nodes'], {}), '(24, out_nodes)\n', (299, 314), True, 'import torch.nn as nn\n'), ((403, 412), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (409, 412), True, 'import torch.nn.functional as F\n'), ((449, 458), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (455, 458), True, 'import torch.nn.functional as F\n'), ((495, 514), 'torch.nn.functional.softmax', 'F.softmax', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (504, 514), True, 'import torch.nn.functional as F\n')] |
import argparse
import os
############################################ Arguments and declarations ##############################################
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-i",
help="input dir", type=str, default='.',metavar='current dir (.)')
parser.add_argument("-f",
help="input filename", type=str, default='input.faa',metavar='input.faa')
################################################## Definition ########################################################
args = parser.parse_args()
################################################### Function #######################################################
def hmmformat(hmmresult):
# format hmm results
f1 = open(hmmresult + '2.txt', 'a')
for line in open(hmmresult, 'r'):
if str(line)[0] == '#':
pass
else:
line = str(line).replace(' # ', '#')
            # collapse runs of spaces so the columns end up single-space separated
            while line != str(line).replace('  ', ' '):
                line = str(line).replace('  ', ' ')
line = str(line).replace(' ', '\t')
line = str(line).replace('#', ' # ')
filedir, filename = os.path.split(hmmresult)
filename = filename.split('.hmm')[0]
f1.write(filename + '_' + line)
f1.close()
################################################### Programme #######################################################
hmmformat(os.path.join(args.i,args.f))
| [
"os.path.join",
"argparse.ArgumentParser",
"os.path.split"
] | [((159, 236), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), '(formatter_class=argparse.RawDescriptionHelpFormatter)\n', (182, 236), False, 'import argparse\n'), ((1509, 1537), 'os.path.join', 'os.path.join', (['args.i', 'args.f'], {}), '(args.i, args.f)\n', (1521, 1537), False, 'import os\n'), ((1239, 1263), 'os.path.split', 'os.path.split', (['hmmresult'], {}), '(hmmresult)\n', (1252, 1263), False, 'import os\n')] |
import logging
import requests
import atexit
logger = logging.getLogger(f"scraper.{__name__}")
logging.basicConfig(level=logging.INFO)
logging.getLogger('suds').setLevel(logging.INFO)
import config
import utils
from properties import properties_info, num_properties_with_data, num_properties, Property
# add property id: example: https://phoenix.onmap.co.il/v1/properties/BJOWzFv5K
class Scraper:
onmap_endpoint = """https://phoenix.onmap.co.il/v1/properties/mixed_search"""
property_info_endpoint = 'https://phoenix.onmap.co.il/v1/properties/'
params = {'option': '',
# 'section': 'residence',
# '$sort': '-is_top_promoted -search_date',
'$limit': '300',
'$skip': '0'}
num_properties_with_data = num_properties_with_data
properties_info = properties_info
session = requests.Session()
@classmethod
def scrape_properties_ids(cls, buy_or_rent_option: str) -> list:
"""
Scrapes the properties ids from the website and saves them in the properties info json
:param buy_or_rent_option: can take either the value 'rent' or 'buy' or 'both' to combine both options
:return:
"""
if buy_or_rent_option == 'both':
all_properties_ids = []
for option in ['buy', 'rent']:
all_properties_ids.extend(cls.scrape_properties_ids(buy_or_rent_option=option))
return all_properties_ids
scraped_properties_ids_list = []
newly_scraped_properties = True
cls.params['$skip'] = '0'
cls.params['option'] = buy_or_rent_option
with cls.session as session:
while newly_scraped_properties:
response = session.get(url=cls.onmap_endpoint, params=cls.params)
data = response.json()['data']
newly_scraped_properties = [property.get('id') for property in data]
scraped_properties_ids_list.extend(newly_scraped_properties)
# To scrape the properties that follow
cls.params['$skip'] = str(int(cls.params['$skip']) + 300)
cls.register_properties_ids(scraped_properties_ids_list)
logger.info(f'Finished scraping properties ids with option {buy_or_rent_option}')
return scraped_properties_ids_list
@staticmethod
def get_properties_ids_without_data(properties_info: dict) -> list:
"""
Retrieves the properties ids without the data info already in the DB
:param properties_info:
:return:
"""
return [property for property in properties_info if not properties_info[property].get('data')]
@staticmethod
def get_properties_ids_with_data(properties_info: dict) -> list:
"""
        Retrieves the properties ids with the data info already in the DB
:param properties_info:
:return:
"""
return [property for property in properties_info if properties_info[property].get('data')]
@staticmethod
def register_properties_ids(properties_ids_list: list) -> None:
"""
        Registers any property ids not seen before as empty entries in properties_info.
        :param properties_ids_list: property ids scraped in the current run
:return:
"""
# We would like to save the properties ids in a file for later retrieval
existing_properties_ids = utils.load_properties_ids()
# Only keep the ones which are not already in the json
properties_ids_to_save = list(set(properties_ids_list) - set(existing_properties_ids))
for property_id in properties_ids_to_save:
properties_info[property_id] = dict()
@classmethod
def get_list_properties_ids(cls):
"""
:return:
"""
scraped_properties_ids_list = []
existing_properties_ids = utils.load_properties_ids()
if not existing_properties_ids:
scraped_properties_ids_list = cls.scrape_properties_ids(buy_or_rent_option='both')
all_properties_ids = set(existing_properties_ids).union(set(scraped_properties_ids_list))
return list(all_properties_ids)
@classmethod
def scrape_and_register_properties_info(cls, list_properties_ids: list,
buy_or_rent_option: str) -> None:
"""
:param buy_or_rent_option:
:param list_properties_ids:
:return:
"""
with cls.session as session:
for property_id in list_properties_ids:
try:
property_info_url = cls.property_info_endpoint + '/' + property_id
response = session.get(url=property_info_url)
data = response.json()
if data:
scraping_date = config.TODAY_DATE
properties_info[property_id] = {'data': data,
'scraping_date': scraping_date,}
# 'buy_or_rent_option': buy_or_rent_option}
cls.num_properties_with_data += 1
except requests.exceptions.RequestException as re:
logger.info(re)
# Save properties info regularly
if cls.num_properties_with_data % 50 == 0:
logger.info(f'Saving scraped properties info...')
logger.info(f"{cls.num_properties_with_data}")
logger.info(f'Progress: {round(100*cls.num_properties_with_data/num_properties)}%')
Property.save_properties_info(properties_info)
def scrape_properties_from_onmap(buy_or_rent_option: str = 'both') -> None:
"""
:param:
:return:
"""
# TODO: scrape the properties only if data older than 1 week OR force_scraping flag is True
scraped_properties_ids = Scraper.scrape_properties_ids(buy_or_rent_option=buy_or_rent_option)
without_data_properties_ids = Scraper.get_properties_ids_without_data(properties_info=properties_info)
properties_ids_with_data = Scraper.get_properties_ids_with_data(properties_info=properties_info)
properties_ids_to_scrape = list(set(scraped_properties_ids).union(set(without_data_properties_ids)) - set(properties_ids_with_data))
logger.info(f"number properties ids to scrape {len(properties_ids_to_scrape)}")
Scraper.scrape_and_register_properties_info(list_properties_ids=properties_ids_to_scrape,
buy_or_rent_option=buy_or_rent_option)
def exit_handler():
Property().save_properties_info(properties_info=properties_info)
def main():
scrape_properties_from_onmap(buy_or_rent_option='both')
atexit.register(exit_handler)
if __name__ == '__main__':
main() | [
"logging.getLogger",
"logging.basicConfig",
"properties.Property.save_properties_info",
"requests.Session",
"properties.Property",
"utils.load_properties_ids",
"atexit.register"
] | [((55, 95), 'logging.getLogger', 'logging.getLogger', (['f"""scraper.{__name__}"""'], {}), "(f'scraper.{__name__}')\n", (72, 95), False, 'import logging\n'), ((96, 135), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (115, 135), False, 'import logging\n'), ((853, 871), 'requests.Session', 'requests.Session', ([], {}), '()\n', (869, 871), False, 'import requests\n'), ((6669, 6698), 'atexit.register', 'atexit.register', (['exit_handler'], {}), '(exit_handler)\n', (6684, 6698), False, 'import atexit\n'), ((136, 161), 'logging.getLogger', 'logging.getLogger', (['"""suds"""'], {}), "('suds')\n", (153, 161), False, 'import logging\n'), ((3292, 3319), 'utils.load_properties_ids', 'utils.load_properties_ids', ([], {}), '()\n', (3317, 3319), False, 'import utils\n'), ((3755, 3782), 'utils.load_properties_ids', 'utils.load_properties_ids', ([], {}), '()\n', (3780, 3782), False, 'import utils\n'), ((6526, 6536), 'properties.Property', 'Property', ([], {}), '()\n', (6534, 6536), False, 'from properties import properties_info, num_properties_with_data, num_properties, Property\n'), ((5530, 5576), 'properties.Property.save_properties_info', 'Property.save_properties_info', (['properties_info'], {}), '(properties_info)\n', (5559, 5576), False, 'from properties import properties_info, num_properties_with_data, num_properties, Property\n')] |
#!/usr/bin/python
# Oct 2019 JMA
# splits_aggregator.py write scale events to mongoDB
'''
Run a simulation of interactive incremental classification.
Usage:
    $ ./splits_aggregator.py [-v] [-q] [-d root_dir] [-r rules_per_sample] [-p rule_pairs]
-v verbose output (more than normal)
-d root directory to read from
-q quiet output (less than normal)
-r rules per sample (how deep into the sample sorted words)
    -p rule pairs (number of random cross-class item pairs to compare)
'''
import os
import copy
import glob
import math
import pprint
import subprocess
import sys
import re
import string
import time
from pathlib import Path
from collections import namedtuple
import numpy as np
import pandas as pd
# import bokeh as bk
from sklearn.metrics import confusion_matrix, precision_recall_fscore_support
import metric_graphics as mg
import pq
__author__ = '<NAME> <EMAIL>'
### config constants
VERBOSE = False
ROOT_DIR = Path('C:/Users/joagosta/OneDrive - Microsoft/data/20news')
QUIET = True
RULES_PER_SAMPLE = 1
RULE_PAIRS = 400
Rule = namedtuple('Rule', ['pattern', 'label', 'hits'])
ss = dict(group1 = (4,3), group2=(4,4), pattern1=(5,3), pattern2=(5,4), sample1=(7,3), sample2=(7,4))
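# (row, column) coordinates of the labelled cells in the comparison spreadsheet,
# used with DataFrame.iloc on the template read with header=None.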
########################################################################
class CollectSplits(object):
'Collect csv files from participants and aggregate.'
def __init__(self, cvs_dir, glob_pattern = '*.csv'):
self.user_rules = []
#self.as_df = None
for k, data_file in enumerate(cvs_dir.glob( glob_pattern)):
print(k,':', end = ' ')
try:
self.add_file(data_file)
except Exception as err:
print(f"{err}\nSkipping file: {data_file} Cannot load it.", file=sys.stderr)
def add_file(self, the_fname):
'convert ss to rule'
self.as_df = pd.read_csv(the_fname, header=None )
#patterns = (self.as_df.iloc[ss['pattern1']], self.as_df.iloc[ss['pattern2']]) # comp.iloc[ss["group1"]]
# Create rules out of the pattens.
try:
rule1 = Rule(self.get_cell('pattern1'), self.get_cell('group1'), 0)
self.user_rules.append(rule1)
rule2 = Rule(self.get_cell('pattern2'), self.get_cell('group2'), 0)
self.user_rules.append(rule2)
if VERBOSE:
print("Rule patterns: ", rule1, rule2)
except ValueError:
print(the_fname, " corrupt contents", file=sys.stderr)
def get_cell(self, cell_name):
cell = self.as_df.iloc[ss[cell_name]]
if (type(cell) is str) and (len(cell) > 2):
cell = cell.strip('"\'')
return cell
else:
raise ValueError
########################################################################
########################################################################
class BinaryComparisons(object):
'Randomly pair training cases to find words specific to each class.'
def __init__(self, input_dir):
self.patterns_dir = input_dir / 'patterns'
self.rules_dir = input_dir / 'rules'
# check that the shared and rules directories have been created.
if not self.patterns_dir.exists():
self.patterns_dir.mkdir()
if not self.rules_dir.exists():
self.rules_dir.mkdir()
self.full_df = pq.reload_parquet(input_dir / 'train_clean') # 'merge all training data'
self.full_df.reset_index()
def random_pairs(self, no_pairs):
'Sample without replacement for pairwise item comparisons'
pair_df = np.empty(shape=(0,6))
for n0 in range(no_pairs):
reps =0
while reps < 100:
pair = self.full_df.sample(2, replace=False)
labels = pair['label'].values
if labels[0] !=labels[1]:
break
reps +=1
row = np.hstack([pair.iloc[0,].values, pair.iloc[1,].values])
pair_df = np.vstack([pair_df, row])
pair_df = pd.DataFrame(pair_df)
pair_df.columns = ['label1', 'item1', 'msg1', 'label2', 'item2', 'msg2']
return pair_df
def simulate_splits(self, pair_df):
'Find pairs of words that distinguish the pair. '
selection_rules = []
for r in range(len(pair_df)):
row = pair_df.iloc[r,]
msg1 = row[2]
lbl1 = row[0]
msg2 = row[5]
lbl2 = row[3]
            # Cheap heuristic - use the longest words as candidate classifiers
w1 = sorted(msg1.split(), key= lambda w: len(w), reverse=True)
w2 = sorted(msg2.split(), key= lambda w: len(w), reverse=True)
# Create rules out of the first RULES_PER_SAMPLE words
for k in range(RULES_PER_SAMPLE):
# Check if the word appears in the opposite sample and fail if it does.
                if len(w1) > k and not (w1[k] in msg2.split()):
                    selection_rules.append(Rule(w1[k], lbl1, 0))
                elif len(w1) > k:
                    print('Failed selection rule ', lbl2, ':', w1[k], file=sys.stderr)
                if len(w2) > k and not (w2[k] in msg1.split()):
                    selection_rules.append(Rule(w2[k], lbl2, 0))
                elif len(w2) > k:
                    print('Failed selection rule ', lbl1, ':', w2[k], file=sys.stderr)
# if not QUIET:
print(len(selection_rules), " selection rules.")
return selection_rules
def embed_in_excel(self, pairs, groups_template = Path('../../template/pairwise_comparisions.csv')):
'Export ss files with examples of pair-wise comparisons that users can fill in and submit. '
if groups_template.exists():
the_template= pd.read_csv(groups_template, header=None )
else:
print(groups_template, " not found", file=sys.stderr)
return None
for k in range(len(pairs)):
comp = copy.copy(the_template)
a_pair = pairs.iloc[k,]
comp.iloc[ss["group1"]] = a_pair['label1'] # ['label1', 'item1', 'msg1', 'label2', 'item2', 'msg2']
comp.iloc[ss["group2"]] = a_pair['label2']
comp.iloc[ss["sample1"]] = a_pair['msg1']
comp.iloc[ss["sample2"]] = a_pair['msg2']
if VERBOSE:
for vals in ss.values():
print(comp.iloc[vals])
case_fn = self.patterns_dir / (str(a_pair["item1"]) + '-' + str(a_pair["item2"]) + '.csv')
comp.to_csv(case_fn, header=False, index=False )
########################################################################
class SplitClassifier (object):
'Assemble the splits are run them with highest precision lowest coverage first.'
def __init__(self, rules):
self.rules = rules
def order_by_hits(self, full_df):
'Run each rule over all msgs, counting msgs that fire the rule.'
# TODO count the number of hits over all samples for each rule.
hits = [0] * len(self.rules)
match = 0
miss = 0
for j, a_rule in enumerate(self.rules):
for k, v in enumerate(full_df['msg']):
if a_rule.pattern in v:
# Add a count of how many times the rule matches
hits[j] += 1
if a_rule.label == full_df['label'].iloc[k]:
if VERBOSE: print(k,'-', a_rule.label, ':',a_rule.pattern)
match +=1
else:
#print(a_rule.label, ':',full_df['label'].iloc[k] , end = ' ')
miss +=1
if not QUIET: print(f'\n{j}:{a_rule.pattern} Match {match}, miss {miss}, hits {hits[j]}.')
print(f'\n============\nMatch {match}, miss {miss}, TOT {match+miss}.')
# TODO Sort by most specific rules first.
def compute_confusion(self, full_df):
'Return the confusion matrix and stats for this classifier.'
# Run the ruleset over the sample item until a rule fires
# Then record the class of the rule.
predicted_labels = len(full_df)*["None"]
for k, content in enumerate(full_df['msg']):
for j, a_rule in enumerate(self.rules):
if a_rule.pattern in content:
predicted_labels[k] = a_rule.label
break
true_y = list(full_df["label"])
class_names = list(set(true_y))
cm = confusion_matrix(true_y, predicted_labels, class_names)
# Accuracy
diagonal = sum([cm[x,x] for x in range(len(class_names))])
totals = sum(sum(cm))
print("Accuracy on matches =", diagonal, ' / ', totals, ' = ', diagonal/totals)
cm = pd.DataFrame(cm, index=class_names)
if VERBOSE:
print(cm)
#cm.to_csv(Path(ROOT_DIR) / f"cm{'%.3f' % (diagonal/totals)}.csv")
cm.to_csv(Path(ROOT_DIR) / f"cm.csv")
prfs = precision_recall_fscore_support(true_y, predicted_labels, labels=class_names)
prfs_df = pd.DataFrame.from_dict(dict(prec= prfs[0],recall=prfs[1],F=prfs[2], sup=prfs[3], nms=class_names) )
# Compute macro averages
colsums = np.sum(prfs_df.values, axis=0)
colavgs = list(colsums[0:4]/len(prfs_df))
colavgs.append("AVGS")
prfs_df = prfs_df.append(pd.DataFrame([colavgs], columns= ['prec', 'recall', 'F', 'sup', 'nms']))#
prfs_df.set_index('nms', inplace=True)
if VERBOSE:
print(prfs_df)
# mg.matrix_heatmap(prfs_df)
return [diagonal/totals, colavgs[0], colavgs[1]]# dict(accuracy=diagonal/totals, precision=colavgs[0], recall=colavgs[1])
###############################################################################
def main(input_dir):
# Test split pattern extraction
cs = BinaryComparisons(input_dir)
pair_df = cs.random_pairs(RULE_PAIRS)
the_rules = cs.simulate_splits(pair_df) # Creates simulated rules.
if VERBOSE: pprint.pprint(the_rules)
learner = SplitClassifier(the_rules)
# learner.order_by_hits(cs.full_df)
learner.compute_confusion(cs.full_df)
# Do the same for the test data
test_df = pq.reload_parquet(input_dir / 'test_clean')
learner.compute_confusion(test_df)
return 0
########################################################################
if __name__ == '__main__':
if '-v' in sys.argv:
k = sys.argv.index('-v')
VERBOSE = True
## Inputs
if '-d' in sys.argv:
d = sys.argv.index('-d')
ROOT_DIR = Path(sys.argv[d+1]) # Assuming the path is relative to the user's home path
# else:
# ROOT_DIR = Path(DATA_DIR)
if '-q' in sys.argv:
q = sys.argv.index('-q')
QUIET = True
if '-r' in sys.argv:
r = sys.argv.index('-r')
RULES_PER_SAMPLE = int(sys.argv[r+1])
if '-p' in sys.argv:
p = sys.argv.index('-p')
RULE_PAIRS = int(sys.argv[p+1])
np.set_printoptions(linewidth=100)
main(ROOT_DIR)
print(sys.argv, "\nDone in ", '%5.3f' % time.process_time(), " secs! At UTC: ", time.asctime(time.gmtime()), file=sys.stderr)
#EOF
| [
"pq.reload_parquet",
"collections.namedtuple",
"sklearn.metrics.confusion_matrix",
"pandas.read_csv",
"pathlib.Path",
"numpy.hstack",
"sklearn.metrics.precision_recall_fscore_support",
"time.process_time",
"numpy.sum",
"numpy.empty",
"numpy.vstack",
"pandas.DataFrame",
"copy.copy",
"sys.argv.index",
"time.gmtime",
"pprint.pprint",
"numpy.set_printoptions"
] | [((862, 920), 'pathlib.Path', 'Path', (['"""C:/Users/joagosta/OneDrive - Microsoft/data/20news"""'], {}), "('C:/Users/joagosta/OneDrive - Microsoft/data/20news')\n", (866, 920), False, 'from pathlib import Path\n'), ((979, 1027), 'collections.namedtuple', 'namedtuple', (['"""Rule"""', "['pattern', 'label', 'hits']"], {}), "('Rule', ['pattern', 'label', 'hits'])\n", (989, 1027), False, 'from collections import namedtuple\n'), ((10151, 10194), 'pq.reload_parquet', 'pq.reload_parquet', (["(input_dir / 'test_clean')"], {}), "(input_dir / 'test_clean')\n", (10168, 10194), False, 'import pq\n'), ((10940, 10974), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': '(100)'}), '(linewidth=100)\n', (10959, 10974), True, 'import numpy as np\n'), ((1789, 1824), 'pandas.read_csv', 'pd.read_csv', (['the_fname'], {'header': 'None'}), '(the_fname, header=None)\n', (1800, 1824), True, 'import pandas as pd\n'), ((3296, 3340), 'pq.reload_parquet', 'pq.reload_parquet', (["(input_dir / 'train_clean')"], {}), "(input_dir / 'train_clean')\n", (3313, 3340), False, 'import pq\n'), ((3543, 3565), 'numpy.empty', 'np.empty', ([], {'shape': '(0, 6)'}), '(shape=(0, 6))\n', (3551, 3565), True, 'import numpy as np\n'), ((3987, 4008), 'pandas.DataFrame', 'pd.DataFrame', (['pair_df'], {}), '(pair_df)\n', (3999, 4008), True, 'import pandas as pd\n'), ((5489, 5537), 'pathlib.Path', 'Path', (['"""../../template/pairwise_comparisions.csv"""'], {}), "('../../template/pairwise_comparisions.csv')\n", (5493, 5537), False, 'from pathlib import Path\n'), ((8423, 8478), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['true_y', 'predicted_labels', 'class_names'], {}), '(true_y, predicted_labels, class_names)\n', (8439, 8478), False, 'from sklearn.metrics import confusion_matrix, precision_recall_fscore_support\n'), ((8697, 8732), 'pandas.DataFrame', 'pd.DataFrame', (['cm'], {'index': 'class_names'}), '(cm, index=class_names)\n', (8709, 8732), True, 'import pandas as pd\n'), ((8914, 8991), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['true_y', 'predicted_labels'], {'labels': 'class_names'}), '(true_y, predicted_labels, labels=class_names)\n', (8945, 8991), False, 'from sklearn.metrics import confusion_matrix, precision_recall_fscore_support\n'), ((9161, 9191), 'numpy.sum', 'np.sum', (['prfs_df.values'], {'axis': '(0)'}), '(prfs_df.values, axis=0)\n', (9167, 9191), True, 'import numpy as np\n'), ((9953, 9977), 'pprint.pprint', 'pprint.pprint', (['the_rules'], {}), '(the_rules)\n', (9966, 9977), False, 'import pprint\n'), ((10386, 10406), 'sys.argv.index', 'sys.argv.index', (['"""-v"""'], {}), "('-v')\n", (10400, 10406), False, 'import sys\n'), ((10482, 10502), 'sys.argv.index', 'sys.argv.index', (['"""-d"""'], {}), "('-d')\n", (10496, 10502), False, 'import sys\n'), ((10522, 10543), 'pathlib.Path', 'Path', (['sys.argv[d + 1]'], {}), '(sys.argv[d + 1])\n', (10526, 10543), False, 'from pathlib import Path\n'), ((10689, 10709), 'sys.argv.index', 'sys.argv.index', (['"""-q"""'], {}), "('-q')\n", (10703, 10709), False, 'import sys\n'), ((10769, 10789), 'sys.argv.index', 'sys.argv.index', (['"""-r"""'], {}), "('-r')\n", (10783, 10789), False, 'import sys\n'), ((10874, 10894), 'sys.argv.index', 'sys.argv.index', (['"""-p"""'], {}), "('-p')\n", (10888, 10894), False, 'import sys\n'), ((3864, 3919), 'numpy.hstack', 'np.hstack', (['[pair.iloc[0,].values, pair.iloc[1,].values]'], {}), '([pair.iloc[0,].values, pair.iloc[1,].values])\n', (3873, 3919), True, 'import numpy as 
np\n'), ((3943, 3968), 'numpy.vstack', 'np.vstack', (['[pair_df, row]'], {}), '([pair_df, row])\n', (3952, 3968), True, 'import numpy as np\n'), ((5704, 5745), 'pandas.read_csv', 'pd.read_csv', (['groups_template'], {'header': 'None'}), '(groups_template, header=None)\n', (5715, 5745), True, 'import pandas as pd\n'), ((5906, 5929), 'copy.copy', 'copy.copy', (['the_template'], {}), '(the_template)\n', (5915, 5929), False, 'import copy\n'), ((9306, 9376), 'pandas.DataFrame', 'pd.DataFrame', (['[colavgs]'], {'columns': "['prec', 'recall', 'F', 'sup', 'nms']"}), "([colavgs], columns=['prec', 'recall', 'F', 'sup', 'nms'])\n", (9318, 9376), True, 'import pandas as pd\n'), ((11038, 11057), 'time.process_time', 'time.process_time', ([], {}), '()\n', (11055, 11057), False, 'import time\n'), ((11091, 11104), 'time.gmtime', 'time.gmtime', ([], {}), '()\n', (11102, 11104), False, 'import time\n'), ((8870, 8884), 'pathlib.Path', 'Path', (['ROOT_DIR'], {}), '(ROOT_DIR)\n', (8874, 8884), False, 'from pathlib import Path\n')] |
from django.contrib.auth.decorators import login_required
from django.http.response import JsonResponse
from spendtrackapp.forms import EntryForm
from spendtrackapp.models import Entry
from spendtrackapp.views.utils import *
@login_required
def add_handler(request):
"""
Handle add new entries request
:return on success: an empty JSON object
on failure: an JSON object whose properties' names are invalid fields
and whose values are list of errors in those fields
"""
form = EntryForm(get_post(request))
if not form.is_valid():
return JsonResponse(form.errors, status=400)
entry_id = form.save().id
return JsonResponse({'id': entry_id})
@login_required
def edit_handler(request):
"""Handle edit entry requests"""
entry = get_entry(request)
# return errors, if any
if isinstance(entry, dict):
return JsonResponse(entry, status=400)
form = EntryForm(get_post(request), instance=entry)
if not form.is_valid():
return JsonResponse(form.errors, status=400)
form.save()
return JsonResponse({})
@login_required
def delete_handler(request):
"""Handle delete plan requests"""
entry = get_entry(request)
# return errors, if any
if isinstance(entry, dict):
return JsonResponse(entry, status=400)
entry.delete()
return JsonResponse({})
def get_entry(request):
if 'id' not in request.POST:
errors = 'Missing entry id'
else:
try:
entry_id = int(request.POST['id'])
entry = Entry.objects.get(id=entry_id)
if entry.user_id != request.user.id:
raise ValueError
return entry
except (Entry.DoesNotExist, ValueError):
errors = 'Invalid entry id'
return {'id': [errors]}
| [
"spendtrackapp.models.Entry.objects.get",
"django.http.response.JsonResponse"
] | [((688, 718), 'django.http.response.JsonResponse', 'JsonResponse', (["{'id': entry_id}"], {}), "({'id': entry_id})\n", (700, 718), False, 'from django.http.response import JsonResponse\n'), ((1107, 1123), 'django.http.response.JsonResponse', 'JsonResponse', (['{}'], {}), '({})\n', (1119, 1123), False, 'from django.http.response import JsonResponse\n'), ((1380, 1396), 'django.http.response.JsonResponse', 'JsonResponse', (['{}'], {}), '({})\n', (1392, 1396), False, 'from django.http.response import JsonResponse\n'), ((608, 645), 'django.http.response.JsonResponse', 'JsonResponse', (['form.errors'], {'status': '(400)'}), '(form.errors, status=400)\n', (620, 645), False, 'from django.http.response import JsonResponse\n'), ((909, 940), 'django.http.response.JsonResponse', 'JsonResponse', (['entry'], {'status': '(400)'}), '(entry, status=400)\n', (921, 940), False, 'from django.http.response import JsonResponse\n'), ((1041, 1078), 'django.http.response.JsonResponse', 'JsonResponse', (['form.errors'], {'status': '(400)'}), '(form.errors, status=400)\n', (1053, 1078), False, 'from django.http.response import JsonResponse\n'), ((1317, 1348), 'django.http.response.JsonResponse', 'JsonResponse', (['entry'], {'status': '(400)'}), '(entry, status=400)\n', (1329, 1348), False, 'from django.http.response import JsonResponse\n'), ((1582, 1612), 'spendtrackapp.models.Entry.objects.get', 'Entry.objects.get', ([], {'id': 'entry_id'}), '(id=entry_id)\n', (1599, 1612), False, 'from spendtrackapp.models import Entry\n')] |
#!/usr/bin/env python3
# Tool that assists in upgrading the Envoy source tree to the latest API.
# Internally, Envoy uses the latest vN or vNalpha for a given package. Envoy
# will perform a reflection based version upgrade on any older protos that are
# presented to it in configuration at ingestion time.
#
# Usage (from a clean tree):
#
# api_boost.py --generate_compilation_database \
# --build_api_booster
import argparse
import functools
import json
import os
import multiprocessing as mp
import pathlib
import re
import subprocess as sp
# Temporary location of modified files.
TMP_SWP_SUFFIX = '.tmp.swp'
# Detect API #includes.
API_INCLUDE_REGEX = re.compile(r'#include "(envoy/.*)/[^/]+\.pb\.(validate\.)?h"')
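# e.g. it matches include lines of the form:
#   #include "envoy/api/v2/core/base.pb.h"
#   #include "envoy/api/v2/core/base.pb.validate.h"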
# Update a C++ file to the latest API.
def ApiBoostFile(llvm_include_path, debug_log, path):
print('Processing %s' % path)
# Run the booster
try:
result = sp.run([
'./bazel-bin/external/envoy_dev/clang_tools/api_booster/api_booster',
'--extra-arg-before=-xc++',
'--extra-arg=-isystem%s' % llvm_include_path, '--extra-arg=-Wno-undefined-internal', path
],
capture_output=True,
check=True)
except sp.CalledProcessError as e:
print('api_booster failure for %s: %s %s' % (path, e, e.stderr.decode('utf-8')))
raise
if debug_log:
print(result.stderr.decode('utf-8'))
# Consume stdout containing the list of inferred API headers. We don't have
# rewrite capabilities yet in the API booster, so we rewrite here in Python
# below.
inferred_api_includes = sorted(set(result.stdout.decode('utf-8').splitlines()))
# We just dump the inferred API header includes at the start of the #includes
# in the file and remove all the present API header includes. This does not
# match Envoy style; we rely on later invocations of fix_format.sh to take
# care of this alignment.
output_lines = []
include_lines = ['#include "%s"' % f for f in inferred_api_includes]
input_text = pathlib.Path(path).read_text()
for line in input_text.splitlines():
if include_lines and line.startswith('#include'):
output_lines.extend(include_lines)
include_lines = None
# Exclude API includes, except for a special case related to v2alpha
# ext_authz; this is needed to include the service descriptor in the build
# and is a hack that will go away when we remove v2.
if re.match(API_INCLUDE_REGEX, line) and 'envoy/service/auth/v2alpha' not in line:
continue
output_lines.append(line)
# Write to temporary file. We can't overwrite in place as we're executing
# concurrently with other ApiBoostFile() invocations that might need the file
# we're writing to.
pathlib.Path(path + TMP_SWP_SUFFIX).write_text('\n'.join(output_lines) + '\n')
# Replace the original file with the temporary file created by ApiBoostFile()
# for a given path.
def SwapTmpFile(path):
pathlib.Path(path + TMP_SWP_SUFFIX).rename(path)
# Update the Envoy source tree the latest API.
def ApiBoostTree(args):
# Optional setup of state. We need the compilation database and api_booster
# tool in place before we can start boosting.
if args.generate_compilation_database:
sp.run(['./tools/gen_compilation_database.py', '--run_bazel_build', '--include_headers'],
check=True)
if args.build_api_booster:
# Similar to gen_compilation_database.py, we only need the cc_library for
# setup. The long term fix for this is in
# https://github.com/bazelbuild/bazel/issues/9578.
dep_build_targets = [
'//source/...',
'//test/...',
]
# Figure out some cc_libraries that cover most of our external deps. This is
# the same logic as in gen_compilation_database.py.
query = 'kind(cc_library, {})'.format(' union '.join(dep_build_targets))
dep_lib_build_targets = sp.check_output(['bazel', 'query', query]).decode().splitlines()
# We also need some misc. stuff such as test binaries for setup of benchmark
# dep.
query = 'attr("tags", "compilation_db_dep", {})'.format(' union '.join(dep_build_targets))
dep_lib_build_targets.extend(sp.check_output(['bazel', 'query', query]).decode().splitlines())
extra_api_booster_args = []
if args.debug_log:
extra_api_booster_args.append('--copt=-DENABLE_DEBUG_LOG')
# Slightly easier to debug when we build api_booster on its own.
sp.run([
'bazel',
'build',
'--strip=always',
'@envoy_dev//clang_tools/api_booster',
] + extra_api_booster_args,
check=True)
sp.run([
'bazel',
'build',
'--strip=always',
] + dep_lib_build_targets, check=True)
# Figure out where the LLVM include path is. We need to provide this
# explicitly as the api_booster is built inside the Bazel cache and doesn't
# know about this path.
# TODO(htuch): this is fragile and depends on Clang version, should figure out
# a cleaner approach.
llvm_include_path = os.path.join(
sp.check_output([os.getenv('LLVM_CONFIG'), '--libdir']).decode().rstrip(),
'clang/9.0.0/include')
# Determine the files in the target dirs eligible for API boosting, based on
# known files in the compilation database.
paths = set([])
for entry in json.loads(pathlib.Path('compile_commands.json').read_text()):
file_path = entry['file']
if any(file_path.startswith(prefix) for prefix in args.paths):
paths.add(file_path)
# The API boosting is file local, so this is trivially parallelizable, use
# multiprocessing pool with default worker pool sized to cpu_count(), since
# this is CPU bound.
with mp.Pool() as p:
# We need two phases, to ensure that any dependency on files being modified
# in one thread on consumed transitive headers on the other thread isn't an
# issue. This also ensures that we complete all analysis error free before
# any mutation takes place.
p.map(functools.partial(ApiBoostFile, llvm_include_path, args.debug_log), paths)
p.map(SwapTmpFile, paths)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Update Envoy tree to the latest API')
parser.add_argument('--generate_compilation_database', action='store_true')
parser.add_argument('--build_api_booster', action='store_true')
parser.add_argument('--debug_log', action='store_true')
parser.add_argument('paths', nargs='*', default=['source', 'test', 'include'])
args = parser.parse_args()
ApiBoostTree(args)
| [
"subprocess.check_output",
"argparse.ArgumentParser",
"pathlib.Path",
"re.compile",
"os.getenv",
"subprocess.run",
"re.match",
"functools.partial",
"multiprocessing.Pool"
] | [((662, 726), 're.compile', 're.compile', (['"""#include "(envoy/.*)/[^/]+\\\\.pb\\\\.(validate\\\\.)?h\\""""'], {}), '(\'#include "(envoy/.*)/[^/]+\\\\.pb\\\\.(validate\\\\.)?h"\')\n', (672, 726), False, 'import re\n'), ((6086, 6160), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Update Envoy tree to the latest API"""'}), "(description='Update Envoy tree to the latest API')\n", (6109, 6160), False, 'import argparse\n'), ((891, 1138), 'subprocess.run', 'sp.run', (["['./bazel-bin/external/envoy_dev/clang_tools/api_booster/api_booster',\n '--extra-arg-before=-xc++', '--extra-arg=-isystem%s' %\n llvm_include_path, '--extra-arg=-Wno-undefined-internal', path]"], {'capture_output': '(True)', 'check': '(True)'}), "([\n './bazel-bin/external/envoy_dev/clang_tools/api_booster/api_booster',\n '--extra-arg-before=-xc++', '--extra-arg=-isystem%s' %\n llvm_include_path, '--extra-arg=-Wno-undefined-internal', path],\n capture_output=True, check=True)\n", (897, 1138), True, 'import subprocess as sp\n'), ((3212, 3317), 'subprocess.run', 'sp.run', (["['./tools/gen_compilation_database.py', '--run_bazel_build',\n '--include_headers']"], {'check': '(True)'}), "(['./tools/gen_compilation_database.py', '--run_bazel_build',\n '--include_headers'], check=True)\n", (3218, 3317), True, 'import subprocess as sp\n'), ((4399, 4528), 'subprocess.run', 'sp.run', (["(['bazel', 'build', '--strip=always', '@envoy_dev//clang_tools/api_booster'\n ] + extra_api_booster_args)"], {'check': '(True)'}), "(['bazel', 'build', '--strip=always',\n '@envoy_dev//clang_tools/api_booster'] + extra_api_booster_args, check=True\n )\n", (4405, 4528), True, 'import subprocess as sp\n'), ((4574, 4659), 'subprocess.run', 'sp.run', (["(['bazel', 'build', '--strip=always'] + dep_lib_build_targets)"], {'check': '(True)'}), "(['bazel', 'build', '--strip=always'] + dep_lib_build_targets, check=True\n )\n", (4580, 4659), True, 'import subprocess as sp\n'), ((5644, 5653), 'multiprocessing.Pool', 'mp.Pool', ([], {}), '()\n', (5651, 5653), True, 'import multiprocessing as mp\n'), ((2001, 2019), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (2013, 2019), False, 'import pathlib\n'), ((2409, 2442), 're.match', 're.match', (['API_INCLUDE_REGEX', 'line'], {}), '(API_INCLUDE_REGEX, line)\n', (2417, 2442), False, 'import re\n'), ((2715, 2750), 'pathlib.Path', 'pathlib.Path', (['(path + TMP_SWP_SUFFIX)'], {}), '(path + TMP_SWP_SUFFIX)\n', (2727, 2750), False, 'import pathlib\n'), ((2919, 2954), 'pathlib.Path', 'pathlib.Path', (['(path + TMP_SWP_SUFFIX)'], {}), '(path + TMP_SWP_SUFFIX)\n', (2931, 2954), False, 'import pathlib\n'), ((5941, 6007), 'functools.partial', 'functools.partial', (['ApiBoostFile', 'llvm_include_path', 'args.debug_log'], {}), '(ApiBoostFile, llvm_include_path, args.debug_log)\n', (5958, 6007), False, 'import functools\n'), ((5282, 5319), 'pathlib.Path', 'pathlib.Path', (['"""compile_commands.json"""'], {}), "('compile_commands.json')\n", (5294, 5319), False, 'import pathlib\n'), ((3854, 3896), 'subprocess.check_output', 'sp.check_output', (["['bazel', 'query', query]"], {}), "(['bazel', 'query', query])\n", (3869, 3896), True, 'import subprocess as sp\n'), ((4139, 4181), 'subprocess.check_output', 'sp.check_output', (["['bazel', 'query', query]"], {}), "(['bazel', 'query', query])\n", (4154, 4181), True, 'import subprocess as sp\n'), ((5026, 5050), 'os.getenv', 'os.getenv', (['"""LLVM_CONFIG"""'], {}), "('LLVM_CONFIG')\n", (5035, 5050), False, 'import os\n')] |
#!/usr/bin/python3
# encoding: utf-8
"""
@version: v1.0
@author: W_H_J
@license: Apache Licence
@contact: <EMAIL>
@software: PyCharm
@file: config.py
@time: 2019/5/27 18:12
@describe: 配置数据库连接
"""
import sys
import os
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '..'))
sys.path.append("..")
class Config(object):
    # Use an irregular, hard-to-guess value for the secret key
    SECRET_KEY = '<KEY>'
    # .......
    # Format: mysql+pymysql://db_user:password@db_host:port/db_name?charset
    SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:root@localhost:3306/bigdata?charset=utf8'
    # If you do not plan to use MySQL, this SQLite connection also works
    # SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(BASE_DIR,'app.db')
    SQLALCHEMY_TRACK_MODIFICATIONS = False
| [
"os.path.dirname",
"sys.path.append"
] | [((304, 325), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (319, 325), False, 'import sys\n'), ((263, 288), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (278, 288), False, 'import os\n')] |
import unittest
from mmap import mmap, ACCESS_READ
from funowl.converters.functional_converter import to_python
@unittest.skipIf(True, "NOT Ready for SNOMED")
class SnomedTestCase(unittest.TestCase):
def test_snomed(self):
doc = to_python('/Users/solbrig/data/terminology/snomedct/'
'SnomedCT_InternationalRF2_PRODUCTION_20190731T120000Z/Snapshot/Terminology/snomed.owl')
self.assertTrue(False, "Implement Me")
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"unittest.skipIf",
"funowl.converters.functional_converter.to_python"
] | [((116, 161), 'unittest.skipIf', 'unittest.skipIf', (['(True)', '"""NOT Ready for SNOMED"""'], {}), "(True, 'NOT Ready for SNOMED')\n", (131, 161), False, 'import unittest\n'), ((490, 505), 'unittest.main', 'unittest.main', ([], {}), '()\n', (503, 505), False, 'import unittest\n'), ((244, 393), 'funowl.converters.functional_converter.to_python', 'to_python', (['"""/Users/solbrig/data/terminology/snomedct/SnomedCT_InternationalRF2_PRODUCTION_20190731T120000Z/Snapshot/Terminology/snomed.owl"""'], {}), "(\n '/Users/solbrig/data/terminology/snomedct/SnomedCT_InternationalRF2_PRODUCTION_20190731T120000Z/Snapshot/Terminology/snomed.owl'\n )\n", (253, 393), False, 'from funowl.converters.functional_converter import to_python\n')] |
import os
import we
import json
import pickle
import argparse
import numpy as np
from numba import jit
from tqdm import tqdm
import utils
utils.seed_everything() #Reproducibility
def get_nbs(E, word, k=100):
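    # Indices of the k nearest neighbours of `word` by dot-product similarity, most similar first.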
return np.argsort(E.vecs.dot(E.v(word)))[-k:][::-1]
@jit(nopython=False)
def get_pair_idb(w, v, g):
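    # Indirect-bias score of the pair (w, v) with respect to direction g:
    # I = (w.v - (w_orth.v_orth) / (||w_orth|| * ||v_orth||)) / (w.v),
    # where w_orth, v_orth are w, v with their component along g removed.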
w_orth = w - (np.dot(w, g)) * g
v_orth = v - (np.dot(v, g)) * g
dots = np.dot(w, v)
orth_dots = np.dot(w_orth, v_orth)
I = (dots - orth_dots / (np.linalg.norm(w_orth) * np.linalg.norm(v_orth))) / (dots)
return I
def get_ns_idb(E, word, g):
tops = get_nbs(E, word, 200) #We only need 100 neighbours, I am storing 200 anyway.
wv = E.v(word)
d = dict(zip([E.words[v] for v in tops], [get_pair_idb(E.vecs[v], wv, g) for v in tops]))
return d
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--f', default="../embeddings/glove", type=str)
parser.add_argument('--o', default="../data/_glove_ns.pkl", type=str)
parser.add_argument('--data_path', default="../data/", type=str)
args = parser.parse_args()
E = we.WordEmbedding(args.f)
words = E.words
g = utils.get_g(E)
neighbour_idb_dict = dict(zip([w for w in tqdm(words)],
[get_ns_idb(E, w, g) for w in tqdm(words)]))
with open(args.o, 'wb') as handle:
pickle.dump(neighbour_idb_dict, handle)
| [
"we.WordEmbedding",
"pickle.dump",
"argparse.ArgumentParser",
"tqdm.tqdm",
"utils.seed_everything",
"numpy.dot",
"numba.jit",
"utils.get_g",
"numpy.linalg.norm"
] | [((138, 161), 'utils.seed_everything', 'utils.seed_everything', ([], {}), '()\n', (159, 161), False, 'import utils\n'), ((268, 287), 'numba.jit', 'jit', ([], {'nopython': '(False)'}), '(nopython=False)\n', (271, 287), False, 'from numba import jit\n'), ((398, 410), 'numpy.dot', 'np.dot', (['w', 'v'], {}), '(w, v)\n', (404, 410), True, 'import numpy as np\n'), ((427, 449), 'numpy.dot', 'np.dot', (['w_orth', 'v_orth'], {}), '(w_orth, v_orth)\n', (433, 449), True, 'import numpy as np\n'), ((835, 860), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (858, 860), False, 'import argparse\n'), ((1120, 1144), 'we.WordEmbedding', 'we.WordEmbedding', (['args.f'], {}), '(args.f)\n', (1136, 1144), False, 'import we\n'), ((1173, 1187), 'utils.get_g', 'utils.get_g', (['E'], {}), '(E)\n', (1184, 1187), False, 'import utils\n'), ((1375, 1414), 'pickle.dump', 'pickle.dump', (['neighbour_idb_dict', 'handle'], {}), '(neighbour_idb_dict, handle)\n', (1386, 1414), False, 'import pickle\n'), ((333, 345), 'numpy.dot', 'np.dot', (['w', 'g'], {}), '(w, g)\n', (339, 345), True, 'import numpy as np\n'), ((369, 381), 'numpy.dot', 'np.dot', (['v', 'g'], {}), '(v, g)\n', (375, 381), True, 'import numpy as np\n'), ((479, 501), 'numpy.linalg.norm', 'np.linalg.norm', (['w_orth'], {}), '(w_orth)\n', (493, 501), True, 'import numpy as np\n'), ((504, 526), 'numpy.linalg.norm', 'np.linalg.norm', (['v_orth'], {}), '(v_orth)\n', (518, 526), True, 'import numpy as np\n'), ((1239, 1250), 'tqdm.tqdm', 'tqdm', (['words'], {}), '(words)\n', (1243, 1250), False, 'from tqdm import tqdm\n'), ((1312, 1323), 'tqdm.tqdm', 'tqdm', (['words'], {}), '(words)\n', (1316, 1323), False, 'from tqdm import tqdm\n')] |
# encoding: UTF-8
#
# Copyright 2012-2013 <NAME>
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further info, check http://pygubu.web.here
try:
import tkinter as tk
except ImportError:
import Tkinter as tk
from pygubu.builder.builderobject import *
from pygubu.builder.tkstdwidgets import TKToplevel
class ToplevelFramePreview(tk.Frame):
def __init__(self, master=None, **kw):
tk.Frame.__init__(self, master, **kw)
self.tl_attrs = {}
self._w_set = False
self._h_set = False
def configure(self, cnf=None, **kw):
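        # Drop width/height values that would violate the emulated toplevel's
        # minsize/maxsize/resizable attributes before delegating to tk.Frame.configure.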
if kw:
cnf = tk._cnfmerge((cnf, kw))
elif cnf:
cnf = tk._cnfmerge(cnf)
key = 'width'
if key in cnf:
value = int(cnf[key])
minsize = self.tl_attrs.get('minsize', None)
maxsize = self.tl_attrs.get('maxsize', None)
# print(value, minsize, maxsize)
remove = False
# print('tl_attrs:', self.tl_attrs)
if minsize and value < minsize[0]:
remove = True
if maxsize and value > maxsize[0]:
remove = True
if self._w_set:
resizable = self.tl_attrs.get('resizable', None)
if resizable and not TKToplevel.RESIZABLE[resizable][0]:
remove = True
if remove:
# print('rm', key, value)
cnf.pop(key)
else:
self._w_set = True
key = 'height'
if key in cnf:
value = int(cnf[key])
minsize = self.tl_attrs.get('minsize', None)
maxsize = self.tl_attrs.get('maxsize', None)
# print(value, minsize, maxsize)
remove = False
if minsize and value < minsize[1]:
remove = True
if maxsize and value > maxsize[1]:
remove = True
if self._h_set:
resizable = self.tl_attrs.get('resizable', None)
if resizable and not TKToplevel.RESIZABLE[resizable][1]:
remove = True
if remove:
# print('rm', key, value)
cnf.pop(key)
else:
self._h_set = True
return tk.Frame.configure(self, cnf)
class ToplevelFramePreviewBO(BuilderObject):
class_ = ToplevelFramePreview
container = True
#Add fake 'modal' property for Dialog preview
properties = TKToplevel.properties + ('modal',)
def _set_property(self, target_widget, pname, value):
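        # Record toplevel-only options in tl_attrs so the preview frame can emulate them;
        # sizing-related options (geometry, minsize, maxsize, resizable) also adjust the frame itself.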
tw = target_widget
tw.tl_attrs[pname] = value
method_props = ('overrideredirect', 'title')
if pname in method_props:
pass
elif pname in ('maxsize', 'minsize'):
if not value:
del tw.tl_attrs[pname]
elif '|' in value:
w, h = value.split('|')
if w and h:
tw.tl_attrs[pname] = (int(w), int(h))
else:
del tw.tl_attrs[pname]
elif pname == 'geometry':
if value:
dim = value.split('+')[0]
dim = dim.split('-')[0]
w, h = dim.split('x')
if w and h:
tw.tl_attrs['minsize'] = (int(w), int(h))
tw._h_set = tw._w_set = False
tw.configure(width=w, height=h)
tw.grid_propagate(0)
elif pname == 'resizable':
if value:
if value in ('both', 'horizontally'):
tw.columnconfigure(0, weight=1)
if value in ('both', 'vertically'):
tw.rowconfigure(0, weight=1)
elif pname == 'modal':
# Do nothing, fake 'modal' property for dialog preview
pass
else:
super(ToplevelFramePreviewBO, self)._set_property(tw, pname, value)
register_widget('pygubudesigner.ToplevelFramePreview',
ToplevelFramePreviewBO, 'ToplevelFramePreview', tuple())
| [
"Tkinter.Frame.configure",
"Tkinter._cnfmerge",
"Tkinter.Frame.__init__"
] | [((950, 987), 'Tkinter.Frame.__init__', 'tk.Frame.__init__', (['self', 'master'], {}), '(self, master, **kw)\n', (967, 987), True, 'import Tkinter as tk\n'), ((2815, 2844), 'Tkinter.Frame.configure', 'tk.Frame.configure', (['self', 'cnf'], {}), '(self, cnf)\n', (2833, 2844), True, 'import Tkinter as tk\n'), ((1146, 1169), 'Tkinter._cnfmerge', 'tk._cnfmerge', (['(cnf, kw)'], {}), '((cnf, kw))\n', (1158, 1169), True, 'import Tkinter as tk\n'), ((1206, 1223), 'Tkinter._cnfmerge', 'tk._cnfmerge', (['cnf'], {}), '(cnf)\n', (1218, 1223), True, 'import Tkinter as tk\n')] |
"""Download replays from osu! website"""
import http.cookiejar
import urllib.parse
import urllib.request
import sys
import json
import os
import requests
JAR = http.cookiejar.CookieJar()
OPENER = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(JAR))
with open('config.json', 'r') as f:
CONFIG = json.load(f)
f.close()
def get_json(url):
"""Gets JSON data from a given URL"""
try:
data = requests.get(url=url).json()
return data
except requests.exceptions.Timeout:
        return requests.get(url=url).json()
except requests.exceptions.TooManyRedirects:
print("Invalid link given")
except requests.exceptions.RequestException as err:
print(err)
def login(beatmap_md5):
"""Responsible for logging into the osu! website """
print("Attempting to log into the osu! website...")
payload = {
'username': CONFIG['username'],
'password': CONFIG['password'],
'redirect': 'https://osu.ppy.sh/forum/ucp.php',
'sid': '',
'login': 'login'
}
payload = urllib.parse.urlencode(payload).encode("utf-8")
response = OPENER.open("https://osu.ppy.sh/forum/ucp.php?mode=login", payload)
data = bytes(str(response.read()), "utf-8").decode("unicode_escape")
#check if invalid credentials were given
if "incorrect password" in data:
sys.exit("You have specified an invalid password. Please check config.json")
print("Successfully logged into the osu! website!")
return get_scores(beatmap_md5)
def get_scores(beatmap_md5):
"""Gets all scores for a given beatmap."""
#get beatmap_id from md5 hash
url = 'https://osu.ppy.sh/api/get_beatmaps?k={}&h={}&mode=0&limit=50'.format(CONFIG['osu_api_key'], beatmap_md5)
beatmap_data = get_json(url)
if len(beatmap_data) < 1:
sys.exit("The beatmap is either invalid or not ranked on osu!")
beatmap_data_string = """
------------------------------------------------
| Comparing Replays For Map:
| Artist: {}
| Title: {}
| Beatmap Id: {}
------------------------------------------------
""".format(beatmap_data[0]['artist'], beatmap_data[0]['title'], beatmap_data[0]['beatmap_id'])
print(beatmap_data_string)
#get list of score ids from beatmap
score_url = 'https://osu.ppy.sh/api/get_scores?k={}&b={}&mode=0&limit=50'.format(CONFIG['osu_api_key'], beatmap_data[0]['beatmap_id'])
score_data = get_json(score_url)
score_ids = []
for score in score_data:
score_ids.append(score['score_id'])
return download_replays(score_ids)
def download_replays(score_ids):
"""Takes a list of scoreIds and downloads the replay to a new directory."""
#create a new path for the replays to be housed.
new_path = os.getcwd() + "/" + "replays"
if not os.path.exists(new_path):
os.makedirs(new_path)
for score_id in score_ids:
try:
directory = os.path.join(new_path)
full_path = directory + "/" + str(score_id) + ".osr"
print("\rDownloading replay: {}..." .format(score_id), end="")
url = 'https://osu.ppy.sh/web/osu-getreplay.php?c={}&m=0'.format(score_id)
f_2 = OPENER.open(url, {})
data = f_2.read()
with open(full_path, 'wb') as code:
code.write(data)
code.close()
except IOError as err:
print(err)
sys.exit()
| [
"os.path.exists",
"os.makedirs",
"os.path.join",
"requests.get",
"os.getcwd",
"sys.exit",
"json.load"
] | [((316, 328), 'json.load', 'json.load', (['f'], {}), '(f)\n', (325, 328), False, 'import json\n'), ((1372, 1448), 'sys.exit', 'sys.exit', (['"""You have specified an invalid password. Please check config.json"""'], {}), "('You have specified an invalid password. Please check config.json')\n", (1380, 1448), False, 'import sys\n'), ((1842, 1905), 'sys.exit', 'sys.exit', (['"""The beatmap is either invalid or not ranked on osu!"""'], {}), "('The beatmap is either invalid or not ranked on osu!')\n", (1850, 1905), False, 'import sys\n'), ((2834, 2858), 'os.path.exists', 'os.path.exists', (['new_path'], {}), '(new_path)\n', (2848, 2858), False, 'import os\n'), ((2868, 2889), 'os.makedirs', 'os.makedirs', (['new_path'], {}), '(new_path)\n', (2879, 2889), False, 'import os\n'), ((2793, 2804), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2802, 2804), False, 'import os\n'), ((2959, 2981), 'os.path.join', 'os.path.join', (['new_path'], {}), '(new_path)\n', (2971, 2981), False, 'import os\n'), ((429, 450), 'requests.get', 'requests.get', ([], {'url': 'url'}), '(url=url)\n', (441, 450), False, 'import requests\n'), ((3455, 3465), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3463, 3465), False, 'import sys\n'), ((533, 554), 'requests.get', 'requests.get', ([], {'url': 'url'}), '(url=url)\n', (545, 554), False, 'import requests\n')] |
import pandas as pd
import numpy as np
import os
from glob import glob
import csv
import multiprocessing as mp
column_names = ['frame_no', 'ts', 'ts_delta', 'protocols', 'frame_len', 'eth_src',
'eth_dst', 'ip_src', 'ip_dst', 'tcp_srcport', 'tcp_dstport',
'http_host', 'sni', 'udp_srcport', 'udp_dstport']
root = '/Volumes/Abhijit-Seagate/Data_iot/Intermediate/idle-intermediate/echo'
idle_time_dict = {}
idle_time_dict['idle'] = []
for files in os.listdir(root):
tmp = pd.read_csv(f'{root}/{files}',sep='\t',names=column_names)
idle_time_dict['idle'].extend(tmp['ts'])
root = '/Volumes/Abhijit-Seagate/Data_iot/Intermediate/tagged_intermediate/google-home-mini'
time_dict = {}
for file in os.listdir(root):
if '.DS' not in file:
print(f"Generating Dictionary for --> {file}")
time_dict[file] = []
for files in os.listdir(f'{root}/{file}'):
tmp = pd.read_csv(f'{root}/{file}/{files}',sep='\t',names=column_names)
time_dict[file].extend(tmp['ts'])
results = pd.read_csv('/Volumes/Abhijit-Seagate/Data_iot/results/results_google_mini_req/results/model_results.csv')
results.head()
results['labelled_data'] = 'unknown'
max_arr = []
min_arr = []
for keys,values in time_dict.items():
max_arr.append(max(values))
min_arr.append(min(values))
max_arr = max(max_arr)
min_arr = min(min_arr)
test_label = results[(results['start_time']>=min_arr) & (results['end_time'] <= max_arr)]
def label_tagged(split_df):
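    # For every event window, attach the tag of any device activity whose timestamp falls
    # inside [start_time, end_time]; additional matching tags are appended with a '|' separator.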
num_rows = 0
for index, row in split_df.iterrows():
num_rows+=1
print(f"Completed labelling of {num_rows}/{split_df.shape[0]} rows \n")
for label, time_stamps in time_dict.items():
for time_step in time_stamps:
if (time_step<= row['end_time']) and (time_step>= row['start_time']):
if row['labelled_data']=='unknown':
row['labelled_data']=f"{label}"
split_df.at[index,'labelled_data']= f"{label}"
print(f'Row {index} has been labelled as a {label}')
break
else:
row['labelled_data']=f"{row['labelled_data']}|{label}"
split_df.at[index,'labelled_data']= f"{row['labelled_data']}|{label}"
print(f'Row {index} has been labelled as a {label}')
break
print(f'Row {index} has been labelled as a {label}')
return(split_df)
def idle_tagged(split_df):
num_rows = 0
for index, row in split_df.iterrows():
num_rows+=1
print(f"Completed labelling of {num_rows}/{split_df.shape[0]} rows \n")
for label, time_stamps in idle_time_dict.items():
for time_step in time_stamps:
if (time_step<= row['end_time']) and (time_step>= row['start_time']):
if row['labelled_data']=='unknown':
row['labelled_data']=f"{label}"
split_df.at[index,'labelled_data']= f"{label}"
print(f'Row {index} has been labelled as a {label}')
break
else:
row['labelled_data']=f"{row['labelled_data']}|{label}"
split_df.at[index,'labelled_data']= f"{row['labelled_data']}|{label}"
print(f'Row {index} has been labelled as a {label}')
break
print(f'Row {index} has been labelled as a {label}')
return(split_df)
def parallelize_dataframe(df, func):
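    # Split the dataframe into one chunk per worker, apply `func` to the chunks in parallel,
    # and concatenate the labelled chunks back into a single dataframe.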
num_cores = mp.cpu_count()-4
num_partitions = num_cores #number of partitions to split dataframe
df_split = np.array_split(df, num_partitions)
pool = mp.Pool(num_cores)
df = pd.concat(pool.map(func, df_split))
pool.close()
pool.join()
return df
print("Labelling Tagged Data")
test_label = parallelize_dataframe(test_label,label_tagged)
test_label.head(50)
test_label.to_csv('/Volumes/Abhijit-Seagate/Data_iot/results/results_winkhub2/results/parallel_labelled_results.csv',index=False)
results_table = test_label
results_table['Accurate-label'] = 0
results_table['Accurate-anomaly'] = 1
for index,rows in results_table.iterrows():
if rows['prediction'] in rows['labelled_data']:
results_table.at[index,'Accurate-label'] = 1
else:
pass
for index,rows in results_table.iterrows():
if rows['labelled_data'] =='unknown':
results_table.at[index,'Accurate-anomaly'] = 0
else:
pass
results_table = results_table[results_table['labelled_data']!='unknown']
filtered_table = results_table
Accuracy_labelling = sum(filtered_table['Accurate-label'])/filtered_table.shape[0]
Accuracy_anomaly = sum(filtered_table['Accurate-anomaly'])/filtered_table.shape[0]
print(f'Labelling -> {Accuracy_labelling}',
f'Anomaly -->{Accuracy_anomaly}')
max_arr = []
min_arr = []
for keys,values in idle_time_dict.items():
max_arr.append(max(values))
min_arr.append(min(values))
max_arr = max(max_arr)
min_arr = min(min_arr)
filtered_table = results[(results['start_time']>=min_arr) & (results['end_time'] <= max_arr)]
filtered_table.shape
filtered_table = parallelize_dataframe(filtered_table,idle_tagged)
filtered_table.to_csv('/Volumes/Abhijit-Seagate/Data_iot/results/results_google_mini_req/results/parallel_idle_labelled_results.csv',index=False)
idle_filtered = filtered_table[filtered_table['labelled_data']=='idle']
idle_filtered['prediction'] = idle_filtered['prediction'].map(lambda x: 'idle' if x=='anomaly' else x)
Accuracy_labelling = sum(results_table['Accurate-anomaly'])/results_table.shape[0]
print(f'Accuracy --> {Accuracy_labelling}')
| [
"os.listdir",
"pandas.read_csv",
"multiprocessing.cpu_count",
"numpy.array_split",
"multiprocessing.Pool"
] | [((498, 514), 'os.listdir', 'os.listdir', (['root'], {}), '(root)\n', (508, 514), False, 'import os\n'), ((751, 767), 'os.listdir', 'os.listdir', (['root'], {}), '(root)\n', (761, 767), False, 'import os\n'), ((1070, 1186), 'pandas.read_csv', 'pd.read_csv', (['"""/Volumes/Abhijit-Seagate/Data_iot/results/results_google_mini_req/results/model_results.csv"""'], {}), "(\n '/Volumes/Abhijit-Seagate/Data_iot/results/results_google_mini_req/results/model_results.csv'\n )\n", (1081, 1186), True, 'import pandas as pd\n'), ((526, 586), 'pandas.read_csv', 'pd.read_csv', (['f"""{root}/{files}"""'], {'sep': '"""\t"""', 'names': 'column_names'}), "(f'{root}/{files}', sep='\\t', names=column_names)\n", (537, 586), True, 'import pandas as pd\n'), ((3779, 3813), 'numpy.array_split', 'np.array_split', (['df', 'num_partitions'], {}), '(df, num_partitions)\n', (3793, 3813), True, 'import numpy as np\n'), ((3825, 3843), 'multiprocessing.Pool', 'mp.Pool', (['num_cores'], {}), '(num_cores)\n', (3832, 3843), True, 'import multiprocessing as mp\n'), ((900, 928), 'os.listdir', 'os.listdir', (['f"""{root}/{file}"""'], {}), "(f'{root}/{file}')\n", (910, 928), False, 'import os\n'), ((3675, 3689), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (3687, 3689), True, 'import multiprocessing as mp\n'), ((948, 1015), 'pandas.read_csv', 'pd.read_csv', (['f"""{root}/{file}/{files}"""'], {'sep': '"""\t"""', 'names': 'column_names'}), "(f'{root}/{file}/{files}', sep='\\t', names=column_names)\n", (959, 1015), True, 'import pandas as pd\n')] |
from tqdm import tqdm
import os, sys
import numpy as np
import argparse, time, pickle
import torch
import torch.nn as nn
import torch.optim as optim
from utils import pretrained_matrix
from dataloader import DialogLoader
from model import End2EndModel, MaskedNLLLoss
from torchnlp.encoders.text import SpacyEncoder
from sklearn.metrics import f1_score, confusion_matrix, accuracy_score, classification_report
def configure_dataloaders(dataset, classify, batch_size):
"Prepare dataloaders"
if dataset == 'persuasion':
utt_file1 = 'datasets/utterance_level_minibatch/' + dataset + '/' + dataset + '_train_' + classify + '_utterances.tsv'
utt_file2 = 'datasets/utterance_level_minibatch/' + dataset + '/' + dataset + '_valid_' + classify + '_utterances.tsv'
utt_file3 = 'datasets/utterance_level_minibatch/' + dataset + '/' + dataset + '_test_' + classify + '_utterances.tsv'
mask_file1 = 'datasets/utterance_level_minibatch/' + dataset + '/' + dataset + '_train_' + classify + '_loss_mask.tsv'
mask_file2 = 'datasets/utterance_level_minibatch/' + dataset + '/' + dataset + '_valid_' + classify + '_loss_mask.tsv'
mask_file3 = 'datasets/utterance_level_minibatch/' + dataset + '/' + dataset + '_test_' + classify + '_loss_mask.tsv'
else:
utt_file1 = 'datasets/utterance_level_minibatch/' + dataset + '/' + dataset + '_train_utterances.tsv'
utt_file2 = 'datasets/utterance_level_minibatch/' + dataset + '/' + dataset + '_valid_utterances.tsv'
utt_file3 = 'datasets/utterance_level_minibatch/' + dataset + '/' + dataset + '_test_utterances.tsv'
mask_file1 = 'datasets/utterance_level_minibatch/' + dataset + '/' + dataset + '_train_loss_mask.tsv'
mask_file2 = 'datasets/utterance_level_minibatch/' + dataset + '/' + dataset + '_valid_loss_mask.tsv'
mask_file3 = 'datasets/utterance_level_minibatch/' + dataset + '/' + dataset + '_test_loss_mask.tsv'
train_loader = DialogLoader(
utt_file1,
'datasets/utterance_level_minibatch/' + dataset + '/' + dataset + '_train_' + classify + '.tsv',
mask_file1,
mask_file1, # dummy speaker mask
batch_size,
shuffle=True
)
valid_loader = DialogLoader(
utt_file2,
'datasets/utterance_level_minibatch/' + dataset + '/' + dataset + '_valid_' + classify + '.tsv',
mask_file2,
mask_file2, # dummy speaker mask
batch_size,
shuffle=False
)
test_loader = DialogLoader(
utt_file3,
'datasets/utterance_level_minibatch/' + dataset + '/' + dataset + '_test_' + classify + '.tsv',
mask_file3,
mask_file3, # dummy speaker mask
batch_size,
shuffle=False
)
return train_loader, valid_loader, test_loader
def update_context(conversations, target_idx, cc):
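    # Window each conversation around its target utterance: by default keep only the
    # `past_context` / `future_context` utterances nearest to the target; with `past_del` /
    # `future_del` set, those nearest utterances are removed instead. The target is always kept.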
return [
(conv[: max(0, idx.item() - cc['past_context'])] if cc['past_del'] else conv[max(0, idx.item() - cc['past_context']) : idx.item()]) +
[conv[idx.item()]] +
(conv[idx.item() + 1 + cc['future_context'] :] if cc['future_del'] else conv[idx.item() + 1 : idx.item() + 1 + cc['future_context']])
for conv, idx in zip(conversations, target_idx)
]
def train_or_eval_model(model, loss_function, dataloader, epoch, cc=None, optimizer=None, train=False, one_element=False):
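    # Run one epoch over `dataloader` (training only if `train` is set and an optimizer is given);
    # when `cc` is provided, the context around each target utterance is truncated/ablated first.
    # Returns average loss, accuracy, F-scores and the flattened labels/predictions/masks.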
losses, preds, labels, masks = [], [], [], []
assert not train or optimizer!=None
if train:
model.train()
else:
model.eval()
for conversations, label, loss_mask, dummy_speaker_mask, dummy_indices in tqdm(dataloader, leave=False):
if train:
optimizer.zero_grad()
# create labels and mask
if cc:
loss_mask_ = torch.nn.utils.rnn.pad_sequence([torch.tensor(item) for item in loss_mask],
batch_first=True).cuda()
target_idx = loss_mask_.argmax(dim=1)
conversations = update_context(conversations, target_idx, cc)
label = update_context(label, target_idx, cc)
loss_mask = update_context(loss_mask, target_idx, cc)
label = torch.nn.utils.rnn.pad_sequence([torch.tensor(item) for item in label],
batch_first=True).cuda()
loss_mask = torch.nn.utils.rnn.pad_sequence([torch.tensor(item) for item in loss_mask],
batch_first=True).cuda()
# create umask and qmask
lengths = [len(item) for item in conversations]
umask = torch.zeros(len(lengths), max(lengths)).long().cuda()
for j in range(len(lengths)):
umask[j][:lengths[j]] = 1
# obtain log probabilities
log_prob = model(conversations, lengths, umask)
if dataset == 'persuasion' and classify == 'er':
log_prob = log_prob[0]
if dataset == 'persuasion' and classify == 'ee':
log_prob = log_prob[1]
# compute loss and metrics
lp_ = log_prob.transpose(0, 1).contiguous().view(-1, log_prob.size()[2])
labels_ = label.view(-1)
loss = loss_function(lp_, labels_, loss_mask)
pred_ = torch.argmax(lp_, 1)
preds.append(pred_.data.cpu().numpy())
labels.append(labels_.data.cpu().numpy())
masks.append(loss_mask.view(-1).cpu().numpy())
# save_grad = True
losses.append(loss.item()*masks[-1].sum())
if train:
loss.backward()
optimizer.step()
if preds!=[]:
preds = np.concatenate(preds)
labels = np.concatenate(labels)
masks = np.concatenate(masks)
else:
return float('nan'), float('nan'), float('nan'), [], [], []
avg_loss = round(np.sum(losses)/np.sum(masks), 4)
avg_accuracy = round(accuracy_score(labels, preds, sample_weight=masks)*100, 2)
if dataset in ['iemocap', 'multiwoz']:
avg_fscore = round(f1_score(labels, preds, sample_weight=masks, average='weighted')*100, 2)
fscores = [avg_fscore]
if one_element:
fscores = fscores[0]
elif dataset in ['persuasion']:
avg_fscore1 = round(f1_score(labels, preds, sample_weight=masks, average='weighted')*100, 2)
avg_fscore2 = round(f1_score(labels, preds, sample_weight=masks, average='micro')*100, 2)
avg_fscore3 = round(f1_score(labels, preds, sample_weight=masks, average='macro')*100, 2)
fscores = [avg_fscore1, avg_fscore2, avg_fscore3]
if one_element:
fscores = fscores[2]
elif dataset == 'dailydialog':
if classify == 'emotion':
avg_fscore1 = round(f1_score(labels, preds, sample_weight=masks, average='weighted')*100, 2)
avg_fscore2 = round(f1_score(labels, preds, sample_weight=masks, average='weighted', labels=[0,2,3,4,5,6])*100, 2)
avg_fscore3 = round(f1_score(labels, preds, sample_weight=masks, average='micro')*100, 2)
avg_fscore4 = round(f1_score(labels, preds, sample_weight=masks, average='micro', labels=[0,2,3,4,5,6])*100, 2)
avg_fscore5 = round(f1_score(labels, preds, sample_weight=masks, average='macro')*100, 2)
avg_fscore6 = round(f1_score(labels, preds, sample_weight=masks, average='macro', labels=[0,2,3,4,5,6])*100, 2)
fscores = [avg_fscore1, avg_fscore2, avg_fscore3, avg_fscore4, avg_fscore5, avg_fscore6]
if one_element:
fscores = fscores[5]
elif classify == 'act':
avg_fscore1 = round(f1_score(labels, preds, sample_weight=masks, average='weighted')*100, 2)
avg_fscore2 = round(f1_score(labels, preds, sample_weight=masks, average='micro')*100, 2)
avg_fscore3 = round(f1_score(labels, preds, sample_weight=masks, average='macro')*100, 2)
fscores = [avg_fscore1, avg_fscore2, avg_fscore3]
if one_element:
fscores = fscores[2]
return avg_loss, avg_accuracy, fscores, labels, preds, masks
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--lr', type=float, default=1e-4, metavar='LR', help='learning rate')
parser.add_argument('--weight_decay', default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument('--adam_epsilon', default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument('--dropout', default=0.1, type=float, help="Dropout probability.")
parser.add_argument('--rec-dropout', default=0.1, type=float, help="DialogRNN Dropout probability.")
parser.add_argument('--batch-size', type=int, default=32, metavar='BS', help='batch size')
parser.add_argument('--epochs', type=int, default=10, metavar='E', help='number of epochs')
parser.add_argument('--class-weight', action='store_true', default=False, help='use class weight')
parser.add_argument('--attention', action='store_true', default=False, help='use attention on top of lstm model')
parser.add_argument('--cls-model', default='lstm', help='lstm or logreg')
parser.add_argument('--mode', default='840B', help='which glove model')
parser.add_argument('--dataset', default='iemocap', help='which dataset')
parser.add_argument('--classify', help='what to classify')
parser.add_argument('--cattn', default='general', help='context attention for dialogrnn simple|general|general2')
parser.add_argument('--residual', action='store_true', default=False, help='use residual connection')
    parser.add_argument('--run', help='which run')
parser.add_argument('--cc-active', action='store_true', default=False, help='cc active')
parser.add_argument('--cc-past-context', type=int, default=-1, help='# past utterances')
parser.add_argument('--cc-future-context', type=int, default=-1, help='# future utterances')
parser.add_argument('--cc-past-del', action='store_true', default=False, help='remove context')
parser.add_argument('--cc-future-del', action='store_true', default=False, help='remove context')
parser.add_argument('--cc-train', action='store_true', default=False, help='cc on training set')
parser.add_argument('--cc-dev', action='store_true', default=False, help='cc on dev set')
parser.add_argument('--cc-test', action='store_true', default=False, help='cc on test set')
parser.add_argument('--inference', default=None, help='model ID')
args = parser.parse_args()
print(args)
if not args.inference:
run_ID = int(time.time())
print(f'model_ID: {run_ID}')
global dataset
global classify
dataset = args.dataset
cc = {
'active': args.cc_active,
'past_context': 0 if args.cc_past_context<0 else args.cc_past_context,
'future_context': 0 if args.cc_future_context<0 else args.cc_future_context,
'past_del': True if args.cc_past_context<0 else args.cc_past_del,
'future_del': True if args.cc_future_context<0 else args.cc_future_del,
'train': args.cc_train,
'dev': args.cc_dev,
'test': args.cc_test,
}
D_h = 100
D_e = 100
if dataset in ['multiwoz']:
D_e = 200
cnn_output_size = 100
cnn_filters = 100
cnn_kernel_sizes = (1,2,3)
if dataset in ['multiwoz']:
cnn_kernel_sizes = (2,3,4)
mode = args.mode
cnn_dropout = args.dropout
dropout = args.dropout
rec_dropout = args.rec_dropout
attention = args.attention
batch_size = args.batch_size
n_epochs = args.epochs
classification_model = args.cls_model
context_attention = args.cattn
residual = args.residual
if dataset == 'iemocap':
print ('Classifying emotion in iemocap.')
classify = 'emotion'
n_classes = 6
loss_weights = torch.FloatTensor([1.0, 0.60072, 0.38066, 0.54019, 0.67924, 0.34332])
elif dataset == 'multiwoz':
print ('Classifying intent in multiwoz.')
classify = 'intent'
n_classes = 11
elif dataset == 'persuasion':
classify = args.classify
if classify == 'er':
print ('Classifying persuador in Persuasion for good.')
n_classes = 11
elif classify == 'ee':
print ('Classifying persuadee in Persuasion for good.')
n_classes = 13
else:
raise ValueError('--classify must be er or ee for persuasion')
elif dataset == 'dailydialog':
classify = args.classify
if classify == 'emotion':
print ('Classifying emotion in dailydialog.')
n_classes = 7
elif classify == 'act':
print ('Classifying act in dailydialog.')
n_classes = 4
else:
raise ValueError('--classify must be emotion or act for dailydialog')
train_loader, valid_loader, test_loader = configure_dataloaders(dataset, classify, batch_size)
## Tokenizer and Embedding Matrix
if os.path.isfile('datasets/dialogue_level_minibatch/' + dataset + '/' + dataset + mode + '_embedding.matrix'):
tokenizer = pickle.load(open('datasets/dialogue_level_minibatch/' + dataset + '/' + dataset + mode + '.tokenizer', 'rb'))
embedding_matrix = pickle.load(open('datasets/dialogue_level_minibatch/' + dataset + '/' + dataset + mode + '_embedding.matrix', 'rb'))
print ('Tokenizer and embedding matrix exists. Loaded from pickle files.')
else:
print ('Creating tokenizer and embedding matrix.')
all_utterances = []
for loader in [train_loader, valid_loader, test_loader]:
for conversations, label, loss_mask, speakers, indices in loader:
all_utterances += [sent.lower() for conv in conversations for sent in conv]
tokenizer = SpacyEncoder(all_utterances)
id_to_token = {i: item for i, item in enumerate(tokenizer.vocab)}
if mode == '6B':
embedding_matrix = pretrained_matrix('glove/glove.6B.300d.txt', id_to_token)
elif mode == '840B':
embedding_matrix = pretrained_matrix('glove/glove.840B.300d.txt', id_to_token)
pickle.dump(tokenizer, open('datasets/dialogue_level_minibatch/' + dataset + '/' + dataset + mode + '.tokenizer', 'wb'))
pickle.dump(embedding_matrix, open('datasets/dialogue_level_minibatch/' + dataset + '/' + dataset + mode + '_embedding.matrix', 'wb'))
print ('Done.')
vocab_size, embedding_dim = embedding_matrix.shape
model = End2EndModel(dataset, vocab_size, embedding_dim, tokenizer, classification_model,
cnn_output_size, cnn_filters, cnn_kernel_sizes, cnn_dropout,
D_e, D_h, n_classes, dropout, attention, context_attention, rec_dropout, residual)
if args.inference:
if dataset == 'iemocap':
model.load_state_dict(torch.load(f'saved/iemocap/lstm_emotion_{args.inference}.pt'))
elif dataset == 'multiwoz':
model.load_state_dict(torch.load(f'saved/multiwoz/lstm_intent_{args.inference}.pt'))
elif dataset == 'persuasion' and classify == 'er':
model.load_state_dict(torch.load(f'saved/persuasion/lstm_er_{args.inference}.pt'))
elif dataset == 'persuasion' and classify == 'ee':
model.load_state_dict(torch.load(f'saved/persuasion/lstm_ee_{args.inference}.pt'))
elif dataset == 'dailydialog' and classify == 'emotion':
model.load_state_dict(torch.load(f'saved/dailydialog/lstm_emotion_{args.inference}.pt'))
elif dataset == 'dailydialog' and classify == 'act':
model.load_state_dict(torch.load(f'saved/dailydialog/lstm_act_{args.inference}.pt'))
n_epochs = 1
model.init_pretrained_embeddings(embedding_matrix)
model.cuda()
if args.class_weight:
loss_function = MaskedNLLLoss(loss_weights.cuda())
else:
loss_function = MaskedNLLLoss()
if not args.inference:
optimizer = optim.Adam(model.parameters(), lr=args.lr)
valid_losses, valid_fscores = [], []
test_fscores = []
best_loss, best_label, best_pred, best_mask, best_fscore = None, None, None, None, None
for e in range(n_epochs):
start_time = time.time()
if not args.inference:
train_loss, train_acc, train_fscore, _, _, _ = train_or_eval_model(model, loss_function,
train_loader, e,
cc if cc['train'] and cc['active'] else None,
optimizer=optimizer if not args.inference else None,
train=True if not args.inference else False,
one_element=True)
valid_loss, valid_acc, valid_fscore, _, _, _ = train_or_eval_model(model, loss_function,
valid_loader, e,
cc if cc['dev'] and cc['active'] else None,
one_element=True
)
valid_losses.append(valid_loss)
valid_fscores.append(valid_fscore)
test_loss, test_acc, test_fscore, test_label, test_pred, test_mask = train_or_eval_model(model, loss_function,
test_loader, e,
cc if cc['test'] and cc['active'] else None, one_element=True)
test_fscores.append(test_fscore)
# WARNING: model hyper-parameters are not stored
if not args.inference:
            if best_fscore is None or valid_fscore > best_fscore:
best_fscore = valid_fscore
if not os.path.exists('mapping/'):
os.makedirs('mapping/')
with open(f'mapping/{dataset}_classify_{classify}_run{args.run}_{run_ID}.tsv', 'w') as f:
f.write(f'{args}\t{run_ID}\t{best_fscore}')
if dataset == 'iemocap':
dirName = 'saved/iemocap/'
if not os.path.exists(dirName):
os.makedirs(dirName)
torch.save(model.state_dict(), f'saved/iemocap/lstm_emotion_run{args.run}_{run_ID}.pt')
elif dataset == 'multiwoz':
dirName = 'saved/multiwoz/'
if not os.path.exists(dirName):
os.makedirs(dirName)
torch.save(model.state_dict(), f'saved/multiwoz/lstm_intent_run{args.run}_{run_ID}.pt')
elif dataset == 'persuasion' and classify == 'er':
dirName = 'saved/persuasion/'
if not os.path.exists(dirName):
os.makedirs(dirName)
torch.save(model.state_dict(), f'saved/persuasion/lstm_er_run{args.run}_{run_ID}.pt')
elif dataset == 'persuasion' and classify == 'ee':
dirName = 'saved/persuasion/'
if not os.path.exists(dirName):
os.makedirs(dirName)
torch.save(model.state_dict(), f'saved/persuasion/lstm_ee_run{args.run}_{run_ID}.pt')
elif dataset == 'dailydialog' and classify == 'emotion':
dirName = 'saved/dailydialog/'
if not os.path.exists(dirName):
os.makedirs(dirName)
torch.save(model.state_dict(), f'saved/dailydialog/lstm_emotion_run{args.run}_{run_ID}.pt')
elif dataset == 'dailydialog' and classify == 'act':
dirName = 'saved/dailydialog/'
if not os.path.exists(dirName):
os.makedirs(dirName)
torch.save(model.state_dict(), f'saved/dailydialog/lstm_act_run{args.run}_{run_ID}.pt')
if not args.inference:
            if best_loss is None or best_loss > valid_loss:
best_loss, best_label, best_pred, best_mask =\
valid_loss, test_label, test_pred, test_mask
x = 'Epoch {} train_loss {} train_acc {} train_fscore {} valid_loss {} valid_acc {} valid_fscore {} test_loss {} test_acc {} test_fscore {} time {}'.\
format(e+1, train_loss, train_acc, train_fscore, valid_loss, valid_acc, valid_fscore,\
test_loss, test_acc, test_fscore, round(time.time()-start_time, 2))
print (x)
# valid_fscores = np.array(valid_fscores).transpose()
test_fscores = np.array(test_fscores).transpose()
if not args.inference:
sys.exit(0)
else:
print (test_fscores)
ccf = open('results/context_control/' + dataset + '_glove_utt_level_context_control_' + classification_model + '_' + classify + '.txt', 'a')
ccf.write(str(test_fscores[0]) + '\t' + str(args.inference) + '\t' + str(args) + '\n')
| [
"utils.pretrained_matrix",
"numpy.array",
"sys.exit",
"os.path.exists",
"argparse.ArgumentParser",
"dataloader.DialogLoader",
"model.End2EndModel",
"numpy.concatenate",
"torch.argmax",
"os.path.isfile",
"torchnlp.encoders.text.SpacyEncoder",
"time.time",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.f1_score",
"os.makedirs",
"model.MaskedNLLLoss",
"torch.load",
"tqdm.tqdm",
"numpy.sum",
"torch.tensor",
"torch.FloatTensor"
] | [((1992, 2170), 'dataloader.DialogLoader', 'DialogLoader', (['utt_file1', "('datasets/utterance_level_minibatch/' + dataset + '/' + dataset +\n '_train_' + classify + '.tsv')", 'mask_file1', 'mask_file1', 'batch_size'], {'shuffle': '(True)'}), "(utt_file1, 'datasets/utterance_level_minibatch/' + dataset +\n '/' + dataset + '_train_' + classify + '.tsv', mask_file1, mask_file1,\n batch_size, shuffle=True)\n", (2004, 2170), False, 'from dataloader import DialogLoader\n'), ((2264, 2443), 'dataloader.DialogLoader', 'DialogLoader', (['utt_file2', "('datasets/utterance_level_minibatch/' + dataset + '/' + dataset +\n '_valid_' + classify + '.tsv')", 'mask_file2', 'mask_file2', 'batch_size'], {'shuffle': '(False)'}), "(utt_file2, 'datasets/utterance_level_minibatch/' + dataset +\n '/' + dataset + '_valid_' + classify + '.tsv', mask_file2, mask_file2,\n batch_size, shuffle=False)\n", (2276, 2443), False, 'from dataloader import DialogLoader\n'), ((2537, 2715), 'dataloader.DialogLoader', 'DialogLoader', (['utt_file3', "('datasets/utterance_level_minibatch/' + dataset + '/' + dataset + '_test_' +\n classify + '.tsv')", 'mask_file3', 'mask_file3', 'batch_size'], {'shuffle': '(False)'}), "(utt_file3, 'datasets/utterance_level_minibatch/' + dataset +\n '/' + dataset + '_test_' + classify + '.tsv', mask_file3, mask_file3,\n batch_size, shuffle=False)\n", (2549, 2715), False, 'from dataloader import DialogLoader\n'), ((3667, 3696), 'tqdm.tqdm', 'tqdm', (['dataloader'], {'leave': '(False)'}), '(dataloader, leave=False)\n', (3671, 3696), False, 'from tqdm import tqdm\n'), ((8170, 8195), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8193, 8195), False, 'import argparse, time, pickle\n'), ((13088, 13199), 'os.path.isfile', 'os.path.isfile', (["('datasets/dialogue_level_minibatch/' + dataset + '/' + dataset + mode +\n '_embedding.matrix')"], {}), "('datasets/dialogue_level_minibatch/' + dataset + '/' +\n dataset + mode + '_embedding.matrix')\n", (13102, 13199), False, 'import os, sys\n'), ((14612, 14849), 'model.End2EndModel', 'End2EndModel', (['dataset', 'vocab_size', 'embedding_dim', 'tokenizer', 'classification_model', 'cnn_output_size', 'cnn_filters', 'cnn_kernel_sizes', 'cnn_dropout', 'D_e', 'D_h', 'n_classes', 'dropout', 'attention', 'context_attention', 'rec_dropout', 'residual'], {}), '(dataset, vocab_size, embedding_dim, tokenizer,\n classification_model, cnn_output_size, cnn_filters, cnn_kernel_sizes,\n cnn_dropout, D_e, D_h, n_classes, dropout, attention, context_attention,\n rec_dropout, residual)\n', (14624, 14849), False, 'from model import End2EndModel, MaskedNLLLoss\n'), ((5310, 5330), 'torch.argmax', 'torch.argmax', (['lp_', '(1)'], {}), '(lp_, 1)\n', (5322, 5330), False, 'import torch\n'), ((5674, 5695), 'numpy.concatenate', 'np.concatenate', (['preds'], {}), '(preds)\n', (5688, 5695), True, 'import numpy as np\n'), ((5713, 5735), 'numpy.concatenate', 'np.concatenate', (['labels'], {}), '(labels)\n', (5727, 5735), True, 'import numpy as np\n'), ((5753, 5774), 'numpy.concatenate', 'np.concatenate', (['masks'], {}), '(masks)\n', (5767, 5774), True, 'import numpy as np\n'), ((11931, 12000), 'torch.FloatTensor', 'torch.FloatTensor', (['[1.0, 0.60072, 0.38066, 0.54019, 0.67924, 0.34332]'], {}), '([1.0, 0.60072, 0.38066, 0.54019, 0.67924, 0.34332])\n', (11948, 12000), False, 'import torch\n'), ((13908, 13936), 'torchnlp.encoders.text.SpacyEncoder', 'SpacyEncoder', (['all_utterances'], {}), '(all_utterances)\n', (13920, 13936), False, 'from torchnlp.encoders.text 
import SpacyEncoder\n'), ((16028, 16043), 'model.MaskedNLLLoss', 'MaskedNLLLoss', ([], {}), '()\n', (16041, 16043), False, 'from model import End2EndModel, MaskedNLLLoss\n'), ((16343, 16354), 'time.time', 'time.time', ([], {}), '()\n', (16352, 16354), False, 'import argparse, time, pickle\n'), ((21176, 21187), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (21184, 21187), False, 'import os, sys\n'), ((5875, 5889), 'numpy.sum', 'np.sum', (['losses'], {}), '(losses)\n', (5881, 5889), True, 'import numpy as np\n'), ((5890, 5903), 'numpy.sum', 'np.sum', (['masks'], {}), '(masks)\n', (5896, 5903), True, 'import numpy as np\n'), ((5933, 5983), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['labels', 'preds'], {'sample_weight': 'masks'}), '(labels, preds, sample_weight=masks)\n', (5947, 5983), False, 'from sklearn.metrics import f1_score, confusion_matrix, accuracy_score, classification_report\n'), ((10625, 10636), 'time.time', 'time.time', ([], {}), '()\n', (10634, 10636), False, 'import argparse, time, pickle\n'), ((14068, 14125), 'utils.pretrained_matrix', 'pretrained_matrix', (['"""glove/glove.6B.300d.txt"""', 'id_to_token'], {}), "('glove/glove.6B.300d.txt', id_to_token)\n", (14085, 14125), False, 'from utils import pretrained_matrix\n'), ((21105, 21127), 'numpy.array', 'np.array', (['test_fscores'], {}), '(test_fscores)\n', (21113, 21127), True, 'import numpy as np\n'), ((6063, 6127), 'sklearn.metrics.f1_score', 'f1_score', (['labels', 'preds'], {'sample_weight': 'masks', 'average': '"""weighted"""'}), "(labels, preds, sample_weight=masks, average='weighted')\n", (6071, 6127), False, 'from sklearn.metrics import f1_score, confusion_matrix, accuracy_score, classification_report\n'), ((14186, 14245), 'utils.pretrained_matrix', 'pretrained_matrix', (['"""glove/glove.840B.300d.txt"""', 'id_to_token'], {}), "('glove/glove.840B.300d.txt', id_to_token)\n", (14203, 14245), False, 'from utils import pretrained_matrix\n'), ((14979, 15040), 'torch.load', 'torch.load', (['f"""saved/iemocap/lstm_emotion_{args.inference}.pt"""'], {}), "(f'saved/iemocap/lstm_emotion_{args.inference}.pt')\n", (14989, 15040), False, 'import torch\n'), ((6289, 6353), 'sklearn.metrics.f1_score', 'f1_score', (['labels', 'preds'], {'sample_weight': 'masks', 'average': '"""weighted"""'}), "(labels, preds, sample_weight=masks, average='weighted')\n", (6297, 6353), False, 'from sklearn.metrics import f1_score, confusion_matrix, accuracy_score, classification_report\n'), ((6390, 6451), 'sklearn.metrics.f1_score', 'f1_score', (['labels', 'preds'], {'sample_weight': 'masks', 'average': '"""micro"""'}), "(labels, preds, sample_weight=masks, average='micro')\n", (6398, 6451), False, 'from sklearn.metrics import f1_score, confusion_matrix, accuracy_score, classification_report\n'), ((6488, 6549), 'sklearn.metrics.f1_score', 'f1_score', (['labels', 'preds'], {'sample_weight': 'masks', 'average': '"""macro"""'}), "(labels, preds, sample_weight=masks, average='macro')\n", (6496, 6549), False, 'from sklearn.metrics import f1_score, confusion_matrix, accuracy_score, classification_report\n'), ((15113, 15174), 'torch.load', 'torch.load', (['f"""saved/multiwoz/lstm_intent_{args.inference}.pt"""'], {}), "(f'saved/multiwoz/lstm_intent_{args.inference}.pt')\n", (15123, 15174), False, 'import torch\n'), ((18291, 18317), 'os.path.exists', 'os.path.exists', (['"""mapping/"""'], {}), "('mapping/')\n", (18305, 18317), False, 'import os, sys\n'), ((18339, 18362), 'os.makedirs', 'os.makedirs', (['"""mapping/"""'], {}), "('mapping/')\n", (18350, 
18362), False, 'import os, sys\n'), ((4291, 4309), 'torch.tensor', 'torch.tensor', (['item'], {}), '(item)\n', (4303, 4309), False, 'import torch\n'), ((4456, 4474), 'torch.tensor', 'torch.tensor', (['item'], {}), '(item)\n', (4468, 4474), False, 'import torch\n'), ((15270, 15329), 'torch.load', 'torch.load', (['f"""saved/persuasion/lstm_er_{args.inference}.pt"""'], {}), "(f'saved/persuasion/lstm_er_{args.inference}.pt')\n", (15280, 15329), False, 'import torch\n'), ((18649, 18672), 'os.path.exists', 'os.path.exists', (['dirName'], {}), '(dirName)\n', (18663, 18672), False, 'import os, sys\n'), ((18698, 18718), 'os.makedirs', 'os.makedirs', (['dirName'], {}), '(dirName)\n', (18709, 18718), False, 'import os, sys\n'), ((20977, 20988), 'time.time', 'time.time', ([], {}), '()\n', (20986, 20988), False, 'import argparse, time, pickle\n'), ((3857, 3875), 'torch.tensor', 'torch.tensor', (['item'], {}), '(item)\n', (3869, 3875), False, 'import torch\n'), ((6775, 6839), 'sklearn.metrics.f1_score', 'f1_score', (['labels', 'preds'], {'sample_weight': 'masks', 'average': '"""weighted"""'}), "(labels, preds, sample_weight=masks, average='weighted')\n", (6783, 6839), False, 'from sklearn.metrics import f1_score, confusion_matrix, accuracy_score, classification_report\n'), ((6880, 6975), 'sklearn.metrics.f1_score', 'f1_score', (['labels', 'preds'], {'sample_weight': 'masks', 'average': '"""weighted"""', 'labels': '[0, 2, 3, 4, 5, 6]'}), "(labels, preds, sample_weight=masks, average='weighted', labels=[0,\n 2, 3, 4, 5, 6])\n", (6888, 6975), False, 'from sklearn.metrics import f1_score, confusion_matrix, accuracy_score, classification_report\n'), ((7007, 7068), 'sklearn.metrics.f1_score', 'f1_score', (['labels', 'preds'], {'sample_weight': 'masks', 'average': '"""micro"""'}), "(labels, preds, sample_weight=masks, average='micro')\n", (7015, 7068), False, 'from sklearn.metrics import f1_score, confusion_matrix, accuracy_score, classification_report\n'), ((7109, 7201), 'sklearn.metrics.f1_score', 'f1_score', (['labels', 'preds'], {'sample_weight': 'masks', 'average': '"""micro"""', 'labels': '[0, 2, 3, 4, 5, 6]'}), "(labels, preds, sample_weight=masks, average='micro', labels=[0, 2,\n 3, 4, 5, 6])\n", (7117, 7201), False, 'from sklearn.metrics import f1_score, confusion_matrix, accuracy_score, classification_report\n'), ((7233, 7294), 'sklearn.metrics.f1_score', 'f1_score', (['labels', 'preds'], {'sample_weight': 'masks', 'average': '"""macro"""'}), "(labels, preds, sample_weight=masks, average='macro')\n", (7241, 7294), False, 'from sklearn.metrics import f1_score, confusion_matrix, accuracy_score, classification_report\n'), ((7335, 7427), 'sklearn.metrics.f1_score', 'f1_score', (['labels', 'preds'], {'sample_weight': 'masks', 'average': '"""macro"""', 'labels': '[0, 2, 3, 4, 5, 6]'}), "(labels, preds, sample_weight=masks, average='macro', labels=[0, 2,\n 3, 4, 5, 6])\n", (7343, 7427), False, 'from sklearn.metrics import f1_score, confusion_matrix, accuracy_score, classification_report\n'), ((15425, 15484), 'torch.load', 'torch.load', (['f"""saved/persuasion/lstm_ee_{args.inference}.pt"""'], {}), "(f'saved/persuasion/lstm_ee_{args.inference}.pt')\n", (15435, 15484), False, 'import torch\n'), ((18947, 18970), 'os.path.exists', 'os.path.exists', (['dirName'], {}), '(dirName)\n', (18961, 18970), False, 'import os, sys\n'), ((18996, 19016), 'os.makedirs', 'os.makedirs', (['dirName'], {}), '(dirName)\n', (19007, 19016), False, 'import os, sys\n'), ((7658, 7722), 'sklearn.metrics.f1_score', 'f1_score', 
(['labels', 'preds'], {'sample_weight': 'masks', 'average': '"""weighted"""'}), "(labels, preds, sample_weight=masks, average='weighted')\n", (7666, 7722), False, 'from sklearn.metrics import f1_score, confusion_matrix, accuracy_score, classification_report\n'), ((7763, 7824), 'sklearn.metrics.f1_score', 'f1_score', (['labels', 'preds'], {'sample_weight': 'masks', 'average': '"""micro"""'}), "(labels, preds, sample_weight=masks, average='micro')\n", (7771, 7824), False, 'from sklearn.metrics import f1_score, confusion_matrix, accuracy_score, classification_report\n'), ((7865, 7926), 'sklearn.metrics.f1_score', 'f1_score', (['labels', 'preds'], {'sample_weight': 'masks', 'average': '"""macro"""'}), "(labels, preds, sample_weight=masks, average='macro')\n", (7873, 7926), False, 'from sklearn.metrics import f1_score, confusion_matrix, accuracy_score, classification_report\n'), ((15586, 15651), 'torch.load', 'torch.load', (['f"""saved/dailydialog/lstm_emotion_{args.inference}.pt"""'], {}), "(f'saved/dailydialog/lstm_emotion_{args.inference}.pt')\n", (15596, 15651), False, 'import torch\n'), ((19270, 19293), 'os.path.exists', 'os.path.exists', (['dirName'], {}), '(dirName)\n', (19284, 19293), False, 'import os, sys\n'), ((19319, 19339), 'os.makedirs', 'os.makedirs', (['dirName'], {}), '(dirName)\n', (19330, 19339), False, 'import os, sys\n'), ((15749, 15810), 'torch.load', 'torch.load', (['f"""saved/dailydialog/lstm_act_{args.inference}.pt"""'], {}), "(f'saved/dailydialog/lstm_act_{args.inference}.pt')\n", (15759, 15810), False, 'import torch\n'), ((19591, 19614), 'os.path.exists', 'os.path.exists', (['dirName'], {}), '(dirName)\n', (19605, 19614), False, 'import os, sys\n'), ((19640, 19660), 'os.makedirs', 'os.makedirs', (['dirName'], {}), '(dirName)\n', (19651, 19660), False, 'import os, sys\n'), ((19919, 19942), 'os.path.exists', 'os.path.exists', (['dirName'], {}), '(dirName)\n', (19933, 19942), False, 'import os, sys\n'), ((19968, 19988), 'os.makedirs', 'os.makedirs', (['dirName'], {}), '(dirName)\n', (19979, 19988), False, 'import os, sys\n'), ((20249, 20272), 'os.path.exists', 'os.path.exists', (['dirName'], {}), '(dirName)\n', (20263, 20272), False, 'import os, sys\n'), ((20298, 20318), 'os.makedirs', 'os.makedirs', (['dirName'], {}), '(dirName)\n', (20309, 20318), False, 'import os, sys\n')] |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration test library A for Subpar."""
import pkgutil
def lib():
print('In a_lib.py lib()')
# Test resource extraction
a_lib_dat = pkgutil.get_data('subpar.tests.package_a', 'a_lib_dat.txt')
assert (a_lib_dat == b'Dummy data file for a_lib.py\n'), a_lib_dat
| [
"pkgutil.get_data"
] | [((749, 808), 'pkgutil.get_data', 'pkgutil.get_data', (['"""subpar.tests.package_a"""', '"""a_lib_dat.txt"""'], {}), "('subpar.tests.package_a', 'a_lib_dat.txt')\n", (765, 808), False, 'import pkgutil\n')] |
"""实现去重容器"""
class BasicFilterContainer(object):
def add_fp(self, fp):
"""把指纹数据添加到容器中"""
pass
def exists(self, fp):
"""用于判断该指纹是否存在"""
pass
class NormalFilterContainer(BasicFilterContainer):
    def __init__(self):
        """In-memory deduplication container; fingerprints are stored in a Python set."""
        # filter_container: public attribute
        # _filter_container: protected; not imported via `from xxx import *`
        # __filter_container: private (name-mangled); only meant to be used inside this class
        # __filter_container__: dunder style is reserved for built-in magic names; not for normal attributes
        self._filter_container = set()
    def add_fp(self, fp):
        # add the fingerprint to the set
        self._filter_container.add(fp)
    def exists(self, fp):
        """Check whether the fingerprint exists."""
        return fp in self._filter_container
from ..conf.settings import REDIS_SET_NAME, REDIS_HOST, REDIS_PORT, REDIS_DB
import redis
class RedisFilterContainer(BasicFilterContainer):
def __init__(self, name=REDIS_SET_NAME, host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB):
        # open the connection to the Redis database
        self.__server = redis.StrictRedis(host=host, port=port, db=db)
        # key under which the fingerprint set is stored in Redis
        self.name = name
    def add_fp(self, fp):
        """Add the fingerprint to the Redis set."""
        self.__server.sadd(self.name, fp)
    def exists(self, fp):
        """Check whether the fingerprint is already stored."""
        return self.__server.sismember(self.name, fp)
    def clear(self):
        """Clear all fingerprint data."""
self.__server.delete(self.name)
| [
"redis.StrictRedis"
] | [((1062, 1108), 'redis.StrictRedis', 'redis.StrictRedis', ([], {'host': 'host', 'port': 'port', 'db': 'db'}), '(host=host, port=port, db=db)\n', (1079, 1108), False, 'import redis\n')] |
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER, CONFIG_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.topology import event, switches
from ryu.topology.api import get_switch, get_link
import networkx as nx
class ShortestForwarding(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(ShortestForwarding, self).__init__(*args, **kwargs)
self.network = nx.DiGraph()
self.topology_api_app = self
self.paths = {}
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
datapath = ev.msg.datapath
ofproto = datapath.ofproto
ofp_parser = datapath.ofproto_parser
match = ofp_parser.OFPMatch()
actions = [ofp_parser.OFPActionOutput(ofproto.OFPP_CONTROLLER, ofproto.OFPCML_NO_BUFFER)]
self.add_flow(datapath, 0, match, actions)
def add_flow(self, datapath, priority, match, actions):
ofproto = datapath.ofproto
ofp_parser = datapath.ofproto_parser
inst = [ofp_parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,actions)]
mod = ofp_parser.OFPFlowMod(datapath=datapath, priority=priority,match=match, instructions=inst)
datapath.send_msg(mod)
events = [event.EventSwitchEnter,
event.EventSwitchLeave, event.EventPortAdd,
event.EventPortDelete, event.EventPortModify,
event.EventLinkAdd, event.EventLinkDelete]
@set_ev_cls(events)
def get_topology(self, ev):
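        # Rebuild the directed topology graph on every switch/port/link event:
        # switch dpids become nodes and each link is added in both directions,
        # storing the local output port number as the 'port' edge attribute.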
switch_list = get_switch(self.topology_api_app, None)
switches = [switch.dp.id for switch in switch_list]
self.network.add_nodes_from(switches)
links_list = get_link(self.topology_api_app, None)
links = [(link.src.dpid, link.dst.dpid, {'port': link.src.port_no}) for link in links_list]
self.network.add_edges_from(links)
links = [(link.dst.dpid, link.src.dpid, {'port': link.dst.port_no}) for link in links_list]
self.network.add_edges_from(links)
def get_out_port(self, src, dst, datapath, in_port):
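        # Learn the attachment point of a newly seen source host, then compute (and cache)
        # the shortest dpid path from src to dst with networkx and return this switch's
        # port towards the next hop; if the destination is not in the graph yet, flood.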
dpid = datapath.id
if src not in self.network:
self.network.add_node(src)
            self.network.add_edge(dpid, src, port=in_port)
self.network.add_edge(src, dpid)
self.paths.setdefault(src, {})
if dst in self.network:
if dst not in self.paths[src]:
path = nx.shortest_path(self.network, src, dst)
self.paths[src][dst] = path
path = self.paths[src][dst]
next_hop = path[path.index(dpid) + 1]
out_port = self.network[dpid][next_hop]['port']
else:
out_port = datapath.ofproto.OFPP_FLOOD
return out_port
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def packet_in_handler(self, ev):
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
ofp_parser = datapath.ofproto_parser
pkt = packet.Packet(msg.data)
eth = pkt.get_protocol(ethernet.ethernet)
in_port = msg.match['in_port']
out_port = self.get_out_port(eth.src, eth.dst, datapath, in_port)
actions = [ofp_parser.OFPActionOutput(out_port)]
if out_port != ofproto.OFPP_FLOOD:
match = ofp_parser.OFPMatch(in_port=in_port, eth_dst=eth.dst)
self.add_flow(datapath, 1, match, actions)
# send packet_out msg to flood packet.
out = ofp_parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id, in_port=in_port,actions=actions)
datapath.send_msg(out)
'''
Packet sending helper (for testing with scapy):
from scapy.all import sendp
from scapy.all import Packet
from scapy.all import Ether, IP, UDP, TCP
pkt = Ether(src=src, dst=dst)
pkt = pkt /IP(dst=addr) / TCP(dport=1234, sport=random.randint(49152,65535)) / 'hello world'
sendp(pkt, iface=iface, verbose=False)
'''
| [
"ryu.lib.packet.packet.Packet",
"ryu.topology.api.get_switch",
"networkx.DiGraph",
"networkx.shortest_path",
"ryu.controller.handler.set_ev_cls",
"ryu.topology.api.get_link"
] | [((712, 775), 'ryu.controller.handler.set_ev_cls', 'set_ev_cls', (['ofp_event.EventOFPSwitchFeatures', 'CONFIG_DISPATCHER'], {}), '(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)\n', (722, 775), False, 'from ryu.controller.handler import set_ev_cls\n'), ((1705, 1723), 'ryu.controller.handler.set_ev_cls', 'set_ev_cls', (['events'], {}), '(events)\n', (1715, 1723), False, 'from ryu.controller.handler import set_ev_cls\n'), ((3011, 3066), 'ryu.controller.handler.set_ev_cls', 'set_ev_cls', (['ofp_event.EventOFPPacketIn', 'MAIN_DISPATCHER'], {}), '(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)\n', (3021, 3066), False, 'from ryu.controller.handler import set_ev_cls\n'), ((632, 644), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (642, 644), True, 'import networkx as nx\n'), ((1778, 1817), 'ryu.topology.api.get_switch', 'get_switch', (['self.topology_api_app', 'None'], {}), '(self.topology_api_app, None)\n', (1788, 1817), False, 'from ryu.topology.api import get_switch, get_link\n'), ((1945, 1982), 'ryu.topology.api.get_link', 'get_link', (['self.topology_api_app', 'None'], {}), '(self.topology_api_app, None)\n', (1953, 1982), False, 'from ryu.topology.api import get_switch, get_link\n'), ((3252, 3275), 'ryu.lib.packet.packet.Packet', 'packet.Packet', (['msg.data'], {}), '(msg.data)\n', (3265, 3275), False, 'from ryu.lib.packet import packet\n'), ((2680, 2720), 'networkx.shortest_path', 'nx.shortest_path', (['self.network', 'src', 'dst'], {}), '(self.network, src, dst)\n', (2696, 2720), True, 'import networkx as nx\n')] |
#
# Copyright (C) 2021 Stephane "Twidi" Angel <<EMAIL>>
#
# This file is part of StreamDeckFS
# (see https://github.com/twidi/streamdeckfs).
#
# License: MIT, see https://opensource.org/licenses/MIT
#
import re
from dataclasses import dataclass
from ..common import file_flags, logger
from .base import (
RE_PARTS,
VAR_PREFIX,
VAR_RE_NAME_PART,
EntityFile,
InvalidArg,
UnavailableVar,
)
from .deck import DeckContent
from .key import KeyContent
from .page import PageContent
ELIF_THEN_RE = re.compile(r"^(?:elif|then)(?:\.\d+)?$")
@dataclass(eq=False)
class BaseVar(EntityFile):
path_glob = "VAR_*"
main_part_re = re.compile(r"^(?P<kind>VAR)_" + VAR_RE_NAME_PART + "$")
main_part_compose = lambda args: f'VAR_{args["name"]}'
get_main_args = lambda self: {"name": self.name}
allowed_args = EntityFile.allowed_args | {
"value": re.compile(r"^(?P<arg>value)=(?P<value>.+)$"),
"if": re.compile(r"^(?P<arg>if)=(?P<value>" + RE_PARTS["bool"] + ")$"),
"elif": re.compile(r"^(?P<arg>elif)=(?P<value>" + RE_PARTS["bool"] + ")$"),
"elif.": re.compile(r"^(?P<arg>elif\.\d+)=(?P<value>" + RE_PARTS["bool"] + ")$"),
"then": re.compile(r"^(?P<arg>then)=(?P<value>.+)$"),
"then.": re.compile(r"^(?P<arg>then\.\d+)=(?P<value>.+)$"),
"else": re.compile(r"^(?P<arg>else)=(?P<value>.+)$"),
}
# no `name` arg, we already have it in main
allowed_args.pop("name")
allowed_partial_args = EntityFile.allowed_partial_args | {
"then": re.compile(r"^then\.\d+$"),
"elif": re.compile(r"^elif\.\d+$"),
}
unique_args_combinations = [
("value", "file"),
("value", "if"),
("value", "elif"),
("value", "then"),
("value", "else"),
("file", "if"),
("file", "elif"),
("file", "then"),
("file", "else"),
]
identifier_attr = "name"
parent_container_attr = "vars"
def __post_init__(self):
super().__post_init__()
self.value = None
self.cached_value = None
self.used_by = set()
@property
def str(self):
        return f'VAR {self.name}{", disabled" if self.disabled else ""}'
def __str__(self):
return f"{self.parent}, {self.str}"
@classmethod
def save_raw_arg(cls, name, value, args):
if name == "elif":
if "elif" not in args:
args["elif"] = []
args["elif"].append(value)
elif name == "then":
if "then" not in args:
args["then"] = []
args["then"].append(value)
else:
super().save_raw_arg(name, value, args)
@classmethod
def convert_args(cls, main, args):
final_args = super().convert_args(main, args)
for unique_args in cls.unique_args_combinations:
if len([1 for key in unique_args if args.get(key)]) > 1:
raise InvalidArg(
"Only one of these arguments must be used: " + (", ".join(f'"{arg}"' for arg in unique_args))
)
if "if" in args or "else" in args or "then" in args:
if "if" not in args or "else" not in args or "then" not in args:
raise InvalidArg('"if", "then" and "else" arguments must all be present')
all_ifs = [args["if"]] + args.get("elif", [])
if len(args["then"]) != len(all_ifs) or None in args["then"] or None in all_ifs:
raise InvalidArg('Invalide number of "elif" or "then"')
del final_args["name"]
for arg in ("value", "if", "else"):
if arg in args:
final_args[arg] = cls.replace_special_chars(args[arg], args)
for arg in ("elif", "then"):
if arg in args:
final_args[arg] = [cls.replace_special_chars(subarg, args) for subarg in args[arg]]
return final_args
@classmethod
def create_from_args(cls, path, parent, identifier, args, path_modified_at):
var = super().create_from_args(path, parent, identifier, args, path_modified_at)
if "if" in args:
elif_ = [args["if"]] + args.get("elif", [])
for if_, then_ in zip(elif_, args["then"]):
if if_.lower() == "true":
var.value = then_
break
else:
var.value = args["else"]
else:
var.value = args.get("value")
return var
@classmethod
def merge_partial_arg(cls, main_key, values, args):
if main_key in ("elif", "then"):
if main_key not in args:
args[main_key] = []
arg = args[main_key]
for key, value in values.items():
try:
index = int(key.split(".")[-1])
if index >= len(arg):
# complete list with `None` values
arg.extend([None] * (index - len(arg) + 1))
arg[index] = value
except Exception:
pass
else:
super().merge_partial_arg(main_key, values, args)
@staticmethod
def args_matching_filter(main, args, filter):
if filter is None:
return True
return main.get("name") == filter
@property
def resolved_value(self):
if self.cached_value is not None:
return self.cached_value
if self.value is not None:
self.cached_value = self.value
else:
try:
path = None
if self.mode == "content":
path = self.resolved_path
elif self.mode in ("file", "inside"):
path = self.get_file_path()
assert path
except Exception:
logger.error(f"[{self}] File to read cannot be found{'' if path is None else ' (path: %s)' % path}")
raise UnavailableVar
try:
self.cached_value = path.read_text().strip()
except Exception:
logger.error(f"[{self}] File could not be read: {path}")
raise UnavailableVar
if VAR_PREFIX in self.cached_value:
self.cached_value = self.replace_vars_in_content(self.cached_value)
if "{" in self.cached_value and "}" in self.cached_value:
self.cached_value = self.replace_exprs(self.cached_value, self.path.name)
return self.cached_value
def iterate_children_dirs(self):
return []
def activate(self, root=None):
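        # This variable just became available: re-scan the vars holders below `root` for
        # entity files whose names were waiting on it and replay a CREATE event so the
        # corresponding entities are finally built.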
if root is None:
root = self.parent
for vars_holder in root.iterate_vars_holders():
for entity_class, name, is_virtual, var_names in vars_holder.get_waiting_for_vars(self.name):
path = vars_holder.path / name
if not (is_virtual or path.exists()) or vars_holder.on_file_change(
vars_holder.path,
name,
file_flags.CREATE
| (file_flags.ISDIR if (entity_class.is_dir if is_virtual else path.is_dir()) else 0),
modified_at=vars_holder.path_modified_at,
entity_class=entity_class,
is_virtual=is_virtual,
):
vars_holder.remove_waiting_for_vars(name)
def deactivate(self, root=None):
for entity in list(self.used_by):
if root is not None and not entity.path.is_relative_to(root.path):
continue
entity.on_var_deleted()
def version_activated(self):
super().version_activated()
# if we have a variable at a upper level with the same name,
# (our parent is the one holding us, so we want our grand-parent)
if grand_parent := self.parent.parent:
try:
grand_parent_var = grand_parent.get_var(self.name)
except UnavailableVar:
pass
else:
# then we deactivate it, but only for our current var holder (our parent)
grand_parent_var.deactivate(self.parent)
return
# same if our parent is a reference, we don't want to use the one for the reference anymore
reference = self.parent.reference
while reference:
try:
referenced_var = reference.get_var(self.name)
except UnavailableVar:
reference = reference.reference
else:
referenced_var.deactivate(self.parent)
break
def on_create(self):
super().on_create()
self.activate()
def on_delete(self):
super().on_delete()
self.deactivate()
def version_deactivated(self):
super().version_deactivated()
# if we have one at a upper level with the same name,
# (our parent is the one holding us, so we want our grand-parent)
if grand_parent := self.parent.parent:
try:
grand_parent_var = grand_parent.get_var(self.name)
except UnavailableVar:
pass
else:
# then we use it to re-render the var just unrendered
grand_parent_var.activate(self.parent)
return
# else if our parent is a reference that have this variable, we want to use it
reference = self.parent.reference
while reference:
try:
referenced_var = reference.get_var(self.name)
except UnavailableVar:
reference = reference.reference
else:
referenced_var.activate(self.parent)
break
def on_file_content_changed(self):
super().on_file_content_changed()
current_value = self.cached_value
self.cached_value = None
try:
new_value = self.resolved_value
except UnavailableVar:
pass
else:
if new_value != current_value:
for entity in list(self.used_by):
entity.on_var_deleted()
self.activate()
@dataclass(eq=False)
class DeckVar(BaseVar, DeckContent):
pass
@dataclass(eq=False)
class PageVar(BaseVar, PageContent):
pass
@dataclass(eq=False)
class KeyVar(BaseVar, KeyContent):
allowed_args = BaseVar.allowed_args | {
"ref": re.compile(
r"^(?P<arg>ref)=(?P<page>.+):(?P<key>.+):(?P<var>.+)$" # for internal use only, so we can enforce all parts
),
}
@classmethod
def find_reference(cls, parent, ref_conf, main, args):
final_ref_conf, key = cls.find_reference_key(parent, ref_conf)
final_ref_conf["var"] = main["name"]
if not key:
return final_ref_conf, None
return final_ref_conf, key.find_var(final_ref_conf["var"])
def get_waiting_references(self):
return [
(path, parent, ref_conf)
for key, path, parent, ref_conf in self.iter_waiting_references_for_key(self.key)
if (var := key.find_var(ref_conf["var"])) and var.name == self.name
]
def on_file_content_changed(self):
super().on_file_content_changed()
for reference in self.referenced_by:
reference.on_file_content_changed()
| [
"dataclasses.dataclass",
"re.compile"
] | [((517, 558), 're.compile', 're.compile', (['"""^(?:elif|then)(?:\\\\.\\\\d+)?$"""'], {}), "('^(?:elif|then)(?:\\\\.\\\\d+)?$')\n", (527, 558), False, 'import re\n'), ((561, 580), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)'}), '(eq=False)\n', (570, 580), False, 'from dataclasses import dataclass\n'), ((10213, 10232), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)'}), '(eq=False)\n', (10222, 10232), False, 'from dataclasses import dataclass\n'), ((10282, 10301), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)'}), '(eq=False)\n', (10291, 10301), False, 'from dataclasses import dataclass\n'), ((10351, 10370), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)'}), '(eq=False)\n', (10360, 10370), False, 'from dataclasses import dataclass\n'), ((651, 705), 're.compile', 're.compile', (["('^(?P<kind>VAR)_' + VAR_RE_NAME_PART + '$')"], {}), "('^(?P<kind>VAR)_' + VAR_RE_NAME_PART + '$')\n", (661, 705), False, 'import re\n'), ((884, 928), 're.compile', 're.compile', (['"""^(?P<arg>value)=(?P<value>.+)$"""'], {}), "('^(?P<arg>value)=(?P<value>.+)$')\n", (894, 928), False, 'import re\n'), ((945, 1008), 're.compile', 're.compile', (["('^(?P<arg>if)=(?P<value>' + RE_PARTS['bool'] + ')$')"], {}), "('^(?P<arg>if)=(?P<value>' + RE_PARTS['bool'] + ')$')\n", (955, 1008), False, 'import re\n'), ((1027, 1092), 're.compile', 're.compile', (["('^(?P<arg>elif)=(?P<value>' + RE_PARTS['bool'] + ')$')"], {}), "('^(?P<arg>elif)=(?P<value>' + RE_PARTS['bool'] + ')$')\n", (1037, 1092), False, 'import re\n'), ((1112, 1184), 're.compile', 're.compile', (["('^(?P<arg>elif\\\\.\\\\d+)=(?P<value>' + RE_PARTS['bool'] + ')$')"], {}), "('^(?P<arg>elif\\\\.\\\\d+)=(?P<value>' + RE_PARTS['bool'] + ')$')\n", (1122, 1184), False, 'import re\n'), ((1201, 1244), 're.compile', 're.compile', (['"""^(?P<arg>then)=(?P<value>.+)$"""'], {}), "('^(?P<arg>then)=(?P<value>.+)$')\n", (1211, 1244), False, 'import re\n'), ((1264, 1314), 're.compile', 're.compile', (['"""^(?P<arg>then\\\\.\\\\d+)=(?P<value>.+)$"""'], {}), "('^(?P<arg>then\\\\.\\\\d+)=(?P<value>.+)$')\n", (1274, 1314), False, 'import re\n'), ((1331, 1374), 're.compile', 're.compile', (['"""^(?P<arg>else)=(?P<value>.+)$"""'], {}), "('^(?P<arg>else)=(?P<value>.+)$')\n", (1341, 1374), False, 'import re\n'), ((1540, 1567), 're.compile', 're.compile', (['"""^then\\\\.\\\\d+$"""'], {}), "('^then\\\\.\\\\d+$')\n", (1550, 1567), False, 'import re\n'), ((1584, 1611), 're.compile', 're.compile', (['"""^elif\\\\.\\\\d+$"""'], {}), "('^elif\\\\.\\\\d+$')\n", (1594, 1611), False, 'import re\n'), ((10465, 10530), 're.compile', 're.compile', (['"""^(?P<arg>ref)=(?P<page>.+):(?P<key>.+):(?P<var>.+)$"""'], {}), "('^(?P<arg>ref)=(?P<page>.+):(?P<key>.+):(?P<var>.+)$')\n", (10475, 10530), False, 'import re\n')] |
from django.urls import path, include
from rest_framework import routers
from .views import ArticleViewSet, SiteBoardViewSet, CategoryViewSet
router = routers.DefaultRouter()
router.register('articles', ArticleViewSet)
router.register('boards', SiteBoardViewSet)
router.register('categories', CategoryViewSet)
urlpatterns = [
path('', include(router.urls)),
]
# urlpatterns = [
# path('articles/', ArticleViewSet.as_view({'get': 'list'})),
# path('articles/<int:pk>/', ArticleViewSet.as_view({'get': 'retrieve'})),
# path('boards/', SiteBoardViewSet.as_view({'get': 'list'})),
# path('boards/<int:pk>/', SiteBoardViewSet.as_view({'get': 'retrieve'})),
# path('categories/', CategoryViewSet.as_view({'get': 'list'})),
# path('categories/<int:pk>/', CategoryViewSet.as_view({'get': 'retrieve'})),
# ]
| [
"rest_framework.routers.DefaultRouter",
"django.urls.include"
] | [((154, 177), 'rest_framework.routers.DefaultRouter', 'routers.DefaultRouter', ([], {}), '()\n', (175, 177), False, 'from rest_framework import routers\n'), ((343, 363), 'django.urls.include', 'include', (['router.urls'], {}), '(router.urls)\n', (350, 363), False, 'from django.urls import path, include\n')] |
import numpy as np
import tensorflow as tf
from tensorflow.contrib.opt import NadamOptimizer
from tensorflow.python.training.adam import AdamOptimizer
from tensorflow.python.training.momentum import MomentumOptimizer
from tensorflow.python.training.rmsprop import RMSPropOptimizer
from core.HM2 import PHASE_WAKE, PHASE_SLEEP
from core.argo.core.optimizers.NesterovConst import NesterovConst
class WakeSleepOptimizer2(tf.compat.v1.train.GradientDescentOptimizer):
def __init__(self, **optimizer_kwargs):
self._model = optimizer_kwargs["model"]
self._individual_learning_rate = optimizer_kwargs["individual_learning_rate"]
self._learning_rate = optimizer_kwargs["learning_rate"]
self._rescale_learning_rate = optimizer_kwargs["rescale_learning_rate"]
self._d_p = None
self._n_reg = None
post_optimizer = optimizer_kwargs["post_optimizer"] if "post_optimizer" in optimizer_kwargs else None
if post_optimizer is None or post_optimizer == "GD":
self._post_optimizer = super()
elif post_optimizer == "Momentum":
self._post_optimizer = MomentumOptimizer(learning_rate=optimizer_kwargs["learning_rate"],
momentum=0.95,
use_locking=False,
name="MomentumOptimizer")
elif post_optimizer == "RMSProp":
self._post_optimizer = RMSPropOptimizer(learning_rate=optimizer_kwargs["learning_rate"],
decay=0.9,
epsilon=1e-5,
use_locking=False,
name="RMSPropOptimizer")
elif post_optimizer == "Adam":
self._post_optimizer = AdamOptimizer(learning_rate=optimizer_kwargs["learning_rate"],
beta1=0.9,
beta2=0.999,
epsilon=1e-8,
use_locking=False,
name="AdamOptimizer")
elif post_optimizer == "Nadam":
self._post_optimizer = NadamOptimizer(learning_rate=optimizer_kwargs["learning_rate"],
beta1=0.9,
beta2=0.999,
epsilon=1e-8,
use_locking=False,
name="NadamOptimizer")
elif post_optimizer == "Nesterov":
self._post_optimizer = MomentumOptimizer(learning_rate=optimizer_kwargs["learning_rate"],
momentum=0.95,
use_locking=False,
use_nesterov=True,
name="NesterovMomentumOptimizer")
elif post_optimizer == "NesterovConst":
self._post_optimizer = NesterovConst(model=self._model,
learning_rate=optimizer_kwargs["learning_rate"],
use_locking=False,
name="NesterovConstOptimizer")
else:
raise Exception("There is no such post optimizer defined. Must be: None, Adam, Momentum, RMSProp ...")
super().__init__(self._learning_rate)
self._network = self._model._network
self._ilr = self.check_ilr(self._individual_learning_rate)
def check_ilr(self, individual_learning_rate):
length_of_network = len(self._network._layers_spec) + 1
if isinstance(individual_learning_rate, list):
assert len(individual_learning_rate) == length_of_network, \
"Individual learning rates have to equal in length the number of layers, {} and {}".format(
individual_learning_rate, length_of_network)
return list(map(float, individual_learning_rate))
elif isinstance(individual_learning_rate, dict):
ilr = [float(individual_learning_rate[i]) if i in individual_learning_rate else 1.0 for i in
range(length_of_network)]
return ilr
elif isinstance(individual_learning_rate, (int, float)):
return [float(individual_learning_rate)] * length_of_network
else:
raise Exception("You gave an unexpected data type as Individual learning rates")
def apply_gradients(self, grads_and_vars, global_step=None, name="WSOM"):
return self._post_optimizer.apply_gradients(grads_and_vars=grads_and_vars, global_step=global_step, name=name)
def compute_gradients(self, phase, *args, **kw):
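        # Collect (gradient, variable) pairs from each layer's wake() or sleep() step,
        # scale them with the per-layer learning rate, then return (-lr * grad + reg_grad, var)
        # pairs for apply_gradients; the sleep-phase learning rate is additionally rescaled
        # by self._rescale_learning_rate.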
grads = []
vars = []
layers = self._network._layers
if phase == PHASE_WAKE:
for i, layer in enumerate(layers):
grad, var = layer.wake()
vars += list(var)
if len(grad) > 0:
grads += [g * self._ilr[i] for g in grad]
elif phase == PHASE_SLEEP:
# CLASSIC SLEEP
for i, layer in enumerate(layers[:-1]):
grad, var = layer.sleep()
vars += list(var)
if len(grad) > 0:
grads += [g * self._ilr[i] for g in grad]
else:
raise ValueError("invalid value for phase '{}'".format(phase))
lr = 1.
if phase == PHASE_SLEEP:
lr *= self._rescale_learning_rate
regs = self._get_regularizers(vars)
grads_and_vars_not_none = [(tf.multiply(-lr, g, name="g_" + g.name.split(":")[0]) + r, v) for (g, r, v) in
zip(grads, regs, vars) if g is not None]
assert np.all([g.shape == v.shape for (g, v) in
grads_and_vars_not_none]), "The shapes of weights and gradients are not the same"
return grads_and_vars_not_none
def _get_regularizers(self, weights):
regs = [0.0] * len(weights)
if self._model.regularizers:
loss = 0.0 + tf.add_n(self._model.regularizers, name="regularization")
regs = tf.gradients(loss, weights)
return regs
| [
"tensorflow.contrib.opt.NadamOptimizer",
"tensorflow.python.training.momentum.MomentumOptimizer",
"tensorflow.gradients",
"tensorflow.add_n",
"core.argo.core.optimizers.NesterovConst.NesterovConst",
"numpy.all",
"tensorflow.python.training.rmsprop.RMSPropOptimizer",
"tensorflow.python.training.adam.AdamOptimizer"
] | [((6133, 6199), 'numpy.all', 'np.all', (['[(g.shape == v.shape) for g, v in grads_and_vars_not_none]'], {}), '([(g.shape == v.shape) for g, v in grads_and_vars_not_none])\n', (6139, 6199), True, 'import numpy as np\n'), ((6537, 6564), 'tensorflow.gradients', 'tf.gradients', (['loss', 'weights'], {}), '(loss, weights)\n', (6549, 6564), True, 'import tensorflow as tf\n'), ((1138, 1269), 'tensorflow.python.training.momentum.MomentumOptimizer', 'MomentumOptimizer', ([], {'learning_rate': "optimizer_kwargs['learning_rate']", 'momentum': '(0.95)', 'use_locking': '(False)', 'name': '"""MomentumOptimizer"""'}), "(learning_rate=optimizer_kwargs['learning_rate'], momentum\n =0.95, use_locking=False, name='MomentumOptimizer')\n", (1155, 1269), False, 'from tensorflow.python.training.momentum import MomentumOptimizer\n'), ((6460, 6517), 'tensorflow.add_n', 'tf.add_n', (['self._model.regularizers'], {'name': '"""regularization"""'}), "(self._model.regularizers, name='regularization')\n", (6468, 6517), True, 'import tensorflow as tf\n'), ((1502, 1641), 'tensorflow.python.training.rmsprop.RMSPropOptimizer', 'RMSPropOptimizer', ([], {'learning_rate': "optimizer_kwargs['learning_rate']", 'decay': '(0.9)', 'epsilon': '(1e-05)', 'use_locking': '(False)', 'name': '"""RMSPropOptimizer"""'}), "(learning_rate=optimizer_kwargs['learning_rate'], decay=0.9,\n epsilon=1e-05, use_locking=False, name='RMSPropOptimizer')\n", (1518, 1641), False, 'from tensorflow.python.training.rmsprop import RMSPropOptimizer\n'), ((1920, 2066), 'tensorflow.python.training.adam.AdamOptimizer', 'AdamOptimizer', ([], {'learning_rate': "optimizer_kwargs['learning_rate']", 'beta1': '(0.9)', 'beta2': '(0.999)', 'epsilon': '(1e-08)', 'use_locking': '(False)', 'name': '"""AdamOptimizer"""'}), "(learning_rate=optimizer_kwargs['learning_rate'], beta1=0.9,\n beta2=0.999, epsilon=1e-08, use_locking=False, name='AdamOptimizer')\n", (1933, 2066), False, 'from tensorflow.python.training.adam import AdamOptimizer\n'), ((2382, 2530), 'tensorflow.contrib.opt.NadamOptimizer', 'NadamOptimizer', ([], {'learning_rate': "optimizer_kwargs['learning_rate']", 'beta1': '(0.9)', 'beta2': '(0.999)', 'epsilon': '(1e-08)', 'use_locking': '(False)', 'name': '"""NadamOptimizer"""'}), "(learning_rate=optimizer_kwargs['learning_rate'], beta1=0.9,\n beta2=0.999, epsilon=1e-08, use_locking=False, name='NadamOptimizer')\n", (2396, 2530), False, 'from tensorflow.contrib.opt import NadamOptimizer\n'), ((2855, 3018), 'tensorflow.python.training.momentum.MomentumOptimizer', 'MomentumOptimizer', ([], {'learning_rate': "optimizer_kwargs['learning_rate']", 'momentum': '(0.95)', 'use_locking': '(False)', 'use_nesterov': '(True)', 'name': '"""NesterovMomentumOptimizer"""'}), "(learning_rate=optimizer_kwargs['learning_rate'], momentum\n =0.95, use_locking=False, use_nesterov=True, name=\n 'NesterovMomentumOptimizer')\n", (2872, 3018), False, 'from tensorflow.python.training.momentum import MomentumOptimizer\n'), ((3304, 3440), 'core.argo.core.optimizers.NesterovConst.NesterovConst', 'NesterovConst', ([], {'model': 'self._model', 'learning_rate': "optimizer_kwargs['learning_rate']", 'use_locking': '(False)', 'name': '"""NesterovConstOptimizer"""'}), "(model=self._model, learning_rate=optimizer_kwargs[\n 'learning_rate'], use_locking=False, name='NesterovConstOptimizer')\n", (3317, 3440), False, 'from core.argo.core.optimizers.NesterovConst import NesterovConst\n')] |
import torch
import torch.nn.functional as F
from torch import nn
import cv2
class SemanticFPN(nn.Module):
"""
FCN semantic predictor based on 'panoptic FPN' paper.
"""
def __init__(self, cfg, in_channels):
super(SemanticFPN, self).__init__()
self.cfg = cfg.clone()
self.class_num = cfg.MODEL.SEMANTIC.NUM_CLASSES + 1
group_num = 32
self.semantic_seq0 = nn.Sequential(
nn.Conv2d(in_channels, 128, 3, padding=1),
nn.GroupNorm(group_num, 128),
nn.ReLU(),
nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
nn.Conv2d(128, 128, 3, padding=1),
nn.GroupNorm(group_num, 128),
nn.ReLU(),
nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
nn.Conv2d(128, 128, 3, padding=1),
nn.GroupNorm(group_num, 128),
nn.ReLU(),
nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
nn.Conv2d(128, 128, 3, padding=1),
nn.GroupNorm(group_num, 128),
nn.ReLU(),
nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
)
self.semantic_seq1 = nn.Sequential(
nn.Conv2d(in_channels, 128, 3, padding=1),
nn.GroupNorm(group_num, 128),
nn.ReLU(),
nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
nn.Conv2d(128, 128, 3, padding=1),
nn.GroupNorm(group_num, 128),
nn.ReLU(),
nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
nn.Conv2d(128, 128, 3, padding=1),
nn.GroupNorm(group_num, 128),
nn.ReLU(),
nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
)
self.semantic_seq2 = nn.Sequential(
nn.Conv2d(in_channels, 128, 3, padding=1),
nn.GroupNorm(group_num, 128),
nn.ReLU(),
nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
nn.Conv2d(128, 128, 3, padding=1),
nn.GroupNorm(group_num, 128),
nn.ReLU(),
nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
)
self.semantic_seq3 = nn.Sequential(
nn.Conv2d(in_channels, 128, 3, padding=1),
nn.GroupNorm(group_num, 128),
nn.ReLU(),
nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
)
self.semantic_seq4 = nn.Sequential(
nn.Conv2d(in_channels, 128, 3, padding=1),
nn.GroupNorm(group_num, 128),
nn.ReLU()
)
self.semantic_final = nn.Sequential(
nn.Conv2d(128, self.class_num, 1),
nn.Upsample(scale_factor=4, mode='bilinear', align_corners=True)
)
def forward(self, x, targets):
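        # Each FPN level is processed by its own conv/GroupNorm/ReLU/upsample branch,
        # all outputs are interpolated to the finest resolution and summed, and the final
        # 1x1 conv + 4x upsampling yields per-pixel class logits. During training the
        # cross-entropy loss (ignoring label 0) is normalised by the number of labelled pixels.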
sem_losses = {}
x0 = self.semantic_seq0(x[4])
x1 = self.semantic_seq1(x[3])
x2 = self.semantic_seq2(x[2])
x3 = self.semantic_seq3(x[1])
x4 = self.semantic_seq4(x[0])
x0 = F.interpolate(x0, size=x4.size()[-2:], mode="bilinear", align_corners=True)
x1 = F.interpolate(x1, size=x4.size()[-2:], mode="bilinear", align_corners=True)
x2 = F.interpolate(x2, size=x4.size()[-2:], mode="bilinear", align_corners=True)
x3 = F.interpolate(x3, size=x4.size()[-2:], mode="bilinear", align_corners=True)
x = x0 + x1 + x2 + x3 + x4
x = self.semantic_final(x)
if self.training:
# calculate loss
loss_semantic = 0.0
batch_count = 0
for i in range(len(targets)):
label = targets[i].get_field("semantic_label").copy()
x_i = F.interpolate(x[i:i+1], size=label.shape, mode='bilinear', align_corners=True)
label = torch.LongTensor(label).unsqueeze(0)
label = label.to(device=self.cfg.MODEL.DEVICE)
count = torch.sum(label > 0)
loss_semantic += F.cross_entropy(x_i, label, ignore_index=0, reduction="sum")
batch_count += count
if batch_count > 0:
loss_semantic /= batch_count
loss_semantic *= self.cfg.MODEL.SEMANTIC.LOSS_WEIGHT
sem_losses.update(dict(loss_semantic=loss_semantic))
return None, sem_losses
else:
return x, {}
def build_semantic_head(cfg, in_channels):
semantic_head = SemanticFPN(cfg, in_channels)
return semantic_head
| [
"torch.nn.GroupNorm",
"torch.nn.ReLU",
"torch.LongTensor",
"torch.nn.Conv2d",
"torch.sum",
"torch.nn.Upsample",
"torch.nn.functional.interpolate",
"torch.nn.functional.cross_entropy"
] | [((449, 490), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', '(128)', '(3)'], {'padding': '(1)'}), '(in_channels, 128, 3, padding=1)\n', (458, 490), False, 'from torch import nn\n'), ((504, 532), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['group_num', '(128)'], {}), '(group_num, 128)\n', (516, 532), False, 'from torch import nn\n'), ((546, 555), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (553, 555), False, 'from torch import nn\n'), ((569, 633), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(scale_factor=2, mode='bilinear', align_corners=True)\n", (580, 633), False, 'from torch import nn\n'), ((647, 680), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)', '(3)'], {'padding': '(1)'}), '(128, 128, 3, padding=1)\n', (656, 680), False, 'from torch import nn\n'), ((694, 722), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['group_num', '(128)'], {}), '(group_num, 128)\n', (706, 722), False, 'from torch import nn\n'), ((736, 745), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (743, 745), False, 'from torch import nn\n'), ((759, 823), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(scale_factor=2, mode='bilinear', align_corners=True)\n", (770, 823), False, 'from torch import nn\n'), ((837, 870), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)', '(3)'], {'padding': '(1)'}), '(128, 128, 3, padding=1)\n', (846, 870), False, 'from torch import nn\n'), ((884, 912), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['group_num', '(128)'], {}), '(group_num, 128)\n', (896, 912), False, 'from torch import nn\n'), ((926, 935), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (933, 935), False, 'from torch import nn\n'), ((949, 1013), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(scale_factor=2, mode='bilinear', align_corners=True)\n", (960, 1013), False, 'from torch import nn\n'), ((1027, 1060), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)', '(3)'], {'padding': '(1)'}), '(128, 128, 3, padding=1)\n', (1036, 1060), False, 'from torch import nn\n'), ((1074, 1102), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['group_num', '(128)'], {}), '(group_num, 128)\n', (1086, 1102), False, 'from torch import nn\n'), ((1116, 1125), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1123, 1125), False, 'from torch import nn\n'), ((1139, 1203), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(scale_factor=2, mode='bilinear', align_corners=True)\n", (1150, 1203), False, 'from torch import nn\n'), ((1279, 1320), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', '(128)', '(3)'], {'padding': '(1)'}), '(in_channels, 128, 3, padding=1)\n', (1288, 1320), False, 'from torch import nn\n'), ((1334, 1362), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['group_num', '(128)'], {}), '(group_num, 128)\n', (1346, 1362), False, 'from torch import nn\n'), ((1376, 1385), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1383, 1385), False, 'from torch import nn\n'), ((1399, 1463), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(scale_factor=2, mode='bilinear', align_corners=True)\n", (1410, 1463), False, 'from torch import nn\n'), ((1477, 1510), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)', '(3)'], {'padding': '(1)'}), '(128, 128, 3, padding=1)\n', (1486, 1510), False, 'from torch import nn\n'), 
((1524, 1552), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['group_num', '(128)'], {}), '(group_num, 128)\n', (1536, 1552), False, 'from torch import nn\n'), ((1566, 1575), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1573, 1575), False, 'from torch import nn\n'), ((1589, 1653), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(scale_factor=2, mode='bilinear', align_corners=True)\n", (1600, 1653), False, 'from torch import nn\n'), ((1667, 1700), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)', '(3)'], {'padding': '(1)'}), '(128, 128, 3, padding=1)\n', (1676, 1700), False, 'from torch import nn\n'), ((1714, 1742), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['group_num', '(128)'], {}), '(group_num, 128)\n', (1726, 1742), False, 'from torch import nn\n'), ((1756, 1765), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1763, 1765), False, 'from torch import nn\n'), ((1779, 1843), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(scale_factor=2, mode='bilinear', align_corners=True)\n", (1790, 1843), False, 'from torch import nn\n'), ((1911, 1952), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', '(128)', '(3)'], {'padding': '(1)'}), '(in_channels, 128, 3, padding=1)\n', (1920, 1952), False, 'from torch import nn\n'), ((1966, 1994), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['group_num', '(128)'], {}), '(group_num, 128)\n', (1978, 1994), False, 'from torch import nn\n'), ((2008, 2017), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2015, 2017), False, 'from torch import nn\n'), ((2031, 2095), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(scale_factor=2, mode='bilinear', align_corners=True)\n", (2042, 2095), False, 'from torch import nn\n'), ((2109, 2142), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)', '(3)'], {'padding': '(1)'}), '(128, 128, 3, padding=1)\n', (2118, 2142), False, 'from torch import nn\n'), ((2156, 2184), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['group_num', '(128)'], {}), '(group_num, 128)\n', (2168, 2184), False, 'from torch import nn\n'), ((2198, 2207), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2205, 2207), False, 'from torch import nn\n'), ((2221, 2285), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(scale_factor=2, mode='bilinear', align_corners=True)\n", (2232, 2285), False, 'from torch import nn\n'), ((2353, 2394), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', '(128)', '(3)'], {'padding': '(1)'}), '(in_channels, 128, 3, padding=1)\n', (2362, 2394), False, 'from torch import nn\n'), ((2408, 2436), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['group_num', '(128)'], {}), '(group_num, 128)\n', (2420, 2436), False, 'from torch import nn\n'), ((2450, 2459), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2457, 2459), False, 'from torch import nn\n'), ((2473, 2537), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(scale_factor=2, mode='bilinear', align_corners=True)\n", (2484, 2537), False, 'from torch import nn\n'), ((2605, 2646), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', '(128)', '(3)'], {'padding': '(1)'}), '(in_channels, 128, 3, padding=1)\n', (2614, 2646), False, 'from torch import nn\n'), ((2660, 2688), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['group_num', '(128)'], {}), '(group_num, 128)\n', (2672, 2688), 
False, 'from torch import nn\n'), ((2702, 2711), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2709, 2711), False, 'from torch import nn\n'), ((2780, 2813), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', 'self.class_num', '(1)'], {}), '(128, self.class_num, 1)\n', (2789, 2813), False, 'from torch import nn\n'), ((2827, 2891), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(4)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(scale_factor=4, mode='bilinear', align_corners=True)\n", (2838, 2891), False, 'from torch import nn\n'), ((3838, 3923), 'torch.nn.functional.interpolate', 'F.interpolate', (['x[i:i + 1]'], {'size': 'label.shape', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(x[i:i + 1], size=label.shape, mode='bilinear', align_corners=True\n )\n", (3851, 3923), True, 'import torch.nn.functional as F\n'), ((4065, 4085), 'torch.sum', 'torch.sum', (['(label > 0)'], {}), '(label > 0)\n', (4074, 4085), False, 'import torch\n'), ((4119, 4179), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['x_i', 'label'], {'ignore_index': '(0)', 'reduction': '"""sum"""'}), "(x_i, label, ignore_index=0, reduction='sum')\n", (4134, 4179), True, 'import torch.nn.functional as F\n'), ((3941, 3964), 'torch.LongTensor', 'torch.LongTensor', (['label'], {}), '(label)\n', (3957, 3964), False, 'import torch\n')] |
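A minimal smoke test for the semantic head above. The config object is assumed to be a yacs-style CfgNode (the repository's real config class is not shown here); the keys below mirror the attributes the module reads, and the feature-map shapes are arbitrary examples.

import torch
from yacs.config import CfgNode as CN

cfg = CN()
cfg.MODEL = CN()
cfg.MODEL.DEVICE = "cpu"                    # only used when computing the training loss
cfg.MODEL.SEMANTIC = CN()
cfg.MODEL.SEMANTIC.NUM_CLASSES = 10         # assumed value; output has NUM_CLASSES + 1 channels
cfg.MODEL.SEMANTIC.LOSS_WEIGHT = 0.5        # only used in training mode

head = build_semantic_head(cfg, in_channels=256).eval()
# Five FPN-style feature maps, finest (stride 4) to coarsest (stride 64); sizes are examples.
features = [torch.randn(1, 256, 64 // 2 ** i, 64 // 2 ** i) for i in range(5)]
with torch.no_grad():
    logits, _ = head(features, targets=None)
print(logits.shape)                         # torch.Size([1, 11, 256, 256])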
import sys
import os
import glob
import re
import numpy as np
import tensorflow as tf
# Keras
from keras.applications.imagenet_utils import preprocess_input, decode_predictions
from keras.models import load_model
from keras.preprocessing import image
# Flask utils
from flask import Flask, redirect, url_for, request, render_template
from werkzeug.utils import secure_filename
from gevent.pywsgi import WSGIServer
# Define a flask app
app = Flask(__name__)
# Model saved with Keras model.save()
MODEL_PATH = 'models/your_model.h5'
# Load your trained model
# model = load_model(MODEL_PATH)
# model._make_predict_function() # Necessary
print('Model loading...')
# You can also use pretrained model from Keras
# Check https://keras.io/applications/
from keras.applications.resnet50 import ResNet50
model = ResNet50(weights='imagenet')
#graph = tf.get_default_graph()
print('Model loaded. Started serving...')
print('Model loaded. Check http://127.0.0.1:5000/')
def model_predict(img_path, model):
original = image.load_img(img_path, target_size=(224, 224))
# Preprocessing the image
# Convert the PIL image to a numpy array
# IN PIL - image is in (width, height, channel)
# In Numpy - image is in (height, width, channel)
numpy_image = image.img_to_array(original)
# Convert the image / images into batch format
# expand_dims will add an extra dimension to the data at a particular axis
# We want the input matrix to the network to be of the form (batchsize, height, width, channels)
# Thus we add the extra dimension to the axis 0.
image_batch = np.expand_dims(numpy_image, axis=0)
print('PIL image size = ', original.size)
print('NumPy image size = ', numpy_image.shape)
print('Batch image size = ', image_batch.shape)
# Be careful how your trained model deals with the input
# otherwise, it won't make correct prediction!
processed_image = preprocess_input(image_batch, mode='caffe')
#with graph.as_default():
preds = model.predict(processed_image)
print('Deleting File at Path: ' + img_path)
os.remove(img_path)
print('Deleting File at Path - Success - ')
return preds
@app.route('/', methods=['GET'])
def index():
# Main page
return render_template('index.html')
@app.route('/predict', methods=['GET', 'POST'])
def upload():
if request.method == 'POST':
# Get the file from post request
f = request.files['image']
# Save the file to ./uploads
basepath = os.path.dirname(__file__)
file_path = os.path.join(
basepath, 'uploads', secure_filename(f.filename))
f.save(file_path)
print('Begin Model Prediction...')
# Make prediction
preds = model_predict(file_path, model)
print('End Model Prediction...')
# Process your result for human
# pred_class = preds.argmax(axis=-1) # Simple argmax
pred_class = decode_predictions(preds, top=1) # ImageNet Decode
result = str(pred_class[0][0][1]) # Convert to string
return result
return None
if __name__ == '__main__':
app.run(debug=True, threaded=True,port=8000)
| [
"flask.render_template",
"keras.preprocessing.image.img_to_array",
"flask.Flask",
"os.remove",
"os.path.dirname",
"werkzeug.utils.secure_filename",
"numpy.expand_dims",
"keras.applications.resnet50.ResNet50",
"keras.applications.imagenet_utils.preprocess_input",
"keras.applications.imagenet_utils.decode_predictions",
"keras.preprocessing.image.load_img"
] | [((444, 459), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (449, 459), False, 'from flask import Flask, redirect, url_for, request, render_template\n'), ((820, 848), 'keras.applications.resnet50.ResNet50', 'ResNet50', ([], {'weights': '"""imagenet"""'}), "(weights='imagenet')\n", (828, 848), False, 'from keras.applications.resnet50 import ResNet50\n'), ((1029, 1077), 'keras.preprocessing.image.load_img', 'image.load_img', (['img_path'], {'target_size': '(224, 224)'}), '(img_path, target_size=(224, 224))\n', (1043, 1077), False, 'from keras.preprocessing import image\n'), ((1283, 1311), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['original'], {}), '(original)\n', (1301, 1311), False, 'from keras.preprocessing import image\n'), ((1615, 1650), 'numpy.expand_dims', 'np.expand_dims', (['numpy_image'], {'axis': '(0)'}), '(numpy_image, axis=0)\n', (1629, 1650), True, 'import numpy as np\n'), ((1938, 1981), 'keras.applications.imagenet_utils.preprocess_input', 'preprocess_input', (['image_batch'], {'mode': '"""caffe"""'}), "(image_batch, mode='caffe')\n", (1954, 1981), False, 'from keras.applications.imagenet_utils import preprocess_input, decode_predictions\n'), ((2131, 2150), 'os.remove', 'os.remove', (['img_path'], {}), '(img_path)\n', (2140, 2150), False, 'import os\n'), ((2293, 2322), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (2308, 2322), False, 'from flask import Flask, redirect, url_for, request, render_template\n'), ((2552, 2577), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2567, 2577), False, 'import os\n'), ((2995, 3027), 'keras.applications.imagenet_utils.decode_predictions', 'decode_predictions', (['preds'], {'top': '(1)'}), '(preds, top=1)\n', (3013, 3027), False, 'from keras.applications.imagenet_utils import preprocess_input, decode_predictions\n'), ((2645, 2672), 'werkzeug.utils.secure_filename', 'secure_filename', (['f.filename'], {}), '(f.filename)\n', (2660, 2672), False, 'from werkzeug.utils import secure_filename\n')] |
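A hedged client-side sketch for exercising the /predict endpoint above. It assumes the app is running locally on port 8000 (as in the __main__ block) and that a local file named cat.jpg exists; requests is used purely for illustration.

import requests

# The endpoint reads request.files['image'], so post the file under that field name.
with open("cat.jpg", "rb") as fh:
    resp = requests.post("http://127.0.0.1:8000/predict", files={"image": fh})
print(resp.text)   # e.g. the decoded top-1 ImageNet class name, such as 'tabby'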
##############################################################################
#
# Copyright (c) 2017 Shoobx, Inc.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Style Related Element Processing
"""
import copy
import odf
import zope.interface
from odf.namespaces import STYLENS
from z3c.rml import attr, directive
from z3c.rml import template as rml_template
from shoobx.rml2odt import flowable
from shoobx.rml2odt.interfaces import IContentContainer
@zope.interface.implementer(IContentContainer)
class Story(flowable.Flow):
signature = rml_template.IStory
@property
def contents(self):
return self.parent.document.text
@zope.interface.implementer(IContentContainer)
class Header(flowable.Flow):
signature = rml_template.IPageGraphics
klass = staticmethod(odf.style.Header)
def process(self):
self.contents = self.klass()
self.parent.content.addElement(self.contents)
self.processSubDirectives()
class Footer(Header):
klass = staticmethod(odf.style.Footer)
class PageTemplate(directive.RMLDirective):
signature = rml_template.IPageTemplate
factories = {
'header': Header,
'footer': Footer,
}
def process(self):
manager = attr.getManager(self)
styleName = manager.getNextStyleName('Mpm')
pageLayoutProps = odf.style.PageLayoutProperties(
**self.parent.styleArgs)
pageLayout = odf.style.PageLayout(name=styleName)
pageLayout.addElement(pageLayoutProps)
manager.document.automaticstyles.addElement(pageLayout)
args = dict(self.getAttributeValues())
self.content = odf.style.MasterPage(
name=args['id'], pagelayoutname=styleName)
self.parent.pageTemplateNames.append(args['id'])
self.parent.parent.document.masterstyles.addElement(self.content)
self.processSubDirectives()
class Template(directive.RMLDirective):
signature = rml_template.ITemplate
attrMapping = {'bottomMargin': 'marginbottom',
'topMargin': 'margintop',
'leftMargin': 'marginleft',
'rightMargin': 'marginright',
'pagesize': 'pagesize',
'showBoundary': 'border',
}
factories = {
'pageTemplate': PageTemplate,
}
def _getNodeName(self, node):
for att, value in node.attributes.items():
if att[1] == 'name':
return value
return None
def process(self):
# determine style attributes to be used in PageTemplate
args = dict(self.getAttributeValues(attrMapping=self.attrMapping))
allowed = self.attrMapping.values()
styleArgs = {}
for argName, argValue in args.items():
if argName not in allowed:
continue
if argName == 'pagesize':
styleArgs['pagewidth'] = '%spt' % argValue[0]
styleArgs['pageheight'] = '%spt' % argValue[1]
elif argName == 'border':
if argValue:
styleArgs[argName] = "3pt"
else:
styleArgs[argName] = "0pt"
else:
styleArgs[argName] = '%spt' % argValue
self.styleArgs = styleArgs
self.pageTemplateNames = []
self.processSubDirectives()
haveMain = (
'main' in self.pageTemplateNames or
'Main' in self.pageTemplateNames)
if haveMain and 'Standard' not in self.pageTemplateNames:
# LibreOffice is picky and expects a 'Standard' pageTemplate
# as default if nothing specified in the story tag
# OTOH reportlab standard is 'main'
# let's make a copy of 'main' as 'Standard'
mainPT = None
for pt in self.parent.document.masterstyles.childNodes:
if pt.tagName == 'style:master-page':
if self._getNodeName(pt).lower() == 'main':
mainPT = pt
if mainPT is not None:
newPT = copy.deepcopy(mainPT)
newPT.setAttrNS(STYLENS, 'name', 'Standard')
newPT.setAttrNS(STYLENS, 'display-name', 'Standard')
self.parent.document.masterstyles.addElement(newPT)
# but all that is just a workaround for now
# how ODT handles page styles/pageTemplate:
# the style of the para on the previous page should have
# fo:break-after="page"
# to make the page break
# the style of the first para on the page gets
# style:master-page-name="First_20_Page"
# which then refers to the style:page-layout
| [
"z3c.rml.attr.getManager",
"odf.style.MasterPage",
"odf.style.PageLayout",
"odf.style.PageLayoutProperties",
"copy.deepcopy"
] | [((1692, 1713), 'z3c.rml.attr.getManager', 'attr.getManager', (['self'], {}), '(self)\n', (1707, 1713), False, 'from z3c.rml import attr, directive\n'), ((1793, 1848), 'odf.style.PageLayoutProperties', 'odf.style.PageLayoutProperties', ([], {}), '(**self.parent.styleArgs)\n', (1823, 1848), False, 'import odf\n'), ((1883, 1919), 'odf.style.PageLayout', 'odf.style.PageLayout', ([], {'name': 'styleName'}), '(name=styleName)\n', (1903, 1919), False, 'import odf\n'), ((2102, 2165), 'odf.style.MasterPage', 'odf.style.MasterPage', ([], {'name': "args['id']", 'pagelayoutname': 'styleName'}), "(name=args['id'], pagelayoutname=styleName)\n", (2122, 2165), False, 'import odf\n'), ((4556, 4577), 'copy.deepcopy', 'copy.deepcopy', (['mainPT'], {}), '(mainPT)\n', (4569, 4577), False, 'import copy\n')] |
# http://inamidst.com/saxo/
# Created by <NAME>
import os.path
import saxo
import unicodedata
surrogates = {"D800", "DB7F", "DB80", "DBFF", "DC00", "DFFF"}
def create_table(db):
db["saxo_unicode"].create(
("hexcode", str),
("codepoint", int),
("name", str),
("current", str),
("ancient", str),
("category", str),
("character", str),
("display", str))
def populate_table_python(db):
for codepoint in range(1, 0x10000):
hexcode = "%04X" % codepoint
# Skip surrogates
if hexcode in surrogates:
character = ""
else:
character = chr(codepoint)
try: category = unicodedata.category(character)
except TypeError:
continue
try: character.encode("utf-8")
except UnicodeEncodeError:
continue
if category.startswith("M"):
# TODO: Just Mn?
display = "\u25CC" + character
elif category.startswith("C") and not category.endswith("o"):
# Co is Private_Use, allow those
if 0 <= codepoint <= 0x1F:
display = chr(codepoint + 0x2400)
else:
display = "<%s>" % category
else:
display = character
try: name = unicodedata.name(character)
except ValueError:
name = "<control>"
current = name[:]
ancient = ""
db["saxo_unicode"].insert((hexcode, codepoint, name, current,
ancient, category, character, display), commit=False)
db.commit()
@saxo.setup
def setup(irc):
path = os.path.join(irc.base, "database.sqlite3")
with saxo.database(path) as db:
if "saxo_unicode" not in db:
create_table(db)
populate_table_python(db)
| [
"unicodedata.name",
"saxo.database",
"unicodedata.category"
] | [((1695, 1714), 'saxo.database', 'saxo.database', (['path'], {}), '(path)\n', (1708, 1714), False, 'import saxo\n'), ((697, 728), 'unicodedata.category', 'unicodedata.category', (['character'], {}), '(character)\n', (717, 728), False, 'import unicodedata\n'), ((1315, 1342), 'unicodedata.name', 'unicodedata.name', (['character'], {}), '(character)\n', (1331, 1342), False, 'import unicodedata\n')] |
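As a rough illustration of what populate_table_python() produces, the table can be read back with the standard sqlite3 module. The database path and the exact column layout created by saxo's database wrapper are assumptions based on the create_table() call above.

import sqlite3

conn = sqlite3.connect("database.sqlite3")   # assumed to live in the saxo base directory
row = conn.execute(
    "SELECT hexcode, name, display FROM saxo_unicode WHERE codepoint = ?",
    (0xE9,),
).fetchone()
print(row)   # roughly: ('00E9', 'LATIN SMALL LETTER E WITH ACUTE', 'é')
conn.close()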
#TODO
import unittest
import scrape
class TestSandersonScraper(unittest.TestCase):
def test_send_texts(self):
self.assertEqual('foo'.upper(), 'FOO')
def test_get_values(self):
self.assertEqual('foo'.upper(), 'FOO')
def test_is_update(self):
self.assertEqual('foo'.upper(), 'FOO')
def test_create_update_message(self):
self.assertEqual('foo'.upper(), 'FOO')
def test_save_update(self):
self.assertEqual('foo'.upper(), 'FOO')
def test_scrape(self):
self.assertEqual('foo'.upper(), 'FOO')
if __name__ == '__main__':
unittest.main()
#TODO | [
"unittest.main"
] | [((598, 613), 'unittest.main', 'unittest.main', ([], {}), '()\n', (611, 613), False, 'import unittest\n')] |
# Generated by Django 3.2 on 2021-04-29 20:17
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="Subscriber",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("email", models.EmailField(max_length=254)),
("email_hash", models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name="Subscription",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("station_name", models.CharField(max_length=100)),
("status", models.CharField(max_length=1)),
("last_email_date", models.DateTimeField()),
("email_preferred_time", models.CharField(max_length=1)),
("timezone", models.CharField(max_length=10)),
("created_date", models.DateTimeField()),
("update_date", models.DateTimeField()),
(
"subscriber",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="subscribe.subscriber",
),
),
],
),
]
| [
"django.db.models.EmailField",
"django.db.models.ForeignKey",
"django.db.models.BigAutoField",
"django.db.models.DateTimeField",
"django.db.models.CharField"
] | [((373, 469), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (392, 469), False, 'from django.db import migrations, models\n'), ((631, 664), 'django.db.models.EmailField', 'models.EmailField', ([], {'max_length': '(254)'}), '(max_length=254)\n', (648, 664), False, 'from django.db import migrations, models\n'), ((698, 730), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (714, 730), False, 'from django.db import migrations, models\n'), ((909, 1005), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (928, 1005), False, 'from django.db import migrations, models\n'), ((1174, 1206), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1190, 1206), False, 'from django.db import migrations, models\n'), ((1236, 1266), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1)'}), '(max_length=1)\n', (1252, 1266), False, 'from django.db import migrations, models\n'), ((1305, 1327), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (1325, 1327), False, 'from django.db import migrations, models\n'), ((1371, 1401), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1)'}), '(max_length=1)\n', (1387, 1401), False, 'from django.db import migrations, models\n'), ((1433, 1464), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)'}), '(max_length=10)\n', (1449, 1464), False, 'from django.db import migrations, models\n'), ((1500, 1522), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (1520, 1522), False, 'from django.db import migrations, models\n'), ((1557, 1579), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (1577, 1579), False, 'from django.db import migrations, models\n'), ((1654, 1748), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""subscribe.subscriber"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'subscribe.subscriber')\n", (1671, 1748), False, 'from django.db import migrations, models\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
class QNetwork(nn.Module):
"""Implements a simple fully-connected 3 layers neural network."""
def __init__(self, state_size: int, action_size: int, seed: int):
super(QNetwork, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, 32)
self.fc2 = nn.Linear(32, 32)
self.fc3 = nn.Linear(32, action_size)
def forward(self, state):
x = torch.tanh(self.fc1(state))
x = torch.tanh(self.fc2(x))
return self.fc3(x)
class DuelingQNetwork(nn.Module):
"""Implements a Dueling architecture."""
def __init__(self, state_size: int, action_size: int, seed: int):
super(DuelingQNetwork, self).__init__()
self.seed = torch.manual_seed(seed)
# Dueling DQN has first common layers, then splits into two parts,
# one for the state value and another for the action advantage.
self.fc1 = nn.Linear(state_size, 32)
self.state_value_fc1 = nn.Linear(32, 16)
self.state_value_fc2 = nn.Linear(16, 1)
self.advantage_values_fc1 = nn.Linear(32, 16)
self.advantage_values_fc2 = nn.Linear(16, action_size)
def forward(self, state):
"""Build a network that maps state -> action values."""
dense_state = torch.tanh(self.fc1(state))
state_value = torch.tanh(self.state_value_fc1(dense_state))
state_value = self.state_value_fc2(state_value)
advantage_values = torch.tanh(self.advantage_values_fc1(dense_state))
advantage_values = self.advantage_values_fc2(advantage_values)
advantage_mean = torch.mean(advantage_values, dim=1, keepdim=True)
action_values = state_value + advantage_values - advantage_mean
return action_values | [
"torch.mean",
"torch.manual_seed",
"torch.nn.Linear"
] | [((312, 335), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (329, 335), False, 'import torch\n'), ((364, 389), 'torch.nn.Linear', 'nn.Linear', (['state_size', '(32)'], {}), '(state_size, 32)\n', (373, 389), True, 'import torch.nn as nn\n'), ((409, 426), 'torch.nn.Linear', 'nn.Linear', (['(32)', '(32)'], {}), '(32, 32)\n', (418, 426), True, 'import torch.nn as nn\n'), ((446, 472), 'torch.nn.Linear', 'nn.Linear', (['(32)', 'action_size'], {}), '(32, action_size)\n', (455, 472), True, 'import torch.nn as nn\n'), ((844, 867), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (861, 867), False, 'import torch\n'), ((1043, 1068), 'torch.nn.Linear', 'nn.Linear', (['state_size', '(32)'], {}), '(state_size, 32)\n', (1052, 1068), True, 'import torch.nn as nn\n'), ((1109, 1126), 'torch.nn.Linear', 'nn.Linear', (['(32)', '(16)'], {}), '(32, 16)\n', (1118, 1126), True, 'import torch.nn as nn\n'), ((1158, 1174), 'torch.nn.Linear', 'nn.Linear', (['(16)', '(1)'], {}), '(16, 1)\n', (1167, 1174), True, 'import torch.nn as nn\n'), ((1220, 1237), 'torch.nn.Linear', 'nn.Linear', (['(32)', '(16)'], {}), '(32, 16)\n', (1229, 1237), True, 'import torch.nn as nn\n'), ((1274, 1300), 'torch.nn.Linear', 'nn.Linear', (['(16)', 'action_size'], {}), '(16, action_size)\n', (1283, 1300), True, 'import torch.nn as nn\n'), ((1747, 1796), 'torch.mean', 'torch.mean', (['advantage_values'], {'dim': '(1)', 'keepdim': '(True)'}), '(advantage_values, dim=1, keepdim=True)\n', (1757, 1796), False, 'import torch\n')] |
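A quick shape check for the two networks above; the state size, action size and batch size are arbitrary examples.

import torch

net = DuelingQNetwork(state_size=8, action_size=4, seed=0)
states = torch.randn(5, 8)      # batch of 5 states
q_values = net(states)
print(q_values.shape)            # torch.Size([5, 4]) - one Q-value per action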
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.contrib.sitemaps import Sitemap
from thecut.pages.models import Page
class PageSitemap(Sitemap):
"""Sitemaps.org XML sitemap."""
def items(self):
return Page.objects.current_site().indexable()
def lastmod(self, obj):
return obj.updated_at
sitemaps = {'pages_page': PageSitemap}
| [
"thecut.pages.models.Page.objects.current_site"
] | [((265, 292), 'thecut.pages.models.Page.objects.current_site', 'Page.objects.current_site', ([], {}), '()\n', (290, 292), False, 'from thecut.pages.models import Page\n')] |
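A sketch of exposing the sitemap above through a Django URLconf. The import path of the sitemaps dict and the URL pattern are assumptions; Django's sitemap view instantiates the PageSitemap class itself.

from django.contrib.sitemaps.views import sitemap
from django.urls import path

from thecut.pages.sitemaps import sitemaps   # module path is an assumption

urlpatterns = [
    path("sitemap.xml", sitemap, {"sitemaps": sitemaps},
         name="django.contrib.sitemaps.views.sitemap"),
]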
"""A parser for reading protocol file in Bernese PRC format
Example:
--------
from midgard import parsers
p = parsers.parse_file(parser_name='bernese_prc', file_path='RES211670.PRC')
data = p.as_dict()
Description:
------------
Reads data from files in PRC format
"""
# Standard library imports
from datetime import datetime
import itertools
from typing import Any, Dict, Iterable, List, Tuple
# Midgard imports
from midgard.data import dataset
from midgard.dev import log
from midgard.dev import plugins
from midgard.math.unit import Unit
from midgard.parsers import ChainParser, ParserDef
@plugins.register
class BernesePrcParser(ChainParser):
"""A parser for reading protocol file in Bernese PRC format
    The parsed data are saved in variable **data** as a dictionary with the 4-digit station name as key. The station
    related data are saved in a dictionary with the following keys:
| Key | Type |Description |
|-----------------------|-------------|----------------------------------------------------------------------|
| coord_comp_east | List[float] | List with daily station coordinate comparison results for East |
| | | component in [m] |
| coord_comp_north | List[float] | List with daily station coordinate comparison results for North |
| | | component in [m] |
| coord_comp_up | List[float] | List with daily station coordinate comparison results for Up |
| | | component in [m] |
    | coord_comp_rms_east   | float       | RMS of daily station coordinate comparison results for East         |
| | | component in [m] |
    | coord_comp_rms_north  | float       | RMS of daily station coordinate comparison results for North        |
| | | component in [m] |
    | coord_comp_rms_up     | float       | RMS of daily station coordinate comparison results for Up           |
| | | component in [m] |
| num_of_days | float | Number of days used for analysis |
| pos_mean_x | float | X-coordinate of mean station coordinate position in [m] |
| pos_mean_x_rms1 | float | RMS1 of X-coordinate of mean station coordinate position in [m] |
| pos_mean_x_rms2 | float | RMS2 of X-coordinate of mean station coordinate position in [m] |
| pos_mean_y | float | Y-coordinate of mean station coordinate position in [m] |
| pos_mean_y_rms1 | float | RMS1 of Y-coordinate of mean station coordinate position in [m] |
| pos_mean_y_rms2 | float | RMS2 of Y-coordinate of mean station coordinate position in [m] |
| pos_mean_z | float | Z-coordinate of mean station coordinate position in [m] |
| pos_mean_z_rms1 | float | RMS1 of Z-coordinate of mean station coordinate position in [m] |
| pos_mean_z_rms2 | float | RMS2 of Z-coordinate of mean station coordinate position in [m] |
| repeatability_east | float | Station coordinate repeatability for East component in [m] |
| repeatability_north | float | Station coordinate repeatability for North component in [m] |
| repeatability_up | float | Station coordinate repeatability for Up component in [m] |
| residual_east | float | Station residuals for East component in [m] |
| residual_north | float | Station residuals for North component in [m] |
| residual_up | float | Station residuals for Up component in [m] |
and **meta**-data:
| Key | Description |
|----------------------|--------------------------------------------------------------------------------------|
| num_coord_files | Number of coordinate files used for analysis |
| time | Date of analysis session |
| \__data_path__ | File path |
| \__parser_name__ | Parser name |
"""
def __init__(self, *args: Tuple[Any], **kwargs: Dict[Any, Any]):
"""
Args:
args: Parameters without keyword.
            kwargs:  Keyword arguments.
"""
super().__init__(*args, **kwargs)
self.fields = list() # Save field names, which are read. Needed by generating of dataset.
def setup_parser(self) -> Iterable[ParserDef]:
"""Set up information needed for the parser
This should return a dictionary with all parameters needed by np.genfromtxt to do the actual parsing.
Returns:
Dict: Parameters needed by np.genfromtxt to parse the input file.
"""
# Skip lines until 'Verification of fiducial stations' section
skip_lines_parser = ParserDef(
end_marker=lambda line, _ln, _n: (
"Verification of fiducial stations" in line # Start of residuals_parser
or "PART 9: SLIDING 7-SESSION COMPARISON OF STATION COORDINATES" in line # Start of repeatability_parser
or "LIST OF COORDINATE FILES" in line # Start of num_files_parser
or "COMPARISON OF COORDINATES" in line # Start of coord_comp_parser
),
label= lambda line, _ln: line,
parser_def = {},
)
#----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8-
#
#================================================================================
#RNX2SNX BPE PROCESSING SUMMARY FOR YEAR-SESSION 21-1660
#================================================================================
#
#
# Summary file generated at 16-Jun-2021 13:12:50 by R2S_SUM
time_parser = ParserDef(
end_marker=lambda line, _ln, _n: line.startswith("Summary file generated"),
label=lambda line, _ln: "SUMMARY FOR YEAR-SESSION" in line,
parser_def={
True: {
"parser": self._parse_time,
"fields": {
"time": (0, None),
},
},
},
)
#----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8-
#
# RNX2SNX_211660: Verification of fiducial stations
# -------------------------------------------------------------------------------
#
#
# FILE 1: APR211660.CRD: EXTRAPOLATE
# FILE 2: F1_211660.CRD: RNX2SNX_211660: Final coordinate/troposphere results
#
# LOCAL GEODETIC DATUM: IGb14
# RESIDUALS IN LOCAL SYSTEM (NORTH, EAST, UP)
#
#
#
# ---------------------------------------------------------------------
# | NUM | NAME | FLG | RESIDUALS IN MILLIMETERS | |
# ---------------------------------------------------------------------
# | | | | | |
# | 1 | 0ABI | P A | -1.03 3.41 14.57 | M |
# | 2 | AASC | P A | 0.01 1.26 4.53 | M |
# | | | | | |
# ---------------------------------------------------------------------
# | | RMS / COMPONENT | | 2.76 2.65 6.33 | |
# | | MEAN | | -0.00 -0.00 -0.00 | |
# | | MIN | | -5.85 -8.09 -11.93 | |
# | | MAX | | 5.35 3.83 10.56 | |
# ---------------------------------------------------------------------
residuals_parser = ParserDef(
end_marker=lambda line, _ln, _n: line.strip().startswith("========="),
label=lambda line, _ln: (
line.strip().startswith("|") # Line starts with sign "|"
and line[2:6].strip().isnumeric() # 1st column is a number
),
parser_def={
True: {
"parser": self._parse_line,
"fields": {
"station": (9, 26),
"flag": (28, 32),
"residual_north": (34, 44),
"residual_east": (45, 54),
"residual_up": (55, 64),
},
},
},
)
#----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8-
#
# RNX2SNX_211660: Coordinate repeatability for verificati
# -------------------------------------------------------------------------------
# Total number of stations: 316
# -------------------------------------------------------------------------------
# Weekday Repeatability (mm)
# Station #Days 0123456 N E U
# -------------------------------------------------------------------------------
# 0ABI 1 A 0.00 0.00 0.00
# AASC 7 AAAAAAA 1.21 0.98 3.09
# -------------------------------------------------------------------------------
# # Coordinate estimates: 2176 1.38 1.47 5.75
repeatability_parser = ParserDef(
end_marker=lambda line, _ln, _n: line.strip().startswith("# Coordinate estimates"),
label=lambda line, _ln: (
len(line) > 49 # Length of line has to be larger than 49 characters
and line[1:4].isalnum() # Station name
and line[20].isnumeric() # Number of days
),
parser_def={
True: {
"parser": self._parse_line,
"fields": {
"station": (1, 5),
"num_of_days": (15, 21),
"weekday": (22, 30),
"repeatability_north": (31, 38),
"repeatability_east": (39, 44),
"repeatability_up": (45, 50),
},
},
},
)
#----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8-
#
# -------------------------------------------------------------------------------------------
# File Coordinate files
# -------------------------------------------------------------------------------------------
# 1 ${P}/PGS/STA/F1_211600.CRD
# 2 ${P}/PGS/STA/F1_211610.CRD
# 3 ${P}/PGS/STA/F1_211620.CRD
# 4 ${P}/PGS/STA/F1_211630.CRD
# 5 ${P}/PGS/STA/F1_211640.CRD
# 6 ${P}/PGS/STA/F1_211650.CRD
# 7 ${P}/PGS/STA/F1_211660.CRD
# -------------------------------------------------------------------------------------------
#
#
# -------------------------------------------------------------------------------------------
# LIST OF COORDINATE FILES
# -------------------------------------------------------------------------------------------
#
# NUMBER OF COORDINATE FILES: 7
num_coord_files_parser = ParserDef(
end_marker=lambda line, _ln, _n: line.startswith("1"),
label=lambda line, _ln: line.strip().startswith("NUMBER OF COORDINATE FILES"),
parser_def={
True: {
"parser": self._parse_num_coord_files,
"strip": "",
"fields": {
"num_coord_files": (0, None),
},
},
},
)
#----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8-
#
# COMPARISON OF COORDINATES (IN NORTH, EAST, AND HEIGHT COMPONENT)
# EPOCH FOR COMPARISON: AS IN COORDINATE FI
# RMS: UNWEIGHTED RMS OF THE ESTIMATION OF ONE COORDINATE COMPONENT IN MM
# ---------------------------------------------------------------------------------
#
# NUM STATION #FIL C RMS 1 2 3 4 5 6 7
# ---------------------------------------------------------------------------------
# 2 AASC 7 N 1.21 -1.85 -0.41 0.90 -1.27 0.67 1.39 0.56
# E 0.98 1.50 -1.02 0.47 0.51 -0.78 -1.11 0.42
# U 3.09 0.54 5.33 -0.23 -3.92 0.64 -3.41 1.04
#
# 3 ADAC 7 N 1.76 -1.80 -1.47 0.88 3.25 0.60 -1.24 -0.23
# E 0.82 0.02 -0.20 0.65 -0.84 1.47 -0.37 -0.74
# U 9.21 -1.14 5.65 17.49 -0.76 -9.54 -3.61 -8.09
#
# 72 ENON 5 N 5.03 -7.11 -1.71 -0.84 5.30 4.37
# E 1.85 0.78 0.75 2.13 -2.61 -1.06
# U 6.34 8.82 2.17 1.37 -6.60 -5.76
#
# 33 BIRK 1 N 0.00 0.00
# E 0.00 0.00
# U 0.00 0.00
coord_comparison_parser = ParserDef(
end_marker=lambda line, _ln, _n: line.startswith("1"),
label=lambda line, _ln: (
len(line) > 27 # Length of line has to be larger than 27 characters
and line[27] in ["N", "E", "U"] # Coordinate flag ('N', 'E' or 'U')
and line[31].isnumeric() # RMS
),
parser_def={
True: {
"parser": self._parse_coord_comparison,
"strip": "",
"fields": {
"station": (6, 10),
"num_files": (11, 25),
"flag_coord": (26, 28),
"rms": (29, 35),
"values": (36, None),
},
},
},
)
#----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----+----1----+----2----
#
# MEAN VALUES OF GEOCENTRIC X,Y,Z - COORDINATES
# RMS1: RMS OF UNWEIGHTED AVERAGE OF EACH COORDINATE COMPONENT
# RMS2: FORMAL ACCURACY OF EACH COORDINATE COMPONENT FROM COMBINED SOLUTION USING EQUAL WEIGHTS
# ----------------------------------------------------------------------------------------------------------------------------
#
# NUM STATION #FIL FLG X (M) RMS1 RMS2 Y (M) RMS1 RMS2 Z (M) RMS1 RMS2
# ----------------------------------------------------------------------------------------------------------------------------
#
# 2 AASC 7 M 3172870.21703 0.00072 0.00144 604208.68041 0.00041 0.00144 5481574.63290 0.00101 0.00144
# 3 ADAC 7 M 1916240.20525 0.00114 0.00144 963577.13113 0.00066 0.00144 5986596.69558 0.00330 0.00144
coord_mean_parser = ParserDef(
end_marker=lambda line, _ln, _n: line.startswith(">>> CPU"),
label=lambda line, _ln: (
len(line) > 100 # Length of line has to be larger than 100 characters
and line[0:4].strip().isnumeric() # 1st column is a number
and line[6:10].isalnum() # Station name
),
parser_def={
True: {
"parser": self._parse_line,
"fields": {
"station": (6, 10),
"num_files": (12, 26),
"flag": (27, 29),
"pos_mean_x": (30, 46),
"pos_mean_x_rms1": (47, 54),
"pos_mean_x_rms2": (55, 62),
"pos_mean_y": (62, 77),
"pos_mean_y_rms1": (78, 85),
"pos_mean_y_rms2": (86, 93),
"pos_mean_z": (94, 108),
"pos_mean_z_rms1": (109, 116),
"pos_mean_z_rms2": (117, 124),
},
},
},
)
return itertools.chain([
time_parser,
skip_lines_parser,
residuals_parser,
skip_lines_parser,
repeatability_parser,
skip_lines_parser,
num_coord_files_parser,
skip_lines_parser,
coord_comparison_parser,
coord_mean_parser,
])
#
# PARSERS
#
def _parse_coord_comparison(self, line: Dict[str, str], cache: Dict[str, Any]) -> None:
"""Parse station coordinate comparison table
"""
if line["station"].strip().lower():
cache["station"] = line["station"].strip().lower()
station = cache["station"]
self.data.setdefault(station, dict())
coord_def = {
"N": "north",
"E": "east",
"U": "up",
}
coord_key = coord_def[line['flag_coord'].strip()]
self.data[station][f"coord_comp_rms_{coord_key}"] = float(line["rms"]) * Unit.millimeter2meter
if not f"coord_comp_rms_{coord_key}" in self.fields:
self.fields.append(f"coord_comp_rms_{coord_key}")
# Parse values line
#----+----1----+----2----+----3----+----4----+-----
# 1.21 -1.85 -0.41 0.90 -1.27 0.67 1.39 0.56
# 5.03 -7.11 -1.71 -0.84 5.30 4.37
# 0.00 0.00
if not "num_coord_files" in self.meta:
log.warn("Number of coordinate files are unknown. Daily comparison values can not be read.")
return
len_values = self.meta["num_coord_files"] * 6 # length of line depends on number of files
line_values = line["values"].ljust(len_values)
values = [line_values[i:i+6] for i in range(0, len_values, 6)]
for idx, value in enumerate(values):
if value.strip():
values[idx] = float(value) * Unit.millimeter2meter
else:
values[idx] = float('nan')
self.data[station][f"coord_comp_{coord_key}"] = values
if not f"coord_comp_{coord_key}" in self.fields:
self.fields.append(f"coord_comp_{coord_key}")
def _parse_time(self, line: Dict[str, str], _: Dict[str, Any]) -> None:
"""Parse date of analysis session
"""
# Example to parse for getting date:
#
# RNX2SNX BPE PROCESSING SUMMARY FOR YEAR-SESSION 21-1660
#
time = line["time"].split("YEAR-SESSION")[1].strip()
self.meta["time"] = datetime.strptime(time[:-1], "%y-%j")
def _parse_line(self, line: Dict[str, str], _: Dict[str, Any]) -> None:
"""Parse line
"""
station = line["station"].lower()
self.data.setdefault(station, dict())
skip_fields = ["flag", "num_files", "station", "weekday"]
unit_millimeter = [
"repeatability_east",
"repeatability_north",
"repeatability_up"
"residual_east",
"residual_north",
"residual_up",
]
for key, value in line.items():
if key in skip_fields:
continue
if not key in self.fields:
self.fields.append(key)
if key in unit_millimeter:
self.data[station][key] = float(value) * Unit.millimeter2meter
else:
self.data[station][key] = float(value)
def _parse_num_coord_files(self, line: Dict[str, str], _: Dict[str, Any]) -> None:
"""Parse number of coordinate files
"""
# Example to parse for getting number of coordinate files:
#
# NUMBER OF COORDINATE FILES: 7
#
self.meta["num_coord_files"] = int(line["num_coord_files"].split(":")[1])
#
# GET DATASET
#
def as_dataset(self) -> "Dataset":
"""Return the parsed data as a Dataset
Returns:
            Midgard Dataset where station coordinates and associated information are stored with the following fields:
| Field | Type | Description |
|-------------------------|---------------|-------------------------------------------------------------------|
| coord_comp_east_day<x> | numpy.ndarray | Station coordinate comparison results for East component in [m] |
| | | for day X (X=[1|2|...|7]) |
| coord_comp_north_day<x> | numpy.ndarray | Station coordinate comparison results for North component in [m] |
| | | for day X (X=[1|2|...|7]) |
| coord_comp_up_day<x> | numpy.ndarray | Station coordinate comparison results for Up component in [m] |
| | | for day X (X=[1|2|...|7]) |
            | coord_comp_rms_east     | numpy.ndarray | RMS of daily station coordinate comparison results for East       |
| | | component in [m] |
            | coord_comp_rms_north    | numpy.ndarray | RMS of daily station coordinate comparison results for North      |
| | | component in [m] |
            | coord_comp_rms_up       | numpy.ndarray | RMS of daily station coordinate comparison results for Up         |
| | | component in [m] |
| num_of_days | numpy.ndarray | Number of days used for analysis |
| pos_mean_x | numpy.ndarray | X-coordinate of mean station coordinate position in [m] |
| pos_mean_x_rms1 | numpy.ndarray | RMS1 of X-coordinate of mean station coordinate position in [m] |
| pos_mean_x_rms2 | numpy.ndarray | RMS2 of X-coordinate of mean station coordinate position in [m] |
| pos_mean_y | numpy.ndarray | Y-coordinate of mean station coordinate position in [m] |
| pos_mean_y_rms1 | numpy.ndarray | RMS1 of Y-coordinate of mean station coordinate position in [m] |
| pos_mean_y_rms2 | numpy.ndarray | RMS2 of Y-coordinate of mean station coordinate position in [m] |
| pos_mean_z | numpy.ndarray | Z-coordinate of mean station coordinate position in [m] |
| pos_mean_z_rms1 | numpy.ndarray | RMS1 of Z-coordinate of mean station coordinate position in [m] |
| pos_mean_z_rms2 | numpy.ndarray | RMS2 of Z-coordinate of mean station coordinate position in [m] |
| repeatability_east | numpy.ndarray | Station coordinate repeatability for East component in [m] |
| repeatability_north | numpy.ndarray | Station coordinate repeatability for North component in [m] |
| repeatability_up | numpy.ndarray | Station coordinate repeatability for Up component in [m] |
| residual_east | numpy.ndarray | Station residuals for East component in [m] |
| residual_north | numpy.ndarray | Station residuals for North component in [m] |
| residual_up | numpy.ndarray | Station residuals for Up component in [m] |
| station | numpy.ndarray | Station names |
| time | TimeTable | Date of analysis session |
and following Dataset `meta` data:
| Entry | Type | Description |
|---------------------|-------|--------------------------------------------------------------------------------|
| num_coord_files | int | Number of coordinate files used for analysis |
| \__data_path__ | str | File path |
"""
data = dict()
# Generate dataset
dset = dataset.Dataset(num_obs=len(self.data.keys()))
dset.meta = self.meta.copy()
# Remove unnecessary fields in meta
for key in ["__parser_name__"]:
del dset.meta[key]
# Prepare data for adding to dataset
for sta in sorted(self.data.keys()):
for field in self.fields:
if field in ["coord_comp_east", "coord_comp_north", "coord_comp_up"]:
for idx in range(0, self.meta["num_coord_files"]):
if field in self.data[sta]:
data.setdefault(f"{field}_day{idx+1}", list()).append(self.data[sta][field][idx])
else:
data.setdefault(f"{field}_day{idx+1}", list()).append(float('nan'))
continue
if field in self.data[sta]:
data.setdefault(field, list()).append(self.data[sta][field])
else:
# Field does not exist for station 'sta', therefore it is initialized with NaN.
data.setdefault(field, list()).append(float('nan'))
# Add fields to dataset
dset.add_text("station", val=sorted(self.data.keys()))
for field in data:
unit = "" if field == "num_of_days" else "meter"
dset.add_float(field, val=data[field], unit=unit)
dset.add_time(
"time",
val=[dset.meta["time"] for ii in range(0, dset.num_obs)],
scale="utc",
fmt="datetime",
)
return dset
| [
"itertools.chain",
"midgard.parsers.ParserDef",
"datetime.datetime.strptime",
"midgard.dev.log.warn"
] | [((5864, 6164), 'midgard.parsers.ParserDef', 'ParserDef', ([], {'end_marker': "(lambda line, _ln, _n: 'Verification of fiducial stations' in line or \n 'PART 9: SLIDING 7-SESSION COMPARISON OF STATION COORDINATES' in line or\n 'LIST OF COORDINATE FILES' in line or 'COMPARISON OF COORDINATES' in line)", 'label': '(lambda line, _ln: line)', 'parser_def': '{}'}), "(end_marker=lambda line, _ln, _n: \n 'Verification of fiducial stations' in line or \n 'PART 9: SLIDING 7-SESSION COMPARISON OF STATION COORDINATES' in line or\n 'LIST OF COORDINATE FILES' in line or 'COMPARISON OF COORDINATES' in\n line, label=lambda line, _ln: line, parser_def={})\n", (5873, 6164), False, 'from midgard.parsers import ChainParser, ParserDef\n'), ((18195, 18421), 'itertools.chain', 'itertools.chain', (['[time_parser, skip_lines_parser, residuals_parser, skip_lines_parser,\n repeatability_parser, skip_lines_parser, num_coord_files_parser,\n skip_lines_parser, coord_comparison_parser, coord_mean_parser]'], {}), '([time_parser, skip_lines_parser, residuals_parser,\n skip_lines_parser, repeatability_parser, skip_lines_parser,\n num_coord_files_parser, skip_lines_parser, coord_comparison_parser,\n coord_mean_parser])\n', (18210, 18421), False, 'import itertools\n'), ((20708, 20745), 'datetime.datetime.strptime', 'datetime.strptime', (['time[:-1]', '"""%y-%j"""'], {}), "(time[:-1], '%y-%j')\n", (20725, 20745), False, 'from datetime import datetime\n'), ((19634, 19736), 'midgard.dev.log.warn', 'log.warn', (['"""Number of coordinate files are unknown. Daily comparison values can not be read."""'], {}), "(\n 'Number of coordinate files are unknown. Daily comparison values can not be read.'\n )\n", (19642, 19736), False, 'from midgard.dev import log\n')] |
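Beyond the as_dict() example in the module docstring, the parser can also hand back a Midgard Dataset via as_dataset(); the file name below is the one used in that docstring example.

from midgard import parsers

p = parsers.parse_file(parser_name="bernese_prc", file_path="RES211670.PRC")
dset = p.as_dataset()
print(dset.num_obs)    # number of stations in the protocol file
print(dset.station)    # sorted station names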
""" helping functions for downloading german weather service data """
from pathlib import Path, PurePosixPath
from typing import Union
from python_dwd.constants.access_credentials import DWD_FOLDER_STATIONDATA, DWD_SERVER, DWD_PATH, FTP_EXPRESSION
def create_local_file_name(remote_file_path: Union[Path, str],
local_folder: Union[Path, str]) -> Path:
"""
    The local file path is built from the given local folder, the station data
    subfolder and the original file name taken from the remote path
"""
return Path(local_folder,
DWD_FOLDER_STATIONDATA,
str(remote_file_path).split('/')[-1])
def create_remote_file_name(file: str) -> str:
"""
    The full file path on the server is built from the FTP prefix,
    the DWD path and the given file name
Args:
file: data file name on server
Returns:
complete Path to the required data
"""
file_server = PurePosixPath(DWD_SERVER,
DWD_PATH,
file)
return f"{FTP_EXPRESSION}{file_server}"
| [
"pathlib.PurePosixPath"
] | [((943, 984), 'pathlib.PurePosixPath', 'PurePosixPath', (['DWD_SERVER', 'DWD_PATH', 'file'], {}), '(DWD_SERVER, DWD_PATH, file)\n', (956, 984), False, 'from pathlib import Path, PurePosixPath\n')] |
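A small usage sketch for the two helpers above; the remote file name and the local folder are made-up examples.

remote = create_remote_file_name("stundenwerte_TU_00044_akt.zip")   # hypothetical file name
local = create_local_file_name(remote, local_folder="dwd_data")
print(remote)   # full server path, prefixed with FTP_EXPRESSION
print(local)    # dwd_data/<station data folder>/stundenwerte_TU_00044_akt.zip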
import inspect
from copy import deepcopy
SELF = "'self'"
UNSAFE_INLINE = "'unsafe-inline'"
UNSAFE_EVAL = "'unsafe-eval'"
NONE = "'none'"
STRICT_DYNAMIC = "'strict-dynamic'"
class Directive:
"""Descriptor for the management and rendering of CSP directives.
Uses types to do some basic sanity checking. This does not ensure
that the resulting directive is necessarily valid though. For example,
typos are not caught, nor are non-sensical values.
Validation is currently out of scope for this project.
"""
def __init__(self, name, type, default, render):
self.name = name
self.type = type
self.default = default
self.renderer = render
def render(self, instance):
if self.name not in instance.__dict__:
return None
if not instance.__dict__[self.name]:
return None
return self.renderer(instance.__dict__[self.name])
def __get__(self, instance, cls):
if instance is None:
return self
if self.name not in instance.__dict__:
instance.__dict__[self.name] = self.default()
return instance.__dict__[self.name]
def __set__(self, instance, value):
if not isinstance(value, self.type):
raise TypeError(f"Expected type {self.type}")
instance.__dict__[self.name] = value
class SetDirective(Directive):
def __init__(self, name):
parent = super()
parent.__init__(name, type=set, default=set, render=render_set)
class SingleValueDirective(Directive):
def __init__(self, name):
parent = super()
parent.__init__(name, type=str, default=str, render=str)
class BooleanDirective(Directive):
def __init__(self, name):
parent = super()
parent.__init__(name, type=bool, default=bool, render=render_bool)
def is_directive(obj):
return isinstance(obj, Directive)
def render_set(value):
return " ".join(sorted(value))
def render_bool(value):
return "" if value else None
class ContentSecurityPolicy:
"""Defines the complete set of policies available in CSP 1 and 2.
* Directives which allow for multiple values are defined as sets.
* Directives which allow for a single value may be set by string.
* Directives which are boolean in nature are set via True/False.
The directives are set through properties, with the dashes replaced by
underscores. For example, 'default-src' becomes 'default_src'.
Everywhere else (i.e. values) the dashes should be left as they are.
Example::
policy = ContentSecurityPolicy()
policy.default_src.add('http://*.example.com')
policy.sandbox = "allow-scripts"
policy.block_all_mixed_content = True
"""
# Fetch directives
child_src = SetDirective("child-src")
connect_src = SetDirective("connect-src")
default_src = SetDirective("default-src")
font_src = SetDirective("font-src")
frame_src = SetDirective("frame-src")
img_src = SetDirective("img-src")
manifest_src = SetDirective("manifest-src")
media_src = SetDirective("media-src")
object_src = SetDirective("object-src")
script_src = SetDirective("script-src")
style_src = SetDirective("style-src")
worker_src = SetDirective("worker-src")
# Document directives
base_uri = SetDirective("base-uri")
plugin_types = SetDirective("plugin-types")
sandbox = SingleValueDirective("sandbox")
disown_opener = BooleanDirective("disown-opener")
# Navigation directives
form_action = SetDirective("form-action")
frame_ancestors = SetDirective("frame-ancestors")
# Reporting directives
report_uri = SingleValueDirective("report-uri")
report_to = SingleValueDirective("report-to")
# Other directives
block_all_mixed_content = BooleanDirective("block-all-mixed-content")
require_sri_for = SingleValueDirective("require-sri-for")
    upgrade_insecure_requests = BooleanDirective("upgrade-insecure-requests")
def __init__(self, report_only=False, **directives):
self.report_only = report_only
for directive in directives:
name = directive.replace("-", "_")
assert hasattr(self, name)
setattr(self, name, directives[directive])
def copy(self):
policy = self.__class__()
policy.__dict__ = deepcopy(self.__dict__)
return policy
@property
def directives(self):
for name, value in inspect.getmembers(self.__class__, is_directive):
yield value
@property
def text(self):
values = ((d.name, d.render(self)) for d in self.directives)
values = ((name, text) for name, text in values if text is not None)
return ";".join(" ".join(v).strip() for v in values)
@property
def header_name(self):
if self.report_only:
return "Content-Security-Policy-Report-Only"
else:
return "Content-Security-Policy"
def apply(self, response):
text = self.text
if text:
response.headers[self.header_name] = text
| [
"inspect.getmembers",
"copy.deepcopy"
] | [((4386, 4409), 'copy.deepcopy', 'deepcopy', (['self.__dict__'], {}), '(self.__dict__)\n', (4394, 4409), False, 'from copy import deepcopy\n'), ((4501, 4549), 'inspect.getmembers', 'inspect.getmembers', (['self.__class__', 'is_directive'], {}), '(self.__class__, is_directive)\n', (4519, 4549), False, 'import inspect\n')] |
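A short rendering example for the policy class above; the added sources are arbitrary and the module-level constants (such as SELF) are assumed to be in scope.

policy = ContentSecurityPolicy(report_only=False)
policy.default_src.add(SELF)
policy.script_src.add("https://cdn.example.com")
policy.block_all_mixed_content = True

print(policy.header_name)   # Content-Security-Policy
print(policy.text)          # e.g. block-all-mixed-content;default-src 'self';script-src https://cdn.example.com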
import configparser
import pytest
from mock import MagicMock
from seedsigner.controller import Controller
from seedsigner.models.settings import Settings
def test_singleton_init_fails():
""" The Controller should not allow any code to instantiate it via Controller() """
with pytest.raises(Exception):
c = Controller()
def test_singleton_get_instance_without_configure_fails():
""" Calling get_instance() without first calling configure_instance() should fail """
with pytest.raises(Exception):
c = Controller.get_instance()
def test_singleton_get_instance_preserves_state():
""" Changes to the Controller singleton should be preserved across calls to get_instance() """
# Must reset Singleton instances; pytest cannot properly isolate Singletons for us
# automatically.
# TODO: Cleaner solution here would be nice.
Settings._instance = None
Controller._instance = None
settings = """
[system]
debug = False
default_language = en
persistent_settings = False
[display]
text_color = ORANGE
qr_background_color = FFFFFF
camera_rotation = 0
[wallet]
network = main
software = Prompt
qr_density = 2
custom_derivation = m/0/0
compact_seedqr_enabled = False
"""
config = configparser.ConfigParser()
config.read_string(settings)
# Initialize the instance and verify that it read the config settings
Controller.configure_instance(config, disable_hardware=True)
controller = Controller.get_instance()
assert controller.color == "ORANGE"
# Change a value in the instance...
controller.color = "purple"
# ...get a new copy of the instance and confirm change
controller = Controller.get_instance()
assert controller.color == "purple"
def test_missing_settings_get_defaults():
""" Should gracefully handle any missing fields from `settings.ini` """
# TODO: This is not complete; currently only handles missing compact_seedqr_enabled.
# Must reset Singleton instances; pytest cannot properly isolate Singletons for us
# automatically.
# TODO: Cleaner solution here would be nice.
Settings._instance = None
Controller._instance = None
# Intentionally omit `compact_seedqr_enabled` from settings:
settings = """
[system]
debug = False
default_language = en
persistent_settings = False
[display]
text_color = ORANGE
qr_background_color = FFFFFF
camera_rotation = 0
[wallet]
network = main
software = Prompt
qr_density = 2
custom_derivation = m/0/0
"""
config = configparser.ConfigParser()
config.read_string(settings)
# Controller should parse the settings fine, even though a field is missing
Controller.configure_instance(config, disable_hardware=True)
# Controller should still have a default value
controller = Controller.get_instance()
assert controller.settings.compact_seedqr_enabled is False
| [
"configparser.ConfigParser",
"pytest.raises",
"seedsigner.controller.Controller.configure_instance",
"seedsigner.controller.Controller",
"seedsigner.controller.Controller.get_instance"
] | [((1353, 1380), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (1378, 1380), False, 'import configparser\n'), ((1493, 1553), 'seedsigner.controller.Controller.configure_instance', 'Controller.configure_instance', (['config'], {'disable_hardware': '(True)'}), '(config, disable_hardware=True)\n', (1522, 1553), False, 'from seedsigner.controller import Controller\n'), ((1571, 1596), 'seedsigner.controller.Controller.get_instance', 'Controller.get_instance', ([], {}), '()\n', (1594, 1596), False, 'from seedsigner.controller import Controller\n'), ((1787, 1812), 'seedsigner.controller.Controller.get_instance', 'Controller.get_instance', ([], {}), '()\n', (1810, 1812), False, 'from seedsigner.controller import Controller\n'), ((2729, 2756), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (2754, 2756), False, 'import configparser\n'), ((2875, 2935), 'seedsigner.controller.Controller.configure_instance', 'Controller.configure_instance', (['config'], {'disable_hardware': '(True)'}), '(config, disable_hardware=True)\n', (2904, 2935), False, 'from seedsigner.controller import Controller\n'), ((3005, 3030), 'seedsigner.controller.Controller.get_instance', 'Controller.get_instance', ([], {}), '()\n', (3028, 3030), False, 'from seedsigner.controller import Controller\n'), ((287, 311), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (300, 311), False, 'import pytest\n'), ((325, 337), 'seedsigner.controller.Controller', 'Controller', ([], {}), '()\n', (335, 337), False, 'from seedsigner.controller import Controller\n'), ((497, 521), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (510, 521), False, 'import pytest\n'), ((535, 560), 'seedsigner.controller.Controller.get_instance', 'Controller.get_instance', ([], {}), '()\n', (558, 560), False, 'from seedsigner.controller import Controller\n')] |
import asyncio
from datetime import datetime
from nindo import NindoClient
async def test():
client = NindoClient()
print("\n--- Viral ---")
async for viral in client.viral():
print(viral.type, viral.post.title)
print("\n--- Milestones ---")
async for milestone in client.milestones():
print(milestone.expected_time, milestone.followers)
print("\n--- Coupons ---")
async for coupon in client.coupons():
print(coupon.discount, coupon.code)
print("\n--- Search ---")
async for artist in client.search("unge"):
print(artist.name)
print("\n--- Youtube ---")
async for artist in client.youtube_charts():
print("\n", artist.rank, artist.name)
details = await artist.get_details()
print("Channels:", len(details.youtube_channels))
print("\n--- Instagram ---")
async for artist in client.instagram_charts():
print("\n", artist.rank, artist.name)
details = await artist.get_details()
print("Channels:", len(details.instagram_channels))
channel = details.instagram_channels[0]
channel_details = await channel.get_details()
print("Average Comments:", channel_details.average_comments)
channel_history = await channel.get_history()
print("History Change:", channel_history.before(datetime.utcnow()).total_change)
post_count = await channel.posts().flatten()
print("Post Count:", len(post_count))
print("\n--- TikTok ---")
async for artist in client.tiktok_charts():
print("\n", artist.rank, artist.name)
details = await artist.get_details()
print("Channels:", len(details.tiktok_channels))
print("\n--- Twitter ---")
async for artist in client.twitter_charts():
print("\n", artist.rank, artist.name)
details = await artist.get_details()
print("Channels:", len(details.twitter_channels))
print("\n--- Twitch ---")
async for artist in client.twitch_charts():
print("\n", artist.rank, artist.name)
details = await artist.get_details()
print("Channels:", len(details.twitch_channels))
print("\n--- Live ---")
artist = await client.get_artist("fe23cce0bcdb3d89cbfd500d91487202")
channel = artist.instagram_channels[0]
async for followers in channel.live():
print(followers)
loop = asyncio.get_event_loop()
loop.run_until_complete(test())
| [
"nindo.NindoClient",
"asyncio.get_event_loop",
"datetime.datetime.utcnow"
] | [((2389, 2413), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (2411, 2413), False, 'import asyncio\n'), ((109, 122), 'nindo.NindoClient', 'NindoClient', ([], {}), '()\n', (120, 122), False, 'from nindo import NindoClient\n'), ((1351, 1368), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1366, 1368), False, 'from datetime import datetime\n')] |
from pathlib import Path
import boto3
import src.superannotate as sa
from tests.integration.base import BaseTestCase
class TestDirectS3Upload(BaseTestCase):
PROJECT_NAME = "test_direct_s3_upload"
TEST_FOLDER_NAME = "test_folder"
PROJECT_DESCRIPTION = "desc"
PROJECT_TYPE = "Vector"
S3_BUCKET = "superannotate-python-sdk-test"
S3_FOLDER = "sample_project_vector"
def test_direct_s3_upload(self):
csv = (Path.home() / ".aws" / "credentials").read_text().splitlines()
access_key_id = csv[1].split("=")[1].strip()
access_secret = csv[2].split("=")[1].strip()
sa.upload_images_from_s3_bucket_to_project(
self.PROJECT_NAME,
access_key_id,
access_secret,
self.S3_BUCKET,
self.S3_FOLDER,
)
s3_client = boto3.client("s3")
paginator = s3_client.get_paginator("list_objects_v2")
response_iterator = paginator.paginate(
Bucket=self.S3_BUCKET, Prefix=self.S3_FOLDER
)
on_s3 = []
for response in response_iterator:
if "Contents" in response:
for object_data in response["Contents"]:
key = object_data["Key"]
if key[-4:] in [".jpg", ".png"]:
on_s3.append(key)
self.assertEqual(len(on_s3), sa.get_project_image_count(self.PROJECT_NAME))
def test_direct_s3_upload_folder(self):
csv = (Path.home() / ".aws" / "credentials").read_text().splitlines()
access_key_id = csv[1].split("=")[1].strip()
access_secret = csv[2].split("=")[1].strip()
sa.create_folder(self.PROJECT_NAME, self.TEST_FOLDER_NAME)
project_folder = f"{self.PROJECT_NAME}/{self.TEST_FOLDER_NAME}"
sa.upload_images_from_s3_bucket_to_project(
project_folder, access_key_id, access_secret, self.S3_BUCKET, self.S3_FOLDER
)
s3_client = boto3.client("s3")
paginator = s3_client.get_paginator("list_objects_v2")
response_iterator = paginator.paginate(
Bucket=self.S3_BUCKET, Prefix=self.S3_FOLDER
)
on_s3 = []
for response in response_iterator:
if "Contents" in response:
for object_data in response["Contents"]:
key = object_data["Key"]
if key[-4:] in [".jpg", ".png"]:
on_s3.append(key)
self.assertEqual(len(on_s3), len(sa.search_images(project_folder)))
| [
"boto3.client",
"src.superannotate.upload_images_from_s3_bucket_to_project",
"src.superannotate.create_folder",
"src.superannotate.search_images",
"src.superannotate.get_project_image_count",
"pathlib.Path.home"
] | [((620, 747), 'src.superannotate.upload_images_from_s3_bucket_to_project', 'sa.upload_images_from_s3_bucket_to_project', (['self.PROJECT_NAME', 'access_key_id', 'access_secret', 'self.S3_BUCKET', 'self.S3_FOLDER'], {}), '(self.PROJECT_NAME, access_key_id,\n access_secret, self.S3_BUCKET, self.S3_FOLDER)\n', (662, 747), True, 'import src.superannotate as sa\n'), ((835, 853), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (847, 853), False, 'import boto3\n'), ((1653, 1711), 'src.superannotate.create_folder', 'sa.create_folder', (['self.PROJECT_NAME', 'self.TEST_FOLDER_NAME'], {}), '(self.PROJECT_NAME, self.TEST_FOLDER_NAME)\n', (1669, 1711), True, 'import src.superannotate as sa\n'), ((1793, 1917), 'src.superannotate.upload_images_from_s3_bucket_to_project', 'sa.upload_images_from_s3_bucket_to_project', (['project_folder', 'access_key_id', 'access_secret', 'self.S3_BUCKET', 'self.S3_FOLDER'], {}), '(project_folder, access_key_id,\n access_secret, self.S3_BUCKET, self.S3_FOLDER)\n', (1835, 1917), True, 'import src.superannotate as sa\n'), ((1956, 1974), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (1968, 1974), False, 'import boto3\n'), ((1368, 1413), 'src.superannotate.get_project_image_count', 'sa.get_project_image_count', (['self.PROJECT_NAME'], {}), '(self.PROJECT_NAME)\n', (1394, 1413), True, 'import src.superannotate as sa\n'), ((2493, 2525), 'src.superannotate.search_images', 'sa.search_images', (['project_folder'], {}), '(project_folder)\n', (2509, 2525), True, 'import src.superannotate as sa\n'), ((442, 453), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (451, 453), False, 'from pathlib import Path\n'), ((1475, 1486), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (1484, 1486), False, 'from pathlib import Path\n')] |
import copy
import logging
class InvalidProviderConfig(ValueError):
pass
class attr(object):
def __init__(self, name, alias=None, aliases=None):
self.name = name
self.aliases = []
if alias:
self.aliases.append(alias)
if aliases:
self.aliases.extend(aliases)
@property
def names(self):
return [self.name] + self.aliases
def from_config(self, config, remove=False):
"""
Return the value of this attr instance within the given dictionary.
Parameters
----------
config : dict
The config from which to take this attribute
remove : bool
Whether to remove the attribute from the given config, or whether to
keep it. (default: False)
Examples
--------
>>> f = attr('foo', alias='f')
>>> f.from_config({'a': 1, 'f': 2})
2
>>> f.from_config({'a': 1, 'foo': 2})
2
>>> f.from_config({'a': 1, 'foo': 2, 'f': 3})
Traceback (most recent call last):
...
pyggybank.core.InvalidProviderConfig: Duplicate config item for "foo"...
>>> f.from_config({'a': 1, 'foo': 2, 'f': 2})
2
>>> f.from_config({'a': 1})
Traceback (most recent call last):
...
pyggybank.core.InvalidProviderConfig: No config item for "foo"
"""
matching = [name for name in self.names if name in config]
if remove:
getter = config.pop
else:
getter = config.get
values = [getter(key) for key in matching]
# If duplicate information provided, but it is identical, let the
# config through.
if len(matching) > 1 and not all(values[0] == value
for value in values[1:]):
raise InvalidProviderConfig(
'Duplicate config item for "{}". Got "{}"'
''.format(self.name, ', '.join(matching)))
elif not matching:
raise InvalidProviderConfig(
'No config item for "{}"'.format(self.name))
return values[0]
class Schema:
def __init__(self, provider_names, provider_attrs):
self.provider_names = provider_names
self.provider_attrs = provider_attrs
# TODO: Validate the schema (in case the provider is weirdly configured
# or the attributes have name collisions).
# Including: test attr.name and aliases don't collide with one another.
def sanitise(self, config):
if not hasattr(config, 'keys'):
raise InvalidProviderConfig("Config isn't dict-like.")
keys = set(config.keys())
if 'provider' not in config:
raise InvalidProviderConfig('"provider" is missing from the config')
if config['provider'] not in self.provider_names:
raise InvalidProviderConfig('"{}" is an invalid provider name'
''.format(config.get('provider')))
sanitised = {'provider': config['provider']}
config = copy.deepcopy(config)
for provider_attr in self.provider_attrs:
if not isinstance(provider_attr, attr):
provider_attr = attr(provider_attr)
config_attr = provider_attr.from_config(config, remove=True)
sanitised[provider_attr.name] = config_attr
remaining = sorted(set(config.keys()) - {'provider'})
if remaining:
raise InvalidProviderConfig(
'The following config items are not allowed: {}'
''.format(', '.join(remaining))
)
return sanitised
def extract_credentials(self, config):
"""
Take the credentials out of the given sanitised config.
"""
new_config = {}
credentials = {}
for provider_attr in self.provider_attrs:
if not isinstance(provider_attr, attr):
provider_attr = attr(provider_attr)
if not getattr(provider_attr, 'are_credentials', True):
continue
item = config.get(provider_attr.name, None)
if item is not None:
credentials[provider_attr.name] = item
for name in provider_attr.aliases:
credentials[name] = item
credentials = Credentials(credentials)
return new_config, credentials
class Credentials(dict):
def __getattr__(self, item):
if item in self:
return self[item]
else:
            raise AttributeError('{} has no attribute {}'
''.format(self.__class__.__name__, item))
class ClassPropertyDescriptor:
def __init__(self, fget, fset=None):
self.fget = fget
self.fset = fset
def __get__(self, obj, klass=None):
if klass is None:
klass = type(obj)
return self.fget.__get__(obj, klass)()
def __set__(self, obj, value):
if not self.fset:
raise AttributeError("can't set attribute")
type_ = type(obj)
return self.fset.__get__(obj, type_)(value)
def setter(self, func):
if not isinstance(func, (classmethod, staticmethod)):
func = classmethod(func)
self.fset = func
return self
def classproperty(func):
if not isinstance(func, (classmethod, staticmethod)):
func = classmethod(func)
return ClassPropertyDescriptor(func)
class Provider:
_attributes = []
names = []
domain = ''
@classproperty
def attributes(cls):
attrs = []
for attribute in cls._attributes:
if not isinstance(attribute, attr):
attribute = attr(attribute)
attrs.append(attribute)
return attrs
# @classmethod
# def validate_config(cls, config):
# if not hasattr(config, 'keys'):
# raise InvalidProviderConfig("Config isn't dict-like.")
#
# keys = set(config.keys())
# cls_attrs = set(cls.attributes) | set(['provider'])
# if config.get('provider') not in cls.names:
# raise InvalidProviderConfig(
# 'The "{}" class does not provide "{}"'
# ''.format(cls.__name__, config.get('provider')))
#
# missing = ', '.join(cls_attrs - keys)
# if missing:
# raise InvalidProviderConfig('Config items missing: {}'.format(missing))
# not_allowed = ', '.join(keys - cls_attrs)
# if not_allowed:
# raise InvalidProviderConfig('The following config items '
# 'are not allowed: {}'.format(not_allowed))
# return True
@classmethod
def schema(cls):
"""Return the schema for this provider."""
# Allow a provider the possibility of overriding the
# Schema (type and/or instance).
if isinstance(cls.attributes, Schema):
schema = cls.attributes
else:
schema = Schema(cls.names, cls.attributes)
return schema
@classmethod
def providers(cls):
"""Return a dictionary mapping provider name to class."""
providers = {}
for klass in cls.__subclasses__():
for name in klass.names:
if name in providers:
raise ValueError('Provider name collision for "{}" found '
'between {} and {}.'
''.format(name, klass, providers[name]))
providers[name] = klass
return providers
@classmethod
def pick_provider(cls, config):
providers = cls.providers()
provider_name = config.get('provider', None)
if provider_name is None:
raise ValueError('The provider was not defined in the account '
'config')
provider = providers.get(provider_name, None)
if provider is None:
raise ValueError(
'No provider found for "{}"'.format(provider_name))
return provider
@classmethod
def from_config(cls, config):
"""
Given an account config, return a provider instance and
the Credentials.
"""
provider = cls.pick_provider(config)
schema = provider.schema()
config = schema.sanitise(config)
config, credentials = schema.extract_credentials(config)
return provider.init_from_config(config), credentials
@classmethod
def init_from_config(cls, config):
"""Instantiate the provider with the given config."""
# TODO: Supply the config minus the credentials (e.g. nice-name).
return cls()
@classmethod
def config_schema(cls):
"""Manufacture the config schema for this provider."""
schema = Schema(cls.attributes)
return schema
def prepare_credentials(self, config):
"""Turn the given config into authentication credentials."""
return Credentials(config)
def authenticate(self, browser, credentials):
"""Login to the internet banking session."""
pass
def logout(self, browser):
"""Logout of the internet banking session."""
pass
def balances(self, browser):
"""Return a summary of all accounts."""
# NOTE: Must ensure to go to the right page as the first action of
# this method.
def transactions(self, browser):
"""Return transactions for all accounts."""
@property
def log(self):
"""A pre-configured logger that can be used to log progress/debug."""
if not hasattr(self, '_log'):
log = logging.getLogger(self.__class__.__name__)
logging.basicConfig()
# TODO: Make configurable.
log.setLevel(logging.INFO)
self._log = log
return self._log
from . import providers
| [
"logging.getLogger",
"logging.basicConfig",
"copy.deepcopy"
] | [((3128, 3149), 'copy.deepcopy', 'copy.deepcopy', (['config'], {}), '(config)\n', (3141, 3149), False, 'import copy\n'), ((9763, 9805), 'logging.getLogger', 'logging.getLogger', (['self.__class__.__name__'], {}), '(self.__class__.__name__)\n', (9780, 9805), False, 'import logging\n'), ((9818, 9839), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (9837, 9839), False, 'import logging\n')] |
import numpy as np
from ..block import ColumnOrientedBlock
class NumpyColumnOrientedBlock(ColumnOrientedBlock):
def transposed(self):
return np.transpose(self.data)
| [
"numpy.transpose"
] | [((156, 179), 'numpy.transpose', 'np.transpose', (['self.data'], {}), '(self.data)\n', (168, 179), True, 'import numpy as np\n')] |
from __future__ import print_function
import ast
from IPython.core.magic import Magics, magics_class, cell_magic
from IPython.core import magic_arguments
import re
@magics_class
class FunctionizerMagics(Magics):
def check_variable_exists(self, name, exception=False):
existing = name in self.shell.user_global_ns
if not existing and exception:
raise RuntimeError(f"{name} does not exist in the namespace")
return existing
def process_argument_list(self, arg_list):
kwargs = []
pargs = []
for arg in arg_list:
arg: str
if arg.endswith("!"):
# expand as keyword argument
arg = arg[:-1] # remove the trailing exclamation
kwargs.append(f"{arg}={arg}")
self.check_variable_exists(arg, True)
elif "=" in arg:
kwargs.append(arg)
elif arg.isidentifier():
pargs.append(arg)
else:
raise ValueError(f"'{arg}' is not a valid argument name.")
return pargs, kwargs
@magic_arguments.magic_arguments()
@magic_arguments.argument("--as_dict", action="store_true", default=False, help="return as a dictionary")
@magic_arguments.argument("-a", "--args", type=str, nargs="+", help="arguments")
@magic_arguments.argument("-r", "--ret", type=str, nargs="+", help="return values")
@magic_arguments.argument("-d", "--disable", action="store_true", default=False, help="disable")
@magic_arguments.argument("--skip", action="store_true", default=False, help="only define function, skip execution")
@magic_arguments.argument("--skip_last", action="store_true", default=False, help="drop last line")
@magic_arguments.argument("--return_last", action="store_true", default=False, help="return last line. will override -r")
@magic_arguments.argument("fn", type=str, help="function name")
@cell_magic
def functionize(self, line, cell):
args = magic_arguments.parse_argstring(self.functionize, line)
fn = args.fn
func_args = args.args or []
ret = args.ret or []
skip = args.skip
skip_last = args.skip_last
return_last = args.return_last
as_dict = args.as_dict
shell = self.shell
if skip_last and return_last:
raise UserWarning("Using skip_last and return_last together is not recommended.")
pargs, kwargs = self.process_argument_list(func_args)
# Apply the assignment of the kwargs for before executing the cell
assignment_code = "\n".join(kwargs) + "\n"
if args.disable:
shell.run_cell(assignment_code)
shell.run_cell(cell)
all_args = pargs + kwargs
arg_sig = ",".join(all_args)
lines = cell.splitlines()
# purge empty lines
        while lines:
            last_line = lines[-1]
            if re.search(r"\S+", last_line):
# is not empty
break
lines.pop(-1)
if skip_last:
lines.pop(-1)
if not lines:
lines.append("pass")
else:
if return_last:
last_expr = lines.pop(-1)
ret_expr = f"return {last_expr}"
else:
if as_dict:
ret_expr = f'return dict({",".join([f"{r}={r}" for r in ret])})'
else:
ret_expr = f'return {",".join(ret)}'
lines.append(ret_expr)
indent_cell = "\n".join([" " + l for l in lines])
function_def = (
f'def {fn}({arg_sig}):\n'
f'{indent_cell}\n'
)
combined = function_def if skip else assignment_code + function_def + cell
shell.run_cell(combined)
def load_ipython_extension(ipython):
ipython.register_magics(FunctionizerMagics)
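# Illustrative notebook usage (the function and variable names are assumptions),
# once the extension has been loaded via `load_ipython_extension`:
#
#   %%functionize scale_data -a data factor=2 -r scaled --skip
#   scaled = [x * factor for x in data]
#
# With --skip this only defines `scale_data(data, factor=2)` returning `scaled`;
# without it, the cell body is also executed in the current namespace.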
| [
"IPython.core.magic_arguments.argument",
"IPython.core.magic_arguments.magic_arguments",
"IPython.core.magic_arguments.parse_argstring",
"re.search"
] | [((1114, 1147), 'IPython.core.magic_arguments.magic_arguments', 'magic_arguments.magic_arguments', ([], {}), '()\n', (1145, 1147), False, 'from IPython.core import magic_arguments\n'), ((1153, 1261), 'IPython.core.magic_arguments.argument', 'magic_arguments.argument', (['"""--as_dict"""'], {'action': '"""store_true"""', 'default': '(False)', 'help': '"""return as a dictionary"""'}), "('--as_dict', action='store_true', default=False,\n help='return as a dictionary')\n", (1177, 1261), False, 'from IPython.core import magic_arguments\n'), ((1263, 1342), 'IPython.core.magic_arguments.argument', 'magic_arguments.argument', (['"""-a"""', '"""--args"""'], {'type': 'str', 'nargs': '"""+"""', 'help': '"""arguments"""'}), "('-a', '--args', type=str, nargs='+', help='arguments')\n", (1287, 1342), False, 'from IPython.core import magic_arguments\n'), ((1348, 1435), 'IPython.core.magic_arguments.argument', 'magic_arguments.argument', (['"""-r"""', '"""--ret"""'], {'type': 'str', 'nargs': '"""+"""', 'help': '"""return values"""'}), "('-r', '--ret', type=str, nargs='+', help=\n 'return values')\n", (1372, 1435), False, 'from IPython.core import magic_arguments\n'), ((1436, 1536), 'IPython.core.magic_arguments.argument', 'magic_arguments.argument', (['"""-d"""', '"""--disable"""'], {'action': '"""store_true"""', 'default': '(False)', 'help': '"""disable"""'}), "('-d', '--disable', action='store_true', default=\n False, help='disable')\n", (1460, 1536), False, 'from IPython.core import magic_arguments\n'), ((1537, 1657), 'IPython.core.magic_arguments.argument', 'magic_arguments.argument', (['"""--skip"""'], {'action': '"""store_true"""', 'default': '(False)', 'help': '"""only define function, skip execution"""'}), "('--skip', action='store_true', default=False, help\n ='only define function, skip execution')\n", (1561, 1657), False, 'from IPython.core import magic_arguments\n'), ((1658, 1760), 'IPython.core.magic_arguments.argument', 'magic_arguments.argument', (['"""--skip_last"""'], {'action': '"""store_true"""', 'default': '(False)', 'help': '"""drop last line"""'}), "('--skip_last', action='store_true', default=False,\n help='drop last line')\n", (1682, 1760), False, 'from IPython.core import magic_arguments\n'), ((1762, 1887), 'IPython.core.magic_arguments.argument', 'magic_arguments.argument', (['"""--return_last"""'], {'action': '"""store_true"""', 'default': '(False)', 'help': '"""return last line. will override -r"""'}), "('--return_last', action='store_true', default=\n False, help='return last line. will override -r')\n", (1786, 1887), False, 'from IPython.core import magic_arguments\n'), ((1888, 1950), 'IPython.core.magic_arguments.argument', 'magic_arguments.argument', (['"""fn"""'], {'type': 'str', 'help': '"""function name"""'}), "('fn', type=str, help='function name')\n", (1912, 1950), False, 'from IPython.core import magic_arguments\n'), ((2021, 2076), 'IPython.core.magic_arguments.parse_argstring', 'magic_arguments.parse_argstring', (['self.functionize', 'line'], {}), '(self.functionize, line)\n', (2052, 2076), False, 'from IPython.core import magic_arguments\n'), ((2951, 2979), 're.search', 're.search', (['"""\\\\S+"""', 'last_line'], {}), "('\\\\S+', last_line)\n", (2960, 2979), False, 'import re\n')] |
"""
When I was mixing icons and magic symbols for training data,
I noticed the GAN struggling. It appeared as if it were trying to
decide whether the image should be mostly black or mostly white. This
seemed to always lead to a mode collapse and devolution into noise.
To combat this, I sorted the icon images into those which are lighter
than the mean of the magic symbols minus a threshold.
It would be better to look at distributions and standard deviations
from the magic symbol means, but this was fast and seemed to do the
trick.
"""
import os
import sys
import glob
import numpy as np
import cairosvg
from PIL import Image
from random import randint
import cv2
from libs.img_mods import add_noise, invert_mostly_black_images
from libs.img_utils import ImageUtils
import matplotlib.pyplot as plt
img_utils = ImageUtils()
#############
# Setup
#############
root_path = os.environ["HOME"]
project_path = f"{root_path}/deep_upscale/"
#################
# Parameters
#################
input_path = f"{root_path}/deep_upscale/images/raw/"
output_path = f"{root_path}/deep_upscale/images/"
poor_images_path = f"{output_path}poor/"
rich_images_path = f"{output_path}rich/"
magic_image_path = "/home/ladvien/Documents/magic_symbols/"
print(rich_images_path)
##########################
# Sort Images on Darkness
##########################
png_file_paths = glob.glob(f"{rich_images_path}/*.png")
magic_file_paths = glob.glob(f"{magic_image_path}/*.png")
magic_means = [
np.array(Image.open(img_path)).mean() for img_path in magic_file_paths
]
magic_img_mean = np.array(magic_means).mean()
print(f"Mean of magic symbol images: {magic_img_mean}")
other_means = [
np.array(Image.open(img_path)).mean() for img_path in png_file_paths
]
print(f"Mean of icons: {np.array(other_means).mean()}")
sorted_dir = f"{output_path}sorted_by_dark/"
img_utils.make_dir(sorted_dir)
for img_path in png_file_paths:
file_name = img_path.split("/")[-1]
if np.array(Image.open(img_path)).mean() > magic_img_mean - 10:
os.system(f"cp {img_path} {sorted_dir}{file_name}")
| [
"PIL.Image.open",
"numpy.array",
"os.system",
"libs.img_utils.ImageUtils",
"glob.glob"
] | [((820, 832), 'libs.img_utils.ImageUtils', 'ImageUtils', ([], {}), '()\n', (830, 832), False, 'from libs.img_utils import ImageUtils\n'), ((1366, 1404), 'glob.glob', 'glob.glob', (['f"""{rich_images_path}/*.png"""'], {}), "(f'{rich_images_path}/*.png')\n", (1375, 1404), False, 'import glob\n'), ((1424, 1462), 'glob.glob', 'glob.glob', (['f"""{magic_image_path}/*.png"""'], {}), "(f'{magic_image_path}/*.png')\n", (1433, 1462), False, 'import glob\n'), ((1576, 1597), 'numpy.array', 'np.array', (['magic_means'], {}), '(magic_means)\n', (1584, 1597), True, 'import numpy as np\n'), ((2042, 2093), 'os.system', 'os.system', (['f"""cp {img_path} {sorted_dir}{file_name}"""'], {}), "(f'cp {img_path} {sorted_dir}{file_name}')\n", (2051, 2093), False, 'import os\n'), ((1494, 1514), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (1504, 1514), False, 'from PIL import Image\n'), ((1691, 1711), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (1701, 1711), False, 'from PIL import Image\n'), ((1778, 1799), 'numpy.array', 'np.array', (['other_means'], {}), '(other_means)\n', (1786, 1799), True, 'import numpy as np\n'), ((1982, 2002), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (1992, 2002), False, 'from PIL import Image\n')] |
import os
import yaml
from enum import Enum
from typing import List
import pathlib
class Scale(Enum):
LINEAR = 0,
LOG = 1
def construct(scale_str: str):
if scale_str == "log":
return Scale.LOG
elif scale_str == "linear":
return Scale.LINEAR
else:
raise ValueError("inappropreate scale parameter")
class Type(Enum):
FLOAT = 0,
INT = 1,
STRING = 2,
DATASET = 3,
def construct(type_str: str):
if type_str == "float":
return Type.FLOAT
elif type_str == "int":
return Type.INT
elif type_str == "string":
return Type.STRING
elif type_str == "dataset":
return Type.DATASET
else:
raise ValueError("inappropreate type parameter")
class Param:
def __init__(self):
self.name: str = ""
self.type: Type = Type.FLOAT
self.range_from: float = 0.0
self.range_to: float = 1.0
self.scale: Scale = Scale.LINEAR
self.value: str = ""
self.is_redirect: bool = True
self.data_dir: pathlib.Path = pathlib.Path()
self.filename: str = ""
self.size: int = 0
class Config:
def _validate(self):
for param in self.param_list:
assert(param.range_from < param.range_to)
if param.scale == Scale.LOG:
assert(0 < param.range_from)
assert(0 < param.range_to)
elif param.type == 'dataset':
assert(os.path.exists(param.template.format(0)))
assert(os.path.exists(
param.template.format(param.dataset_size - 1)))
def __init__(self, yaml_filepath: str):
with open(yaml_filepath) as fin:
obj = yaml.safe_load(fin)
self.param_list: List[Param] = []
for param_yml in obj["param_list"]:
param = Param()
param.name = param_yml["name"]
param.type = Type.construct(param_yml["type"])
if param.type == Type.FLOAT:
param.range_from = float(param_yml["range_from"])
param.range_to = float(param_yml["range_to"])
param.scale = Scale.construct(param_yml["scale"])
elif param.type == Type.STRING:
param.value = param_yml["value"]
elif param.type == Type.DATASET:
param.is_redirect = param_yml["is_redirect"]
param.data_dir = pathlib.Path(param_yml["data_dir"])
param.filename = param_yml["filename"]
else:
param.range_from = int(param_yml["range_from"])
param.range_to = int(param_yml["range_to"])
param.scale = Scale.construct(param_yml["scale"])
self.param_list.append(param)
self.dataset_size = obj["dataset_size"]
self.number_of_iteration = int(obj["number_of_iteration"])
self.exec_path = obj["exec_path"]
self.parallel_job_size = obj["parallel_job_size"]
self.direction = "minimize" if obj["minimize"] else "maximize"
self._validate()
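# Illustrative YAML accepted by Config (field names follow the parser above; the
# concrete parameter names and values are assumptions):
#
#   param_list:
#     - {name: lr, type: float, range_from: 1.0e-4, range_to: 1.0, scale: log}
#     - {name: batch_size, type: int, range_from: 16, range_to: 256, scale: linear}
#     - {name: optimizer, type: string, value: adam}
#   dataset_size: 10
#   number_of_iteration: 100
#   exec_path: ./run.sh
#   parallel_job_size: 4
#   minimize: true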
| [
"yaml.safe_load",
"pathlib.Path"
] | [((1141, 1155), 'pathlib.Path', 'pathlib.Path', ([], {}), '()\n', (1153, 1155), False, 'import pathlib\n'), ((1798, 1817), 'yaml.safe_load', 'yaml.safe_load', (['fin'], {}), '(fin)\n', (1812, 1817), False, 'import yaml\n'), ((2559, 2594), 'pathlib.Path', 'pathlib.Path', (["param_yml['data_dir']"], {}), "(param_yml['data_dir'])\n", (2571, 2594), False, 'import pathlib\n')] |
import pathlib
from setuptools import find_packages, setup
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "wrappers/README.rst").read_text()
# This call to setup() does all the work
setup(
name="python-mano-wrappers",
version="1.1.1",
description="REST API Wrappers for various MANOs in compliance with ETSI SOL0005",
long_description=README,
long_description_content_type="text/x-rst",
url="https://github.com/CN-UPB/python-mano-wrappers",
author="PG-SCrAMbLE Team WP3",
author_email="<EMAIL>",
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
],
packages=find_packages(exclude=["tests"]),
install_requires=["requests"],
) | [
"setuptools.find_packages",
"pathlib.Path"
] | [((133, 155), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (145, 155), False, 'import pathlib\n'), ((768, 800), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['tests']"}), "(exclude=['tests'])\n", (781, 800), False, 'from setuptools import find_packages, setup\n')] |
"""This module contains the group of functions which carry out the pocess of
separate a sequence of spikes in bursts.
They share as input the sequence of spikes detected by some algorithm of spike
detection and returns a list of arrays which contains the times in which spikes
exists.
"""
import numpy as np
import math
def general_burst_detection(spks, method='', kwargs={}):
"""Burst detection is the problem of group the spikes into bursts
considering temporal information and temporal density. Usually is done with
spikes produced for the same element, but in this package we consider the
general situation of many elements.
Parameters
----------
spks: pandas.DataFrame
spikes format with columns: 'times', 'neuron', 'regime', 'descriptor'
method: str, optional
        which method to use. Depending on the selected method, we have to input
        the required variables.
kwargs: dict
parameters needed for the called functions selected by the method
parameter.
Returns
-------
bursts: list of lists
active times grouped by bursts.
"""
possible_methods = ['dummy']
method = method if method in possible_methods else ''
if method == 'dummy':
bursts = dummy_burst_detection(spks, **kwargs)
return bursts
def dummy_burst_detection(spks, t_max):
"""This algorithm works under the supposition that all the bursts are
separated at least by a gap with lenght which is a upper bound of the
bursts length.
Parameters
----------
spks: array-like, shape (Ns, variables)
the spikes descriptions.
t_max: int
        lower bound of gaps between bursts, or upper bound of the bursts' length.
It is expressed in index units.
Returns
-------
bursts: list of arrays
the active times in each burst.
"""
utimes = np.unique(spks[:, 0])
gaps = np.diff(utimes)
edges = np.where(gaps >= t_max)[0] + 1
edges = np.hstack([[0], edges, [utimes.shape[0]]])
bursts = []
for i in range(edges.shape[0]-1):
bursts.append(utimes[edges[i]:edges[i+1]])
return bursts
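# Example with illustrative values: spikes at times 0, 1, 2 and 10, 11 with
# t_max=5 split into two bursts, because the gap of 8 between 2 and 10 is >= t_max:
#
# >>> spks = np.array([[0, 0], [1, 1], [2, 0], [10, 2], [11, 1]])
# >>> dummy_burst_detection(spks, t_max=5)
# [array([0, 1, 2]), array([10, 11])]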
############################## Under supervision ##############################
###############################################################################
def kleinberg_burst_detection(spks, s=2, gamma=1):
import pybursts
utimes = list(spks[:, 0])
bursts = pybursts.kleinberg(utimes, s, gamma)
return bursts
############################ Under implementation #############################
###############################################################################
def kleinberg(spks, s=2, gamma=1):
# Control of inputs
if s <= 1:
raise ValueError("s must be greater than 1!")
if gamma <= 0:
raise ValueError("gamma must be positive!")
utimes = np.unique(spks[:, 0])
utimes = np.sort(utimes)
# Return bursts for only 1 time
if utimes.size == 1:
bursts = [utimes[0]]
return bursts
# Computation of gaps
gaps = np.diff(utimes)
# Computation of needed magnitudes
T = np.sum(gaps)
n = np.size(gaps)
g_hat = T / n
k = int(math.ceil(float(1+math.log(T, s) + math.log(1/np.amin(gaps), s))))
gamma_log_n = gamma * math.log(n)
alpha_function = np.vectorize(lambda x: s ** x / g_hat)
alpha = alpha_function(np.arange(k))
# Definition complementary functions
def tau(i, j):
if i >= j:
return 0
else:
return (j - i) * gamma_log_n
def f(j, x):
return alpha[j] * math.exp(-alpha[j] * x)
# Intialization of C (?)
C = np.repeat(float("inf"), k)
C[0] = 0
q = np.empty((k, 0))
for t in range(n):
C_prime = np.repeat(float("inf"), k)
q_prime = np.empty((k, t+1))
q_prime.fill(np.nan)
for j in range(k):
cost_function = np.vectorize(lambda x: C[x] + tau(x, j))
cost = cost_function(np.arange(0, k))
el = np.argmin(cost)
if f(j, gaps[t]) > 0:
C_prime[j] = cost[el] - math.log(f(j, gaps[t]))
if t > 0:
q_prime[j, :t] = q[el, :]
q_prime[j, t] = j + 1
C = C_prime
q = q_prime
j = np.argmin(C)
q = q[j, :]
prev_q = 0
N = 0
for t in range(n):
if q[t] > prev_q:
N = N + q[t] - prev_q
prev_q = q[t]
bursts = np.array([np.repeat(np.nan, N), np.repeat(utimes[0], N),
np.repeat(utimes[0], N)], ndmin=2,
dtype=object).transpose()
burst_counter = -1
prev_q = 0
stack = np.repeat(np.nan, N)
stack_counter = -1
for t in range(n):
if q[t] > prev_q:
num_levels_opened = q[t] - prev_q
for i in range(int(num_levels_opened)):
burst_counter += 1
bursts[burst_counter, 0] = prev_q + i
bursts[burst_counter, 1] = utimes[t]
stack_counter += 1
stack[stack_counter] = burst_counter
elif q[t] < prev_q:
num_levels_closed = prev_q - q[t]
for i in range(int(num_levels_closed)):
bursts[stack[stack_counter], 2] = utimes[t]
stack_counter -= 1
prev_q = q[t]
while stack_counter >= 0:
bursts[stack[stack_counter], 2] = utimes[n]
stack_counter -= 1
return bursts
| [
"numpy.repeat",
"numpy.unique",
"numpy.amin",
"numpy.hstack",
"numpy.where",
"numpy.sort",
"numpy.size",
"numpy.diff",
"pybursts.kleinberg",
"math.log",
"numpy.sum",
"numpy.empty",
"numpy.argmin",
"math.exp",
"numpy.vectorize",
"numpy.arange"
] | [((1882, 1903), 'numpy.unique', 'np.unique', (['spks[:, 0]'], {}), '(spks[:, 0])\n', (1891, 1903), True, 'import numpy as np\n'), ((1915, 1930), 'numpy.diff', 'np.diff', (['utimes'], {}), '(utimes)\n', (1922, 1930), True, 'import numpy as np\n'), ((1987, 2029), 'numpy.hstack', 'np.hstack', (['[[0], edges, [utimes.shape[0]]]'], {}), '([[0], edges, [utimes.shape[0]]])\n', (1996, 2029), True, 'import numpy as np\n'), ((2431, 2467), 'pybursts.kleinberg', 'pybursts.kleinberg', (['utimes', 's', 'gamma'], {}), '(utimes, s, gamma)\n', (2449, 2467), False, 'import pybursts\n'), ((2861, 2882), 'numpy.unique', 'np.unique', (['spks[:, 0]'], {}), '(spks[:, 0])\n', (2870, 2882), True, 'import numpy as np\n'), ((2896, 2911), 'numpy.sort', 'np.sort', (['utimes'], {}), '(utimes)\n', (2903, 2911), True, 'import numpy as np\n'), ((3063, 3078), 'numpy.diff', 'np.diff', (['utimes'], {}), '(utimes)\n', (3070, 3078), True, 'import numpy as np\n'), ((3127, 3139), 'numpy.sum', 'np.sum', (['gaps'], {}), '(gaps)\n', (3133, 3139), True, 'import numpy as np\n'), ((3148, 3161), 'numpy.size', 'np.size', (['gaps'], {}), '(gaps)\n', (3155, 3161), True, 'import numpy as np\n'), ((3320, 3358), 'numpy.vectorize', 'np.vectorize', (['(lambda x: s ** x / g_hat)'], {}), '(lambda x: s ** x / g_hat)\n', (3332, 3358), True, 'import numpy as np\n'), ((3711, 3727), 'numpy.empty', 'np.empty', (['(k, 0)'], {}), '((k, 0))\n', (3719, 3727), True, 'import numpy as np\n'), ((4286, 4298), 'numpy.argmin', 'np.argmin', (['C'], {}), '(C)\n', (4295, 4298), True, 'import numpy as np\n'), ((4673, 4693), 'numpy.repeat', 'np.repeat', (['np.nan', 'N'], {}), '(np.nan, N)\n', (4682, 4693), True, 'import numpy as np\n'), ((3286, 3297), 'math.log', 'math.log', (['n'], {}), '(n)\n', (3294, 3297), False, 'import math\n'), ((3386, 3398), 'numpy.arange', 'np.arange', (['k'], {}), '(k)\n', (3395, 3398), True, 'import numpy as np\n'), ((3814, 3834), 'numpy.empty', 'np.empty', (['(k, t + 1)'], {}), '((k, t + 1))\n', (3822, 3834), True, 'import numpy as np\n'), ((1944, 1967), 'numpy.where', 'np.where', (['(gaps >= t_max)'], {}), '(gaps >= t_max)\n', (1952, 1967), True, 'import numpy as np\n'), ((3600, 3623), 'math.exp', 'math.exp', (['(-alpha[j] * x)'], {}), '(-alpha[j] * x)\n', (3608, 3623), False, 'import math\n'), ((4026, 4041), 'numpy.argmin', 'np.argmin', (['cost'], {}), '(cost)\n', (4035, 4041), True, 'import numpy as np\n'), ((3992, 4007), 'numpy.arange', 'np.arange', (['(0)', 'k'], {}), '(0, k)\n', (4001, 4007), True, 'import numpy as np\n'), ((4470, 4490), 'numpy.repeat', 'np.repeat', (['np.nan', 'N'], {}), '(np.nan, N)\n', (4479, 4490), True, 'import numpy as np\n'), ((4492, 4515), 'numpy.repeat', 'np.repeat', (['utimes[0]', 'N'], {}), '(utimes[0], N)\n', (4501, 4515), True, 'import numpy as np\n'), ((4539, 4562), 'numpy.repeat', 'np.repeat', (['utimes[0]', 'N'], {}), '(utimes[0], N)\n', (4548, 4562), True, 'import numpy as np\n'), ((3211, 3225), 'math.log', 'math.log', (['T', 's'], {}), '(T, s)\n', (3219, 3225), False, 'import math\n'), ((3239, 3252), 'numpy.amin', 'np.amin', (['gaps'], {}), '(gaps)\n', (3246, 3252), True, 'import numpy as np\n')] |
import numpy as np
from sklearn.metrics import classification_report
from preprocessing import Preprocessor
import time
class NaiveBayes:
def __init__(self):
self.labels = set()
self.word_counts = {}
self.priors = {}
self.likelihoods = {}
def train(self, train_x, train_y):
self.labels = set(train_y)
self.word_counts = {label:{} for label in self.labels}
self.priors = {label:0 for label in self.labels}
self.likelihoods = {label:{} for label in self.labels}
self.total_words = 0
for tweet,label in zip(train_x,train_y):
self.priors[label] += 1
for word in tweet.split():
self.total_words += 1
if word in self.word_counts[label]:
self.word_counts[label][word] += 1
else:
self.word_counts[label][word] = 1
self.priors = {l:self.priors[l]/len(train_y) for l in self.labels}
self.likelihoods = {
l : {
w : (self.word_counts[l][w]+1)/(self.total_words+sum(self.word_counts[l].values())) \
for w in self.word_counts[l]
} for l in self.labels
}
def predict(self, tweet):
best = {'label':None, 'score':np.inf}
for l in self.labels:
score = -np.log(self.priors[l])
for w in tweet.split():
if w in self.likelihoods[l]:
score += -np.log(self.likelihoods[l][w])
else:
score += -np.log(1.0/(self.total_words+sum(self.word_counts[l].values())))
if score < best['score']:
best['score'],best['label'] = score,l
return best['label']
def test(self, test_x):
return [self.predict(tweet) for tweet in test_x]
def acc(self, y_preds, y_true):
return sum([yp==yt for yp,yt in zip(y_preds,y_true)])/len(y_preds)
if __name__ == "__main__":
train_options = {
"train_data_path": "data/OLIDv1.0/olid-training-v1.0_clean.tsv",
"test_tweet_path": "data/OLIDv1.0/testset-levela_clean.tsv",
"test_label_path": "data/OLIDv1.0/labels-levela.csv",
"sample_size":1,
"seed":1
}
print('='*40)
print('task B')
print('loading data...')
pp = Preprocessor()
train_x, train_y = pp.get_train_data(train_options["train_data_path"],
sample=train_options['sample_size'],
seed=train_options['seed'],
task='subtask_a')
test_x, test_y = pp.get_test_data(train_options['test_tweet_path'],
train_options['test_label_path'])
print('='*40)
print()
print('='*40)
print('training model...')
start = time.time()
model = NaiveBayes()
model.train(train_x=train_x, train_y=train_y)
end = time.time()
print(f'took {round(end-start,2)}s')
print('predicting...')
start = time.time()
preds = model.test(test_x=test_x)
end = time.time()
print(f'took {round(end-start,2)}s')
print('='*40)
print()
results = classification_report(y_true=test_y, y_pred=preds, output_dict=True, digits=4)
print('='*40)
print('testing')
print(f'accuracy: \t{model.acc(preds,test_y)}')
print(f'precision: \t{results["macro avg"]["precision"]}')
print(f'recall: \t{results["macro avg"]["recall"]}')
print(f'F1-score: \t{results["macro avg"]["f1-score"]}')
print('='*40)
print()
# ========================================
# task A
# loading data...
# cleaning data...
# took 0.26s
# ========================================
# ========================================
# training model...
# took 2.8s
# predicting...
# took 0.36s
# ========================================
# ========================================
# testing
# accuracy: 0.7395348837209302
# precision: 0.7725511898173769
# recall: 0.5397177419354838
# F1-score: 0.5019184825888655
# ========================================
| [
"sklearn.metrics.classification_report",
"preprocessing.Preprocessor",
"numpy.log",
"time.time"
] | [((2379, 2393), 'preprocessing.Preprocessor', 'Preprocessor', ([], {}), '()\n', (2391, 2393), False, 'from preprocessing import Preprocessor\n'), ((2919, 2930), 'time.time', 'time.time', ([], {}), '()\n', (2928, 2930), False, 'import time\n'), ((3016, 3027), 'time.time', 'time.time', ([], {}), '()\n', (3025, 3027), False, 'import time\n'), ((3108, 3119), 'time.time', 'time.time', ([], {}), '()\n', (3117, 3119), False, 'import time\n'), ((3168, 3179), 'time.time', 'time.time', ([], {}), '()\n', (3177, 3179), False, 'import time\n'), ((3270, 3348), 'sklearn.metrics.classification_report', 'classification_report', ([], {'y_true': 'test_y', 'y_pred': 'preds', 'output_dict': '(True)', 'digits': '(4)'}), '(y_true=test_y, y_pred=preds, output_dict=True, digits=4)\n', (3291, 3348), False, 'from sklearn.metrics import classification_report\n'), ((1370, 1392), 'numpy.log', 'np.log', (['self.priors[l]'], {}), '(self.priors[l])\n', (1376, 1392), True, 'import numpy as np\n'), ((1504, 1534), 'numpy.log', 'np.log', (['self.likelihoods[l][w]'], {}), '(self.likelihoods[l][w])\n', (1510, 1534), True, 'import numpy as np\n')] |
from collections import Counter
import spacy
nlp = spacy.load("en_core_web_md")
corpus = open("data/atis_utterances.txt", "r").read().split("\n")
all_ent_labels = []
for sentence in corpus:
doc = nlp(sentence.strip())
ents = doc.ents
all_ent_labels += [ent.label_ for ent in ents]
c = Counter(all_ent_labels)
print(c)
| [
"spacy.load",
"collections.Counter"
] | [((54, 82), 'spacy.load', 'spacy.load', (['"""en_core_web_md"""'], {}), "('en_core_web_md')\n", (64, 82), False, 'import spacy\n'), ((311, 334), 'collections.Counter', 'Counter', (['all_ent_labels'], {}), '(all_ent_labels)\n', (318, 334), False, 'from collections import Counter\n')] |
import honeycomb_io.core
import minimal_honeycomb
import pandas as pd
import numpy as np
import logging
logger = logging.getLogger(__name__)
def fetch_persons(
person_ids=None,
person_types=None,
names=None,
first_names=None,
last_names=None,
nicknames=None,
short_names=None,
anonymized_names=None,
anonymized_first_names=None,
anonymized_last_names=None,
anonymized_nicknames=None,
anonymized_short_names=None,
environment_id=None,
environment_name=None,
start=None,
end=None,
output_format='list',
chunk_size=100,
client=None,
uri=None,
token_uri=None,
audience=None,
client_id=None,
client_secret=None
):
if (
person_ids is not None or
person_types is not None or
names is not None or
first_names is not None or
last_names is not None or
nicknames is not None or
short_names is not None or
anonymized_names is not None or
anonymized_first_names is not None or
anonymized_last_names is not None or
anonymized_nicknames is not None or
anonymized_short_names is not None
):
query_list = list()
if person_ids is not None:
query_list.append(
{'field': 'person_id', 'operator': 'CONTAINED_BY', 'values': person_ids}
)
if person_types is not None:
query_list.append(
{'field': 'person_type', 'operator': 'CONTAINED_BY', 'values': person_types}
)
if names is not None:
query_list.append(
{'field': 'name', 'operator': 'CONTAINED_BY', 'values': names}
)
if first_names is not None:
query_list.append(
{'field': 'first_name', 'operator': 'CONTAINED_BY', 'values': first_names}
)
if last_names is not None:
query_list.append(
{'field': 'last_name', 'operator': 'CONTAINED_BY', 'values': last_names}
)
if nicknames is not None:
query_list.append(
{'field': 'nickname', 'operator': 'CONTAINED_BY', 'values': nicknames}
)
if short_names is not None:
query_list.append(
{'field': 'short_name', 'operator': 'CONTAINED_BY', 'values': short_names}
)
if anonymized_names is not None:
query_list.append(
                {'field': 'anonymized_name', 'operator': 'CONTAINED_BY', 'values': anonymized_names}
)
if anonymized_first_names is not None:
query_list.append(
{'field': 'anonymized_first_name', 'operator': 'CONTAINED_BY', 'values': anonymized_first_names}
)
if anonymized_last_names is not None:
query_list.append(
{'field': 'anonymized_last_name', 'operator': 'CONTAINED_BY', 'values': anonymized_last_names}
)
        if anonymized_nicknames is not None:
query_list.append(
{'field': 'anonymized_nickname', 'operator': 'CONTAINED_BY', 'values': anonymized_nicknames}
)
if anonymized_short_names is not None:
query_list.append(
{'field': 'anonymized_short_name', 'operator': 'CONTAINED_BY', 'values': anonymized_short_names}
)
return_data = [
'person_id',
'person_type',
'name',
'first_name',
'last_name',
'nickname',
'short_name',
'anonymized_name',
'anonymized_first_name',
'anonymized_last_name',
'anonymized_nickname',
'anonymized_short_name',
'transparent_classroom_id',
{'assignments': [
'assignment_id',
'start',
'end',
{'environment': [
'environment_id',
'name'
]}
]}
]
logger.info('Fetching persons with specified person characteristics')
persons=honeycomb_io.core.search_objects(
object_name='Person',
query_list=query_list,
return_data=return_data,
chunk_size=chunk_size,
client=client,
uri=uri,
token_uri=token_uri,
audience=audience,
client_id=client_id,
client_secret=client_secret
)
logger.info('Fetched {} persons with specified person characteristics'.format(
len(persons)
))
logger.info('Filtering based on specified assignment characteristics')
filtered_persons = list(filter(
lambda person: len(honeycomb_io.environments.filter_assignments(
assignments=person.get('assignments', []),
environment_id=environment_id,
environment_name=environment_name,
start=start,
end=end
)) > 0,
persons
))
logger.info('Found {} persons with specified assignment characteristics'.format(
len(filtered_persons)
))
return_list = filtered_persons
else:
# No person characteristics were specified, so we search assignments instead
if environment_id is None:
if environment_name is not None:
logger.info('Fetching environment ID for environment name \'{}\''.format(
environment_name
))
environment_id = honeycomb_io.fetch_environment_id(
environment_name=environment_name,
client=client,
uri=uri,
token_uri=token_uri,
audience=audience,
client_id=client_id,
client_secret=client_secret
)
query_list = list()
if environment_id is not None:
query_list.append(
{'field': 'environment', 'operator': 'EQ', 'value': environment_id}
)
if start is not None:
query_list.append(
{'operator': 'OR', 'children': [
{'field': 'end', 'operator': 'ISNULL'},
{'field': 'end', 'operator': 'GTE', 'value': honeycomb_io.utils.to_honeycomb_datetime(start)}
]}
)
if end is not None:
query_list.append(
{'field': 'start', 'operator': 'LTE', 'value': honeycomb_io.utils.to_honeycomb_datetime(end)}
)
if query_list is None:
            logger.warning('No criteria specified for person search. Returning no persons')
return list()
query_list.append(
{'field': 'assigned_type', 'operator': 'EQ', 'value': 'PERSON'}
)
return_data=[
'assignment_id',
'start',
'end',
{'environment': [
'environment_id',
'name'
]},
{'assigned': [
{'... on Person': [
'person_id',
'person_type',
'name',
'first_name',
'last_name',
'nickname',
'short_name',
'anonymized_name',
'anonymized_first_name',
'anonymized_last_name',
'anonymized_nickname',
'anonymized_short_name',
'transparent_classroom_id'
]}
]}
]
assignments = honeycomb_io.core.search_objects(
object_name='Assignment',
query_list=query_list,
return_data=return_data,
chunk_size=chunk_size,
client=client,
uri=uri,
token_uri=token_uri,
audience=audience,
client_id=client_id,
client_secret=client_secret
)
person_dict = dict()
for assignment in assignments:
person_id = assignment.get('assigned').get('person_id')
if person_id not in person_dict.keys():
person = assignment.get('assigned')
assignment = {
'assignment_id': assignment.get('assignment_id'),
'start': assignment.get('start'),
'end': assignment.get('end'),
'environment': assignment.get('environment')
}
person['assignments'] = [assignment]
person_dict[person_id] = person
else:
assignment = {
'assignment_id': assignment.get('assignment_id'),
'start': assignment.get('start'),
'end': assignment.get('end'),
'environment': assignment.get('environment')
}
person_dict[person_id]['assignments'].append(assignment)
persons = list(person_dict.values())
return_list = persons
if output_format =='list':
return return_list
elif output_format == 'dataframe':
return generate_person_dataframe(return_list)
else:
raise ValueError('Output format {} not recognized'.format(output_format))
def generate_person_dataframe(
persons
):
if len(persons) == 0:
persons = [dict()]
flat_list = list()
for person in persons:
flat_list.append({
'person_id': person.get('person_id'),
'person_type': person.get('person_type'),
'name': person.get('name'),
'first_name': person.get('first_name'),
'last_name': person.get('last_name'),
'nickname': person.get('nickname'),
'short_name': person.get('short_name'),
'anonymized_name': person.get('anonymized_name'),
'anonymized_first_name': person.get('anonymized_first_name'),
'anonymized_last_name': person.get('anonymized_last_name'),
'anonymized_nickname': person.get('anonymized_nickname'),
'anonymized_short_name': person.get('anonymized_short_name'),
'transparent_classroom_id': person.get('transparent_classroom_id')
})
df = pd.DataFrame(flat_list, dtype='string')
df.set_index('person_id', inplace=True)
return df
# Used by:
# process_pose_data.local_io (wf-process-pose-data)
def fetch_person_info(
environment_id,
client=None,
uri=None,
token_uri=None,
audience=None,
client_id=None,
client_secret=None
):
client = honeycomb_io.core.generate_client(
client=client,
uri=uri,
token_uri=token_uri,
audience=audience,
client_id=client_id,
client_secret=client_secret
)
result = client.bulk_query(
request_name='findAssignments',
arguments={
'environment': {
'type': 'ID',
'value': environment_id
},
'assigned_type': {
'type': 'AssignableTypeEnum',
'value': 'PERSON'
},
},
return_data=[
'assignment_id',
{'assigned': [
{'... on Person': [
'person_id',
'name',
'short_name',
'anonymized_name',
'anonymized_short_name'
]}
]}
],
id_field_name='assignment_id'
)
data_list = list()
for assignment in result:
data_list.append({
'person_id': assignment.get('assigned', {}).get('person_id'),
'name': assignment.get('assigned', {}).get('name'),
'short_name': assignment.get('assigned', {}).get('short_name'),
'anonymized_name': assignment.get('assigned', {}).get('anonymized_name'),
'anonymized_short_name': assignment.get('assigned', {}).get('anonymized_short_name')
})
person_info_df = pd.DataFrame(data_list)
person_info_df.set_index('person_id', inplace=True)
return person_info_df
| [
"logging.getLogger",
"pandas.DataFrame"
] | [((114, 141), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (131, 141), False, 'import logging\n'), ((10392, 10431), 'pandas.DataFrame', 'pd.DataFrame', (['flat_list'], {'dtype': '"""string"""'}), "(flat_list, dtype='string')\n", (10404, 10431), True, 'import pandas as pd\n'), ((12162, 12185), 'pandas.DataFrame', 'pd.DataFrame', (['data_list'], {}), '(data_list)\n', (12174, 12185), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 31 21:53:09 2019
@author: Louis
"""
import numpy as np
class mass2mol:
"""
A Python implementation of the algorithm developed by <NAME> and <NAME> (A Fast and Simple Algorithm for the Money Changing Problem, Algorithmica, 2007, 48, 413–432)
    The mass2mol object generates the extended residue table for CHNOPS with a blowup factor of 10^5 upon initialisation.
    The combinations of CHNOPS atoms which have the same monoisotopic mass as a given mass can then be calculated using the find_formula method.
"""
def __gcd(self, a, b):
"""Calculate the Greatest Common Divisor of a and b.
Unless b==0, the result will have the same sign as b (so that when
b is divided by it, the result comes out positive).
"""
while b:
a, b = b, a%b
return a
def __lcm(self, a, b):
"""
Calculate the least common multiple of a and b.
"""
return abs(a * b)//self.__gcd(a, b)
def __find_all(self, M, i, c, output, ppm):
"""
Find all decompositions using the extended residue table
"""
if i == 0:
c[0] = M//self.a[0]
output.append((c[:], ppm))
return
LCM = self.__lcm(self.a[0], self.a[i])
l = LCM//self.a[i]
for j in range(l):
c[i] = j
m = M - j * self.a[i]
r = m % self.a[0]
lbound = self.ERT[r, i-1]
while (m >= lbound) and (lbound != -1) :
self.__find_all(m, i-1, c, output, ppm)
m = m - LCM
c[i] = c[i] + l
return
def __gen_ERT(self):
"""
Generate extended residue table
"""
k = len(self.a)
self.ERT = -np.ones((self.a[0], k), dtype='int64') # Used -1 in place of infinities: requires more computational steps to check this. Better to use unsigned int - 1?
self.ERT[0, ] = 0
for i in range(1, k):
self.ERT[:,i] = self.ERT[:, i-1]
d = self.__gcd(self.a[0], self.a[i])
for p in range(d):
n = [self.ERT[q, i-1] for q in range(self.a[0]) if (q % d == p and self.ERT[q, i-1] != -1)]
if n == []:
n = -1
else:
n = min(n)
for x in range(1, self.a[0]//d):
n = n + self.a[i]
r = n % self.a[0]
if self.ERT[r, i-1] != -1:
if n < self.ERT[r, i-1]:
self.ERT[r, i] = n
else:
n = self.ERT[r, i-1]
else:
self.ERT[r, i] = n
def __init__(self, ppm=5):
"""
Initialise mass2mol object with masses of CHNOPS and blowup factor.
Generate extended residue table for these masses.
"""
self.a = [100783, 1200000, 1400307, 1599492, 3097376, 3197207]
self.blowup = 100000
self.alphabet = 'HCNOPS'
self.storage = []
self.ppm = ppm
self.__gen_ERT()
def __formula(self, x):
"""
Function to generate chemical formula string from list of numbers of atoms.
"""
output = ''
if x[1] != 0:
output = output + 'C' + str(x[1])
if x[0]!= 0:
output = output + 'H' + str(x[0])
for i in range(2,len(x)):
if x[i] != 0:
output = output + self.alphabet[i] + str(x[i])
return output
def find_formula(self, mass):
"""
Function to calculate all possible combinations of atoms which have a combined mass equal to the given mass, within a certain error range.
Returns a list of tuples consisting of the chemical formula followed by the difference from the given mass in parts per million.
"""
mass = int(mass * self.blowup)
error = int(mass/1000000 * self.ppm)
output = []
for m in range(mass-error, mass + error +1):
ppm = (m - mass)/mass * 1000000
self.__find_all(m, len(self.a)-1, [0]*len(self.a), output, ppm)
formula_list = []
for x in output:
formula_list.append((self.__formula(x[0]), x[1]))
return formula_list | [
"numpy.ones"
] | [((1904, 1942), 'numpy.ones', 'np.ones', (['(self.a[0], k)'], {'dtype': '"""int64"""'}), "((self.a[0], k), dtype='int64')\n", (1911, 1942), True, 'import numpy as np\n')] |
"""
author: <EMAIL>
last edit: Oct 21, 2010
Python directory changer.
This script displays a directory tree. By single-clicking on a '+' or '-' sign
one can expand or collapse an item. Double-clicking an item that is a
directory makes the script print its location to standard output and then
exit.
The idea is to create a script to start a graphical directory browser
from a terminal window and switch the terminal it is started from
to the new directory.
Since a script cannot change the environment of its parent we need a
little trick to make this work.
Create an alias (for example in .bashrc or in .bash_aliases) like this:
alias pycd='python ~/pycd.py > ~/pycd.out && . ~/pycd.out'
(This alias expects pycd.py to be in the user's home directory, so put
it there or adapt the alias)
Now typing 'pycd' in a terminal (without the quotes) opens the browser
and redirects the script's output to the file 'pycd.out', and then (after the
script has exited) executes the cd command in that file, in the shell.
The result is, we end up in the desired directory.
The script needs the TreeWidget from idle, so one should
install idle:
sudo aptitude install idle
(or equivalent)
If necessary, adjust the sys.path.append line, to make it point
to your idlelib location.
"""
import sys
import os
from string import replace
from Tkinter import *
sys.path.append(r'/usr/lib/python2.6/idlelib')
from TreeWidget import FileTreeItem, TreeNode, ScrolledCanvas
class MyFileTreeItem(FileTreeItem):
def GetSubList(self):
try:
names = os.listdir(self.path)
except os.error:
return []
names.sort(lambda a, b: cmp(os.path.normcase(a).lower(), os.path.normcase(b).lower()))
sublist = []
for name in names:
item = MyFileTreeItem(os.path.join(self.path, name))
sublist.append(item)
return sublist
def OnDoubleClick(self):
if self.IsExpandable():
sys.stdout.write('cd %s' %(replace(self.path,' ','\ ')))
sys.exit()
def test():
root = Tk()
sys.exitfunc = root.quit
root.configure(bd=0, bg="yellow")
root.title("terminal directory changer")
root.focus_set()
sc = ScrolledCanvas(root, bg="white", highlightthickness=0, takefocus=1)
sc.frame.pack(expand=1, fill="both")
item = MyFileTreeItem('/')
node = TreeNode(sc.canvas, None, item)
node.expand()
root.mainloop()
if __name__=='__main__':
test()
| [
"os.listdir",
"TreeWidget.TreeNode",
"string.replace",
"TreeWidget.ScrolledCanvas",
"os.path.join",
"sys.exit",
"os.path.normcase",
"sys.path.append"
] | [((1391, 1436), 'sys.path.append', 'sys.path.append', (['"""/usr/lib/python2.6/idlelib"""'], {}), "('/usr/lib/python2.6/idlelib')\n", (1406, 1436), False, 'import sys\n'), ((2260, 2327), 'TreeWidget.ScrolledCanvas', 'ScrolledCanvas', (['root'], {'bg': '"""white"""', 'highlightthickness': '(0)', 'takefocus': '(1)'}), "(root, bg='white', highlightthickness=0, takefocus=1)\n", (2274, 2327), False, 'from TreeWidget import FileTreeItem, TreeNode, ScrolledCanvas\n'), ((2411, 2442), 'TreeWidget.TreeNode', 'TreeNode', (['sc.canvas', 'None', 'item'], {}), '(sc.canvas, None, item)\n', (2419, 2442), False, 'from TreeWidget import FileTreeItem, TreeNode, ScrolledCanvas\n'), ((1602, 1623), 'os.listdir', 'os.listdir', (['self.path'], {}), '(self.path)\n', (1612, 1623), False, 'import os\n'), ((2078, 2088), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2086, 2088), False, 'import sys\n'), ((1848, 1877), 'os.path.join', 'os.path.join', (['self.path', 'name'], {}), '(self.path, name)\n', (1860, 1877), False, 'import os\n'), ((2036, 2066), 'string.replace', 'replace', (['self.path', '""" """', '"""\\\\ """'], {}), "(self.path, ' ', '\\\\ ')\n", (2043, 2066), False, 'from string import replace\n'), ((1707, 1726), 'os.path.normcase', 'os.path.normcase', (['a'], {}), '(a)\n', (1723, 1726), False, 'import os\n'), ((1736, 1755), 'os.path.normcase', 'os.path.normcase', (['b'], {}), '(b)\n', (1752, 1755), False, 'import os\n')] |
# function that performs all the steps
# of building the network
# and delivers the resulting objects properly
import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil
from nltk.corpus import wordnet as wn
import builtins as B
TT=time.time()
def check(amsg="string message"):
global TT
print(amsg, time.time()-TT); TT=time.time()
B.me=[]
B.tt_=[]
B.tt=[]
B.degen=[]
B.nonenglish=[]
puncts=set(string.punctuation)
#w=open("./wordsEn.txt","r")
#w=w.read()
#WL=w.split()
WL2=k.corpus.words.words()
#w=open("./wordlist.txt","r")
this_dir, this_filename = os.path.split(__file__)
#DATA_PATH = os.path.join(this_dir, "data", "data.txt")
w=open(os.path.join(this_dir,"words.txt"),"r")
# https://raw.githubusercontent.com/dwyl/english-words/master/words.txt
w=w.read()
WL=w.split()
labelsh=("","g.","p.","i.","h.")
#WL.append("email")
#WL.append("e-mail")
#WL.append("having")
WL_=set(WL)
WLP=k.corpus.floresta.words()
WLP_=set(WLP)
stopwords=set(k.corpus.stopwords.words("english"))
stopwordsP=set(k.corpus.stopwords.words("portuguese"))
f=open(os.path.join(this_dir,"pickledir/brill_taggerT2M1"),"rb")
brill_tagger=pickle.load(f)
f.close()
DL=g.tableHelpers.dl
ME=g.tableHelpers.me
replacement_patterns = [
(r'won\'t', 'will not'),
(r'can\'t', 'can not'),
(r'i\'m', 'i am'),
(r'ain\'t', 'is not'),
(r'(\w+)\'ll', '\g<1> will'),
(r'(\w+)n\'t', '\g<1> not'),
(r'(\w+)\'ve', '\g<1> have'),
(r'(\w+)\'s', '\g<1> is'),
(r'(\w+)\'re', '\g<1> are'),
(r'(\w+)\'d', '\g<1> would')
]
class RegexpReplacer(object):
def __init__(self, patterns=replacement_patterns):
self.patterns = [(re.compile(regex), repl) for (regex, repl) in patterns]
def replace(self, text):
s = text
count_=0
for (pattern, repl) in self.patterns:
(s, count) = re.subn(pattern, repl, s)
count_+=count
return s, count_
REPLACER=RegexpReplacer()
R=REPLACER.replace
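# Illustrative call of the replacer defined above (a sketch, not original code):
#   R("we can't go, they won't stay")
#   # -> ("we can not go, they will not stay", 2): the expanded text and the number of substitutions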
def pDump(tobject,tfilename):
with open(tfilename,"wb") as f:
pickle.dump(tobject,f,-1)
class EmptyClass:
pass
class EmailStructures:
"""Class that makes all basic structures for a given email list"""
def __init__(self,list_id,n_messages,text="yes",offset=0,basedir="~/.gmane3/"):
TT=time.time()
lid=list_id; TOTAL_M=n_messages
lm=g.LoadMessages(lid,TOTAL_M,offset=offset, basedir=basedir)
ds=g.ListDataStructures(lm,text="yes")
print(lid+"{0:.2f} for loading messages and making datastructures".format(time.time()-TT)); TT=time.time()
print(lid+"{0:.2f} for data structures".format(time.time()-TT)); TT=time.time()
ts=g.TimeStatistics(ds)
print("{0:.2f} for statistics along time".format(time.time()-TT)); TT=time.time()
iN=g.InteractionNetwork(ds)
nm=g.NetworkMeasures(iN,exclude=["rich_club"])
print("{0:.2f} for network measures".format(time.time()-TT)); TT=time.time()
if nm.N < 5:
# network is too small
# so make it disposable by makeTables_
np2_ = EmptyClass()
np2_.sectorialized_agents__ = [[], [], []]
else:
np2_=g.NetworkPartitioning(nm,2,"g")
del np2_.binomial
print("{0:.2f} for network partition".format(time.time()-TT)); TT=time.time()
self.structs=lm, ds, ts, iN, nm, np2_
def perc_(alist):
if type(alist) in (type([1,2]), type((2,4))):
return [100*i/sum(alist[1:]) for i in alist]
else:
return 100*alist/alist[1:].sum()
def perc(alist):
if type(alist) in (type([1,2]), type((2,4))):
return [100*i/sum(alist) for i in alist]
else:
return 100*alist/alist.sum()
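# Worked examples (added for illustration): perc([1, 3]) -> [25.0, 75.0], while
# perc_([2, 1, 1]) normalises by the sum of all entries after the first -> [100.0, 50.0, 50.0].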
def digRoot(msgid,ds):
layers=[[msgid]]
while len(layers[-1]):
layer=layers[-1]
layers+=[[]]
for mid in layer:
if mid in ds.responses.keys():
layers[-1]+=[i[0] for i in ds.responses[mid]]
return layers,len(layers)
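# Small worked example (added for illustration): for a root message with two direct
# replies and no deeper replies, digRoot returns ([[root_id], [r1, r2], []], 3),
# i.e. the breadth-first layers of the thread and its depth in layers.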
def generalMeasures(ds,np,ts):
"""Return overall measures from list datastructures and network partitioning"""
#date1=ds.messages[ds.message_ids[0]][2].isoformat().split("T")[0]
#date2=ds.messages[ds.message_ids[-1]][2].isoformat().split("T")[0]
dt=ts.datetimes
primeira,ultima=dt[0],dt[-1]
date1=primeira.isoformat()[:-6]
date2=ultima.isoformat( )[:-6]
deltaAnos=(ultima-primeira)
deltaAnos_=deltaAnos.days/365.2425
N=ds.n_authors
Ns=[len(i) for i in np.sectorialized_agents__]
Ns_=perc(Ns)
M_=ds.n_messages-ds.n_empty
M=ds.n_messages
#Mh=sum([len(ds.author_messages[author]) for author in np.sectorialized_agents__[2]])
#Mi=sum([len(ds.author_messages[author]) for author in np.sectorialized_agents__[1]])
#Mp=sum([len(ds.author_messages[author]) for author in np.sectorialized_agents__[0]])
#Ms=[Mh,Mi,Mp][::-1]
Ms=[sum([len(ds.author_messages[i]) for i in j])
for j in np.sectorialized_agents__]
#M2=[100*i/ds.n_messages for i in Ms]
Ms_=perc(Ms)
NM=N/M
NM_=100*NM
NM_missing=M_/N
NM_missing_=100*NM_missing
NMs=[i/j if j!=0 else n.inf for i,j in zip(Ns,Ms)]
NMs_=perc(NMs)
#idsh=[i[0] for j in np.sectorialized_agents__[2] for i in ds.author_messages[j] if ds.messages[i[0]][1]==None]
#idsi=[i[0] for j in np.sectorialized_agents__[1] for i in ds.author_messages[j] if ds.messages[i[0]][1]==None]
#idsp=[i[0] for j in np.sectorialized_agents__[0] for i in ds.author_messages[j] if ds.messages[i[0]][1]==None]
#idsh_=len(idsh)
#idsi_=len(idsi)
#idsp_=len(idsp)
#ids=[idsh_,idsi_,idsp_][::-1]
#ids_=[100*ii/Gamma for ii in ids]
#Gamma=len([i for i in ds.message_ids if ds.messages[i][1]==None])
Gammas=[sum([len([i for i in ds.author_messages[aid] if i[1]==None])
for aid in sa]) for sa in np.sectorialized_agents__]
Gammas_=perc(Gammas)
G_=[100*i/j for i,j in zip(Gammas,Ms)]
# Gammas_==ids_
#roots=[[[i for i in ds.author_messages[aid] if i[1]==None]
# for aid in sa] for sa in pr.sectorialized_agents__]
#roots_=[i for j in roots for i in j]
    ## *) from each one of them, look for others that have
    ## it as a response, and so on,
    ## until no more responses are found; store the number of
    ## messages found
#roots__=[[[i[j][0] for j in range(len(i))] for i in rr if i] for rr in roots]
#rr=[]
roots_sectors=[]
tlength_sectors=[]
threads_sectors=[]
for setor in np.sectorialized_agents__:
roots_sector=[]
tlength_sector=[]
threads_sector=[]
for agentid in setor:
messages=ds.author_messages[agentid]
for message in messages:
                if message[1]==None: # new thread, store the ID
roots_sector.append(message[0])
t_sector,lsector=digRoot(message[0],ds)
tlength_sector.append(lsector)
threads_sector.append(t_sector)
roots_sectors.append(roots_sector)
tlength_sectors.append(tlength_sector)
threads_sectors.append(threads_sector)
mt=[n.mean(i) for i in tlength_sectors]
st=[n.std(i) for i in tlength_sectors]
tls=[i for j in tlength_sectors for i in j]
mt_ =n.mean(tls)
st_ =n.std(tls)
mvars=("date1","date2","deltaAnos_",
"N","Ns","Ns_","M","M_","Ms","Ms_",
"NM","NM_","NMs","NMs_","NM_missing","NM_missing_",
"Gammas","Gammas_","G_",
"roots_sectors","tlength_sectors","threads_sectors",
"mt","st","tls","mt_","st_")
vdict={}
for mvar in mvars:
vdict[mvar] = locals()[mvar]
return vdict
#return date1,date2,N,Ns,Ns_,Ms,M2,Gamma,ids,ids_,M_,MN,MN_
def makeGeneralTable(generalMeasures_instance, table_dir="/home/r/repos/artigoTextoNasRedes/tables/",fname="geralInline.tex",tag=None):
gms=generalMeasures_instance
labelsh=("","g.","p.","i.","h.")
labels=(r"$N$",r"$N_{\%}$",r"$M$",r"$M_{\%}$",
r"$\Gamma$",r"$\Gamma_{\%}$",r"$\frac{\Gamma}{M}\%$",
r"$\mu(\gamma)$",r"$\sigma(\gamma)$")
N,Ns,Ns_,M,Ms,Ms_,Gammas,Gammas_,G_,mt_,mt,st_,st,deltaAnos_,date1,date2=[gms[i]
for i in ("N","Ns","Ns_","M","Ms","Ms_","Gammas","Gammas_","G_","mt_","mt","st_","st","deltaAnos_","date1","date2")]
Gamma=sum(Gammas)
data=[[N]+Ns,[100]+Ns_,[M]+Ms,[100]+Ms_,[Gamma]+Gammas,[100]+Gammas_,[100*Gamma/M]+G_,[mt_]+mt,[st_]+st]
caption=r"""Distribution of participants, messages and threads among each Erd\"os sector ({{\bf p.}} for periphery, {{\bf i.}} for intermediary,
{{\bf h.}} for hubs) in a total timespan of {:.2f} years (from {} to {}). $N$ is the number of participants, $M$ is the number of messages, $\Gamma$ is the number of threads, and $\gamma$ is the number of messages in a thread.
    The \% denotes the usual `per cent' with respect to the total quantity ($100\%$ for {{\bf g.}})
while $\mu$ and $\sigma$ denote mean and standard deviation.""".format(deltaAnos_,date1,date2)
fname_=mkName(table_dir,fname,tag)
g.tableHelpers.lTable(labels,labelsh,data,caption,fname_,"textGeral")
dl=g.tableHelpers.dl
me=g.tableHelpers.me
me(fname_[:-4],"\\bf",[(0,i) for i in range(1,5)])
dl(fname_[:-4]+"_",[1],[1],list(range(2,8,2))+[8,9])
def mkName(tdir,fname,tag):
return tdir+fname.replace(".tex","{}.tex".format(tag))
def makeText_(ds,pr):
texts=[]
msg_ids=[]
textG=""
for sector in pr.sectorialized_agents__:
texts+=[""]
msg_ids.append([])
for author in sector:
for message in ds.author_messages[author]:
mid=message[0]
text=ds.messages[mid][3]
texts[-1]+=text
textG+=text
msg_ids[-1].append(mid)
texts=[textG]+texts
foo=[REPLACER.replace(i) for i in texts]
texts_=[i[0] for i in foo]
ncontractions=[i[1] for i in foo]
return texts_,ncontractions, msg_ids
def makeText(ds,mid=None):
if not mid:
t=[ds.messages[i][3] for i in ds.message_ids]
else:
t=[ds.messages[i][3] for i in mid]
T_="\n".join(t) # todo o texto, com contracoes
T,ncontractions=REPLACER.replace(T_) # todo o texto, sem contracoes
return T, ncontractions
def medidasTokensQ_(T,lang="en"):
atime=time.time()
wtok=k.tokenize.wordpunct_tokenize(T)
wtok_=[t.lower() for t in wtok]
if lang=="en":
kw=[len(i) for i in wtok_ if i in WL_]
sw=[len(i) for i in wtok_ if i in stopwords]
else:
kw=[len(i) for i in wtok_ if i in WLP_]
sw=[len(i) for i in wtok_ if i in stopwordsP]
mvars=("kw","sw")
vdict={}
for mvar in mvars:
vdict[mvar] = locals()[mvar]
return vdict
def medidasTokensQ(T,lang="en"):
atime=time.time()
wtok=k.tokenize.wordpunct_tokenize(T)
wtok_=[t.lower() for t in wtok]
if lang=="en":
kw=[len(i) for i in wtok_ if i in WL_]
sw=[len(i) for i in wtok_ if i in stopwords]
else:
kw=[len(i) for i in wtok_ if i in WLP_]
sw=[len(i) for i in wtok_ if i in stopwordsP]
mkw=n.mean(kw)
dkw=n.std(kw)
msw=n.mean(sw)
dsw=n.std(sw)
mvars=("mkw","dkw","msw","dsw")
vdict={}
for mvar in mvars:
vdict[mvar] = locals()[mvar]
return vdict
def makeSentencesTable(medidasSentencas_dict, table_dir="/home/r/repos/artigoTextoNasRedes/tables/",fname="sentencesInline.tex",tag=None):
sms=medidasSentencas_dict
mvars=("nsents",
"Mchars_sents","Schars_sents",
"Mtoks_sents","Stoks_sents",
"Mknownw_sents","Sknownw_sents",
"Mstopw_sents","Sstopw_sents",
"Mpuncts_sents","Spuncts_sents",)
sms_=[[sms[j][i] for j in range(4)] for i in mvars]
labelsh=("","g.","p.","i.","h.")
labels=(r"$sents$",r"$sents_{\%}$",
r"$\mu_S(chars)$", r"$\sigma_S(chars)$",
r"$\mu_S(tokens)$",r"$\sigma_S(tokens)$",
r"$\mu_S(knownw)$",r"$\sigma_S(knownw)$",
r"$\mu_S(stopw)$", r"$\sigma_S(stopw)$",
r"$\mu_S(puncts)$",r"$\sigma_S(puncts)$",
)
caption=r"""Sentences sizes in each Erd\"os sector ({{\bf p.}} for periphery, {{\bf i.}} for intermediary, {{\bf h.}} for hubs)."""
#data=list(map(list, zip(*tms_)))
data=sms_
nsents=data[0]
nsents_=perc_(nsents)
data=n.array(data[1:])
data=n.vstack((nsents,nsents_,data))
fname_=mkName(table_dir,fname,tag)
g.lTable(labels,labelsh,data,caption,fname_,"textGeral")
ME(fname_[:-4],"\\bf",[(0,i) for i in range(1,5)])
DL(fname_[:-4]+"_",[1],[1],[2,4,6,8,10,12])
def makeTokenSizesTable(medidasTokens__instance, table_dir="/home/r/repos/artigoTextoNasRedes/tables/",fname="tokenSizesInline.tex",tag=None):
tms=medidasTokens__instance
mvars=("Mtoken","Stoken","Mknownw","Sknownw",
"Mknownw_diff","Sknownw_diff",
"Mstopw","Sstopw")
tms_=[[tms[j][i] for j in range(4)] for i in mvars]
labelsh=("","g.","p.","i.","h.")
labels=( r"$\mu(\overline{tokens})$",r"$\sigma(\overline{tokens})$",
r"$\mu(\overline{knownw})$",r"$\sigma(\overline{knownw})$",
r"$\mu(\overline{knownw \neq})$",r"$\sigma(\overline{knownw \neq})$",
r"$\mu(\overline{stopw})$",r"$\sigma(\overline{stopw})$")
caption=r"""Token sizes in each Erd\"os sector ({{\bf p.}} for periphery, {{\bf i.}} for intermediary, {{\bf h.}} for hubs)."""
#data=list(map(list, zip(*tms_)))
data=tms_
#ntoks=data[0]
#ntoks_=perc_(ntoks)
#data=n.array(data[1:])
#data=n.vstack((ntoks,ntoks_,data*100))
fname_=mkName(table_dir,fname,tag)
g.lTable(labels,labelsh,data,caption,fname_,"textGeral_")
ME(fname_[:-4],"\\bf",[(0,i) for i in range(1,5)])
DL(fname_[:-4]+"_",[1],[1],[2,4,6,8])
def makeTokensTable(medidasTokens__instance, table_dir="/home/r/repos/artigoTextoNasRedes/tables/",fname="tokensInline.tex",tag=None):
tms=medidasTokens__instance
mvars=("tokens",
"tokens_diff",
"knownw",
"knownw_diff",
"stopw",
"punct",
"contract")
tms_=[[tms[j][i] for j in range(4)] for i in mvars]
labelsh=("","g.","p.","i.","h.")
labels=(r"$tokens$",
r"$tokens_{\%}$",
r"$tokens \neq$",
r"$\frac{knownw}{tokens}$",
r"$\frac{knownw \neq}{knownw}$",
r"$\frac{stopw}{knownw}$",
r"$\frac{punct}{tokens}$",
r"$\frac{contrac}{tokens}$",
)
caption=r"""tokens in each Erd\"os sector ({{\bf p.}} for periphery, {{\bf i.}} for intermediary,
{{\bf h.}} for hubs)."""
#data=list(map(list, zip(*tms_)))
data=tms_
ntoks=data[0]
ntoks_=perc_(ntoks)
data=n.array(data[1:])
data=n.vstack((ntoks,ntoks_,data*100))
fname_=mkName(table_dir,fname,tag)
g.lTable(labels,labelsh,data,caption,fname_,"textGeral")
ME(fname_[:-4],"\\bf",[(0,i) for i in range(1,5)])
DL(fname_[:-4]+"_",[1],[1],[2,3,5,7,8])
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
ll=[]
for i in range(0, len(l), n):
ll.append(l[i:i+n])
return ll
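# e.g. (added for illustration): chunks([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]]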
def medidasSinais2_(medidas_pos_list,medidas_mensagens):
return [medidasSinais2(post,mmU)
for post,mmU in zip(medidas_pos_list,medidas_mensagens)]
def medidasSinais2(post,medidas_mensagensU):
sinal=[[i[1] for i in j] for j in post["tags"]]
sinal_=chunks([i[1] for j in post["tags"] for i in j],100)
sinais={}
sinais["adj"]=[j.count("ADJ") for j in sinal]
sinais["sub"]=[j.count("NOUN") for j in sinal]
sinais["pun"]=[j.count(".") for j in sinal]
sinais["verb"]=[j.count("VERB") for j in sinal_]
sinais["chars"]=medidas_mensagensU["toks_msgs"]
return sinais
def medidasSinais_(TS):
return [medidasSinais(T) for T in TS]
def medidasSinais(T):
wtok=k.tokenize.wordpunct_tokenize(T)
lens_tok=[len(i) for i in wtok]
lens_word=[len(i) for i in wtok if (i not in stopwords) and (i in WL_)]
lens_sent=[len(i) for i in k.sent_tokenize(T)]
mvars=("lens_tok","lens_word","lens_sent")
vdict={}
for mvar in mvars:
vdict[mvar] = locals()[mvar]
return vdict
def ksAll(sigDict,mkeys):
l=[]
for key in mkeys:
l.append([])
for i in range(4):
l[-1].append([])
for j in range(4):
vals=g.ksStatistics.kolmogorovSmirnovDistance__(sigDict[i][key],sigDict[j][key])
l[-1][-1].append(vals)
return l
def makeKSTables(dists,table_dir="/home/r/repos/artigoTextoNasRedes/tables/",fnames=None,tags=None,tag=None):
ldists=[]
for dists_meas in dists:
l=[]
for sect1_meas in dists_meas:
calphas=[]
dnns=[]
for sect2_val in sect1_meas:
calpha,dnn=sect2_val
calphas+=[calpha]
dnns+=[dnn]
l+=[calphas,dnns]
ldists.append(l) # new table
dists=ldists
labels=labelsh[1:]
labels_=[(l,"") for l in labels]
labels__=[i for j in labels_ for i in j]
caption="KS distances on {}."
count=0
if not fnames:
fnames=[str(i) for i in range(len(dists))]
if not tags:
tags=[str(i) for i in range(len(dists))]
for meas,fname,tag_ in zip(dists,fnames,tags):
fname_=mkName(table_dir,fname+".tex",tag)
g.lTable(labels__,labelsh,meas,caption.format(tag_),
fname_,"ksDistances")
ME(fname_[:-4],"\\bf",[(0,i) for i in range(1,5)]+[(i,0) for i in range(1,9)])
DL(fname_[:-4]+"_",[1],[1],[2,4,6,8])
# ME(fname_+"_","\\bf",,1)
def medidasTokens__(lt=("texts",),ct=("ncontractions",)):
return [medidasTokens_(i,j) for i,j in zip(lt,ct)]
def medidasTokens_(T,ncontract=None):
wtok=k.tokenize.wordpunct_tokenize(T)
wtok_=[t.lower() for t in wtok]
tokens=len(wtok) #
tokens_=set(wtok)
tokens_diff=len(tokens_)/tokens #
punct=sum([sum([tt in puncts for tt in t])==len(t) for t in wtok_])
punct/=tokens
known=[i for i in wtok_ if (i not in stopwords) and (i in WL_)]
knownw=len(known)
known_=set(known)
knownw_diff=len(known_)/knownw
stop=[i for i in wtok_ if i in stopwords]
stopw=len(stop)/knownw
knownw/=tokens
contract=ncontract/tokens
    # mean and standard deviation of sizes:
    # tokens,
Mtoken,Stoken=mediaDesvio_(wtok_)
    # known words without stopwords,
Mknownw,Sknownw=mediaDesvio_(known)
    # known words without stopwords and without repetition
Mknownw_diff,Sknownw_diff=mediaDesvio_(known_)
# stop words
Mstopw,Sstopw=mediaDesvio_(stop)
#stokwn=[len(i) for i in known]
#skownw=[len(i) for i in known]
mvars=("tokens","tokens_diff","punct",
"knownw","knownw_diff","stopw","contract",
"Mtoken","Stoken","Mknownw","Sknownw",
"Mknownw_diff","Sknownw_diff",
"Mstopw","Sstopw")
vdict={}
for mvar in mvars:
vdict[mvar] = locals()[mvar]
return vdict
def medidasTokens(T):
atime=time.time()
wtok=k.tokenize.wordpunct_tokenize(T)
wtok_=[t.lower() for t in wtok]
nt=len(wtok) #
ntd=len(set(wtok)) #
    # tokens that are punctuation
ntp=sum([sum([tt in puncts for tt in t])==len(t) for t in wtok]) #
    # known and unknown words
kw=[] #
ukw=[] #
tp=[]
sw=[]
for t in wtok_:
if t in WL_:
kw.append(t)
elif sum([tt in puncts for tt in t])==len(t):
tp.append(t)
else:
ukw.append(t)
if t in stopwords:
sw.append(t)
sw_=set(sw)
kw_=set(kw)
ukw_=set(ukw)
kwss=[i for i in kw if wn.synsets(i)] #
kwss_=set(kwss) #
    # known words that do not have synsets
kwnss=[i for i in kw if i not in kwss_] #
print("MT2:", atime-time.time()); atime=time.time()
kwnss_=set(kwnss) #
# words that are stopwords
kwsw=[i for i in kw if i in stopwords] #
kwsw_=set(kwsw)
print("MT3:", atime-time.time()); atime=time.time()
# known words that are not stopwords
kwnsw=[i for i in kw if i not in stopwords] #
kwnsw_=set(kwnsw) #
# unknown words that are stopwords
ukwsw=[i for i in ukw if i in stopwords] #
print( "MT4:", atime-time.time()); atime=time.time()
# known words that return synsets and are stopwords
kwsssw=[i for i in kwss if i in stopwords] #
print("MT5:", atime-time.time()); atime=time.time()
    # known words that don't return synsets and are stopwords
kwnsssw=[i for i in kwnss if i in stopwords] #
print("MT6:", atime-time.time()); atime=time.time()
# words that are known, are not stopwords and do not return synset
foo_=kwnss_.difference(stopwords)
kwnssnsw=[i for i in kw if i in foo_] #
print("MT7:", atime-time.time()); atime=time.time()
foo_=kwss_.difference(stopwords)
kwssnsw=[i for i in kw if i in foo_] #
kwssnsw_=set(kwssnsw)
print("MT8:", atime-time.time()); atime=time.time()
mvars=("nt", "ntd", "ntp","sw","sw_","kw", "kw_", "tp", "ukw", "ukw_", "kwss", "kwss_", "kwnss", "kwnss_","kwsw", "kwsw_", "kwnsw", "kwnsw_", "ukwsw", "kwsssw", "kwnsssw", "kwnssnsw", "kwssnsw","kwssnsw_")
vdict={}
for mvar in mvars:
vdict[mvar] = locals()[mvar]
return vdict
def makeCharTable(charsMeasures_instance, table_dir="/home/r/repos/artigoTextoNasRedes/tables/",fname="charsInline.tex",tag=None):
cms=charsMeasures_instance
labelsh=("","g.","p.","i.","h.")
labels=(r"$chars$",
r"$chars_{\%}$",
r"$\frac{spaces}{chars}$",
r"$\frac{punct}{chars-spaces}$",
r"$\frac{digits}{chars-spaces}$",
r"$\frac{letters}{chars-spaces}$",
r"$\frac{vogals}{letters}$",
r"$\frac{uppercase}{letters}$",
)
caption=r"""Characters in each Erd\"os sector ({{\bf p.}} for periphery, {{\bf i.}} for intermediary,
{{\bf h.}} for hubs)."""
data=list(map(list, zip(*cms)))
nchars=data[0]
nchars_=perc_(nchars)
data=n.array(data[1:])
data=n.vstack((nchars,nchars_,data*100))
fname_=mkName(table_dir,fname,tag)
g.lTable(labels,labelsh,data,caption,fname_,"textGeral")
ME(fname_[:-4],"\\bf",[(0,i) for i in range(1,5)])
DL(fname_[:-4]+"_",[1],[1],[2,4,5,7,8])
def medidasLetras_(LT=["list","of","strings"]):
return [medidasLetras(i) for i in LT]
def medidasLetras(T):
    # how many characters
nc=len(T)
    # spaces
ne=T.count(" ")
    # letters (and uppercase)
nl=sum([t.isalpha() for t in T])
nm=sum([t.isupper() for t in T])
    # vowels
nv=sum([t in ("a","e","i","o","u") for t in T])
    # punctuation
np=sum([t in puncts for t in T])
    # numerals
nd=sum([t.isdigit() for t in T])
return nc,ne/nc,np/(nc-ne),nd/(nc-ne),nl/(nc-ne),nv/nl,nm/nl
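# Tiny worked example (added for illustration):
#   medidasLetras("Hi 5!") -> (5, 0.2, 0.25, 0.25, 0.5, 0.5, 0.5)
# i.e. chars, spaces/chars, punct/(chars-spaces), digits/(chars-spaces),
# letters/(chars-spaces), vowels/letters, uppercase/letters.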
def mediaDesvio_(medidas):
medidas_=[len(i) for i in medidas]
return n.mean(medidas_),n.std(medidas_)
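# e.g. (added for illustration): mediaDesvio_(["ab", "abcd"]) -> (3.0, 1.0),
# the mean and (population) standard deviation of the item lengths.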
def mediaDesvio(tid="astring",adict={"stringkey":"tokens"}):
tid_=tid+"_"
toks=[len(i) for i in adict[tid]]
toks_=[len(i) for i in adict[tid_]]
mtid=n.mean(toks)
dtid=n.std(toks)
mtid_=n.mean(toks_)
dtid_= n.std(toks_)
fdict={}
fdict["m"+tid]=mtid
fdict["d"+tid]=dtid
fdict["m"+tid_]=mtid_
fdict["d"+tid_]=dtid_
return fdict
def medidasTamanhosTokens(medidas_tokens):
mdict={}
MT=medidas_tokens
mdict.update(mediaDesvio("kw",MT))
mdict.update(mediaDesvio("kwnsw",MT))
mdict.update(mediaDesvio("kwssnsw",MT))
mdict.update(mediaDesvio("kwssnsw",MT))
mdict.update(mediaDesvio("kwsw",MT))
mdict.update(mediaDesvio("sw",MT))
return mdict
def makeCorrelationTable_(measures_pca, table_dir="/home/r/repos/artigoTextoNasRedes/tables/",fname="correlationInline.tex",tag=None):
mp=measures_pca
cors=[i["pca"].C for i in mp]
cors_=[]
for secn in range(len(cors[0])):
for cor in cors: # cor correlation measure
cors_.append(cor[secn])
data=cors_
labels=mp[0]["vlabels"]
labelsh=[""]+labels
labels_=[(i,"","","") for i in labels]
labels__=[i for j in labels_ for i in j]
labels__[1:4]=["(p.)","(i.)","(h.)"]
caption="Pierson correlation coefficient for the topological and textual measures."
fname_=mkName(table_dir,fname,tag)
g.lTable(labels__,labelsh,data,caption,fname_,"textCorr")
# renderiza matriz como tabela
#ME(table_dir+fname[:-4],"\\bf",[(0,i) for i in range(1,5)])
nz=(n.abs(n.array(data))>.6).nonzero()
ii=nz[0]
jj=nz[1]
#pts=[(i,j) for i,j in zip(ii,jj)]
pts=[(i+1,j+1) for i,j in zip(ii,jj)]
B.thing=nz,data,pts
ME(fname_[:-4],"\\bf",pts)
DL(fname_[:-4]+"_",[1],[1],[2,3,4,
6,7,8,
10,11,12,
14,15,16,
18,19,20,
22,23,24,
26,27,28,
30,31,32,
34,35,36])
def makeCorrelationTable(correlationMatrix, table_dir="/home/r/repos/artigoTextoNasRedes/tables/",fname="correlationInline.tex",mvars=[]):
#mvars=[i.replace("_","") for i in mvars]
labelsh=[""]+mvars
labels=mvars
caption=r"""Correlation of textual and topological metrics."""
data=correlationMatrix*100
g.lTable(labels,labelsh,data,caption,table_dir+fname,"textCorr")
# ME(table_dir+fname[:-4],"\\bf",[(0,i) for i in range(1,5)])
# DL(table_dir+fname[:-4]+"_",[1],[1],[])
#DL(table_dir+fname[:-4],[1],[1],[],0)
def makeMessagesTable(medidasMensagens_dict, table_dir="/home/r/repos/artigoTextoNasRedes/tables/",fname="messagesInline.tex",tag=None):
mms=medidasMensagens_dict
mvars=("nmsgs",
"Msents_msgs","Ssents_msgs",
"Mtokens_msgs","Stokens_msgs",
"Mknownw_msgs","Sknownw_msgs",
"Mstopw_msgs","Sstopw_msgs",
"Mpuncts_msgs","Spuncts_msgs",
"Mchars_msgs","Schars_msgs",
)
mms_=[[mms[j][i] for j in range(4)] for i in mvars]
labelsh=("","g.","p.","i.","h.")
labels=(r"$msgs$",r"$msgs_{\%}$",
r"$\mu_M(sents)$", r"$\sigma_M(sents)$",
r"$\mu_M(tokens)$",r"$\sigma_M(tokens)$",
r"$\mu_M(knownw)$",r"$\sigma_M(knownw)$",
r"$\mu_M(stopw)$", r"$\sigma_M(stopw)$",
r"$\mu_M(puncts)$",r"$\sigma_M(puncts)$",
r"$\mu_M(chars)$", r"$\sigma_M(chars)$",
)
caption=r"""Messages sizes in each Erd\"os sector ({{\bf p.}} for periphery, {{\bf i.}} for intermediary, {{\bf h.}} for hubs)."""
#data=list(map(list, zip(*tms_)))
data=mms_
nmsgs=data[0]
nmsgs_=perc_(nmsgs)
data=n.array(data[1:])
data=n.vstack((nmsgs,nmsgs_,data))
fname_=mkName(table_dir,fname,tag)
g.lTable(labels,labelsh,data,caption,fname_,"textGeral")
ME(fname_[:-4],"\\bf",[(0,i) for i in range(1,5)])
DL(fname_[:-4]+"_",[1],[1],[2,4,6,8,10,12,14,16])
def medidasMensagens_(ds,msg_ids):
return [medidasMensagens(ds,mids) for mids in [None]+list(msg_ids)]
def medidasMensagens(ds,tids=None):
# TTM
if not tids:
mT=[ds.messages[i][3] for i in ds.message_ids]
else:
mT=[ds.messages[i][3] for i in tids]
tokens_msgs=[k.tokenize.wordpunct_tokenize(t) for t in mT] # tokens
knownw_msgs=[[i for i in toks if (i not in stopwords) and (i in WL_)] for toks in tokens_msgs]
stopw_msgs=[[i for i in toks if i in stopwords] for toks in tokens_msgs]
puncts_msgs=[[i for i in toks if
(len(i)==sum([(ii in puncts) for ii in i]))]
for toks in tokens_msgs] #
sents_msgs=[k.sent_tokenize(t) for t in mT] # tokens
nmsgs=len(mT)
toks_msgs=[len(i) for i in mT]
Mchars_msgs, Schars_msgs = mediaDesvio_(mT)
Mtokens_msgs, Stokens_msgs = mediaDesvio_(tokens_msgs)
Mknownw_msgs, Sknownw_msgs = mediaDesvio_(knownw_msgs)
Mstopw_msgs, Sstopw_msgs = mediaDesvio_(stopw_msgs)
Mpuncts_msgs, Spuncts_msgs = mediaDesvio_(puncts_msgs)
Msents_msgs,Ssents_msgs = mediaDesvio_(sents_msgs)
mvars=("nmsgs","toks_msgs",
"Msents_msgs","Ssents_msgs",
"Mtokens_msgs","Stokens_msgs",
"Mknownw_msgs","Sknownw_msgs",
"Mstopw_msgs","Sstopw_msgs",
"Mpuncts_msgs","Spuncts_msgs",
"Mchars_msgs","Schars_msgs",
)
vdict={}
for mvar in mvars:
vdict[mvar] = locals()[mvar]
return vdict
def medidasSentencas_(Ts=['list',"of","strings"]):
return [medidasSentencas(i) for i in Ts]
def medidasSentencas(T):
TS=k.sent_tokenize(T)
    tokens_sentences=[k.tokenize.wordpunct_tokenize(i) for i in TS] ### for the POS tags
knownw_sentences=[[i for i in ts if (i not in stopwords) and (i in WL_)] for ts in tokens_sentences]
stopw_sentences =[[i for i in ts if i in stopwords] for ts in tokens_sentences]
puncts_sentences=[[i for i in ts if
(len(i)==sum([(ii in puncts) for ii in i]))]
for ts in tokens_sentences] #
Mchars_sents, Schars_sents = mediaDesvio_(TS)
Mtoks_sents, Stoks_sents = mediaDesvio_(tokens_sentences)
Mknownw_sents, Sknownw_sents = mediaDesvio_(knownw_sentences)
Mstopw_sents, Sstopw_sents = mediaDesvio_(stopw_sentences)
Mpuncts_sents, Spuncts_sents = mediaDesvio_(puncts_sentences)
nsents=len(TS)
mvars=("Mchars_sents","Schars_sents",
"Mtoks_sents","Stoks_sents",
"Mknownw_sents","Sknownw_sents",
"Mstopw_sents","Sstopw_sents",
"Mpuncts_sents","Spuncts_sents","nsents",
"tokens_sentences")
vdict={}
for mvar in mvars:
vdict[mvar] = locals()[mvar]
return vdict
def medidasTamanhosSentencas(T,medidas_tokens):
MT=medidas_tokens
############
    # sentence measures
TS=k.sent_tokenize(T)
    # mean and standard deviation of the number of characters per sentence
tTS=[len(i) for i in TS]
mtTS=n.mean(tTS) #
dtTS=n.std(tTS) #
    # mean and standard deviation of sentence length in tokens
    sTS=[k.tokenize.wordpunct_tokenize(i) for i in TS] ### for the POS tags
tsTS=[len(i) for i in sTS]
mtsTS=n.mean(tsTS) #
dtsTS=n.std(tsTS) #
    # mean and standard deviation of sentence length in known words
kw_=MT["kw_"]
tsTSkw=[len([ii for ii in i if ii in kw_]) for i in sTS]
mtsTSkw=n.mean(tsTSkw) #
dtsTSkw=n.std(tsTSkw) #
    # mean and standard deviation of sentence length in words that return synsets and are not stopwords
pv_=MT["kwssnsw_"]
tsTSpv=[len([ii for ii in i if ii in pv_]) for i in sTS]
mtsTSpv=n.mean(tsTSpv) #
dtsTSpv=n.std(tsTSpv) #
mvars=("mtTS","dtTS","mtsTS","dtsTS","mtsTSkw","dtsTSkw",
"mtsTSpv","dtsTSpv","sTS")
vdict={}
for mvar in mvars:
vdict[mvar] = locals()[mvar]
return vdict
def medidasTamanhosMensagens(ds, tids=None):
if not tids:
mT=[ds.messages[i][3] for i in ds.message_ids]
else:
mT=[ds.messages[i][3] for i in tids]
tmT=[len(t) for t in mT] # chars
ttmT=[len(k.tokenize.wordpunct_tokenize(t)) for t in mT] # tokens
tsmT=[len(k.sent_tokenize(t)) for t in mT] # sentences
mtmT=n.mean(tmT)
dtmT=n.std(tmT)
mttmT=n.mean(ttmT)
dttmT=n.std(ttmT)
mtsmT=n.mean(tsmT)
dtsmT=n.std(tsmT)
mvars=("mtmT","dtmT","mttmT","dttmT","mtsmT","dtsmT")
vdict={}
for mvar in mvars:
vdict[mvar] = locals()[mvar]
return vdict
def medidasPOS_(list_of_list_of_sentences_tokenized):
# [i["tokens_sentences"] for i in sent_measures]
return [medidasPOS(i) for i in list_of_list_of_sentences_tokenized]
def medidasPOS(sentences_tokenized):
"""Measures of POS tags
Receives a sequence of sentences,
each as a sequence of tokens.
    Returns a set of measures of POS tags,
and the tagged sentences.
Convention:
VERB - verbs (all tenses and modes)
NOUN - nouns (common and proper)
PRON - pronouns
ADJ - adjectives
ADV - adverbs
ADP - adpositions (prepositions and postpositions)
CONJ - conjunctions
DET - determiners
NUM - cardinal numbers
PRT - particles or other function words
X - other: foreign words, typos, abbreviations
. - punctuation
See "A Universal Part-of-Speech Tagset"
by <NAME>, <NAME> and <NAME>
for more details:
http://arxiv.org/abs/1104.2086"""
tags=brill_tagger.tag_sents(sentences_tokenized)
tags_=[item for sublist in tags for item in sublist]
tags__=[i[1] for i in tags_ if i[0].lower() in WL_]
htags=c.Counter(tags__)
htags__=c.OrderedDict()
if htags:
factor=100.0/sum(htags.values())
htags_={}
for i in htags.keys(): htags_[i]=htags[i]*factor
htags__=c.OrderedDict(sorted(htags_.items(), key=lambda x: -x[1]))
mvars=("htags__","tags")
vdict={}
for mvar in mvars:
vdict[mvar] = locals()[mvar]
return vdict
def S(acounter):
return sorted(acounter.items(),key=lambda x: -x[1])
def auxWnTb(tt,level,tabfname,wn_dict_list):
tt_=[S(i) for i in tt]
labels=[i[0] for i in tt_[0][:12]]
if labels:
wms_=[[tt[i][j] for i in range(4)] for j in labels]
labels=[i.replace("_","\_") for i in labels]
if level=="root":
            caption=r"""Counts for the most incident synsets at the semantic roots in each Erd\"os sector ({{\bf p.}} for periphery, {{\bf i.}} for intermediary, {{\bf h.}} for hubs).""".format(level)
else:
caption=r"""Counts for the most incident synsets {} step from the semantic roots in each Erd\"os sector ({{\bf p.}} for periphery, {{\bf i.}} for intermediary, {{\bf h.}} for hubs).""".format(level)
        # normalize this data with respect to the columns
B.me.append(wms_)
B.tt_.append(tt_)
B.tt.append(tt)
data=n.array(wms_)
data=100*data/data.sum(axis=0)
data=data[:12]
data=n.vstack((data,data.sum(axis=0)))
labels+=[r"{{\bf total}}"]
g.lTable(labels,labelsh,data,caption,tabfname,"textGeral_")
ME(tabfname[:-4],"\\bf",[(0,i) for i in range(1,5)])
DL(tabfname[:-4]+"_",[1,-3],[1])
else:
print(tabfname.split("/")[-1], "No labels:",labels,
"\nProbably no hypernyms:",
len(wn_dict_list[0]["top_hypernyms"]))
def makeWordnetTable2a(wn_dict_list, table_dir="/home/r/repos/artigoTextoNasRedes/tables/",fname="wnInline2a.tex"):
"""Table about the most incident roots"""
t0=[c.Counter([i[0].name() for i in j["top_hypernyms"]]) for j in wn_dict_list]
auxWnTb(t0,"root",table_dir+fname,wn_dict_list)
def makeWordnetTable2b(wn_dict_list, table_dir="/home/r/repos/artigoTextoNasRedes/tables/",fname="wnInline2b.tex"):
"""Table about the most incident roots"""
t1=[c.Counter([i[1].name() for i in j["top_hypernyms"] if len(i)>1]) for j in wn_dict_list]
#auxWnTb(labels,labelsh,data,level,tabfname)
auxWnTb(t1,"one",table_dir+fname,wn_dict_list)
def makeWordnetTable2c(wn_dict_list, table_dir="/home/r/repos/artigoTextoNasRedes/tables/",fname="wnInline2c.tex"):
"""Table about the most incident roots"""
t2=[c.Counter([i[2].name() for i in j["top_hypernyms"] if len(i)>2]) for j in wn_dict_list]
auxWnTb(t2,"two",table_dir+fname,wn_dict_list)
def makeWordnetTable2d(wn_dict_list, table_dir="/home/r/repos/artigoTextoNasRedes/tables/",fname="wnInline2d.tex"):
"""Table about the most incident roots"""
t3=[c.Counter([i[3].name() for i in j["top_hypernyms"] if len(i)>3]) for j in wn_dict_list]
auxWnTb(t3,"three",table_dir+fname,wn_dict_list)
def makeWordnetPOSTable(wn_dict_list, table_dir="/home/r/repos/artigoTextoNasRedes/tables/",fname="wnPOSInline.tex",tag=None):
wms=wn_dict_list
labels=["N","ADJ","VERB","ADV","POS","POS!"]
data=[[wms[i]["ftags"][j] for i in range(4)] for j in range(4)]
    # include % with respect to the total of tagged words
    # variables [posok, WL_ and posnok]
data+=[[100*len(wms[i]["posok"])/len(wms[i]["WT_"]) for i in range(4)]]
data+=[[100*(len(wms[i]["posok"])/(len(wms[i]["posok"])+len(wms[i]["posnok"]))) for i in range(4)]]
caption=r"""Percentage of synsets with each of the POS tags used by Wordnet. The last lines give the percentage of words considered from all of the tokens (POS) and from the words with synset (POS!). The tokens not considered are punctuations, unrecognized words, words without synsets, stopwords and words for which Wordnet has no synset tagged with POS tags . Values for each Erd\"os sectors are in the columns {{\bf p.}} for periphery, {{\bf i.}} for intermediary, {{\bf h.}} for hubs."""
labelsh=("","g.","p.","i.","h.")
fname_=mkName(table_dir,fname,tag)
g.lTable(labels,labelsh,data,caption,fname_,"textGeral_")
ME(fname_[:-4],"\\bf",[(0,i) for i in range(1,5)])
DL(fname_[:-4]+"_",[1,-4],[1])
def medidasWordnet2_POS(wn_measures,poss=("n","as","v","r")):
wn_measures2={}
for pos in poss:
wn_measures2[pos]=g.textUtils.medidasWordnet2_(wn_measures,pos)
return wn_measures2
def makeWordnetTables2_POS(wn_dict_pos, table_dir="/home/r/repos/artigoTextoNasRedes/tables/",fname="wnPOSInline2",poss=("n","as","v","r"),tag=None):
TDIR=table_dir
for pos in poss:
wn_measures2=wn_dict_pos[pos]
        g.textUtils.makeWordnetTable( wn_measures2,TDIR ,fname="{}-{}-{}tag.tex". format(fname,pos,tag)) # means and standard deviations of the attribute incidences
        g.textUtils.makeWordnetTable2a(wn_measures2,TDIR, fname="{}a-{}-{}tag.tex".format(fname,pos,tag)) # count of the root synsets
        g.textUtils.makeWordnetTable2b(wn_measures2,TDIR, fname="{}b-{}-{}tag.tex".format(fname,pos,tag)) # count of the root synsets
        g.textUtils.makeWordnetTable2c(wn_measures2,TDIR, fname="{}c-{}-{}tag.tex".format(fname,pos,tag)) # count of the root synsets
        g.textUtils.makeWordnetTable2d(wn_measures2,TDIR, fname="{}d-{}-{}tag.tex".format(fname,pos,tag)) # count of the root synsets
# make one file from all 20 (max) tables
names="{}-{}_.tex","{}a-{}_.tex","{}b-{}_.tex","{}c-{}_.tex","{}d-{}_.tex"
tx=""
for pos in poss:
tx+="\n\n% POS -> "+pos
for name in names:
name_=TDIR+name.format(fname,pos,tag)
if os.path.isfile(name_):
tx+="\n% fname -> "+name_+"\n"
tx+=open(name_).read()
g.writeTex(tx,TDIR+fname+".tex")
def makeWordnetTable(wn_dict_list, table_dir="/home/r/repos/artigoTextoNasRedes/tables/",fname="wnInline.tex"):
wms=wn_dict_list
mvars=("mmind","dmind",
"mmaxd","dmaxd",
"mnhol_","dnhol_",
"mnmer_","dnmer_",
"mndomains","dndomains",
"mnsimilar","dnsimilar",
"mnverb_groups","dnverb_groups",
"mnlemmas","dnlemmas",
"mnentailments","dnentailments",
"mnhypo_","dnhypo_",
"mnhyper_","dnhyper_",
)
wms_=[[wms[j][i] for j in range(4)] for i in mvars]
labelsh=("","g.","p.","i.","h.")
labels=("$\mu(min\,depth)$","$\sigma(min\,depth)$",
"$\mu(max\,depth)$",r"$\sigma(max\,depth)$",
"$\mu(holonyms)$", "$\sigma(holonyms)$",
"$\mu(meronyms)$", "$\sigma(meronyms)$",
"$\mu(domains)$", "$\sigma(domains)$",
"$\mu(similar)$", "$\sigma(similar)$",
"$\mu(verb\,groups)$","$\sigma(verb\,groups)$",
"$\mu(lemmas)$", "$\sigma(lemmas)$",
"$\mu(entailments)$", "$\sigma(entailments)$",
"$\mu(hyponyms)$", "$\sigma(hyponyms)$",
"$\mu(hypernyms)$", "$\sigma(hypernyms)$",
)
caption=r"""Measures of wordnet features in each Erd\"os sector ({{\bf p.}} for periphery, {{\bf i.}} for intermediary, {{\bf h.}} for hubs)."""
data=wms_
g.lTable(labels,labelsh,data,caption,table_dir+fname,"textGeral_")
ME(table_dir+fname[:-4],"\\bf",[(0,i) for i in range(1,5)])
DL(table_dir+fname[:-4]+"_",[1],[1],[2,4,6,8,10,12,14,16,18,20,22])
def makePOSTable(posMensagens_dict, table_dir="/home/r/repos/artigoTextoNasRedes/tables/",fname="posInline.tex",tag=None):
pms=posMensagens_dict
# pms_=[list(i["htags__"].items()) for i in pms]
#mvars=[list(i["htags__"].keys()) for i in pms]
#mvars=list(pms[0]["htags__"].keys())
mvars=['NOUN', 'X', 'ADP', 'DET', 'VERB', 'ADJ', 'ADV', 'PRT', 'PRON', 'NUM', 'CONJ',"."]
pms__=[[pms[j]["htags__"][i] if (i in pms[j]["htags__"].keys()) else 0 for j in range(4)] for i in mvars]
labelsh=("","g.","p.","i.","h.")
labels=mvars[:-1]+["PUNC"]
caption=r"""POS tags in each Erd\"os sector ({{\bf p.}} for periphery, {{\bf i.}} for intermediary, {{\bf h.}} for hubs).
Universal POS tags~\cite{{petrov}}:
VERB - verbs (all tenses and modes);
NOUN - nouns (common and proper);
PRON - pronouns;
ADJ - adjectives;
ADV - adverbs;
ADP - adpositions (prepositions and postpositions);
CONJ - conjunctions;
DET - determiners;
NUM - cardinal numbers;
PRT - particles or other function words;
X - other: foreign words, typos, abbreviations;
PUNCT - punctuation.
"""
#data=list(map(list, zip(*tms_)))
data=pms__
#nmsgs=data[0]
#nmsgs_=perc_(nmsgs)
#data=n.array(data[1:])
#data=n.vstack((nmsgs,nmsgs_,data))
fname_=mkName(table_dir,fname,tag)
g.lTable(labels,labelsh,data,caption,fname_,"textGeral_")
ME(fname_[:-4],"\\bf",[(0,i) for i in range(1,5)])
DL(fname_[:-4]+"_",[1],[1],[2,4,7,9,10,11,12])
def filtro(wt_):
    # separates the tokens for analysis with wordnet
sword_sem_synset=[]
sword_com_synset=[]
word_com_synset=[]
word_sem_synset=[]
pontuacao=[]
token_exotico=[]
for wt in wt_:
ss=wn.synsets(wt[0])
if ss:
if wt[0] in stopwords:
sword_com_synset.append(wt)
else:
word_com_synset.append((wt[0],wt[1],ss))
#elif wt[0] in puncts:
elif sum([tt in puncts for tt in wt[0]])==len(wt[0]):
pontuacao.append(wt)
elif wt[0] in stopwords:
sword_sem_synset.append(wt)
elif wt[0] in WL_:
word_sem_synset.append(wt)
else:
token_exotico.append(wt)
mvars=("sword_sem_synset","sword_com_synset","word_com_synset","word_sem_synset","pontuacao","token_exotico")
vdict={}
for mvar in mvars:
vdict[mvar] = locals()[mvar]
return vdict
def traduzPOS(astring):
if astring in ("NOUN","NNS","NN","NUM"):
return wn.NOUN
elif astring in ("VERB","VBG"):
return wn.VERB
elif astring in ("ADJ","JJ","ADP"):
return wn.ADJ+wn.ADJ_SAT
elif astring in ("ADV","RB","PRT"):
return wn.ADV
else:
return "NOPOS"
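# e.g. (added for illustration): traduzPOS("NOUN") -> wn.NOUN ('n'); traduzPOS("ADJ") -> 'as'
# (wn.ADJ + wn.ADJ_SAT), so membership tests like `pp in pos` accept both plain and
# satellite adjectives.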
def medidasWordnet_(list_words_with_pos_tags):
return [medidasWordnet(i) for i in list_words_with_pos_tags]
def medidasWordnet2_(list_wn_stuff,pos):
return [medidasWordnet2(i,pos) for i in list_wn_stuff]
def medidasWordnet(words_with_pos_tags):
WT=words_with_pos_tags
WT_=[(i[0].lower(),i[1]) for j in WT for i in j]
wlists=filtro(WT_)
wl=wlists["word_com_synset"]
posok=[]
posnok=[]
for ww in wl:
pos = traduzPOS(ww[1])
ss=ww[2]
        # look in the synset names for the pos and the lowest numbering
poss=[i.pos() for i in ss]
fposs=[pp in pos for pp in poss]
if sum(fposs):
tindex=fposs.index(True)
posok.append((ww[0],ss[tindex]))
else:
posnok.append(ww)
    # statistics about posok
    # which tags?
posok_=[i[1].pos() for i in posok]
ftags_=[100*posok_.count(i)/len(posok_) for i in ('n', 's','a', 'r', 'v')]
ftags=ftags_[0:2]+ftags_[3:]
ftags[1]+=ftags_[2]
mvars=("WT_","wlists","posok","posnok","ftags")
vdict={}
for mvar in mvars:
vdict[mvar] = locals()[mvar]
return vdict
def medidasWordnet2(wndict,pos=None):
"""pos={'r', 'as', 'n', 'v'}"""
sss=wndict["posok"]
if pos:
sss_=[i[1] for i in sss if i[1].pos() in pos]
else:
sss_=[i[1] for i in sss]
hyperpaths=[i.hypernym_paths() for i in sss_]
    top_hypernyms=[i[0][:4] for i in hyperpaths] # make a histogram per layer
    lexnames=[i.lexname().split(".")[-1] for i in sss_] # review this
mhol=[len(i.member_holonyms()) for i in sss_]
phol=[len(i.part_holonyms()) for i in sss_]
shol=[len(i.substance_holonyms()) for i in sss_]
nhol_=[mhol[i]+phol[i]+shol[i] for i in range(len(sss_))] ###
mmer=[len(i.member_meronyms()) for i in sss_] #
pmer=[len(i.part_meronyms()) for i in sss_]
smer=[len(i.substance_meronyms()) for i in sss_]
nmer_=[mmer[i]+pmer[i]+smer[i] for i in range(len(sss_))] ###
nlemmas=[len(i.lemmas()) for i in sss_] ###
nhyperpaths=[len(i) for i in hyperpaths]
shyperpaths=[len(i) for j in hyperpaths for i in j]
nentailments=[len(i.entailments()) for i in sss_]
nhypernyms=[len(i.hypernyms()) for i in sss_]
nihypernyms=[len(i.instance_hypernyms()) for i in sss_]
nhyper_=[nhypernyms[i]+nihypernyms[i] for i in range(len(sss_))]
nhypo=[len(i.hyponyms()) for i in sss_] ###
nihypo=[len(i.instance_hyponyms()) for i in sss_]
nhypo_=[nhypo[i]+nihypo[i] for i in range(len(sss_))]
maxd=[i.max_depth() for i in sss_] ###
mind=[i.min_depth() for i in sss_] ###
nregion_domains=[len(i.region_domains()) for i in sss_] #
ntopic_domains= [len(i.topic_domains()) for i in sss_]
nusage_domains= [len(i.usage_domains()) for i in sss_]
ndomains=[nregion_domains[i]+ntopic_domains[i]+nusage_domains[i]
for i in range(len(sss_))] ###
nsimilar=[ len(i.similar_tos()) for i in sss_]
nverb_groups=[len(i.verb_groups()) for i in sss_]
mvars=list(locals().keys()); mvars.remove("wndict")
mvars_=mvars[:]
mvars_.remove("sss_"); mvars_.remove("sss");
mvars_.remove("top_hypernyms")
mvars_.remove("pos")
mvars_.remove("hyperpaths"); mvars_.remove("lexnames")
vdict={}
#mvars=("nmero_part",)
locals_=locals()
for mvar in mvars:
if mvar not in mvars_:
vdict[mvar] = locals_[mvar]
else:
vdict["m"+mvar]=n.mean(locals_[mvar])
vdict["d"+mvar]=n.std(locals_[mvar])
return vdict
def medidasParticipante(dict_auth_text):
medidas_autor={}
for author in dict_auth_text:
text=dict_auth_text[author]
if text:
text_,ncontract=R(text)
medidas=medidasSentencas(text_)
medidas2=medidasPOS(medidas["tokens_sentences"])
medidas.update(medidas2)
medidas_autor[author]=medidas
return medidas_autor
def medidasPCA2_(ds,nm,authors_lists=None):
mall=medidasPCA2(ds,nm)
return [mall]+[medidasPCA2(ds,nm,authors) for authors in authors_lists]
def medidasPCA2(ds,nm,authors=None):
textosP= textosParticipante(ds,authors)
medidasP=medidasParticipante(textosP)
medidas_autor=g.textUtils.medidasPCA(medidasP,nm)
vkeys=["clustering","degree","strength","Mpuncts_sents","Spuncts_sents","Mknownw_sents","Sknownw_sents","Mstopw_sents","Sstopw_sents"]
pca=g.textUtils.tPCA(medidas_autor,vkeys)
vlabels=[r"$cc$",r"$d$",r"$s$",r"$\mu_S(p)$",r"$\sigma_S(p)$",r"$\mu_S(kw)$",r"$\sigma_S(kw)$",r"$\mu_S(sw)$",r"$\sigma_S(sw)$"]
mvars=("vlabels","pca","vkeys","medidas_autor","medidasP","textosP")
vdict={}
for mvar in mvars:
vdict[mvar] = locals()[mvar]
return vdict
def medidasPCA(medidas_participante_dict,network_measures):
nm,mp=network_measures,medidas_participante_dict
for author in mp:
mp[author]["degree"]=nm.degrees[author]
mp[author]["strength"]=nm.strengths[author]
mp[author]["clustering"]=nm.clusterings[author]
return mp
def textosParticipante(ds,authors=None):
texts={}
if not authors:
authors=ds.author_messages
for author in authors:
texts[author]=""
for msg in ds.author_messages[author]:
msgid=msg[0]
text=ds.messages[msgid][-1]
texts[author]+=text
B.LANG+=[langid.classify(text)]
return texts
def makePCATable_(medidas_pca,table_dir,fname="pcaInline.tex",tag=None):
vecs=[i["pca"].feature_vec_.real for i in medidas_pca]
vals=[i["pca"].eig_values_.real for i in medidas_pca]
labelsh=[""]+["PC{}".format(i+1) for i in range(vecs[0].shape[1])]
labels=medidas_pca[0]["vlabels"]
labels=labels+[r"$\lambda$"]
data=[]
for secn in range(len(vecs[0])):
for vec in vecs:
data.append(vec[secn])
caption="PCA formation"
data=n.vstack(data+[val[:vecs[0].shape[1]] for val in vals])
labels_=[(i,"","","") for i in labels]
labels__=[i for j in labels_ for i in j]
labels__[1:4]=["(p.)","(i.)","(h.)"]
fname_=mkName(table_dir,fname,tag)
g.lTable(labels__,labelsh,data,caption,fname_,"textPCA")
ME(fname_[:-4],"\\bf",[(0,i) for i in range(1,6)]+[(i,0) for i in range(1,11)])
DL(fname_[:-4]+"_",[1,-6],[1],[2,3,4,
6,7,8,
10,11,12,
14,15,16,
18,19,20,
22,23,24,
26,27,28,
30,31,32,
34,35,36,
38,39,40])
def makePCATable(vecs,vals,labs,table_dir,fname="pcaInline.tex"):
labelsh=[""]+["PC{}".format(i+1) for i in range(vecs.shape[1])]
labels=labs+[r"$\lambda$"]
data=n.vstack((vecs,vals[:vecs.shape[1]]))
caption="PCA formation"
g.lTable(labels,labelsh,data,caption,table_dir+fname,"textGeral_")
ME(table_dir+fname[:-4],"\\bf",[(0,i) for i in range(1,6)]+[(i,0) for i in range(1,11)])
DL(table_dir+fname[:-4]+"_",[1,-3],[1],[2,3,5,7,8])
def tPCA(medidas,keys):
data=[]
for author in medidas:
data+=[[]]
for key in keys:
data[-1]+=[medidas[author][key]]
data_=n.array(data)
data__=data_.T
return g.pca.PCA(data__,final_dimensions=5)
from sklearn.feature_extraction.text import TfidfVectorizer
def tfIdf(texts):
"""Returns distance matrix for the texts"""
vect = TfidfVectorizer(min_df=1)
tfidf = vect.fit_transform([tt.lower() for tt in texts])
aa=(tfidf * tfidf.T).A
return aa
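# Sketch of the intended use (added for illustration):
#   tfIdf(["the cat sat", "the dog sat"]) returns a symmetric 2x2 matrix of TF-IDF
#   cosine similarities with ones on the diagonal; despite the docstring wording,
#   larger values mean the texts are more similar.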
def kolmogorovSmirnovDistance(seq1,seq2,bins=300):
"""Calculate distance between histograms
Adapted from the Kolmogorov-Smirnov test"""
amin=min(min(seq1),min(seq2))
amax=max(max(seq1),max(seq2))
bins=n.linspace(amin,amax,bins+1,endpoint=True)
h1=n.histogram(seq1,bins,density=True)[0]
h2=n.histogram(seq2,bins,density=True)[0]
space=bins[1]-bins[0]
cs1=n.cumsum(h1*space)
cs2=n.cumsum(h2*space)
dc=n.abs(cs1-cs2)
Dnn=max(dc)
n1=len(seq1)
n2=len(seq2)
fact=((n1+n2)/(n1*n2))**0.5
calpha=Dnn/fact
return calpha
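# Note added for clarity: the returned value is the supremum distance Dnn between the
# two empirical CDFs divided by sqrt((n1+n2)/(n1*n2)), i.e. the statistic that the
# two-sample Kolmogorov-Smirnov test compares against c(alpha); larger values indicate
# more dissimilar distributions.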
def uniteTables3(TDIR,tag):
    """joins each wn POS tag into one big table"""
tt="wnPOSInline2a","wnPOSInline2b","wnPOSInline2c","wnPOSInline2d",
fnames=[]
for pos in ("n","as","v","r"):
#for pos in ("n",):
fname=TDIR+"wnPOSInline2-{}-{}".format(pos,tag)
fnames=[]
for ttt in tt:
fnames+=[TDIR+ttt+"-{}-{}tag_".format(pos,tag)]
if os.path.isfile(fnames[1]+".tex"):
g.tableHelpers.vstackTables_(fnames[0],fnames[1],fname)
else:
shutil.copyfile(fnames[0]+".tex",fname+".tex")
if os.path.isfile(fnames[2]+".tex"):
g.tableHelpers.vstackTables_(fname,fnames[2],fname)
if os.path.isfile(fnames[3]+".tex"):
g.tableHelpers.vstackTables_(fname,fnames[3],fname)
def uniteTables2(TDIR,tag):
foo=TDIR+"posMerged{}".format(tag)
g.tableHelpers.vstackTables_(TDIR+"posInline{}_".format(tag),
TDIR+"wnPOSInline{}_".format(tag),foo)
def uniteTables(TDIR,tag):
t1="geral"#"geralInline0_"
t2="chars"
t3="tokensMerged"
t4="sentences"
t5="messages"
def makeN(ss):
if ss==t3:
return ss+"Inline{}".format(tag)
return ss+"Inline{}_".format(tag)
tt=[TDIR+makeN(i) for i in (t1,t2,t3,t4,t5)]
foo=TDIR+"mergedA{}".format(tag)
g.tableHelpers.vstackTables(tt[0],tt[1],foo)
g.tableHelpers.vstackTables(foo,tt[2],foo)
g.tableHelpers.vstackTables(foo,tt[3],foo)
g.tableHelpers.vstackTables(foo,tt[4],foo)
def makeTables_(lids,TOTAL,TDIR,FDIR,tags=None,offset=0,start_from=0,basedir="~/.gmane3/"):
# if not tags:
# tags=[str(i) for i in range(len(lids))]
# for lid,tag in zip(lids,tags):
# es=g.EmailStructures(lid,TOTAL,offset=offset,basedir=basedir)
# if sum([len(i)>4 for i in es.structs[-1].sectorialized_agents__])<3:
# B.degen.append(lid)
# continue
# isenglish = makeTable(lid,es,TOTAL,TDIR,FDIR,tag)
# if isenglish == 'nonenglish':
# B.nonenglish.append(lid)
B.degen=[]
B.nonenglish=[]
tag = 0
for lid in lids:
es=g.EmailStructures(lid,TOTAL,offset=offset,basedir=basedir)
if sum([len(i)>4 for i in es.structs[-1].sectorialized_agents__])<3:
B.degen.append(lid)
print("------- > > Degenerated structure <")
else:
isenglish = makeTable(lid,es,TOTAL,TDIR,FDIR,tag)
if isenglish == 'nonenglish':
B.nonenglish.append(lid)
else:
tag += 1
tags = list(range(tag))
lids_ = [i for i in lids if i not in B.degen and i not in
B.nonenglish]
labelsh = ('tag', 'gmane id')
labels = [str(i) for i in tags]
data = [[i] for i in lids_]
caption = 'Numerical tags with respective list ids used throughout tables in this supporting information document.'
fname_ = TDIR+'labelsIDs.tex'
g.tableHelpers.lTable(labels,labelsh,data,caption,fname_,"strings")
def makeTable(lid,es,TOTAL,TDIR,FDIR,tag,offset=0):
#TDIR="/home/r/repos/artigoTextoNasRedes/tables/"
#TDIRf="/home/r/repos/artigoTextoNasRedes/figs/"
ds=es.structs[1]
timest=es.structs[2]
pr=es.structs[-1]
nm=es.structs[4]
B.LANG=[]
B.tag=tag
ts,ncontractions,msg_ids=g.textUtils.makeText_(ds,pr); check("make text")
B.LANG+=[langid.classify(" ".join(ts))]
if B.LANG[-1][0] != 'en':
print("NON ENGLISH LIST", B.LANG[-1])
return 'nonenglish'
else:
print("IS ENGLISH!!!! <<<<<<<<<=================>>>>>>>>>>> !!!!!!!")
gmeasures=g.generalMeasures(ds,pr,timest)
g.makeGeneralTable(gmeasures,TDIR,tag=tag)
char_measures=g.textUtils.medidasLetras_(ts); check("medidas letras")
g.textUtils.makeCharTable(char_measures,TDIR,tag=tag)
tok_measures=g.textUtils.medidasTokens__(ts,ncontractions); check("medidas tokens")
g.textUtils.makeTokensTable(tok_measures,TDIR,tag=tag)
g.textUtils.makeTokenSizesTable(tok_measures,TDIR,tag=tag)
g.tableHelpers.vstackTables(TDIR+"tokensInline{}_".format(tag),TDIR+"tokenSizesInline{}_".format(tag),TDIR+"tokensMergedInline{}".format(tag))
sent_measures=g.textUtils.medidasSentencas_(ts); check("medidas sentenças")
g.textUtils.makeSentencesTable(sent_measures,TDIR,tag=tag)
msg_measures=g.textUtils.medidasMensagens_(ds,msg_ids); check("medidas mensagens")
g.textUtils.makeMessagesTable(msg_measures,TDIR,tag=tag)
g.textUtils.uniteTables(TDIR,tag)
pos_measures=g.textUtils.medidasPOS_([i["tokens_sentences"] for i in sent_measures]); check("medidas POS")
g.textUtils.makePOSTable(pos_measures,TDIR,tag=tag)
wn_measures=g.textUtils.medidasWordnet_([i["tags"] for i in pos_measures]); check("medidas wordnet")
    g.textUtils.makeWordnetPOSTable(wn_measures,TDIR ,tag=tag) # means and standard deviations of the attribute incidences
g.textUtils.uniteTables2(TDIR,tag)
wn_measures2_pos=g.textUtils.medidasWordnet2_POS(wn_measures); check("medidas wordnet 2")
    g.textUtils.makeWordnetTables2_POS(wn_measures2_pos,TDIR,tag=tag) # writes a file with all 5 tables for each pos
g.textUtils.uniteTables3(TDIR,tag)
sinais=g.textUtils.medidasSinais_(ts); check("medidas sinais")
dists=g.textUtils.ksAll(sinais,mkeys=["lens_tok","lens_word","lens_sent"]); check("ks sinais")
g.textUtils.makeKSTables(dists,TDIR,
fnames=("ksTokens","ksWords","ksSents"),
tags=("size of tokens","size of known words","size of sentences"),tag=tag)
sinais2=g.textUtils.medidasSinais2_(pos_measures,msg_measures); check("medidas sinais 2")
dists2=g.textUtils.ksAll(sinais2,mkeys=["adj","sub","pun","verb","chars"]); check("ks sinais 2")
g.textUtils.makeKSTables(dists2,TDIR,
fnames=("ksAdjs","ksSubs","ksPuns","ksVerbs","ksChars"),
tags=("use of adjectives on sentences","use of substantives on sentences","use of punctuations on sentences","use of verbs in each 100 tokens","use of number of characters in messages"),tag=tag)
    # Pearson and Spearman correlation (are both needed?)
    medidas_pca=g.textUtils.medidasPCA2_(ds,nm,pr.sectorialized_agents__); check("medidas pca") # returns measures for plotting and for tables
g.textUtils.makeCorrelationTable_(medidas_pca,TDIR,"correlationInline.tex",tag=tag)
g.textUtils.makePCATable_(medidas_pca,TDIR,tag=tag)
# medidas_pca[0]["pca"].plot("plot_pca-{}.png".format(tag),pr,labels="sym",tdir=FDIR)
es.structs=es.structs[1:]
ftags=[i["ftags"] for i in wn_measures]
LANG=B.LANG
mvars=("es","gmeasures","ts","ncontractions","msg_ids",
"char_measures","tok_measures","sent_measures",
"msg_measures","pos_measures","ftags",
"sinais","sinais2","dists2","medidas_pca","LANG","tag")
vdict={}; check("antes da escrita do pickle")
for mvar in mvars:
vdict[mvar] = locals()[mvar]
pDump(vdict,TDIR+"vdict-{}.pickle".format(tag))
check("escrito pickle, {}, {}".format(lid, TDIR))
del B.tag
return 0
| [
"gmaneLegacy.textUtils.uniteTables3",
"gmaneLegacy.textUtils.medidasSinais2_",
"gmaneLegacy.textUtils.medidasTokens__",
"gmaneLegacy.textUtils.makeMessagesTable",
"gmaneLegacy.textUtils.medidasWordnet2_POS",
"gmaneLegacy.textUtils.makeSentencesTable",
"gmaneLegacy.lTable",
"gmaneLegacy.textUtils.medidasLetras_",
"pickle.dump",
"gmaneLegacy.textUtils.medidasPCA2_",
"gmaneLegacy.textUtils.makeKSTables",
"gmaneLegacy.ksStatistics.kolmogorovSmirnovDistance__",
"sklearn.feature_extraction.text.TfidfVectorizer",
"nltk.corpus.words.words",
"builtins.me.append",
"gmaneLegacy.textUtils.medidasSentencas_",
"gmaneLegacy.makeGeneralTable",
"gmaneLegacy.textUtils.medidasWordnet2_",
"gmaneLegacy.tableHelpers.lTable",
"gmaneLegacy.textUtils.ksAll",
"gmaneLegacy.tableHelpers.vstackTables_",
"numpy.mean",
"builtins.tt.append",
"builtins.tt_.append",
"gmaneLegacy.textUtils.makeWordnetPOSTable",
"os.path.split",
"gmaneLegacy.tableHelpers.vstackTables",
"re.subn",
"gmaneLegacy.generalMeasures",
"gmaneLegacy.textUtils.medidasPCA",
"os.path.isfile",
"shutil.copyfile",
"gmaneLegacy.textUtils.uniteTables",
"numpy.std",
"builtins.nonenglish.append",
"gmaneLegacy.NetworkPartitioning",
"gmaneLegacy.textUtils.medidasWordnet_",
"gmaneLegacy.InteractionNetwork",
"numpy.cumsum",
"gmaneLegacy.textUtils.makeText_",
"re.compile",
"gmaneLegacy.EmailStructures",
"nltk.corpus.stopwords.words",
"numpy.linspace",
"gmaneLegacy.textUtils.makeCharTable",
"gmaneLegacy.textUtils.medidasSinais_",
"numpy.vstack",
"gmaneLegacy.textUtils.uniteTables2",
"collections.OrderedDict",
"gmaneLegacy.textUtils.medidasMensagens_",
"gmaneLegacy.pca.PCA",
"gmaneLegacy.textUtils.medidasPOS_",
"gmaneLegacy.LoadMessages",
"nltk.corpus.floresta.words",
"time.time",
"gmaneLegacy.textUtils.makePCATable_",
"gmaneLegacy.TimeStatistics",
"gmaneLegacy.textUtils.makeWordnetTables2_POS",
"gmaneLegacy.textUtils.makeCorrelationTable_",
"gmaneLegacy.writeTex",
"gmaneLegacy.textUtils.makeTokenSizesTable",
"numpy.array",
"gmaneLegacy.textUtils.tPCA",
"gmaneLegacy.ListDataStructures",
"numpy.histogram",
"nltk.sent_tokenize",
"gmaneLegacy.textUtils.makePOSTable",
"nltk.corpus.wordnet.synsets",
"numpy.abs",
"pickle.load",
"nltk.tokenize.wordpunct_tokenize",
"builtins.degen.append",
"gmaneLegacy.textUtils.makeTokensTable",
"os.path.join",
"collections.Counter",
"langid.classify",
"gmaneLegacy.NetworkMeasures"
] | [((262, 273), 'time.time', 'time.time', ([], {}), '()\n', (271, 273), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((512, 534), 'nltk.corpus.words.words', 'k.corpus.words.words', ([], {}), '()\n', (532, 534), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((591, 614), 'os.path.split', 'os.path.split', (['__file__'], {}), '(__file__)\n', (604, 614), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((926, 951), 'nltk.corpus.floresta.words', 'k.corpus.floresta.words', ([], {}), '()\n', (949, 951), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((1150, 1164), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1161, 1164), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((358, 369), 'time.time', 'time.time', ([], {}), '()\n', (367, 369), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((678, 713), 'os.path.join', 'os.path.join', (['this_dir', '"""words.txt"""'], {}), "(this_dir, 'words.txt')\n", (690, 713), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((980, 1015), 'nltk.corpus.stopwords.words', 'k.corpus.stopwords.words', (['"""english"""'], {}), "('english')\n", (1004, 1015), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((1032, 1070), 'nltk.corpus.stopwords.words', 'k.corpus.stopwords.words', (['"""portuguese"""'], {}), "('portuguese')\n", (1056, 1070), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((1079, 1131), 'os.path.join', 'os.path.join', (['this_dir', '"""pickledir/brill_taggerT2M1"""'], {}), "(this_dir, 'pickledir/brill_taggerT2M1')\n", (1091, 1131), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((7257, 7268), 'numpy.mean', 'n.mean', (['tls'], {}), '(tls)\n', (7263, 7268), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((7278, 7288), 'numpy.std', 'n.std', (['tls'], {}), '(tls)\n', (7283, 7288), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((9081, 9155), 'gmaneLegacy.tableHelpers.lTable', 'g.tableHelpers.lTable', (['labels', 'labelsh', 'data', 'caption', 'fname_', '"""textGeral"""'], {}), "(labels, labelsh, data, caption, fname_, 'textGeral')\n", (9102, 9155), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((10340, 10351), 'time.time', 'time.time', ([], {}), '()\n', (10349, 10351), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((10361, 10393), 'nltk.tokenize.wordpunct_tokenize', 'k.tokenize.wordpunct_tokenize', (['T'], {}), '(T)\n', (10390, 10393), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((10816, 10827), 'time.time', 'time.time', ([], {}), 
'()\n', (10825, 10827), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((10837, 10869), 'nltk.tokenize.wordpunct_tokenize', 'k.tokenize.wordpunct_tokenize', (['T'], {}), '(T)\n', (10866, 10869), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((11145, 11155), 'numpy.mean', 'n.mean', (['kw'], {}), '(kw)\n', (11151, 11155), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((11164, 11173), 'numpy.std', 'n.std', (['kw'], {}), '(kw)\n', (11169, 11173), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((11182, 11192), 'numpy.mean', 'n.mean', (['sw'], {}), '(sw)\n', (11188, 11192), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((11201, 11210), 'numpy.std', 'n.std', (['sw'], {}), '(sw)\n', (11206, 11210), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((12402, 12419), 'numpy.array', 'n.array', (['data[1:]'], {}), '(data[1:])\n', (12409, 12419), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((12429, 12462), 'numpy.vstack', 'n.vstack', (['(nsents, nsents_, data)'], {}), '((nsents, nsents_, data))\n', (12437, 12462), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((12504, 12565), 'gmaneLegacy.lTable', 'g.lTable', (['labels', 'labelsh', 'data', 'caption', 'fname_', '"""textGeral"""'], {}), "(labels, labelsh, data, caption, fname_, 'textGeral')\n", (12512, 12565), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((13697, 13759), 'gmaneLegacy.lTable', 'g.lTable', (['labels', 'labelsh', 'data', 'caption', 'fname_', '"""textGeral_"""'], {}), "(labels, labelsh, data, caption, fname_, 'textGeral_')\n", (13705, 13759), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((14815, 14832), 'numpy.array', 'n.array', (['data[1:]'], {}), '(data[1:])\n', (14822, 14832), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((14842, 14879), 'numpy.vstack', 'n.vstack', (['(ntoks, ntoks_, data * 100)'], {}), '((ntoks, ntoks_, data * 100))\n', (14850, 14879), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((14919, 14980), 'gmaneLegacy.lTable', 'g.lTable', (['labels', 'labelsh', 'data', 'caption', 'fname_', '"""textGeral"""'], {}), "(labels, labelsh, data, caption, fname_, 'textGeral')\n", (14927, 14980), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((15936, 15968), 'nltk.tokenize.wordpunct_tokenize', 'k.tokenize.wordpunct_tokenize', (['T'], {}), '(T)\n', (15965, 15968), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((17869, 17901), 'nltk.tokenize.wordpunct_tokenize', 'k.tokenize.wordpunct_tokenize', (['T'], {}), '(T)\n', (17898, 17901), True, 'import gmaneLegacy as g, time, numpy as n, 
re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((19103, 19114), 'time.time', 'time.time', ([], {}), '()\n', (19112, 19114), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((19124, 19156), 'nltk.tokenize.wordpunct_tokenize', 'k.tokenize.wordpunct_tokenize', (['T'], {}), '(T)\n', (19153, 19156), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((19903, 19914), 'time.time', 'time.time', ([], {}), '()\n', (19912, 19914), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((20079, 20090), 'time.time', 'time.time', ([], {}), '()\n', (20088, 20090), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((20337, 20348), 'time.time', 'time.time', ([], {}), '()\n', (20346, 20348), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((20498, 20509), 'time.time', 'time.time', ([], {}), '()\n', (20507, 20509), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((20666, 20677), 'time.time', 'time.time', ([], {}), '()\n', (20675, 20677), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((20875, 20886), 'time.time', 'time.time', ([], {}), '()\n', (20884, 20886), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((21038, 21049), 'time.time', 'time.time', ([], {}), '()\n', (21047, 21049), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((22106, 22123), 'numpy.array', 'n.array', (['data[1:]'], {}), '(data[1:])\n', (22113, 22123), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((22133, 22172), 'numpy.vstack', 'n.vstack', (['(nchars, nchars_, data * 100)'], {}), '((nchars, nchars_, data * 100))\n', (22141, 22172), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((22212, 22273), 'gmaneLegacy.lTable', 'g.lTable', (['labels', 'labelsh', 'data', 'caption', 'fname_', '"""textGeral"""'], {}), "(labels, labelsh, data, caption, fname_, 'textGeral')\n", (22220, 22273), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((23171, 23183), 'numpy.mean', 'n.mean', (['toks'], {}), '(toks)\n', (23177, 23183), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((23193, 23204), 'numpy.std', 'n.std', (['toks'], {}), '(toks)\n', (23198, 23204), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((23215, 23228), 'numpy.mean', 'n.mean', (['toks_'], {}), '(toks_)\n', (23221, 23228), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((23240, 23252), 'numpy.std', 'n.std', (['toks_'], {}), '(toks_)\n', (23245, 23252), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, 
shutil\n'), ((24382, 24444), 'gmaneLegacy.lTable', 'g.lTable', (['labels__', 'labelsh', 'data', 'caption', 'fname_', '"""textCorr"""'], {}), "(labels__, labelsh, data, caption, fname_, 'textCorr')\n", (24390, 24444), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((25519, 25590), 'gmaneLegacy.lTable', 'g.lTable', (['labels', 'labelsh', 'data', 'caption', '(table_dir + fname)', '"""textCorr"""'], {}), "(labels, labelsh, data, caption, table_dir + fname, 'textCorr')\n", (25527, 25590), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((26898, 26915), 'numpy.array', 'n.array', (['data[1:]'], {}), '(data[1:])\n', (26905, 26915), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((26925, 26956), 'numpy.vstack', 'n.vstack', (['(nmsgs, nmsgs_, data)'], {}), '((nmsgs, nmsgs_, data))\n', (26933, 26956), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((26998, 27059), 'gmaneLegacy.lTable', 'g.lTable', (['labels', 'labelsh', 'data', 'caption', 'fname_', '"""textGeral"""'], {}), "(labels, labelsh, data, caption, fname_, 'textGeral')\n", (27006, 27059), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((28796, 28814), 'nltk.sent_tokenize', 'k.sent_tokenize', (['T'], {}), '(T)\n', (28811, 28814), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((30031, 30049), 'nltk.sent_tokenize', 'k.sent_tokenize', (['T'], {}), '(T)\n', (30046, 30049), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((30146, 30157), 'numpy.mean', 'n.mean', (['tTS'], {}), '(tTS)\n', (30152, 30157), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((30169, 30179), 'numpy.std', 'n.std', (['tTS'], {}), '(tTS)\n', (30174, 30179), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((30360, 30372), 'numpy.mean', 'n.mean', (['tsTS'], {}), '(tsTS)\n', (30366, 30372), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((30385, 30396), 'numpy.std', 'n.std', (['tsTS'], {}), '(tsTS)\n', (30390, 30396), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((30560, 30574), 'numpy.mean', 'n.mean', (['tsTSkw'], {}), '(tsTSkw)\n', (30566, 30574), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((30589, 30602), 'numpy.std', 'n.std', (['tsTSkw'], {}), '(tsTSkw)\n', (30594, 30602), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((30801, 30815), 'numpy.mean', 'n.mean', (['tsTSpv'], {}), '(tsTSpv)\n', (30807, 30815), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((30830, 30843), 'numpy.std', 'n.std', (['tsTSpv'], {}), '(tsTSpv)\n', (30835, 30843), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, 
pickle, os, langid, shutil\n'), ((31386, 31397), 'numpy.mean', 'n.mean', (['tmT'], {}), '(tmT)\n', (31392, 31397), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((31407, 31417), 'numpy.std', 'n.std', (['tmT'], {}), '(tmT)\n', (31412, 31417), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((31428, 31440), 'numpy.mean', 'n.mean', (['ttmT'], {}), '(ttmT)\n', (31434, 31440), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((31451, 31462), 'numpy.std', 'n.std', (['ttmT'], {}), '(ttmT)\n', (31456, 31462), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((31473, 31485), 'numpy.mean', 'n.mean', (['tsmT'], {}), '(tsmT)\n', (31479, 31485), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((31496, 31507), 'numpy.std', 'n.std', (['tsmT'], {}), '(tsmT)\n', (31501, 31507), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((32763, 32780), 'collections.Counter', 'c.Counter', (['tags__'], {}), '(tags__)\n', (32772, 32780), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((32793, 32808), 'collections.OrderedDict', 'c.OrderedDict', ([], {}), '()\n', (32806, 32808), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((36941, 37003), 'gmaneLegacy.lTable', 'g.lTable', (['labels', 'labelsh', 'data', 'caption', 'fname_', '"""textGeral_"""'], {}), "(labels, labelsh, data, caption, fname_, 'textGeral_')\n", (36949, 37003), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((38604, 38641), 'gmaneLegacy.writeTex', 'g.writeTex', (['tx', "(TDIR + fname + '.tex')"], {}), "(tx, TDIR + fname + '.tex')\n", (38614, 38641), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((40051, 40124), 'gmaneLegacy.lTable', 'g.lTable', (['labels', 'labelsh', 'data', 'caption', '(table_dir + fname)', '"""textGeral_"""'], {}), "(labels, labelsh, data, caption, table_dir + fname, 'textGeral_')\n", (40059, 40124), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((41597, 41659), 'gmaneLegacy.lTable', 'g.lTable', (['labels', 'labelsh', 'data', 'caption', 'fname_', '"""textGeral_"""'], {}), "(labels, labelsh, data, caption, fname_, 'textGeral_')\n", (41605, 41659), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((47274, 47310), 'gmaneLegacy.textUtils.medidasPCA', 'g.textUtils.medidasPCA', (['medidasP', 'nm'], {}), '(medidasP, nm)\n', (47296, 47310), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((47458, 47496), 'gmaneLegacy.textUtils.tPCA', 'g.textUtils.tPCA', (['medidas_autor', 'vkeys'], {}), '(medidas_autor, vkeys)\n', (47474, 47496), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((48940, 48997), 'numpy.vstack', 
'n.vstack', (['(data + [val[:vecs[0].shape[1]] for val in vals])'], {}), '(data + [val[:vecs[0].shape[1]] for val in vals])\n', (48948, 48997), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((49168, 49229), 'gmaneLegacy.lTable', 'g.lTable', (['labels__', 'labelsh', 'data', 'caption', 'fname_', '"""textPCA"""'], {}), "(labels__, labelsh, data, caption, fname_, 'textPCA')\n", (49176, 49229), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((49983, 50021), 'numpy.vstack', 'n.vstack', (['(vecs, vals[:vecs.shape[1]])'], {}), '((vecs, vals[:vecs.shape[1]]))\n', (49991, 50021), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((50053, 50126), 'gmaneLegacy.lTable', 'g.lTable', (['labels', 'labelsh', 'data', 'caption', '(table_dir + fname)', '"""textGeral_"""'], {}), "(labels, labelsh, data, caption, table_dir + fname, 'textGeral_')\n", (50061, 50126), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((50431, 50444), 'numpy.array', 'n.array', (['data'], {}), '(data)\n', (50438, 50444), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((50475, 50512), 'gmaneLegacy.pca.PCA', 'g.pca.PCA', (['data__'], {'final_dimensions': '(5)'}), '(data__, final_dimensions=5)\n', (50484, 50512), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((50651, 50676), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'min_df': '(1)'}), '(min_df=1)\n', (50666, 50676), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((51005, 51052), 'numpy.linspace', 'n.linspace', (['amin', 'amax', '(bins + 1)'], {'endpoint': '(True)'}), '(amin, amax, bins + 1, endpoint=True)\n', (51015, 51052), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((51174, 51194), 'numpy.cumsum', 'n.cumsum', (['(h1 * space)'], {}), '(h1 * space)\n', (51182, 51194), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((51201, 51221), 'numpy.cumsum', 'n.cumsum', (['(h2 * space)'], {}), '(h2 * space)\n', (51209, 51221), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((51228, 51244), 'numpy.abs', 'n.abs', (['(cs1 - cs2)'], {}), '(cs1 - cs2)\n', (51233, 51244), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((52679, 52725), 'gmaneLegacy.tableHelpers.vstackTables', 'g.tableHelpers.vstackTables', (['tt[0]', 'tt[1]', 'foo'], {}), '(tt[0], tt[1], foo)\n', (52706, 52725), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((52728, 52772), 'gmaneLegacy.tableHelpers.vstackTables', 'g.tableHelpers.vstackTables', (['foo', 'tt[2]', 'foo'], {}), '(foo, tt[2], foo)\n', (52755, 52772), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((52775, 52819), 'gmaneLegacy.tableHelpers.vstackTables', 'g.tableHelpers.vstackTables', (['foo', 'tt[3]', 'foo'], {}), '(foo, 
tt[3], foo)\n', (52802, 52819), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((52822, 52866), 'gmaneLegacy.tableHelpers.vstackTables', 'g.tableHelpers.vstackTables', (['foo', 'tt[4]', 'foo'], {}), '(foo, tt[4], foo)\n', (52849, 52866), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((54300, 54372), 'gmaneLegacy.tableHelpers.lTable', 'g.tableHelpers.lTable', (['labels', 'labelsh', 'data', 'caption', 'fname_', '"""strings"""'], {}), "(labels, labelsh, data, caption, fname_, 'strings')\n", (54321, 54372), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((54675, 54704), 'gmaneLegacy.textUtils.makeText_', 'g.textUtils.makeText_', (['ds', 'pr'], {}), '(ds, pr)\n', (54696, 54704), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((54975, 55008), 'gmaneLegacy.generalMeasures', 'g.generalMeasures', (['ds', 'pr', 'timest'], {}), '(ds, pr, timest)\n', (54992, 55008), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((55011, 55055), 'gmaneLegacy.makeGeneralTable', 'g.makeGeneralTable', (['gmeasures', 'TDIR'], {'tag': 'tag'}), '(gmeasures, TDIR, tag=tag)\n', (55029, 55055), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((55073, 55103), 'gmaneLegacy.textUtils.medidasLetras_', 'g.textUtils.medidasLetras_', (['ts'], {}), '(ts)\n', (55099, 55103), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((55133, 55188), 'gmaneLegacy.textUtils.makeCharTable', 'g.textUtils.makeCharTable', (['char_measures', 'TDIR'], {'tag': 'tag'}), '(char_measures, TDIR, tag=tag)\n', (55158, 55188), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((55209, 55255), 'gmaneLegacy.textUtils.medidasTokens__', 'g.textUtils.medidasTokens__', (['ts', 'ncontractions'], {}), '(ts, ncontractions)\n', (55236, 55255), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((55284, 55340), 'gmaneLegacy.textUtils.makeTokensTable', 'g.textUtils.makeTokensTable', (['tok_measures', 'TDIR'], {'tag': 'tag'}), '(tok_measures, TDIR, tag=tag)\n', (55311, 55340), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((55343, 55403), 'gmaneLegacy.textUtils.makeTokenSizesTable', 'g.textUtils.makeTokenSizesTable', (['tok_measures', 'TDIR'], {'tag': 'tag'}), '(tok_measures, TDIR, tag=tag)\n', (55374, 55403), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((55572, 55605), 'gmaneLegacy.textUtils.medidasSentencas_', 'g.textUtils.medidasSentencas_', (['ts'], {}), '(ts)\n', (55601, 55605), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((55638, 55698), 'gmaneLegacy.textUtils.makeSentencesTable', 'g.textUtils.makeSentencesTable', (['sent_measures', 'TDIR'], {'tag': 'tag'}), '(sent_measures, TDIR, tag=tag)\n', (55668, 55698), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, 
collections as c, string, pickle, os, langid, shutil\n'), ((55719, 55761), 'gmaneLegacy.textUtils.medidasMensagens_', 'g.textUtils.medidasMensagens_', (['ds', 'msg_ids'], {}), '(ds, msg_ids)\n', (55748, 55761), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((55793, 55851), 'gmaneLegacy.textUtils.makeMessagesTable', 'g.textUtils.makeMessagesTable', (['msg_measures', 'TDIR'], {'tag': 'tag'}), '(msg_measures, TDIR, tag=tag)\n', (55822, 55851), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((55855, 55889), 'gmaneLegacy.textUtils.uniteTables', 'g.textUtils.uniteTables', (['TDIR', 'tag'], {}), '(TDIR, tag)\n', (55878, 55889), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((55911, 55982), 'gmaneLegacy.textUtils.medidasPOS_', 'g.textUtils.medidasPOS_', (["[i['tokens_sentences'] for i in sent_measures]"], {}), "([i['tokens_sentences'] for i in sent_measures])\n", (55934, 55982), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((56009, 56062), 'gmaneLegacy.textUtils.makePOSTable', 'g.textUtils.makePOSTable', (['pos_measures', 'TDIR'], {'tag': 'tag'}), '(pos_measures, TDIR, tag=tag)\n', (56033, 56062), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((56082, 56144), 'gmaneLegacy.textUtils.medidasWordnet_', 'g.textUtils.medidasWordnet_', (["[i['tags'] for i in pos_measures]"], {}), "([i['tags'] for i in pos_measures])\n", (56109, 56144), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((56175, 56234), 'gmaneLegacy.textUtils.makeWordnetPOSTable', 'g.textUtils.makeWordnetPOSTable', (['wn_measures', 'TDIR'], {'tag': 'tag'}), '(wn_measures, TDIR, tag=tag)\n', (56206, 56234), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((56288, 56323), 'gmaneLegacy.textUtils.uniteTables2', 'g.textUtils.uniteTables2', (['TDIR', 'tag'], {}), '(TDIR, tag)\n', (56312, 56323), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((56349, 56393), 'gmaneLegacy.textUtils.medidasWordnet2_POS', 'g.textUtils.medidasWordnet2_POS', (['wn_measures'], {}), '(wn_measures)\n', (56380, 56393), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((56426, 56493), 'gmaneLegacy.textUtils.makeWordnetTables2_POS', 'g.textUtils.makeWordnetTables2_POS', (['wn_measures2_pos', 'TDIR'], {'tag': 'tag'}), '(wn_measures2_pos, TDIR, tag=tag)\n', (56460, 56493), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((56552, 56587), 'gmaneLegacy.textUtils.uniteTables3', 'g.textUtils.uniteTables3', (['TDIR', 'tag'], {}), '(TDIR, tag)\n', (56576, 56587), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((56599, 56629), 'gmaneLegacy.textUtils.medidasSinais_', 'g.textUtils.medidasSinais_', (['ts'], {}), '(ts)\n', (56625, 56629), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((56665, 
56736), 'gmaneLegacy.textUtils.ksAll', 'g.textUtils.ksAll', (['sinais'], {'mkeys': "['lens_tok', 'lens_word', 'lens_sent']"}), "(sinais, mkeys=['lens_tok', 'lens_word', 'lens_sent'])\n", (56682, 56736), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((56758, 56924), 'gmaneLegacy.textUtils.makeKSTables', 'g.textUtils.makeKSTables', (['dists', 'TDIR'], {'fnames': "('ksTokens', 'ksWords', 'ksSents')", 'tags': "('size of tokens', 'size of known words', 'size of sentences')", 'tag': 'tag'}), "(dists, TDIR, fnames=('ksTokens', 'ksWords',\n 'ksSents'), tags=('size of tokens', 'size of known words',\n 'size of sentences'), tag=tag)\n", (56782, 56924), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((56948, 57003), 'gmaneLegacy.textUtils.medidasSinais2_', 'g.textUtils.medidasSinais2_', (['pos_measures', 'msg_measures'], {}), '(pos_measures, msg_measures)\n', (56975, 57003), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((57041, 57113), 'gmaneLegacy.textUtils.ksAll', 'g.textUtils.ksAll', (['sinais2'], {'mkeys': "['adj', 'sub', 'pun', 'verb', 'chars']"}), "(sinais2, mkeys=['adj', 'sub', 'pun', 'verb', 'chars'])\n", (57058, 57113), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((57135, 57450), 'gmaneLegacy.textUtils.makeKSTables', 'g.textUtils.makeKSTables', (['dists2', 'TDIR'], {'fnames': "('ksAdjs', 'ksSubs', 'ksPuns', 'ksVerbs', 'ksChars')", 'tags': "('use of adjectives on sentences', 'use of substantives on sentences',\n 'use of punctuations on sentences', 'use of verbs in each 100 tokens',\n 'use of number of characters in messages')", 'tag': 'tag'}), "(dists2, TDIR, fnames=('ksAdjs', 'ksSubs', 'ksPuns',\n 'ksVerbs', 'ksChars'), tags=('use of adjectives on sentences',\n 'use of substantives on sentences', 'use of punctuations on sentences',\n 'use of verbs in each 100 tokens',\n 'use of number of characters in messages'), tag=tag)\n", (57159, 57450), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((57530, 57589), 'gmaneLegacy.textUtils.medidasPCA2_', 'g.textUtils.medidasPCA2_', (['ds', 'nm', 'pr.sectorialized_agents__'], {}), '(ds, nm, pr.sectorialized_agents__)\n', (57554, 57589), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((57654, 57744), 'gmaneLegacy.textUtils.makeCorrelationTable_', 'g.textUtils.makeCorrelationTable_', (['medidas_pca', 'TDIR', '"""correlationInline.tex"""'], {'tag': 'tag'}), "(medidas_pca, TDIR,\n 'correlationInline.tex', tag=tag)\n", (57687, 57744), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((57742, 57795), 'gmaneLegacy.textUtils.makePCATable_', 'g.textUtils.makePCATable_', (['medidas_pca', 'TDIR'], {'tag': 'tag'}), '(medidas_pca, TDIR, tag=tag)\n', (57767, 57795), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((2006, 2033), 'pickle.dump', 'pickle.dump', (['tobject', 'f', '(-1)'], {}), '(tobject, f, -1)\n', (2017, 2033), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((2254, 2265), 
'time.time', 'time.time', ([], {}), '()\n', (2263, 2265), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((2317, 2377), 'gmaneLegacy.LoadMessages', 'g.LoadMessages', (['lid', 'TOTAL_M'], {'offset': 'offset', 'basedir': 'basedir'}), '(lid, TOTAL_M, offset=offset, basedir=basedir)\n', (2331, 2377), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((2387, 2423), 'gmaneLegacy.ListDataStructures', 'g.ListDataStructures', (['lm'], {'text': '"""yes"""'}), "(lm, text='yes')\n", (2407, 2423), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((2526, 2537), 'time.time', 'time.time', ([], {}), '()\n', (2535, 2537), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((2615, 2626), 'time.time', 'time.time', ([], {}), '()\n', (2624, 2626), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((2638, 2658), 'gmaneLegacy.TimeStatistics', 'g.TimeStatistics', (['ds'], {}), '(ds)\n', (2654, 2658), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((2737, 2748), 'time.time', 'time.time', ([], {}), '()\n', (2746, 2748), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((2760, 2784), 'gmaneLegacy.InteractionNetwork', 'g.InteractionNetwork', (['ds'], {}), '(ds)\n', (2780, 2784), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((2796, 2840), 'gmaneLegacy.NetworkMeasures', 'g.NetworkMeasures', (['iN'], {'exclude': "['rich_club']"}), "(iN, exclude=['rich_club'])\n", (2813, 2840), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((2913, 2924), 'time.time', 'time.time', ([], {}), '()\n', (2922, 2924), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((3286, 3297), 'time.time', 'time.time', ([], {}), '()\n', (3295, 3297), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((7121, 7130), 'numpy.mean', 'n.mean', (['i'], {}), '(i)\n', (7127, 7130), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((7165, 7173), 'numpy.std', 'n.std', (['i'], {}), '(i)\n', (7170, 7173), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((22972, 22988), 'numpy.mean', 'n.mean', (['medidas_'], {}), '(medidas_)\n', (22978, 22988), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((22989, 23004), 'numpy.std', 'n.std', (['medidas_'], {}), '(medidas_)\n', (22994, 23004), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((27463, 27495), 'nltk.tokenize.wordpunct_tokenize', 'k.tokenize.wordpunct_tokenize', (['t'], {}), '(t)\n', (27492, 27495), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, 
shutil\n'), ((27837, 27855), 'nltk.sent_tokenize', 'k.sent_tokenize', (['t'], {}), '(t)\n', (27852, 27855), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((28837, 28869), 'nltk.tokenize.wordpunct_tokenize', 'k.tokenize.wordpunct_tokenize', (['i'], {}), '(i)\n', (28866, 28869), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((30252, 30284), 'nltk.tokenize.wordpunct_tokenize', 'k.tokenize.wordpunct_tokenize', (['i'], {}), '(i)\n', (30281, 30284), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((33969, 33986), 'builtins.me.append', 'B.me.append', (['wms_'], {}), '(wms_)\n', (33980, 33986), True, 'import builtins as B\n'), ((33995, 34012), 'builtins.tt_.append', 'B.tt_.append', (['tt_'], {}), '(tt_)\n', (34007, 34012), True, 'import builtins as B\n'), ((34021, 34036), 'builtins.tt.append', 'B.tt.append', (['tt'], {}), '(tt)\n', (34032, 34036), True, 'import builtins as B\n'), ((34050, 34063), 'numpy.array', 'n.array', (['wms_'], {}), '(wms_)\n', (34057, 34063), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((34216, 34280), 'gmaneLegacy.lTable', 'g.lTable', (['labels', 'labelsh', 'data', 'caption', 'tabfname', '"""textGeral_"""'], {}), "(labels, labelsh, data, caption, tabfname, 'textGeral_')\n", (34224, 34280), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((37218, 37264), 'gmaneLegacy.textUtils.medidasWordnet2_', 'g.textUtils.medidasWordnet2_', (['wn_measures', 'pos'], {}), '(wn_measures, pos)\n', (37246, 37264), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((41998, 42015), 'nltk.corpus.wordnet.synsets', 'wn.synsets', (['wt[0]'], {}), '(wt[0])\n', (42008, 42015), True, 'from nltk.corpus import wordnet as wn\n'), ((51055, 51092), 'numpy.histogram', 'n.histogram', (['seq1', 'bins'], {'density': '(True)'}), '(seq1, bins, density=True)\n', (51066, 51092), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((51101, 51138), 'numpy.histogram', 'n.histogram', (['seq2', 'bins'], {'density': '(True)'}), '(seq2, bins, density=True)\n', (51112, 51138), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((51755, 51789), 'os.path.isfile', 'os.path.isfile', (["(fnames[1] + '.tex')"], {}), "(fnames[1] + '.tex')\n", (51769, 51789), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((51941, 51975), 'os.path.isfile', 'os.path.isfile', (["(fnames[2] + '.tex')"], {}), "(fnames[2] + '.tex')\n", (51955, 51975), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((52050, 52084), 'os.path.isfile', 'os.path.isfile', (["(fnames[3] + '.tex')"], {}), "(fnames[3] + '.tex')\n", (52064, 52084), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((53491, 53552), 'gmaneLegacy.EmailStructures', 'g.EmailStructures', (['lid', 'TOTAL'], {'offset': 'offset', 'basedir': 'basedir'}), '(lid, TOTAL, offset=offset, 
basedir=basedir)\n', (53508, 53552), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((338, 349), 'time.time', 'time.time', ([], {}), '()\n', (347, 349), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((1810, 1835), 're.subn', 're.subn', (['pattern', 'repl', 's'], {}), '(pattern, repl, s)\n', (1817, 1835), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((3150, 3183), 'gmaneLegacy.NetworkPartitioning', 'g.NetworkPartitioning', (['nm', '(2)', '"""g"""'], {}), "(nm, 2, 'g')\n", (3171, 3183), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((16112, 16130), 'nltk.sent_tokenize', 'k.sent_tokenize', (['T'], {}), '(T)\n', (16127, 16130), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((19729, 19742), 'nltk.corpus.wordnet.synsets', 'wn.synsets', (['i'], {}), '(i)\n', (19739, 19742), True, 'from nltk.corpus import wordnet as wn\n'), ((19883, 19894), 'time.time', 'time.time', ([], {}), '()\n', (19892, 19894), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((20059, 20070), 'time.time', 'time.time', ([], {}), '()\n', (20068, 20070), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((20317, 20328), 'time.time', 'time.time', ([], {}), '()\n', (20326, 20328), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((20478, 20489), 'time.time', 'time.time', ([], {}), '()\n', (20487, 20489), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((20646, 20657), 'time.time', 'time.time', ([], {}), '()\n', (20655, 20657), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((20855, 20866), 'time.time', 'time.time', ([], {}), '()\n', (20864, 20866), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((21018, 21029), 'time.time', 'time.time', ([], {}), '()\n', (21027, 21029), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((31261, 31293), 'nltk.tokenize.wordpunct_tokenize', 'k.tokenize.wordpunct_tokenize', (['t'], {}), '(t)\n', (31290, 31293), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((31331, 31349), 'nltk.sent_tokenize', 'k.sent_tokenize', (['t'], {}), '(t)\n', (31346, 31349), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((38491, 38512), 'os.path.isfile', 'os.path.isfile', (['name_'], {}), '(name_)\n', (38505, 38512), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((46501, 46522), 'numpy.mean', 'n.mean', (['locals_[mvar]'], {}), '(locals_[mvar])\n', (46507, 46522), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((46551, 46571), 
'numpy.std', 'n.std', (['locals_[mvar]'], {}), '(locals_[mvar])\n', (46556, 46571), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((51801, 51858), 'gmaneLegacy.tableHelpers.vstackTables_', 'g.tableHelpers.vstackTables_', (['fnames[0]', 'fnames[1]', 'fname'], {}), '(fnames[0], fnames[1], fname)\n', (51829, 51858), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((51883, 51934), 'shutil.copyfile', 'shutil.copyfile', (["(fnames[0] + '.tex')", "(fname + '.tex')"], {}), "(fnames[0] + '.tex', fname + '.tex')\n", (51898, 51934), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((51987, 52040), 'gmaneLegacy.tableHelpers.vstackTables_', 'g.tableHelpers.vstackTables_', (['fname', 'fnames[2]', 'fname'], {}), '(fname, fnames[2], fname)\n', (52015, 52040), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((52096, 52149), 'gmaneLegacy.tableHelpers.vstackTables_', 'g.tableHelpers.vstackTables_', (['fname', 'fnames[3]', 'fname'], {}), '(fname, fnames[3], fname)\n', (52124, 52149), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((53639, 53658), 'builtins.degen.append', 'B.degen.append', (['lid'], {}), '(lid)\n', (53653, 53658), True, 'import builtins as B\n'), ((1620, 1637), 're.compile', 're.compile', (['regex'], {}), '(regex)\n', (1630, 1637), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((16455, 16531), 'gmaneLegacy.ksStatistics.kolmogorovSmirnovDistance__', 'g.ksStatistics.kolmogorovSmirnovDistance__', (['sigDict[i][key]', 'sigDict[j][key]'], {}), '(sigDict[i][key], sigDict[j][key])\n', (16497, 16531), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((48423, 48444), 'langid.classify', 'langid.classify', (['text'], {}), '(text)\n', (48438, 48444), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((53856, 53880), 'builtins.nonenglish.append', 'B.nonenglish.append', (['lid'], {}), '(lid)\n', (53875, 53880), True, 'import builtins as B\n'), ((2716, 2727), 'time.time', 'time.time', ([], {}), '()\n', (2725, 2727), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((2892, 2903), 'time.time', 'time.time', ([], {}), '()\n', (2901, 2903), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((3265, 3276), 'time.time', 'time.time', ([], {}), '()\n', (3274, 3276), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((24554, 24567), 'numpy.array', 'n.array', (['data'], {}), '(data)\n', (24561, 24567), True, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((2505, 2516), 'time.time', 'time.time', ([], {}), '()\n', (2514, 2516), False, 'import gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n'), ((2594, 2605), 'time.time', 'time.time', ([], {}), '()\n', (2603, 2605), False, 'import 
gmaneLegacy as g, time, numpy as n, re, nltk as k, collections as c, string, pickle, os, langid, shutil\n')] |
# coding=utf-8
"""
"""
import os
import unittest
import shutil
from md_utils.rename_files import main
from md_utils.md_common import (capture_stdout, capture_stderr, silent_remove)
import logging
# logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
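# when debug logging is enabled, keep the files created by the tests so they can be inspected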
DISABLE_REMOVE = logger.isEnabledFor(logging.DEBUG)
__author__ = 'hmayes'
# Directories #
DATA_DIR = os.path.join(os.path.dirname(__file__), 'test_data')
SUB_DATA_DIR = os.path.join(DATA_DIR, 'rename_files')
# Files #
SMALL_FILE = os.path.join(SUB_DATA_DIR, 'small_file.txt')
# test data #
TEST_FILE_NAMES = ['has space.txt', 'has two spaces.txt', 'now!exclaim.txt']
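# expected results: spaces removed by the default pattern, and '!' replaced with '_' via the -p/-n options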
REPLACED_FILE_NAMES1 = ['hasspace.txt', 'hastwospaces.txt', 'now!exclaim.txt']
REPLACED_FILE_NAMES2 = ['has space.txt', 'has two spaces.txt', 'now_exclaim.txt']
# REPLACED_FILE_NAMES3 = ['has_space.txt', 'has_two_spaces.txt', 'now!exclaim.txt']
def make_files(fname_list):
"""
    Create the files fresh each run, because they will be moved when the program runs
@param fname_list: list of file names without directory name
"""
for fname in fname_list:
new_file = os.path.join(SUB_DATA_DIR, fname)
shutil.copyfile(SMALL_FILE, new_file)
def add_sub_dir(fname_list, abs_dir):
"""
    Prepend the specified absolute directory to each file name
@param fname_list: list of file names without directory name
@param abs_dir: absolute directory name
@return full_name_list: a list of file names with the specified absolute directory
"""
full_name_list = []
for fname in fname_list:
full_name_list.append(os.path.join(abs_dir, fname))
return full_name_list
def count_files(fname_list):
"""
    Counts how many of the files in the list currently exist
    @param fname_list: list of file names
    @return num_existing_files: the number of files in the list that exist
"""
num_existing_files = 0
for fname in fname_list:
if os.path.isfile(fname):
num_existing_files += 1
return num_existing_files
class TestRenameNoOutput(unittest.TestCase):
def testHelp(self):
test_input = ['-h']
if logger.isEnabledFor(logging.DEBUG):
main(test_input)
with capture_stderr(main, test_input) as output:
self.assertFalse(output)
with capture_stdout(main, test_input) as output:
self.assertTrue("optional arguments" in output)
def testInvalidArg(self):
test_input = ['-@']
if logger.isEnabledFor(logging.DEBUG):
main(test_input)
with capture_stderr(main, test_input) as output:
self.assertTrue("unrecognized arguments" in output)
class TestRename(unittest.TestCase):
def testNoFilesRenamed(self):
test_input = []
if logger.isEnabledFor(logging.DEBUG):
main(test_input)
with capture_stdout(main, test_input) as output:
self.assertTrue("Found and renamed 0 files" in output)
def testDefaultPatterns(self):
make_files(TEST_FILE_NAMES)
test_input = ["-d", SUB_DATA_DIR]
initial_fnames = add_sub_dir(TEST_FILE_NAMES, SUB_DATA_DIR)
expected_fnames = add_sub_dir(REPLACED_FILE_NAMES1, SUB_DATA_DIR)
try:
if logger.isEnabledFor(logging.DEBUG):
main(test_input)
            # recreate the files before capturing stdout, since main() has already renamed them
make_files(TEST_FILE_NAMES)
with capture_stdout(main, test_input) as output:
self.assertTrue("Found and renamed 2 files" in output)
self.assertTrue(count_files(initial_fnames), 2)
self.assertTrue(count_files(expected_fnames), 3)
finally:
for fname in expected_fnames:
silent_remove(fname, disable=DISABLE_REMOVE)
def testAltPattern(self):
make_files(TEST_FILE_NAMES)
test_input = ["-d", SUB_DATA_DIR, "-p", "!", "-n", "_"]
initial_fnames = add_sub_dir(TEST_FILE_NAMES, SUB_DATA_DIR)
expected_fnames = add_sub_dir(REPLACED_FILE_NAMES2, SUB_DATA_DIR)
try:
if logger.isEnabledFor(logging.DEBUG):
main(test_input)
            # recreate the files before capturing stdout, since main() has already renamed them
make_files(TEST_FILE_NAMES)
with capture_stdout(main, test_input) as output:
self.assertTrue("Found and renamed 1 files" in output)
self.assertTrue(count_files(initial_fnames), 1)
self.assertTrue(count_files(expected_fnames), 3)
finally:
for fname in expected_fnames:
silent_remove(fname, disable=DISABLE_REMOVE)
| [
"logging.getLogger",
"md_utils.md_common.silent_remove",
"md_utils.md_common.capture_stderr",
"os.path.join",
"os.path.isfile",
"os.path.dirname",
"shutil.copyfile",
"md_utils.rename_files.main",
"md_utils.md_common.capture_stdout"
] | [((252, 279), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (269, 279), False, 'import logging\n'), ((453, 491), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""rename_files"""'], {}), "(DATA_DIR, 'rename_files')\n", (465, 491), False, 'import os\n'), ((517, 561), 'os.path.join', 'os.path.join', (['SUB_DATA_DIR', '"""small_file.txt"""'], {}), "(SUB_DATA_DIR, 'small_file.txt')\n", (529, 561), False, 'import os\n'), ((398, 423), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (413, 423), False, 'import os\n'), ((1124, 1157), 'os.path.join', 'os.path.join', (['SUB_DATA_DIR', 'fname'], {}), '(SUB_DATA_DIR, fname)\n', (1136, 1157), False, 'import os\n'), ((1166, 1203), 'shutil.copyfile', 'shutil.copyfile', (['SMALL_FILE', 'new_file'], {}), '(SMALL_FILE, new_file)\n', (1181, 1203), False, 'import shutil\n'), ((1946, 1967), 'os.path.isfile', 'os.path.isfile', (['fname'], {}), '(fname)\n', (1960, 1967), False, 'import os\n'), ((1603, 1631), 'os.path.join', 'os.path.join', (['abs_dir', 'fname'], {}), '(abs_dir, fname)\n', (1615, 1631), False, 'import os\n'), ((2193, 2209), 'md_utils.rename_files.main', 'main', (['test_input'], {}), '(test_input)\n', (2197, 2209), False, 'from md_utils.rename_files import main\n'), ((2223, 2255), 'md_utils.md_common.capture_stderr', 'capture_stderr', (['main', 'test_input'], {}), '(main, test_input)\n', (2237, 2255), False, 'from md_utils.md_common import capture_stdout, capture_stderr, silent_remove\n'), ((2317, 2349), 'md_utils.md_common.capture_stdout', 'capture_stdout', (['main', 'test_input'], {}), '(main, test_input)\n', (2331, 2349), False, 'from md_utils.md_common import capture_stdout, capture_stderr, silent_remove\n'), ((2539, 2555), 'md_utils.rename_files.main', 'main', (['test_input'], {}), '(test_input)\n', (2543, 2555), False, 'from md_utils.rename_files import main\n'), ((2569, 2601), 'md_utils.md_common.capture_stderr', 'capture_stderr', (['main', 'test_input'], {}), '(main, test_input)\n', (2583, 2601), False, 'from md_utils.md_common import capture_stdout, capture_stderr, silent_remove\n'), ((2833, 2849), 'md_utils.rename_files.main', 'main', (['test_input'], {}), '(test_input)\n', (2837, 2849), False, 'from md_utils.rename_files import main\n'), ((2863, 2895), 'md_utils.md_common.capture_stdout', 'capture_stdout', (['main', 'test_input'], {}), '(main, test_input)\n', (2877, 2895), False, 'from md_utils.md_common import capture_stdout, capture_stderr, silent_remove\n'), ((3310, 3326), 'md_utils.rename_files.main', 'main', (['test_input'], {}), '(test_input)\n', (3314, 3326), False, 'from md_utils.rename_files import main\n'), ((3447, 3479), 'md_utils.md_common.capture_stdout', 'capture_stdout', (['main', 'test_input'], {}), '(main, test_input)\n', (3461, 3479), False, 'from md_utils.md_common import capture_stdout, capture_stderr, silent_remove\n'), ((3758, 3802), 'md_utils.md_common.silent_remove', 'silent_remove', (['fname'], {'disable': 'DISABLE_REMOVE'}), '(fname, disable=DISABLE_REMOVE)\n', (3771, 3802), False, 'from md_utils.md_common import capture_stdout, capture_stderr, silent_remove\n'), ((4156, 4172), 'md_utils.rename_files.main', 'main', (['test_input'], {}), '(test_input)\n', (4160, 4172), False, 'from md_utils.rename_files import main\n'), ((4293, 4325), 'md_utils.md_common.capture_stdout', 'capture_stdout', (['main', 'test_input'], {}), '(main, test_input)\n', (4307, 4325), False, 'from md_utils.md_common import capture_stdout, capture_stderr, silent_remove\n'), ((4604, 
4648), 'md_utils.md_common.silent_remove', 'silent_remove', (['fname'], {'disable': 'DISABLE_REMOVE'}), '(fname, disable=DISABLE_REMOVE)\n', (4617, 4648), False, 'from md_utils.md_common import capture_stdout, capture_stderr, silent_remove\n')] |
"""Perform rationalization for IWSLT experiments.
Use this script to perform rationalization for either the distractor
experiment or for the alignment experiment.
"""
import argparse
import json
import os
import time
import torch
import numpy as np
from fairseq import utils
from fairseq.models.transformer import TransformerModel
from rationalization import baseline_rationalize_conditional_model, rationalize_conditional_model
parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint_dir",
type=str,
help="Directory where trained checkpoint is stored")
parser.add_argument("--task",
type=str,
help="Experiment to rationalize ('alignments' or "
"'distractors')")
parser.add_argument("--method",
type=str,
default="greedy",
help="Rationalization method. Must be one of: 'greedy', "
"'gradient_norm', 'signed_gradient', "
"'integrated_gradient', 'last_attention', or "
"'all_attention'.")
parser.add_argument("--verbose",
action='store_true',
help="Whether to print rationalization results.")
parser.add_argument("--top_1",
action='store_true',
help="Whether to only rationalize a single source word "
"(only used if --task is set to 'alignments'.")
parser.add_argument("--max_steps",
type=int,
default=1024,
help="Maximum number of steps to perform rationalization.")
args = parser.parse_args()
def convert(o):
"""Helper function to convert dict with NumPy entries to JSON file."""
if isinstance(o, np.int64):
return int(o)
raise TypeError
fairseq_dir = os.path.dirname(__file__)
if args.task == 'distractors':
if args.top_1:
raise ValueError("It doesn't make sense to perform top-1 rationalization "
"for the distractors experiment.")
data_path = os.path.join(fairseq_dir,
'data-bin/iwslt14_distractors.tokenized.de-en')
elif args.task == 'alignments':
data_path = os.path.join(fairseq_dir,
'data-bin/iwslt14_alignments.tokenized.de-en')
else:
raise ValueError("--task must be either 'distractors' or 'alignments'.")
model = TransformerModel.from_pretrained(
os.path.join(args.checkpoint_dir, 'compatible_iwslt'),
checkpoint_file='checkpoint_best.pt',
data_name_or_path=data_path)
model.half()
model.cuda()
model.eval()
# Make iterator for the data.
model.task.load_dataset('test')
itr = model.task.get_batch_iterator(
dataset=model.task.dataset('test'),
max_tokens=1200,
max_sentences=1,).next_epoch_itr(shuffle=False)
# Shortcut for model indexing.
model.model = model.models[0]
rs = np.random.RandomState(0)
indices_to_evaluate = np.sort(rs.choice(itr.total, 60, replace=False))
first_time = time.time()
for eval_index, sample in enumerate(itr):
print("Working on {}/{}...".format(eval_index, itr.total))
start_time = time.time()
sample = utils.move_to_cuda(sample)
if sample['target'][0, 0].item() != model.task.source_dictionary.eos_index:
# Add <eos> token to beginning of target tokens.
sample['target'] = torch.cat([
torch.tensor([[model.task.target_dictionary.eos_index]]).to(
sample['target']), sample['target']], -1)
if args.method == 'greedy':
(source_rationales, target_rationales,
rationalization_log) = rationalize_conditional_model(
model,
sample['net_input']['src_tokens'][0],
sample['target'][0],
verbose=args.verbose,
max_steps=args.max_steps,
top_1=args.top_1)
else:
(source_rationales, target_rationales,
rationalization_log) = baseline_rationalize_conditional_model(
model,
sample['net_input']['src_tokens'][0],
sample['target'][0],
args.method,
verbose=args.verbose,
max_steps=args.max_steps,
top_1=args.top_1)
# Save rationalization results
task_name = ("alignments_top_1"
if (args.top_1 and args.task == 'alignments')
else args.task)
results_dir = os.path.join(
fairseq_dir,
"rationalization_results/{}/{}".format(task_name, args.method))
if not os.path.exists(results_dir):
os.makedirs(results_dir)
file_name = os.path.join(results_dir, "{}.json".format(
str(sample['id'].item())))
print("...writing to {}".format(file_name))
with open(file_name, 'w') as outfile:
json.dump(rationalization_log, outfile, default=convert)
print("...finished in {:.2f} (average: {:.2f})".format(
time.time() - start_time, (time.time() - first_time) / (eval_index + 1)))
| [
"os.path.exists",
"argparse.ArgumentParser",
"os.makedirs",
"json.dump",
"os.path.join",
"os.path.dirname",
"torch.tensor",
"fairseq.utils.move_to_cuda",
"rationalization.rationalize_conditional_model",
"rationalization.baseline_rationalize_conditional_model",
"time.time",
"numpy.random.RandomState"
] | [((443, 468), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (466, 468), False, 'import argparse\n'), ((1899, 1924), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1914, 1924), False, 'import os\n'), ((2936, 2960), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (2957, 2960), True, 'import numpy as np\n'), ((3046, 3057), 'time.time', 'time.time', ([], {}), '()\n', (3055, 3057), False, 'import time\n'), ((2123, 2196), 'os.path.join', 'os.path.join', (['fairseq_dir', '"""data-bin/iwslt14_distractors.tokenized.de-en"""'], {}), "(fairseq_dir, 'data-bin/iwslt14_distractors.tokenized.de-en')\n", (2135, 2196), False, 'import os\n'), ((2496, 2549), 'os.path.join', 'os.path.join', (['args.checkpoint_dir', '"""compatible_iwslt"""'], {}), "(args.checkpoint_dir, 'compatible_iwslt')\n", (2508, 2549), False, 'import os\n'), ((3176, 3187), 'time.time', 'time.time', ([], {}), '()\n', (3185, 3187), False, 'import time\n'), ((3199, 3225), 'fairseq.utils.move_to_cuda', 'utils.move_to_cuda', (['sample'], {}), '(sample)\n', (3217, 3225), False, 'from fairseq import utils\n'), ((2270, 2342), 'os.path.join', 'os.path.join', (['fairseq_dir', '"""data-bin/iwslt14_alignments.tokenized.de-en"""'], {}), "(fairseq_dir, 'data-bin/iwslt14_alignments.tokenized.de-en')\n", (2282, 2342), False, 'import os\n'), ((3611, 3780), 'rationalization.rationalize_conditional_model', 'rationalize_conditional_model', (['model', "sample['net_input']['src_tokens'][0]", "sample['target'][0]"], {'verbose': 'args.verbose', 'max_steps': 'args.max_steps', 'top_1': 'args.top_1'}), "(model, sample['net_input']['src_tokens'][0],\n sample['target'][0], verbose=args.verbose, max_steps=args.max_steps,\n top_1=args.top_1)\n", (3640, 3780), False, 'from rationalization import baseline_rationalize_conditional_model, rationalize_conditional_model\n'), ((3898, 4091), 'rationalization.baseline_rationalize_conditional_model', 'baseline_rationalize_conditional_model', (['model', "sample['net_input']['src_tokens'][0]", "sample['target'][0]", 'args.method'], {'verbose': 'args.verbose', 'max_steps': 'args.max_steps', 'top_1': 'args.top_1'}), "(model, sample['net_input'][\n 'src_tokens'][0], sample['target'][0], args.method, verbose=args.\n verbose, max_steps=args.max_steps, top_1=args.top_1)\n", (3936, 4091), False, 'from rationalization import baseline_rationalize_conditional_model, rationalize_conditional_model\n'), ((4420, 4447), 'os.path.exists', 'os.path.exists', (['results_dir'], {}), '(results_dir)\n', (4434, 4447), False, 'import os\n'), ((4453, 4477), 'os.makedirs', 'os.makedirs', (['results_dir'], {}), '(results_dir)\n', (4464, 4477), False, 'import os\n'), ((4657, 4713), 'json.dump', 'json.dump', (['rationalization_log', 'outfile'], {'default': 'convert'}), '(rationalization_log, outfile, default=convert)\n', (4666, 4713), False, 'import json\n'), ((4780, 4791), 'time.time', 'time.time', ([], {}), '()\n', (4789, 4791), False, 'import time\n'), ((3398, 3454), 'torch.tensor', 'torch.tensor', (['[[model.task.target_dictionary.eos_index]]'], {}), '([[model.task.target_dictionary.eos_index]])\n', (3410, 3454), False, 'import torch\n'), ((4807, 4818), 'time.time', 'time.time', ([], {}), '()\n', (4816, 4818), False, 'import time\n')] |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
from __future__ import absolute_import, unicode_literals, division
from flask_wtf import FlaskForm
from wtforms import TextField, PasswordField, SubmitField, HiddenField
from wtforms.fields.html5 import EmailField
from oclubs.objs import User
class LoginForm(FlaskForm):
username = TextField(
'Username',
)
password = PasswordField(
'Password',
)
password_2 = PasswordField(
'Password',
)
email = EmailField(
'Email'
)
submit = SubmitField(
'Login',
)
is_initalized = HiddenField(
default='false'
)
is_firstPass = HiddenField(
default='true'
)
nexturl = HiddenField()
forgotpassword = SubmitField(
'Forgot password'
)
def check(self):
# username is always checked for validity
if self.username.data is None or self.username.data == '':
self.errors[self.username] = 'Please enter a username.'
return False
if User.get_userobj_from_loginname(self.username.data) is None:
self.errors[self.username] = 'Username doesn\'t exist.'
return False
# first pass
if self.is_firstPass.data == 'true':
pass
# second pass
else:
if (not self.forgotpassword.data and
(self.password.data is None or self.password.data == '')):
self.errors[self.password] = 'Please enter a password lol.'
return False
# initalization checking
if self.is_initalized.data == 'false':
if self.password.data != self.password_2.data:
self.errors[self.password_2] = 'Passwords do not match.'
return False
if len(self.password.data) < 6:
self.errors[self.password] = 'Password is too short.'
return False
if self.email.data is None or self.email.data == '':
self.errors[self.email] = 'Please enter an email.'
return False
else:
pass
return True
| [
"wtforms.PasswordField",
"wtforms.SubmitField",
"wtforms.fields.html5.EmailField",
"wtforms.HiddenField",
"oclubs.objs.User.get_userobj_from_loginname",
"wtforms.TextField"
] | [((339, 360), 'wtforms.TextField', 'TextField', (['"""Username"""'], {}), "('Username')\n", (348, 360), False, 'from wtforms import TextField, PasswordField, SubmitField, HiddenField\n'), ((392, 417), 'wtforms.PasswordField', 'PasswordField', (['"""Password"""'], {}), "('Password')\n", (405, 417), False, 'from wtforms import TextField, PasswordField, SubmitField, HiddenField\n'), ((451, 476), 'wtforms.PasswordField', 'PasswordField', (['"""Password"""'], {}), "('Password')\n", (464, 476), False, 'from wtforms import TextField, PasswordField, SubmitField, HiddenField\n'), ((505, 524), 'wtforms.fields.html5.EmailField', 'EmailField', (['"""Email"""'], {}), "('Email')\n", (515, 524), False, 'from wtforms.fields.html5 import EmailField\n'), ((553, 573), 'wtforms.SubmitField', 'SubmitField', (['"""Login"""'], {}), "('Login')\n", (564, 573), False, 'from wtforms import TextField, PasswordField, SubmitField, HiddenField\n'), ((610, 638), 'wtforms.HiddenField', 'HiddenField', ([], {'default': '"""false"""'}), "(default='false')\n", (621, 638), False, 'from wtforms import TextField, PasswordField, SubmitField, HiddenField\n'), ((672, 699), 'wtforms.HiddenField', 'HiddenField', ([], {'default': '"""true"""'}), "(default='true')\n", (683, 699), False, 'from wtforms import TextField, PasswordField, SubmitField, HiddenField\n'), ((729, 742), 'wtforms.HiddenField', 'HiddenField', ([], {}), '()\n', (740, 742), False, 'from wtforms import TextField, PasswordField, SubmitField, HiddenField\n'), ((765, 795), 'wtforms.SubmitField', 'SubmitField', (['"""Forgot password"""'], {}), "('Forgot password')\n", (776, 795), False, 'from wtforms import TextField, PasswordField, SubmitField, HiddenField\n'), ((1054, 1105), 'oclubs.objs.User.get_userobj_from_loginname', 'User.get_userobj_from_loginname', (['self.username.data'], {}), '(self.username.data)\n', (1085, 1105), False, 'from oclubs.objs import User\n')] |
import numpy as np
import pandas as pd
def pretty_print(name, to_print):
print(f'{name}:')
print(f'{to_print}\n')
#create data series using pandas
orders = pd.Series(data=[300.50, 60, 123.40, 60, np.nan],
index=['Customer 1', 'Customer 2', 'Customer 3', 'Customer 4', 'Customer 5'])
pretty_print("initial dataset",orders)
#convert to string
pretty_print('to_string', orders.to_string())
#get first 2 rows
pretty_print('first_two_rows',orders.head(2))
#describe index
pretty_print('order_index',orders.index)
#describe data type
pretty_print('order_datatype',orders.dtype)
#describe shape
pretty_print('order_shape',orders.shape)
#summarize series
pretty_print('orders_with_desc',orders.describe())
#sort values
pretty_print('orders_sorted',orders.sort_values())
#count data categories
pretty_print('orders_count', orders.value_counts())
#check for null value
pretty_print('orders_null_data',orders.isnull())
| [
"pandas.Series"
] | [((166, 294), 'pandas.Series', 'pd.Series', ([], {'data': '[300.5, 60, 123.4, 60, np.nan]', 'index': "['Customer 1', 'Customer 2', 'Customer 3', 'Customer 4', 'Customer 5']"}), "(data=[300.5, 60, 123.4, 60, np.nan], index=['Customer 1',\n 'Customer 2', 'Customer 3', 'Customer 4', 'Customer 5'])\n", (175, 294), True, 'import pandas as pd\n')] |
# Generated by Django 2.2.4 on 2019-08-15 07:45
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('article', '0009_group'),
]
operations = [
migrations.CreateModel(
name='GroupMembers',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='article.Group', verbose_name='my group')),
('members', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='member')),
],
),
]
| [
"django.db.migrations.swappable_dependency",
"django.db.models.AutoField",
"django.db.models.ForeignKey"
] | [((227, 284), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (258, 284), False, 'from django.db import migrations, models\n'), ((456, 549), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (472, 549), False, 'from django.db import migrations, models\n'), ((574, 686), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""article.Group"""', 'verbose_name': '"""my group"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'article.Group', verbose_name='my group')\n", (591, 686), False, 'from django.db import migrations, models\n'), ((712, 831), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL', 'verbose_name': '"""member"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL, verbose_name='member')\n", (729, 831), False, 'from django.db import migrations, models\n')] |
"""Телеграм бот."""
import aiogram
from aiogram.utils.exceptions import BotBlocked
import afisha
import bot
import currencies
import horoscope
import news
import recipes
import reminder
import stuff
import weather
import shopping_lists
import locale
import os
import gettext
gettext.install("telbot", os.path.dirname(__file__))
async def get_help(message: aiogram.types.Message):
"""
Показать помощь.
:param message: сообщение
"""
keyboard = aiogram.types.InlineKeyboardMarkup()
keyboard.add(aiogram.types.InlineKeyboardButton(
text=_("Проект"), url="https://github.com/Disfavour/python-project"))
await message.reply(
_("Привет! Я бот-помощник, совмещаю в себе много полезных функций. ") +
_("Чтобы начать, введите /start. ") +
_("Документацию можно найти здесь:"), reply_markup=keyboard)
async def cmd_start(message: aiogram.types.Message):
"""
Предоставить выбор функции.
:param message: сообщение
"""
keyboard = aiogram.types.ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
buttons = stuff.base_options
keyboard.add(*buttons)
await message.reply(_("Выберите функцию"), reply_markup=keyboard)
async def cmd_dice(message: aiogram.types.Message):
"""
Показать бросание кубика.
:param message: сообщение
"""
await message.reply_dice(emoji="🎲")
async def echo(message: aiogram.types.Message):
"""
Показать, что команда не распознана.
:param message: сообщение
"""
await message.reply(_("Не распознано '") + message.text + "'")
async def error_bot_blocked(update: aiogram.types.Update, exception: BotBlocked):
"""
Обработать блокирование бота.
:param update: входящее обновление
:param exception: исключение
"""
# Здесь можно как-то обработать блокировку, например, удалить пользователя из БД
print(f"Меня заблокировал пользователь!\nСообщение: {update}\nОшибка: {exception}")
return True
class REGISTRATION:
"""
Класс регистрации и начала работы.
:param dp: диспетчер
"""
def __init__(self, dp):
"""Инициализировать диспетчер."""
self.dp = dp
def start(self) -> None:
"""Зарегестрировать обработчики и начать работу."""
self.register_handlers()
aiogram.executor.start_polling(self.dp, skip_updates=True)
def register_handlers(self) -> None:
"""Зарегестрировать обработчики."""
horoscope.register_handlers(self.dp)
news.register_handlers(self.dp)
weather.register_handlers(self.dp)
afisha.register_handlers(self.dp)
recipes.register_handlers(self.dp)
reminder.register_handlers(self.dp)
currencies.register_handlers(self.dp)
shopping_lists.register_handlers(self.dp)
# Это последнее, иначе эхо-обработчик перебьёт другие.
self.register_base_handlers()
def register_base_handlers(self) -> None:
"""Зарегестрировать базовые обработчики."""
self.dp.register_message_handler(get_help, commands="help")
self.dp.register_message_handler(cmd_start, commands="start")
self.dp.register_message_handler(cmd_dice, commands="dice")
self.dp.register_message_handler(echo)
self.dp.register_errors_handler(error_bot_blocked, exception=BotBlocked)
if __name__ == "__main__":
obj = REGISTRATION(bot.dp)
obj.start()
| [
"reminder.register_handlers",
"recipes.register_handlers",
"aiogram.executor.start_polling",
"aiogram.types.ReplyKeyboardMarkup",
"os.path.dirname",
"shopping_lists.register_handlers",
"currencies.register_handlers",
"news.register_handlers",
"horoscope.register_handlers",
"aiogram.types.InlineKeyboardMarkup",
"afisha.register_handlers",
"weather.register_handlers"
] | [((304, 329), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (319, 329), False, 'import os\n'), ((468, 504), 'aiogram.types.InlineKeyboardMarkup', 'aiogram.types.InlineKeyboardMarkup', ([], {}), '()\n', (502, 504), False, 'import aiogram\n'), ((1005, 1084), 'aiogram.types.ReplyKeyboardMarkup', 'aiogram.types.ReplyKeyboardMarkup', ([], {'resize_keyboard': '(True)', 'one_time_keyboard': '(True)'}), '(resize_keyboard=True, one_time_keyboard=True)\n', (1038, 1084), False, 'import aiogram\n'), ((2313, 2371), 'aiogram.executor.start_polling', 'aiogram.executor.start_polling', (['self.dp'], {'skip_updates': '(True)'}), '(self.dp, skip_updates=True)\n', (2343, 2371), False, 'import aiogram\n'), ((2466, 2502), 'horoscope.register_handlers', 'horoscope.register_handlers', (['self.dp'], {}), '(self.dp)\n', (2493, 2502), False, 'import horoscope\n'), ((2511, 2542), 'news.register_handlers', 'news.register_handlers', (['self.dp'], {}), '(self.dp)\n', (2533, 2542), False, 'import news\n'), ((2551, 2585), 'weather.register_handlers', 'weather.register_handlers', (['self.dp'], {}), '(self.dp)\n', (2576, 2585), False, 'import weather\n'), ((2594, 2627), 'afisha.register_handlers', 'afisha.register_handlers', (['self.dp'], {}), '(self.dp)\n', (2618, 2627), False, 'import afisha\n'), ((2636, 2670), 'recipes.register_handlers', 'recipes.register_handlers', (['self.dp'], {}), '(self.dp)\n', (2661, 2670), False, 'import recipes\n'), ((2679, 2714), 'reminder.register_handlers', 'reminder.register_handlers', (['self.dp'], {}), '(self.dp)\n', (2705, 2714), False, 'import reminder\n'), ((2723, 2760), 'currencies.register_handlers', 'currencies.register_handlers', (['self.dp'], {}), '(self.dp)\n', (2751, 2760), False, 'import currencies\n'), ((2769, 2810), 'shopping_lists.register_handlers', 'shopping_lists.register_handlers', (['self.dp'], {}), '(self.dp)\n', (2801, 2810), False, 'import shopping_lists\n')] |
# -*- coding: utf8 -*-
"""
This is part of shot detector.
Produced by w495 at 2017.05.04 04:18:27
"""
import math
from builtins import zip
import six
try:
from scipy.stats.norm import logsf
except ImportError:
def norm_cdf(z):
""" Cumulative distribution for N(0, 1)
:param z:
"""
t = 1 / (1 + 0.2316419 * z)
# noinspection PyPep8
return (1 - 0.3989423 * math.exp(-z * z / 2) *
((((
1.330274429 * t - 1.821255978) * t + 1.781477937) * t - 0.356563782) * t + 0.319381530) * t)
def logsf(z):
""" Logarithm of the survival function for N(0, 1)
:param z:
:param z:
"""
try:
return math.log(1 - norm_cdf(z))
except ValueError:
return float('-inf')
norm_logsf = logsf
# Alignment costs: -100*log(p(x:y)/p(1:1))
bead_costs = {
(1, 1): 0,
(2, 1): 230,
(1, 2): 230,
(0, 1): 450,
(1, 0): 450,
(2, 2): 440
}
# Length cost parameters
mean_xy = 1
variance_xy = 6.8
LOG2 = math.log(2)
def length_cost(sx, sy):
""" -100*log[p(|N(0, 1)|>delta)]
:param sx:
:param sy:
:param sx:
:param sy:
"""
lx, ly = sum(sx), sum(sy)
m = (lx + ly * mean_xy) / 2
try:
delta = (lx - ly * mean_xy) / math.sqrt(m * variance_xy)
except ZeroDivisionError:
return float('-inf')
return -100 * (LOG2 + norm_logsf(abs(delta)))
def _align(x, y):
m = {}
for i in range(len(x) + 1):
for j in range(len(y) + 1):
if i == j == 0:
m[0, 0] = (0, 0, 0)
else:
m[i, j] = min((m[i - di, j - dj][0] +
length_cost(x[i - di:i], y[j - dj:j]) +
bead_cost,
di, dj)
for (di, dj), bead_cost in
six.iteritems(bead_costs)
if i - di >= 0 and j - dj >= 0)
i, j = len(x), len(y)
while True:
(c, di, dj) = m[i, j]
if di == dj == 0:
break
yield (i - di, i), (j - dj, j)
i -= di
j -= dj
def char_length(sentence):
""" Length of a sentence in characters
:param sentence:
:param sentence:
"""
return sum(1 for c in sentence if c != ' ')
def align(sx, sy):
""" Align two groups of sentences
:param sx:
:param sy:
:param sx:
:param sy:
"""
cx = map(char_length, sx)
cy = map(char_length, sy)
# noinspection PyTypeChecker
for (i1, i2), (j1, j2) in reversed(list(_align(cx, cy))):
yield ' '.join(sx[i1:i2]), ' '.join(sy[j1:j2])
def read_blocks(f):
"""
:param f:
:return:
"""
block = []
for l in f:
if not l.strip():
yield block
block = []
else:
block.append(l.strip())
if block:
yield block
def main(corpus_x, corpus_y):
"""
:param corpus_x:
:param corpus_y:
:return:
"""
with open(corpus_x) as fx, open(corpus_y) as fy:
for block_x, block_y in zip(read_blocks(fx), read_blocks(fy)):
for (sentence_x, sentence_y) in align(block_x, block_y):
print('%s ||| %s' % (sentence_x, sentence_y))
if __name__ == '__main__':
import sys
if len(sys.argv) != 3:
sys.stderr.write('Usage: %s corpus.x corpus.y\n' % sys.argv[0])
sys.exit(1)
main(*sys.argv[1:])
| [
"math.sqrt",
"math.log",
"sys.stderr.write",
"sys.exit",
"math.exp",
"six.iteritems"
] | [((1071, 1082), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (1079, 1082), False, 'import math\n'), ((3423, 3486), 'sys.stderr.write', 'sys.stderr.write', (["('Usage: %s corpus.x corpus.y\\n' % sys.argv[0])"], {}), "('Usage: %s corpus.x corpus.y\\n' % sys.argv[0])\n", (3439, 3486), False, 'import sys\n'), ((3495, 3506), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3503, 3506), False, 'import sys\n'), ((1324, 1350), 'math.sqrt', 'math.sqrt', (['(m * variance_xy)'], {}), '(m * variance_xy)\n', (1333, 1350), False, 'import math\n'), ((423, 443), 'math.exp', 'math.exp', (['(-z * z / 2)'], {}), '(-z * z / 2)\n', (431, 443), False, 'import math\n'), ((1934, 1959), 'six.iteritems', 'six.iteritems', (['bead_costs'], {}), '(bead_costs)\n', (1947, 1959), False, 'import six\n')] |
"""Plot to test HTML tooltip plugin
As a data explorer, I want to add rich information to each point in a
scatter plot, as details-on-demand"""
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np, pandas as pd
from mpld3 import plugins
css = """
table
{
border-collapse: collapse;
}
th
{
color: #ffffff;
background-color: #000000;
}
td
{
background-color: #cccccc;
}
table, th, td
{
font-family:Arial, Helvetica, sans-serif;
border: 1px solid black;
text-align: right;
}
"""
def main():
fig, ax = plt.subplots()
N = 50
df = pd.DataFrame(index=range(N))
df['x'] = np.random.randn(N)
df['y'] = np.random.randn(N)
df['z'] = np.random.randn(N)
labels = []
for i in range(N):
label = df.ix[[i], :].T
label.columns = ['Row {0}'.format(i)]
labels.append(str(label.to_html())) # .to_html() is unicode, so make leading 'u' go away with str()
points = ax.plot(df.x, df.y, 'o', color='k', mec='w', ms=15, mew=1, alpha=.9)
ax.set_xlabel('x')
ax.set_ylabel('y')
tooltip = plugins.PointHTMLTooltip(
points[0], labels, voffset=10, hoffset=10, css=css)
plugins.connect(fig, tooltip)
return fig
if __name__ == '__main__':
fig = main()
plt.show()
| [
"matplotlib.use",
"mpld3.plugins.connect",
"mpld3.plugins.PointHTMLTooltip",
"numpy.random.randn",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((164, 185), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (178, 185), False, 'import matplotlib\n'), ((545, 559), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (557, 559), True, 'import matplotlib.pyplot as plt\n'), ((624, 642), 'numpy.random.randn', 'np.random.randn', (['N'], {}), '(N)\n', (639, 642), True, 'import numpy as np, pandas as pd\n'), ((657, 675), 'numpy.random.randn', 'np.random.randn', (['N'], {}), '(N)\n', (672, 675), True, 'import numpy as np, pandas as pd\n'), ((690, 708), 'numpy.random.randn', 'np.random.randn', (['N'], {}), '(N)\n', (705, 708), True, 'import numpy as np, pandas as pd\n'), ((1081, 1157), 'mpld3.plugins.PointHTMLTooltip', 'plugins.PointHTMLTooltip', (['points[0]', 'labels'], {'voffset': '(10)', 'hoffset': '(10)', 'css': 'css'}), '(points[0], labels, voffset=10, hoffset=10, css=css)\n', (1105, 1157), False, 'from mpld3 import plugins\n'), ((1171, 1200), 'mpld3.plugins.connect', 'plugins.connect', (['fig', 'tooltip'], {}), '(fig, tooltip)\n', (1186, 1200), False, 'from mpld3 import plugins\n'), ((1266, 1276), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1274, 1276), True, 'import matplotlib.pyplot as plt\n')] |
import unittest
import cassandranames
from dnstypeconstants import *
# Running this *will destroy* data in Cassandra.
class TestCassandraNames(unittest.TestCase):
def setUp(self):
cassandranames.install_schema(drop_first=True, rf=1)
self.names = cassandranames.CassandraNames()
def test_names(self):
# Verify behavior on an initial, empty set.
data = self.names.lookup("pantheon.example.com")
self.assertEqual(data, {})
data = self.names.lookup("pantheon.example.com", A)
self.assertEqual(data, {})
# Add an "A" record.
self.names.insert("pantheon.example.com", A, "192.168.0.1")
# Verify that the "A" records appears in lookups.
data = self.names.lookup("pantheon.example.com")
self.assertEqual(data, {A: {"192.168.0.1": {"ttl": 900}}})
data = self.names.lookup("pantheon.example.com", A)
self.assertEqual(data, {A: {"192.168.0.1": {"ttl": 900}}})
data = self.names.lookup("pantheon.example.com", MX)
self.assertEqual(data, {})
# Add another "A" record, this time with an explicit TTL.
self.names.insert("pantheon.example.com", A, "192.168.0.2", 60)
# Verify that both "A" records appear in results.
data = self.names.lookup("pantheon.example.com")
a_records = {"192.168.0.1": {"ttl": 900}, "192.168.0.2": {"ttl": 60}}
self.assertEqual(data, {A: a_records})
data = self.names.lookup("pantheon.example.com", A)
self.assertEqual(data, {A: a_records})
data = self.names.lookup("pantheon.example.com", MX)
self.assertEqual(data, {})
# Add an MX record.
self.names.insert("pantheon.example.com", MX, "192.168.0.3", preference=10)
# Verify the MX record.
data = self.names.lookup("pantheon.example.com")
self.assertEqual(data, {A: a_records, MX: {"192.168.0.3": {"preference": 10, "ttl": 900}}})
data = self.names.lookup("pantheon.example.com", MX)
self.assertEqual(data, {MX: {"192.168.0.3": {"preference": 10, "ttl": 900}}})
# Delete the A record for 192.168.0.1.
self.names.remove("pantheon.example.com", A, "192.168.0.1")
# Verify the other "A" record and the "MX" record still exists.
data = self.names.lookup("pantheon.example.com", A)
self.assertEqual(data, {A: {"192.168.0.2": {"ttl": 60}}})
data = self.names.lookup("pantheon.example.com", MX)
self.assertEqual(data, {MX: {"192.168.0.3": {"preference": 10, "ttl": 900}}})
# Delete all "MX" records and verify the deletion.
self.names.remove("pantheon.example.com", MX)
data = self.names.lookup("pantheon.example.com", MX)
self.assertEqual(data, {})
data = self.names.lookup("pantheon.example.com", A)
self.assertEqual(data, {A: {"192.168.0.2": {"ttl": 60}}})
# Delete all records for the domain and verify deletion.
self.names.remove("pantheon.example.com")
data = self.names.lookup("pantheon.example.com")
self.assertEqual(data, {})
# Insert some other records, just for fun.
self.names.insert("pantheon.example.com", A, "10.0.0.1", 60)
self.names.insert("pantheon.example.com", A, "10.0.0.2", 60)
self.names.insert("pantheon.example.com", MX, "10.0.0.3", 60, 10)
self.names.insert("pantheon.example.com", MX, "10.0.0.4", 60, 20)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"cassandranames.CassandraNames",
"cassandranames.install_schema"
] | [((3480, 3495), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3493, 3495), False, 'import unittest\n'), ((195, 247), 'cassandranames.install_schema', 'cassandranames.install_schema', ([], {'drop_first': '(True)', 'rf': '(1)'}), '(drop_first=True, rf=1)\n', (224, 247), False, 'import cassandranames\n'), ((269, 300), 'cassandranames.CassandraNames', 'cassandranames.CassandraNames', ([], {}), '()\n', (298, 300), False, 'import cassandranames\n')] |
from inspect import isfunction
from django.db.models.base import ModelBase
from importlib import import_module
from django.utils.module_loading import module_has_submodule
from flash.base import ModelCacheManagerMeta
from flash import settings as flash_settings
import flash.signal_receivers
# Import things here to export
from flash.base import (
ModelCacheManager, InstanceCache, RelatedInstanceCache,
QuerysetCache, QuerysetExistsCache, RelatedQuerysetCache,
DontCache, BatchCacheQuery, InvalidationType)
def load_caches():
import_module('.caches', 'flash')
FLASH_APPS = flash_settings.FLASH_APPS
if isfunction(FLASH_APPS):
FLASH_APPS = FLASH_APPS()
for app_name in FLASH_APPS:
app_module = import_module(app_name)
try:
module = import_module('.caches', app_name)
except ImportError:
if module_has_submodule(app_module, 'caches'):
print ('Import error in %s/caches.py:' % app_name)
raise
import flash.contenttypes_caches
#load_caches()
def get_cache_manager(self):
return ModelCacheManagerMeta.get_model_cache_manager(self)
ModelBase.cache = property(get_cache_manager)
# Register signals for fields_diff
import flash.fields_diff
default_app_config = 'flash.apps.FlashConfig'
| [
"flash.base.ModelCacheManagerMeta.get_model_cache_manager",
"inspect.isfunction",
"django.utils.module_loading.module_has_submodule",
"importlib.import_module"
] | [((560, 593), 'importlib.import_module', 'import_module', (['""".caches"""', '"""flash"""'], {}), "('.caches', 'flash')\n", (573, 593), False, 'from importlib import import_module\n'), ((645, 667), 'inspect.isfunction', 'isfunction', (['FLASH_APPS'], {}), '(FLASH_APPS)\n', (655, 667), False, 'from inspect import isfunction\n'), ((1120, 1171), 'flash.base.ModelCacheManagerMeta.get_model_cache_manager', 'ModelCacheManagerMeta.get_model_cache_manager', (['self'], {}), '(self)\n', (1165, 1171), False, 'from flash.base import ModelCacheManagerMeta\n'), ((757, 780), 'importlib.import_module', 'import_module', (['app_name'], {}), '(app_name)\n', (770, 780), False, 'from importlib import import_module\n'), ((815, 849), 'importlib.import_module', 'import_module', (['""".caches"""', 'app_name'], {}), "('.caches', app_name)\n", (828, 849), False, 'from importlib import import_module\n'), ((893, 935), 'django.utils.module_loading.module_has_submodule', 'module_has_submodule', (['app_module', '"""caches"""'], {}), "(app_module, 'caches')\n", (913, 935), False, 'from django.utils.module_loading import module_has_submodule\n')] |
from app.device.imu import IMU
from app.device.altimeter import Altimeter
from app.device.brakes import Brakes
from app.device.gps import GPS
from app.device.parachute import Parachute
from app.device.radio import Radio
class DeviceFactory(object):
def __init__(self):
self.imu = IMU()
self.altimeter = Altimeter()
self.brakes = Brakes()
self.gps = GPS()
self.radio = Radio()
def sleep_all(self):
self.imu.sleep()
self.radio.sleep()
def wake_all(self):
self.imu.wake()
self.radio.wake()
| [
"app.device.radio.Radio",
"app.device.brakes.Brakes",
"app.device.imu.IMU",
"app.device.gps.GPS",
"app.device.altimeter.Altimeter"
] | [((295, 300), 'app.device.imu.IMU', 'IMU', ([], {}), '()\n', (298, 300), False, 'from app.device.imu import IMU\n'), ((326, 337), 'app.device.altimeter.Altimeter', 'Altimeter', ([], {}), '()\n', (335, 337), False, 'from app.device.altimeter import Altimeter\n'), ((360, 368), 'app.device.brakes.Brakes', 'Brakes', ([], {}), '()\n', (366, 368), False, 'from app.device.brakes import Brakes\n'), ((389, 394), 'app.device.gps.GPS', 'GPS', ([], {}), '()\n', (392, 394), False, 'from app.device.gps import GPS\n'), ((416, 423), 'app.device.radio.Radio', 'Radio', ([], {}), '()\n', (421, 423), False, 'from app.device.radio import Radio\n')] |
import os
import threading
from random import randint
from time import sleep
from django.core.management import call_command
from django_grpc_testtools.executor import TestGRPCServer
from tests.helpers import call_hello_method
def start_server(**params):
"""
Starts gRPC server in a separate thread using "grpcserver" management command with given parameters
:return: connection string
"""
def _grpc_server_async(options):
call_command("grpcserver", **options)
port = 50000 + randint(0, 10000)
params["port"] = port
# Start grpc server
srv = threading.Thread(
target=_grpc_server_async, args=[params]
)
srv.start()
sleep(5)
return "localhost:%s" % port
def test_management_command(grpc_server):
"""
Start gRPC server using management command and make sure it works
"""
assert call_hello_method(grpc_server, 'Django GRPC') == 'Hello, Django GRPC!'
def test_management_command_with_autoreload():
manage_py = os.path.join(os.path.dirname(os.path.abspath(__file__)), "manage.py")
server = TestGRPCServer(manage_py, {'--autoreload': ''})
server.start()
assert call_hello_method(server.addr(), 'Autoreload') == 'Hello, Autoreload!'
server.stop()
| [
"tests.helpers.call_hello_method",
"django.core.management.call_command",
"time.sleep",
"os.path.abspath",
"threading.Thread",
"random.randint",
"django_grpc_testtools.executor.TestGRPCServer"
] | [((592, 650), 'threading.Thread', 'threading.Thread', ([], {'target': '_grpc_server_async', 'args': '[params]'}), '(target=_grpc_server_async, args=[params])\n', (608, 650), False, 'import threading\n'), ((685, 693), 'time.sleep', 'sleep', (['(5)'], {}), '(5)\n', (690, 693), False, 'from time import sleep\n'), ((1089, 1136), 'django_grpc_testtools.executor.TestGRPCServer', 'TestGRPCServer', (['manage_py', "{'--autoreload': ''}"], {}), "(manage_py, {'--autoreload': ''})\n", (1103, 1136), False, 'from django_grpc_testtools.executor import TestGRPCServer\n'), ((456, 493), 'django.core.management.call_command', 'call_command', (['"""grpcserver"""'], {}), "('grpcserver', **options)\n", (468, 493), False, 'from django.core.management import call_command\n'), ((514, 531), 'random.randint', 'randint', (['(0)', '(10000)'], {}), '(0, 10000)\n', (521, 531), False, 'from random import randint\n'), ((870, 915), 'tests.helpers.call_hello_method', 'call_hello_method', (['grpc_server', '"""Django GRPC"""'], {}), "(grpc_server, 'Django GRPC')\n", (887, 915), False, 'from tests.helpers import call_hello_method\n'), ((1035, 1060), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1050, 1060), False, 'import os\n')] |
from collections import defaultdict
import numpy as np
import boto3
import os
import os.path
from datetime import datetime
import locale
import argparse
import json
import torch
import torchvision.utils
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from coinrun import CoinrunDataset
from models import vae_models
# hyperparameters
parser = argparse.ArgumentParser(description='Coinrun VAE training.')
parser.add_argument('--local', action='store_true', default=False, help='')
parser.add_argument('--seed', type=int, default=1234, help='')
parser.add_argument('--expname', type=str, default=None, help='', required=True)
parser.add_argument('--description', type=str, default="", help='')
parser.add_argument('--lr', type=float, default=3e-4, help='')
parser.add_argument('--batch_size', type=int, default=64, help='')
parser.add_argument('--latent_dim', type=int, default=32, help='')
parser.add_argument('--epochs', type=int, default=5, help='')
parser.add_argument('--validate_every', type=int, default=2, help='')
parser.add_argument('--checkpoint_every', type=int, default=2, help='')
parser.add_argument('--save_folder', type=str, default='vae_results', help='')
parser.add_argument('--s3', action='store_true', default=False, help='')
parser.add_argument('--s3_bucket', type=str, default='nathan.experiments', help='')
parser.add_argument('--s3_path', type=str, default='adversarial/coinrun_vae', help='')
args = parser.parse_args()
# create save folder
locale.setlocale(locale.LC_ALL, 'fr_FR.utf8')
today = datetime.now().strftime("%d-%m-%Y")
expname = args.expname.replace(' ', '_') + datetime.now().strftime("_%Hh%M")
save_folder = os.path.join(args.save_folder, today, expname)
if not os.path.exists(save_folder):
os.makedirs(save_folder)
print(f'Saving at {save_folder}')
# os.makedirs(os.path.join(save_folder, 'reconstructions'))
# os.makedirs(os.path.join(save_folder, 'samples'))
os.makedirs(os.path.join(save_folder, 'checkpoints'))
else:
print(f'Save folder {save_folder} already exists. Aborting')
exit(0)
# s3
if args.s3:
s3 = boto3.resource('s3')
bucket = s3.Bucket(args.s3_bucket)
# save args to file
with open(os.path.join(save_folder, 'params.json'), 'w') as f:
json.dump(args.__dict__, f, indent=4)
# set device
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
print(f'Using device {device}')
# set seed for reproducibility (https://pytorch.org/docs/stable/notes/randomness.html)
if args.seed:
torch.manual_seed(args.seed)
np.random.seed(args.seed)
torch.set_deterministic(True)
torch.backends.cudnn.benchmark = False # can reduce performance
# dataset
train_dataset = CoinrunDataset('dataset/data.npz', split='train')
val_dataset = CoinrunDataset('dataset/data.npz', split='test')
num_workers = 0 if args.local else 4
train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True,
drop_last=True, num_workers=num_workers, pin_memory=True)
val_dataloader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False,
drop_last=True, num_workers=num_workers, pin_memory=True)
n_train, n_train_batches = len(train_dataset), len(train_dataloader)
n_val, n_val_batches = len(val_dataset), len(val_dataloader)
assert(n_train_batches == n_train // args.batch_size)
assert(n_val_batches == n_val // args.batch_size)
print(f'Initialized training dataset with {n_train} samples, {n_train_batches} batches')
print(f'Initialized validation dataset with {n_val} samples, {n_val_batches} batches')
# model
model = vae_models['VanillaVAE'](in_channels=3, latent_dim=args.latent_dim).to(device)
# optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
# tensorboard logging
writer = SummaryWriter(log_dir=save_folder)
# training loop
print(f'** Starting experiment {args.expname} **')
for epoch in range(args.epochs):
print(f'----- Epoch {epoch+1}/{args.epochs} -----')
print(f'> training over {n_train_batches} batches')
model.train()
train_losses = defaultdict(list)
for batch_idx, batch in enumerate(train_dataloader):
print('.', end='', flush=True)
data = batch.to(device)
optimizer.zero_grad()
results = model(data)
train_loss = model.loss_function(*results, M_N = args.batch_size / n_train)
for k, v in train_loss.items():
train_losses[k].append(v.item())
train_loss['loss'].backward()
optimizer.step()
print()
train_loss_data = []
for k, v in train_losses.items():
train_loss_data.append(f'{k}: {round(np.mean(v), 3)} (+- {round(np.std(v), 3)})')
writer.add_scalar(f'Train/{k}', np.mean(v), epoch)
print(', '.join(train_loss_data))
if epoch % args.validate_every == 0 or epoch == args.epochs - 1:
print(f'> validating over {n_val_batches} batches')
with torch.no_grad():
model.eval()
val_losses = defaultdict(list)
for batch_idx, batch in enumerate(val_dataloader):
print('.', end='', flush=True)
data = batch.to(device)
results = model(data)
val_loss = model.loss_function(*results, M_N = args.batch_size / n_val)
for k, v in val_loss.items():
val_losses[k].append(v.item())
print()
val_loss_data = []
for k, v in val_losses.items():
val_loss_data.append(f'{k}: {round(np.mean(v), 3)} (+- {round(np.std(v), 3)})')
writer.add_scalar(f'Val/{k}', np.mean(v), epoch)
print(', '.join(val_loss_data))
# sample images
print('> sampling images')
test_input = next(iter(val_dataloader)).to(device)
recons = model.generate(test_input)
grid1 = torchvision.utils.make_grid(recons.data, normalize=True, nrow=8)
# grid1 = torchvision.utils.make_grid(recons.data, os.path.join(save_folder, f"reconstructions/epoch_{epoch+1}.png"), normalize=True, nrow=8)
samples = model.sample(args.batch_size, device)
grid2 = torchvision.utils.make_grid(samples.cpu().data, normalize=True, nrow=8)
# grid2 = torchvision.utils.make_grid(samples.cpu().data, os.path.join(save_folder, f"samples/epoch_{epoch+1}.png"), normalize=True, nrow=8)
writer.add_image('reconstructions', grid1, epoch)
writer.add_image('samples', grid2, epoch)
# writer.add_graph(model, test_input.data)
del test_input, recons, samples
if epoch % args.checkpoint_every == 0 or epoch == args.epochs - 1:
print('> saving checkpoint')
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict()
}, os.path.join(save_folder, f"checkpoints/epoch_{epoch}.checkpoint"))
if args.s3:
for path, subdirs, files in os.walk(save_folder):
directory_name = path.replace(args.save_folder + '/', '')
for file in files:
bucket.upload_file(os.path.join(path, file), os.path.join(args.s3_path, directory_name, file))
writer.close()
print(f'Saved at {save_folder}')
if args.s3:
print(f'and at s3://{args.s3_bucket}/{args.s3_path}/{today}/{expname}')
| [
"torch.cuda.is_available",
"os.walk",
"torch.utils.tensorboard.SummaryWriter",
"os.path.exists",
"numpy.mean",
"argparse.ArgumentParser",
"boto3.resource",
"numpy.random.seed",
"coinrun.CoinrunDataset",
"locale.setlocale",
"numpy.std",
"torch.device",
"torch.manual_seed",
"os.makedirs",
"os.path.join",
"torch.set_deterministic",
"datetime.datetime.now",
"collections.defaultdict",
"torch.utils.data.DataLoader",
"torch.no_grad",
"json.dump"
] | [((388, 448), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Coinrun VAE training."""'}), "(description='Coinrun VAE training.')\n", (411, 448), False, 'import argparse\n'), ((1515, 1560), 'locale.setlocale', 'locale.setlocale', (['locale.LC_ALL', '"""fr_FR.utf8"""'], {}), "(locale.LC_ALL, 'fr_FR.utf8')\n", (1531, 1560), False, 'import locale\n'), ((1696, 1742), 'os.path.join', 'os.path.join', (['args.save_folder', 'today', 'expname'], {}), '(args.save_folder, today, expname)\n', (1708, 1742), False, 'import os\n'), ((2747, 2796), 'coinrun.CoinrunDataset', 'CoinrunDataset', (['"""dataset/data.npz"""'], {'split': '"""train"""'}), "('dataset/data.npz', split='train')\n", (2761, 2796), False, 'from coinrun import CoinrunDataset\n'), ((2811, 2859), 'coinrun.CoinrunDataset', 'CoinrunDataset', (['"""dataset/data.npz"""'], {'split': '"""test"""'}), "('dataset/data.npz', split='test')\n", (2825, 2859), False, 'from coinrun import CoinrunDataset\n'), ((2917, 3046), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'drop_last': '(True)', 'num_workers': 'num_workers', 'pin_memory': '(True)'}), '(train_dataset, batch_size=args.batch_size, shuffle=True,\n drop_last=True, num_workers=num_workers, pin_memory=True)\n', (2927, 3046), False, 'from torch.utils.data import DataLoader\n'), ((3064, 3192), 'torch.utils.data.DataLoader', 'DataLoader', (['val_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'drop_last': '(True)', 'num_workers': 'num_workers', 'pin_memory': '(True)'}), '(val_dataset, batch_size=args.batch_size, shuffle=False,\n drop_last=True, num_workers=num_workers, pin_memory=True)\n', (3074, 3192), False, 'from torch.utils.data import DataLoader\n'), ((3808, 3842), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {'log_dir': 'save_folder'}), '(log_dir=save_folder)\n', (3821, 3842), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((1750, 1777), 'os.path.exists', 'os.path.exists', (['save_folder'], {}), '(save_folder)\n', (1764, 1777), False, 'import os\n'), ((1783, 1807), 'os.makedirs', 'os.makedirs', (['save_folder'], {}), '(save_folder)\n', (1794, 1807), False, 'import os\n'), ((2134, 2154), 'boto3.resource', 'boto3.resource', (['"""s3"""'], {}), "('s3')\n", (2148, 2154), False, 'import boto3\n'), ((2282, 2319), 'json.dump', 'json.dump', (['args.__dict__', 'f'], {'indent': '(4)'}), '(args.__dict__, f, indent=4)\n', (2291, 2319), False, 'import json\n'), ((2369, 2394), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2392, 2394), False, 'import torch\n'), ((2343, 2365), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (2355, 2365), False, 'import torch\n'), ((2400, 2419), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2412, 2419), False, 'import torch\n'), ((2558, 2586), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (2575, 2586), False, 'import torch\n'), ((2591, 2616), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (2605, 2616), True, 'import numpy as np\n'), ((2621, 2650), 'torch.set_deterministic', 'torch.set_deterministic', (['(True)'], {}), '(True)\n', (2644, 2650), False, 'import torch\n'), ((4095, 4112), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4106, 4112), False, 'from collections import defaultdict\n'), ((1569, 1583), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', 
(1581, 1583), False, 'from datetime import datetime\n'), ((1982, 2022), 'os.path.join', 'os.path.join', (['save_folder', '"""checkpoints"""'], {}), "(save_folder, 'checkpoints')\n", (1994, 2022), False, 'import os\n'), ((2225, 2265), 'os.path.join', 'os.path.join', (['save_folder', '"""params.json"""'], {}), "(save_folder, 'params.json')\n", (2237, 2265), False, 'import os\n'), ((1648, 1662), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1660, 1662), False, 'from datetime import datetime\n'), ((4742, 4752), 'numpy.mean', 'np.mean', (['v'], {}), '(v)\n', (4749, 4752), True, 'import numpy as np\n'), ((4942, 4957), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4955, 4957), False, 'import torch\n'), ((5009, 5026), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5020, 5026), False, 'from collections import defaultdict\n'), ((6932, 6998), 'os.path.join', 'os.path.join', (['save_folder', 'f"""checkpoints/epoch_{epoch}.checkpoint"""'], {}), "(save_folder, f'checkpoints/epoch_{epoch}.checkpoint')\n", (6944, 6998), False, 'import os\n'), ((7061, 7081), 'os.walk', 'os.walk', (['save_folder'], {}), '(save_folder)\n', (7068, 7081), False, 'import os\n'), ((5639, 5649), 'numpy.mean', 'np.mean', (['v'], {}), '(v)\n', (5646, 5649), True, 'import numpy as np\n'), ((4657, 4667), 'numpy.mean', 'np.mean', (['v'], {}), '(v)\n', (4664, 4667), True, 'import numpy as np\n'), ((4684, 4693), 'numpy.std', 'np.std', (['v'], {}), '(v)\n', (4690, 4693), True, 'import numpy as np\n'), ((7231, 7255), 'os.path.join', 'os.path.join', (['path', 'file'], {}), '(path, file)\n', (7243, 7255), False, 'import os\n'), ((7257, 7305), 'os.path.join', 'os.path.join', (['args.s3_path', 'directory_name', 'file'], {}), '(args.s3_path, directory_name, file)\n', (7269, 7305), False, 'import os\n'), ((5548, 5558), 'numpy.mean', 'np.mean', (['v'], {}), '(v)\n', (5555, 5558), True, 'import numpy as np\n'), ((5575, 5584), 'numpy.std', 'np.std', (['v'], {}), '(v)\n', (5581, 5584), True, 'import numpy as np\n')] |
import torch
import torchaudio
class ToMono(torch.nn.Module):
def forward(self, waveform: torch.Tensor) -> torch.Tensor:
return torch.mean(waveform, dim=0, keepdim=True)
class Normalize(torch.nn.Module):
def forward(self, waveform: torch.Tensor) -> torch.Tensor:
return (waveform-waveform.mean()) / waveform.std()
class Pad(torch.nn.Module):
def __init__(self, value: float, size: int):
super(Pad, self).__init__()
self.value = value
self.size = size
def forward(self, waveform: torch.Tensor) -> torch.Tensor:
return torch.nn.functional.pad(waveform, (0, self.size-max(waveform.shape)), "constant", self.value)
audio_transform = torch.nn.Sequential(*[
ToMono(), #converts audio channels to mono
torchaudio.transforms.Resample(orig_freq=441000, new_freq=8000), # downsamples audio signal to 8000 HZ
Normalize(), # normalize audio signal to have mean=0 & std=1
Pad(value=0, size=32000),
]) | [
"torch.mean",
"torchaudio.transforms.Resample"
] | [((142, 183), 'torch.mean', 'torch.mean', (['waveform'], {'dim': '(0)', 'keepdim': '(True)'}), '(waveform, dim=0, keepdim=True)\n', (152, 183), False, 'import torch\n'), ((781, 844), 'torchaudio.transforms.Resample', 'torchaudio.transforms.Resample', ([], {'orig_freq': '(441000)', 'new_freq': '(8000)'}), '(orig_freq=441000, new_freq=8000)\n', (811, 844), False, 'import torchaudio\n')] |
import os
import pytest
from ..utils import generic_chempiler_test
HERE = os.path.abspath(os.path.dirname(__file__))
FOLDER = os.path.join(HERE, 'files')
@pytest.mark.integration
def test_lidocaine():
generic_chempiler_test(
os.path.join(FOLDER, 'lidocaine.xdl'),
os.path.join(FOLDER, 'lidocaine_graph.json')
)
@pytest.mark.integration
def test_dmp():
generic_chempiler_test(
os.path.join(FOLDER, 'DMP.xdl'),
os.path.join(FOLDER, 'DMP_graph.json')
)
# Removed as not easy to convert a graphml graph to SL2
# @pytest.mark.integration
# def test_alkyl_fluor():
# generic_chempiler_test(
# os.path.join(FOLDER, 'AlkylFluor.xdl'),
# os.path.join(FOLDER, 'AlkylFluor_graph.graphml')
# )
@pytest.mark.integration
def test_orgsyn_v83p0184a():
generic_chempiler_test(
os.path.join(FOLDER, 'orgsyn_v83p0184a.xdl'),
os.path.join(FOLDER, 'orgsyn_v83p0184a_graph.json')
)
@pytest.mark.integration
def test_orgsyn_v83p0193():
generic_chempiler_test(
os.path.join(FOLDER, 'orgsyn_v83p0193.xdl'),
os.path.join(FOLDER, 'orgsyn_v83p0193_graph.json')
)
@pytest.mark.integration
def test_orgsyn_v80p0129():
generic_chempiler_test(
os.path.join(FOLDER, 'orgsyn_v80p0129.xdl'),
os.path.join(FOLDER, 'orgsyn_v80p0129_graph.json')
)
@pytest.mark.integration
def test_orgsyn_v88p0152_a():
generic_chempiler_test(
os.path.join(FOLDER, 'orgsyn_v88p0152_a.xdl'),
os.path.join(FOLDER, 'orgsyn_v88p0152_a_graph.json')
)
@pytest.mark.integration
def test_orgsyn_v81p0262():
generic_chempiler_test(
os.path.join(FOLDER, 'orgsyn_v81p0262.xdl'),
os.path.join(FOLDER, 'orgsyn_v81p0262_graph.json')
)
@pytest.mark.integration
def test_orgsyn_v87p0016():
generic_chempiler_test(
os.path.join(FOLDER, 'orgsyn_v87p0016.xdl'),
os.path.join(FOLDER, 'orgsyn_v87p0016_graph.json')
)
@pytest.mark.integration
def test_orgsyn_v90p0251():
generic_chempiler_test(
os.path.join(FOLDER, 'orgsyn_v90p0251.xdl'),
os.path.join(FOLDER, 'orgsyn_v90p0251_graph.json')
)
| [
"os.path.dirname",
"os.path.join"
] | [((128, 155), 'os.path.join', 'os.path.join', (['HERE', '"""files"""'], {}), "(HERE, 'files')\n", (140, 155), False, 'import os\n'), ((92, 117), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (107, 117), False, 'import os\n'), ((240, 277), 'os.path.join', 'os.path.join', (['FOLDER', '"""lidocaine.xdl"""'], {}), "(FOLDER, 'lidocaine.xdl')\n", (252, 277), False, 'import os\n'), ((287, 331), 'os.path.join', 'os.path.join', (['FOLDER', '"""lidocaine_graph.json"""'], {}), "(FOLDER, 'lidocaine_graph.json')\n", (299, 331), False, 'import os\n'), ((416, 447), 'os.path.join', 'os.path.join', (['FOLDER', '"""DMP.xdl"""'], {}), "(FOLDER, 'DMP.xdl')\n", (428, 447), False, 'import os\n'), ((457, 495), 'os.path.join', 'os.path.join', (['FOLDER', '"""DMP_graph.json"""'], {}), "(FOLDER, 'DMP_graph.json')\n", (469, 495), False, 'import os\n'), ((850, 894), 'os.path.join', 'os.path.join', (['FOLDER', '"""orgsyn_v83p0184a.xdl"""'], {}), "(FOLDER, 'orgsyn_v83p0184a.xdl')\n", (862, 894), False, 'import os\n'), ((904, 955), 'os.path.join', 'os.path.join', (['FOLDER', '"""orgsyn_v83p0184a_graph.json"""'], {}), "(FOLDER, 'orgsyn_v83p0184a_graph.json')\n", (916, 955), False, 'import os\n'), ((1052, 1095), 'os.path.join', 'os.path.join', (['FOLDER', '"""orgsyn_v83p0193.xdl"""'], {}), "(FOLDER, 'orgsyn_v83p0193.xdl')\n", (1064, 1095), False, 'import os\n'), ((1105, 1155), 'os.path.join', 'os.path.join', (['FOLDER', '"""orgsyn_v83p0193_graph.json"""'], {}), "(FOLDER, 'orgsyn_v83p0193_graph.json')\n", (1117, 1155), False, 'import os\n'), ((1252, 1295), 'os.path.join', 'os.path.join', (['FOLDER', '"""orgsyn_v80p0129.xdl"""'], {}), "(FOLDER, 'orgsyn_v80p0129.xdl')\n", (1264, 1295), False, 'import os\n'), ((1305, 1355), 'os.path.join', 'os.path.join', (['FOLDER', '"""orgsyn_v80p0129_graph.json"""'], {}), "(FOLDER, 'orgsyn_v80p0129_graph.json')\n", (1317, 1355), False, 'import os\n'), ((1454, 1499), 'os.path.join', 'os.path.join', (['FOLDER', '"""orgsyn_v88p0152_a.xdl"""'], {}), "(FOLDER, 'orgsyn_v88p0152_a.xdl')\n", (1466, 1499), False, 'import os\n'), ((1509, 1561), 'os.path.join', 'os.path.join', (['FOLDER', '"""orgsyn_v88p0152_a_graph.json"""'], {}), "(FOLDER, 'orgsyn_v88p0152_a_graph.json')\n", (1521, 1561), False, 'import os\n'), ((1658, 1701), 'os.path.join', 'os.path.join', (['FOLDER', '"""orgsyn_v81p0262.xdl"""'], {}), "(FOLDER, 'orgsyn_v81p0262.xdl')\n", (1670, 1701), False, 'import os\n'), ((1711, 1761), 'os.path.join', 'os.path.join', (['FOLDER', '"""orgsyn_v81p0262_graph.json"""'], {}), "(FOLDER, 'orgsyn_v81p0262_graph.json')\n", (1723, 1761), False, 'import os\n'), ((1858, 1901), 'os.path.join', 'os.path.join', (['FOLDER', '"""orgsyn_v87p0016.xdl"""'], {}), "(FOLDER, 'orgsyn_v87p0016.xdl')\n", (1870, 1901), False, 'import os\n'), ((1911, 1961), 'os.path.join', 'os.path.join', (['FOLDER', '"""orgsyn_v87p0016_graph.json"""'], {}), "(FOLDER, 'orgsyn_v87p0016_graph.json')\n", (1923, 1961), False, 'import os\n'), ((2058, 2101), 'os.path.join', 'os.path.join', (['FOLDER', '"""orgsyn_v90p0251.xdl"""'], {}), "(FOLDER, 'orgsyn_v90p0251.xdl')\n", (2070, 2101), False, 'import os\n'), ((2111, 2161), 'os.path.join', 'os.path.join', (['FOLDER', '"""orgsyn_v90p0251_graph.json"""'], {}), "(FOLDER, 'orgsyn_v90p0251_graph.json')\n", (2123, 2161), False, 'import os\n')] |
# -*- coding: utf-8 -*-
# Author: jS1ngle
# License: MIT License (http://opensource.org/licenses/MIT)
from SimulationHelperFunctions import *
import numpy as np
# -----Functions---------------------------------------------------------
def evaluate_portfolios(coin_data_struct, number_portfolio, portfolio_samples, logReturns, cov, timeFrame):
# Initialize results matrix
results = np.zeros((3 + len(coin_data_struct), number_portfolio))
for i in range(number_portfolio):
weights = portfolio_samples[i]
# portfolio return
results[0, i] = np.sum(np.array(logReturns.mean().tolist()) * weights) * timeFrame
# portfolio volatility
results[1, i] = np.sqrt(np.dot(weights.T, np.dot(cov, weights))) * np.sqrt(timeFrame)
# Calc Sortino ratio and add weights
tmp = []
for col in range(len(weights)):
tmp.append(calc_sortino(logReturns[coins[col]].tolist()[1:] * timeFrame, 0.0))
results[col + 3, i] = weights[col]
# Total Sortino ratio
results[2, i] = np.sum(np.multiply(tmp, weights)) * np.sqrt(timeFrame)
return results
# -----Input-------------------------------------------------------------
# In time periods (usually days)
timeFrame = 30
numberPortfolio = 20000
coinDataStruct = [['BNB', 'Binance', 'BTC'],
['ADA', 'Binance', 'BTC'],
['ETH', 'Binance', 'BTC'],
['SOL', 'Binance', 'BTC'],
['DOGE', 'Binance', 'BTC'],
['DOT', 'Binance', 'BTC']]
df2 = pd.DataFrame({})
for i in range(len(coinDataStruct)):
coin = coinDataStruct[i][0]
exchange = coinDataStruct[i][1]
tradeCurrency = coinDataStruct[i][2]
# Pass closing price to new dataframe
df2[coin] = get_hist_price_data(coin, tradeCurrency, timeFrame, exchange)['close']
coins = []
for i in range(len(coinDataStruct)):
coins.append(coinDataStruct[i][0])
logReturns = np.log(df2 / df2.shift(1))
cov = logReturns.cov().as_matrix()
# Create portfolios with different coin allocations
portfolioSamples = np.random.dirichlet(np.ones(len(coinDataStruct)), numberPortfolio)
results = evaluate_portfolios(coinDataStruct,
numberPortfolio,
portfolioSamples,
logReturns,
cov,
timeFrame)
# Convert results array to Pandas DataFrame
results_frame = pd.DataFrame(results.T)
resColumn = ['ret', 'stdev', 'Sortino'] + coins
results_frame.columns = results_frame.columns[:0].tolist() + resColumn
# print results_frame
results_frame.to_csv('portfolioOptimization.csv')
# Process efficient frontier data
pf = get_pareto_frontier(results_frame.stdev, results_frame.ret)
x = [x[0] for x in pf]
y = [y[1] for y in pf]
pfFrame = results_frame[(results_frame.stdev.isin(x)) & (results_frame.ret.isin(y))]
pfFrame.set_index('Sortino', inplace=True)
pfFrame.to_csv('paretoFrontier.csv')
| [
"numpy.dot",
"numpy.multiply",
"numpy.sqrt"
] | [((751, 769), 'numpy.sqrt', 'np.sqrt', (['timeFrame'], {}), '(timeFrame)\n', (758, 769), True, 'import numpy as np\n'), ((1102, 1120), 'numpy.sqrt', 'np.sqrt', (['timeFrame'], {}), '(timeFrame)\n', (1109, 1120), True, 'import numpy as np\n'), ((1073, 1098), 'numpy.multiply', 'np.multiply', (['tmp', 'weights'], {}), '(tmp, weights)\n', (1084, 1098), True, 'import numpy as np\n'), ((726, 746), 'numpy.dot', 'np.dot', (['cov', 'weights'], {}), '(cov, weights)\n', (732, 746), True, 'import numpy as np\n')] |