repo_name (string, 8–130 chars) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) |
---|---|---|---|---|
nextBillyonair/DPM | [
"840ffaafe15c208b200b74094ffa8fe493b4c975"
] | [
"tests/test_moments.py"
] | [
"import pytest\nfrom dpm.distributions import *\nimport dpm.utils as utils\nimport torch\n\n\ndef test_arcsine():\n model = Arcsine()\n assert model.expectation == 0.5\n assert model.median == 0.5\n assert model.variance == 0.125\n assert model.skewness == 0.\n assert model.kurtosis == -1.5\n\n model = Arcsine(-1, 1)\n assert model.expectation == 0.\n assert model.median == 0.\n assert model.variance == 0.5\n assert model.skewness == 0.\n assert model.kurtosis == -1.5\n\ndef test_bernoulli():\n model = Bernoulli(probs=[0.3])\n assert model.logits.item() + 0.8473 < 1e-2\n assert model.expectation.item() - 0.3 < 1e-2\n assert model.variance.item() - 0.21 < 1e-2\n assert model.skewness.item() - 1.9047619048 < 1e-2\n assert model.kurtosis.item() + -1.2380952381 < 1e-2\n\ndef test_beta():\n model = Beta()\n assert model.expectation == 0.5\n assert model.variance == 0.125\n m = Beta(0.5, 0.5).mode.item()\n assert m == 0. or 1.\n assert Beta(4.5, 3.5).mode.item() - 0.5833333333 < 1e-2\n assert Beta(1.5, 0.5).mode.item() == 1.\n assert Beta(0.5, 1.5).mode.item() == 0.\n # assert Beta(1.00000, 1.00000).mode.item() > 0. and Beta(1.00000, 1.00000).mode.item() < 1.\n\ndef test_cauchy():\n model = Cauchy(loc=1.)\n assert model.median == 1.\n assert model.mode == 1.\n\ndef test_exponential():\n model = Exponential()\n assert model.expectation - 1. < 1e-2\n assert model.mode - 0. < 1e-2\n assert model.variance - 1. < 1e-2\n assert model.median - 0.6931471806 < 1e-2\n assert model.skewness - 2. < 1e-2\n assert model.kurtosis - 6. < 1e-2\n\n model = Exponential(0.5)\n assert model.expectation - 2. < 1e-2\n assert model.mode - 0. < 1e-2\n assert model.variance - 4. < 1e-2\n assert model.median - 1.3862943611 < 1e-2\n assert model.skewness - 2. < 1e-2\n assert model.kurtosis - 6. < 1e-2\n\ndef test_gamma():\n model = Gamma()\n assert model.expectation - 1. < 1e-2\n assert model.variance - 1. < 1e-2\n\n model = Gamma(0.5, 0.75)\n assert model.expectation - 0.6666666667 < 1e-2\n assert model.variance - 0.8888888889 < 1e-2\n\ndef test_gumbel():\n model = Gumbel(loc=1., scale=2.)\n assert model.expectation - (1 + 2 * utils.euler_mascheroni) < 1e-2\n assert model.mode == 1.\n assert model.median - 1.7330258412 < 1e-2\n assert model.variance - 6.5797362674 < 1e-2\n assert model.skewness - 1.14 < 1e-2\n assert model.kurtosis - 2.4 < 1e-2\n\ndef test_hyperbolicsecant():\n model = HyperbolicSecant()\n assert model.expectation == 0.\n assert model.variance == 1.\n assert model.median == 0.\n\ndef test_laplace():\n model = Laplace(loc=1., scale=2.)\n assert model.expectation - 1. < 1e-2\n assert model.variance - 8. < 1e-2\n assert model.stddev - 2.8284271247 < 1e-2\n assert model.median - 1. < 1e-2\n assert model.mode - 1. < 1e-2\n assert model.skewness < 1e-2\n assert model.kurtosis - 3. 
< 1e-2\n assert model.entropy() - 2.3862943611 < 1e-2\n\ndef test_log_cauchy():\n model = LogCauchy(loc=2.)\n assert model.median - 7.3890560989 < 1e-2\n\ndef test_log_normal():\n model = LogNormal()\n assert model.expectation - 1.6487212707 < 1e-2\n assert model.variance - 4.6707742705 < 1e-2\n assert model.mode - utils.e < 1e-2\n assert model.median - utils.e < 1e-2\n\ndef test_logistic():\n model = Logistic(loc=1., scale=2.)\n assert model.expectation == 1.\n assert model.mode == 1.\n assert model.variance - 13.1594725348 < 1e-2\n assert model.median == 1.\n assert model.skewness == 0.\n assert model.kurtosis == 1.2\n\ndef test_normal():\n model = Normal(0., 3.)\n assert model.variance.item() == 3.\n assert model.expectation.item() == 0.\n model = Normal([0., 0.], [3., 1., 1., 3.])\n assert (model.variance - torch.tensor([[3., 1.], [1., 3.]]) < 1e-2).all()\n assert (model.expectation == torch.tensor([0., 0.])).all()\n\n\ndef test_rayleigh():\n model = Rayleigh(3.)\n assert model.expectation - 3.7599424119 < 1e-2\n assert model.mode - 3. < 1e-2\n assert model.median - 3.5322300675 < 1e-2\n assert model.variance - 3.8628330588 < 1e-2\n assert model.skewness - 1.1186145158 < 1e-2\n assert model.kurtosis - 0.2450893007 < 1e-2\n\n\ndef test_studentt():\n model = StudentT()\n model.expectation\n model.variance\n model.mode\n\ndef test_uniform():\n model = Uniform()\n assert model.expectation - 0.5 < 1e-2\n assert model.variance - 1/12. < 1e-2\n assert model.median - 0.5 < 1e-2\n assert model.skewness == 0.\n assert model.kurtosis + 1.2 < 1e-2\n\ndef test_logitnormal():\n model = LogitNormal()\n assert model.median - torch.sigmoid(torch.tensor(0.)) < 1e-2\n model = LogitNormal(1.)\n assert model.median - torch.sigmoid(torch.tensor(1.)) < 1e-2\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# EOF\n"
] | [
[
"torch.tensor"
]
] |
skyquant2/gs-quant | [
"b7e648fa7912b13ad1fd503b643389e34587aa1e"
] | [
"gs_quant/test/timeseries/test_datetime.py"
] | [
"\"\"\"\nCopyright 2018 Goldman Sachs.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing,\nsoftware distributed under the License is distributed on an\n\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\nKIND, either express or implied. See the License for the\nspecific language governing permissions and limitations\nunder the License.\n\"\"\"\n\nimport pytest\nfrom pandas.testing import assert_series_equal\n\nfrom gs_quant.timeseries.datetime import *\n\n\ndef test_align():\n dates1 = [\n date(2019, 1, 1),\n date(2019, 1, 2),\n date(2019, 1, 3),\n date(2019, 1, 4),\n date(2019, 1, 5),\n ]\n\n dates2 = [\n date(2019, 1, 2),\n date(2019, 1, 4),\n date(2019, 1, 6),\n ]\n\n x = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0], index=dates1)\n y = pd.Series([20.0, 40.0, 60.0], index=dates2)\n\n expectedl = pd.Series([2.0, 4.0], index=[date(2019, 1, 2), date(2019, 1, 4)])\n expectedr = pd.Series([20.0, 40.0], index=[date(2019, 1, 2), date(2019, 1, 4)])\n\n result = align(x, y, Interpolate.INTERSECT)\n assert_series_equal(result[0], expectedl, obj=\"Align intersect left\")\n assert_series_equal(result[1], expectedr, obj=\"Align intersect left\")\n\n result = align(y, x, Interpolate.INTERSECT)\n assert_series_equal(result[0], expectedr, obj=\"Align intersect right\")\n assert_series_equal(result[1], expectedl, obj=\"Align intersect right\")\n\n union_dates = [\n date(2019, 1, 1),\n date(2019, 1, 2),\n date(2019, 1, 3),\n date(2019, 1, 4),\n date(2019, 1, 5),\n date(2019, 1, 6),\n ]\n\n expected1 = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0, np.nan], index=union_dates)\n expected2 = pd.Series([np.nan, 20.0, np.nan, 40.0, np.nan, 60.0], index=union_dates)\n\n result = align(x, y, Interpolate.NAN)\n assert_series_equal(result[0], expected1, obj=\"Align NaN left\")\n assert_series_equal(result[1], expected2, obj=\"Align NaN left\")\n\n result = align(y, x, Interpolate.NAN)\n assert_series_equal(result[0], expected2, obj=\"Align NaN right\")\n assert_series_equal(result[1], expected1, obj=\"Align NaN right\")\n\n expected1 = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0, 0.0], index=union_dates)\n expected2 = pd.Series([0.0, 20.0, 0.0, 40.0, 0.0, 60.0], index=union_dates)\n\n result = align(x, y, Interpolate.ZERO)\n assert_series_equal(result[0], expected1, obj=\"Align zero left\")\n assert_series_equal(result[1], expected2, obj=\"Align zero left\")\n\n result = align(y, x, Interpolate.ZERO)\n assert_series_equal(result[0], expected2, obj=\"Align zero right\")\n assert_series_equal(result[1], expected1, obj=\"Align zero right\")\n\n expected1 = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0, 5.0], index=union_dates)\n expected2 = pd.Series([20.0, 20.0, 20.0, 40.0, 40.0, 60.0], index=union_dates)\n\n result = align(x, y, Interpolate.STEP)\n assert_series_equal(result[0], expected1, obj=\"Align step left\")\n assert_series_equal(result[1], expected2, obj=\"Align step left\")\n\n result = align(y, x, Interpolate.STEP)\n assert_series_equal(result[0], expected2, obj=\"Align step left\")\n assert_series_equal(result[1], expected1, obj=\"Align step left\")\n\n xp = x.copy()\n yp = y.copy()\n xp.index = pd.to_datetime(xp.index)\n yp.index = pd.to_datetime(yp.index)\n up = pd.to_datetime(union_dates)\n\n expected1 = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0, np.nan], index=up)\n expected2 = pd.Series([np.nan, 20.0, 30.0, 
40.0, 50.0, 60.0], index=up)\n\n result = align(xp, yp, Interpolate.TIME)\n assert_series_equal(result[0], expected1, obj=\"Align time left\")\n assert_series_equal(result[1], expected2, obj=\"Align time left\")\n\n result = align(yp, xp, Interpolate.TIME)\n assert_series_equal(result[0], expected2, obj=\"Align time right\")\n assert_series_equal(result[1], expected1, obj=\"Align time right\")\n\n a = pd.Series([0, 100, 110], index=pd.DatetimeIndex(['2019-07-01', '2019-07-08', '2019-07-10']))\n b = pd.Series([20, 60, 70], index=pd.DatetimeIndex(['2019-07-02', '2019-07-10', '2019-07-11']))\n result = align(a, b, Interpolate.TIME)\n\n u_index = a.index.union(b.index)\n assert_series_equal(result[0], pd.Series([0, 100 / 7, 100, 110, np.nan], index=u_index))\n assert_series_equal(result[1], pd.Series([np.nan, 20, 50, 60, 70], index=u_index))\n\n result = align(x, 3)\n assert_series_equal(result[0], x, obj=\"Align scalar left\")\n assert_series_equal(result[1], pd.Series(3, index=dates1), obj=\"Align scalar left\")\n\n result = align(3, x)\n assert_series_equal(result[0], pd.Series(3, index=dates1), obj=\"Align scalar left\")\n assert_series_equal(result[1], x, obj=\"Align scalar right\")\n\n result = align(1, 2)\n assert result[0] == 1\n assert result[1] == 2\n\n with pytest.raises(MqValueError):\n align(x, x, \"None\")\n\n\ndef test_interpolate():\n dates = [\n date(2019, 1, 2),\n date(2019, 1, 3),\n date(2019, 1, 5),\n date(2019, 1, 7),\n ]\n\n x = pd.Series([2.0, 3.0, 5.0, 7.0], index=dates)\n\n result = interpolate(x, dates)\n assert_series_equal(result, x, obj=\"Interpolate series by dates\")\n\n result = interpolate(x, x)\n assert_series_equal(result, x, obj=\"Interpolate series by series dates\")\n\n result = interpolate(x)\n assert_series_equal(result, x, obj=\"Interpolate series default\")\n\n select_dates = [\n date(2019, 1, 2),\n date(2019, 1, 3),\n date(2019, 1, 7),\n ]\n\n result = interpolate(x, select_dates)\n expected = pd.Series([2.0, 3.0, 7.0], index=select_dates)\n assert_series_equal(result, expected, obj=\"Interpolate subset of dates\")\n\n select_dates = [\n date(2019, 1, 1),\n date(2019, 1, 2),\n date(2019, 1, 4),\n date(2019, 1, 5),\n date(2019, 1, 6),\n date(2019, 1, 7),\n date(2019, 1, 8),\n ]\n\n intersect_dates = [\n date(2019, 1, 2),\n date(2019, 1, 5),\n date(2019, 1, 7),\n ]\n\n result = interpolate(x, select_dates, Interpolate.INTERSECT)\n expected = pd.Series([2.0, 5.0, 7.0], index=intersect_dates)\n assert_series_equal(result, expected, obj=\"Interpolate intersect\")\n\n result = interpolate(x, select_dates, Interpolate.NAN)\n expected = pd.Series([np.nan, 2.0, np.nan, 5.0, np.nan, 7.0, np.nan], index=select_dates)\n assert_series_equal(result, expected, obj=\"Interpolate nan\")\n\n result = interpolate(x, select_dates, Interpolate.ZERO)\n expected = pd.Series([0.0, 2.0, 0.0, 5.0, 0.0, 7.0, 0.0], index=select_dates)\n assert_series_equal(result, expected, obj=\"Interpolate zero\")\n\n result = interpolate(x, select_dates, Interpolate.STEP)\n expected = pd.Series([2.0, 2.0, 2.0, 5.0, 5.0, 7.0, 7.0], index=select_dates)\n assert_series_equal(result, expected, obj=\"Interpolate step dates\")\n\n result = interpolate(x, pd.Series(np.nan, select_dates), Interpolate.STEP)\n expected = pd.Series([2.0, 2.0, 2.0, 5.0, 5.0, 7.0, 7.0], index=select_dates)\n assert_series_equal(result, expected, obj=\"Interpolate step series\")\n\n xnan = pd.Series([np.nan, 3.0, 5.0, 7.0], index=dates)\n\n result = interpolate(xnan, select_dates, Interpolate.STEP)\n expected = 
pd.Series([np.nan, np.nan, np.nan, 5.0, 5.0, 7.0, 7.0], index=select_dates)\n assert_series_equal(result, expected, obj=\"Interpolate flat nan start\")\n\n x = pd.Series([2.0, 3.0, 5.0, 7.0], index=pd.DatetimeIndex(dates))\n result = interpolate(x, select_dates, Interpolate.STEP)\n expected = pd.Series([2.0, 2.0, 2.0, 5.0, 5.0, 7.0, 7.0], index=pd.DatetimeIndex(select_dates))\n assert_series_equal(result, expected, obj=\"Interpolate step dates to series with timestamps\")\n\n with pytest.raises(MqValueError, match=\"Unknown intersection type: None\"):\n interpolate(x, x, \"None\")\n\n with pytest.raises(MqValueError, match=\"Cannot perform step interpolation on an empty series\"):\n interpolate(pd.Series(), select_dates, Interpolate.STEP)\n\n\ndef test_value():\n dates = [\n date(2019, 1, 2),\n date(2019, 1, 3),\n date(2019, 1, 5),\n date(2019, 1, 7),\n ]\n\n x = pd.Series([2.0, 3.0, 5.0, 7.0], index=dates)\n\n result = value(x, date(2019, 1, 3))\n assert result == 3.0\n\n result = value(x, date(2019, 1, 5))\n assert result == 5.0\n\n result = value(x, date(2019, 1, 4))\n assert result == 3.0\n\n result = value(x, date(2019, 1, 4), Interpolate.INTERSECT)\n assert result is None\n\n result = value(x, date(2019, 1, 4), Interpolate.STEP)\n assert result == 3.0\n\n result = value(x, date(2019, 1, 4), Interpolate.ZERO)\n assert result == 0.0\n\n result = value(x, date(2019, 1, 4), Interpolate.NAN)\n assert np.isnan(result)\n\n\ndef test_day():\n dates = [\n date(2019, 1, 1),\n date(2019, 1, 2),\n date(2019, 1, 3),\n date(2019, 1, 4),\n ]\n\n x = pd.Series([1.0, 2.0, 3.0, 4.0], index=dates)\n\n result = day(x)\n expected = pd.Series([1, 2, 3, 4], index=dates)\n assert_series_equal(result, expected, obj=\"Day\")\n\n\ndef test_weekday():\n dates = [\n date(2019, 1, 7),\n date(2019, 1, 8),\n date(2019, 1, 9),\n date(2019, 1, 10),\n date(2019, 1, 11),\n date(2019, 1, 12),\n date(2019, 1, 13),\n ]\n\n x = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0], index=dates)\n\n result = weekday(x)\n expected = pd.Series([0, 1, 2, 3, 4, 5, 6], index=dates)\n assert_series_equal(result, expected, obj=\"Weekday\")\n\n\ndef test_month():\n dates = [\n date(2019, 1, 1),\n date(2019, 2, 1),\n date(2019, 3, 1),\n date(2019, 4, 1),\n ]\n\n x = pd.Series([1.0, 2.0, 3.0, 4.0], index=dates)\n\n result = month(x)\n expected = pd.Series([1, 2, 3, 4], index=dates)\n assert_series_equal(result, expected, obj=\"Month\")\n\n\ndef test_year():\n dates = [\n date(2019, 1, 1),\n date(2020, 1, 2),\n date(2021, 1, 3),\n date(2022, 1, 4),\n ]\n\n x = pd.Series([1.0, 2.0, 3.0, 4.0], index=dates)\n\n result = year(x)\n expected = pd.Series([2019, 2020, 2021, 2022], index=dates)\n assert_series_equal(result, expected, obj=\"Year\")\n\n\ndef test_quarter():\n dates = [\n date(2019, 1, 1),\n date(2019, 4, 1),\n date(2019, 7, 1),\n date(2019, 10, 1),\n ]\n\n x = pd.Series([1.0, 2.0, 3.0, 4.0], index=dates)\n\n result = quarter(x)\n expected = pd.Series([1, 2, 3, 4], index=dates)\n assert_series_equal(result, expected, obj=\"Quarter\")\n\n\ndef test_day_count_fractions():\n dates = [\n date(2019, 1, 1),\n date(2019, 1, 2),\n date(2019, 1, 3),\n date(2019, 1, 4),\n date(2019, 1, 5),\n date(2019, 1, 6),\n ]\n\n x = pd.Series([])\n assert_series_equal(x, day_count_fractions(x))\n\n x = pd.Series([100.0, 101, 103.02, 100.9596, 100.9596, 102.978792], index=dates)\n\n result = day_count_fractions(x, DayCountConvention.ACTUAL_360)\n result2 = day_count_fractions(x.index, DayCountConvention.ACTUAL_360)\n dcf = 1 / 360\n expected = 
pd.Series([np.NaN, dcf, dcf, dcf, dcf, dcf], index=dates)\n assert_series_equal(result, expected, obj=\"ACT/360\")\n assert_series_equal(result2, expected, obj=\"ACT/360\")\n\n result = day_count_fractions(x, DayCountConvention.ACTUAL_365F)\n dcf = 1 / 365\n expected = pd.Series([np.NaN, dcf, dcf, dcf, dcf, dcf], index=dates)\n assert_series_equal(result, expected, obj=\"ACT/365\")\n\n\ndef test_date_range():\n dates = [\n date(2019, 1, 1),\n date(2019, 1, 2),\n date(2019, 1, 3),\n date(2019, 1, 4),\n date(2019, 1, 5),\n date(2019, 1, 6),\n ]\n\n values = [1.0, 2.0, 3.0, 4.0, 5.0, 7.0]\n s0 = pd.Series(values, index=dates)\n s1 = pd.Series(values, index=pd.date_range('2019-01-01', periods=6, freq='D'))\n\n for x in [s0, s1]:\n assert (date_range(x, 0, 0) == x).all()\n assert (date_range(x, 0, 0, True) == x.iloc[:-2]).all()\n\n assert date_range(x, 0, date(2019, 1, 3)).index[-1] == date(2019, 1, 3)\n assert (date_range(x, 0, date(2019, 1, 3)) == x.iloc[:3]).all()\n\n assert date_range(x, date(2019, 1, 3), date(2019, 1, 6)).index[0] == date(2019, 1, 3)\n assert date_range(x, date(2019, 1, 3), date(2019, 1, 6)).index[-1] == date(2019, 1, 6)\n assert (date_range(x, date(2019, 1, 3), date(2019, 1, 6)) == x.iloc[2:6]).all()\n\n y = pd.Series(values, index=pd.date_range('2020-10-23', periods=6, freq='D'))\n assert (date_range(y, 1, 1, True) == y.iloc[3:5]).all()\n\n with pytest.raises(MqValueError):\n date_range(pd.Series([1]), 0, 0)\n\n with pytest.raises(MqTypeError):\n date_range(pd.Series([1]), 0, 0, 'string')\n\n\ndef test_prepend():\n x = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0, 7.0], index=pd.date_range('2019-01-01', \"2019-01-06\"))\n y = pd.Series([3.1, 4.1, 5.1], index=pd.date_range('2019-01-03', '2019-01-05'))\n\n assert_series_equal(prepend([]), pd.Series(dtype='float64'), obj='prepend empty')\n\n assert_series_equal(prepend([x]), x, obj='prepend one series')\n\n actual = prepend([x, y])\n expected = pd.Series([1.0, 2.0, 3.1, 4.1, 5.1], index=pd.date_range('2019-01-01', '2019-01-05'))\n assert_series_equal(actual, expected, obj='prepend two series')\n\n x = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0, 7.0], index=pd.date_range('2019-01-01', periods=6, freq='H'))\n y = pd.Series([3.1, 4.1, 5.1], index=pd.date_range('2019-01-01 02:00', periods=3, freq='H'))\n\n actual = prepend([x, y])\n expected = pd.Series([1.0, 2.0, 3.1, 4.1, 5.1], index=pd.date_range('2019-01-01', periods=5, freq='H'))\n assert_series_equal(actual, expected, obj='prepend two real-time series')\n\n\ndef test_union():\n x = pd.Series([3.1, 4.1, np.nan], index=pd.date_range('2019-01-03', '2019-01-05'))\n y = pd.Series([1.0, np.nan, 3.0, 4.0, 5.0, 6.0], index=pd.date_range('2019-01-01', \"2019-01-06\"))\n z = pd.Series([60.0, 70.0], index=pd.date_range('2019-01-06', \"2019-01-07\"))\n\n assert_series_equal(union([]), pd.Series(dtype='float64'), obj='union empty')\n\n x.index.freq = None\n assert_series_equal(union([x]), x, obj='union of one series')\n\n actual = union([x, y, z])\n expected = pd.Series([1.0, np.nan, 3.1, 4.1, 5.0, 6.0, 70], index=pd.date_range('2019-01-01', '2019-01-07'))\n assert_series_equal(actual, expected, obj='union of three series')\n\n x = pd.Series([3.1, 4.1, np.nan], index=pd.date_range('2019-01-01 02:00', periods=3, freq='H'))\n y = pd.Series([1.0, np.nan, 3.0, 4.0, 5.0, 6.0], index=pd.date_range('2019-01-01', periods=6, freq='H'))\n\n actual = union([x, y])\n expected = pd.Series([1.0, np.nan, 3.1, 4.1, 5.0, 6.0], index=pd.date_range('2019-01-01', periods=6, freq='H'))\n assert_series_equal(actual, 
expected, obj='union of two real-time series')\n\n\ndef test_bucketize():\n dates = pd.bdate_range(start='1/1/2021', end='4/23/2021')\n series = pd.Series(range(len(dates)), index=dates)\n\n actual = bucketize(series, AggregateFunction.MAX, AggregatePeriod.MONTH)\n expected_index = pd.DatetimeIndex([date(2021, 1, 31), date(2021, 2, 28), date(2021, 3, 31), date(2021, 4, 30)])\n expected = pd.Series([20, 40, 63, 80], index=expected_index)\n actual.index.freq = None # Ignore the index freq\n assert_series_equal(actual, expected, check_index_type=False)\n\n\ndef test_day_count():\n assert day_count(datetime.date(2021, 5, 7), datetime.date(2021, 5, 10)) == 1\n assert day_count(datetime.date(2021, 5, 10), datetime.date(2021, 5, 14)) == 4\n assert day_count(datetime.date(2021, 5, 10), datetime.date(2021, 5, 17)) == 5\n\n with pytest.raises(MqValueError):\n day_count(datetime.date(2021, 5, 7), '2021-05-10')\n\n\nif __name__ == \"__main__\":\n pytest.main(args=[\"test_datetime.py\"])\n"
] | [
[
"pandas.testing.assert_series_equal"
]
] |
Fa-Li/E3SM | [
"a91995093ec6fc0dd6e50114f3c70b5fb64de0f0"
] | [
"components/mpas-seaice/testing_and_setup/forcing/create_ocean_forcing.py"
] | [
"from __future__ import print_function\nfrom netCDF4 import Dataset\nimport netCDF4\nimport numpy as np\nimport os\nimport sys\nimport ConfigParser\nimport math\nfrom scipy.interpolate import griddata\nfrom create_forcing import create_scrip_grid_file, get_mpas_grid_info, create_scrip_file_MPAS, write_scrip_in_file, create_output_times, get_remapping_data\n\n#-------------------------------------------------------------------------------\n\ndef latlon_to_xyz(lat, lon):\n\n x = math.cos(lat) * math.cos(lon)\n y = math.cos(lat) * math.sin(lon)\n z = math.sin(lat)\n\n return x, y, z\n\n#-------------------------------------------------------------------------------\n\ndef xyz_to_latlon(x, y, z):\n\n lon = 0.0\n if (x != 0.0 or y != 0.0): lon = math.atan2(y,x)\n\n lat = math.asin(z / math.sqrt(x*x + y*y + z*z))\n\n return lat, lon\n\n#-------------------------------------------------------------------------------\n\ndef create_scrip_file_gx1(filenameScrip, filenameGx1Grid):\n\n filenameGx1Grid = \"/Users/akt/Work/Forcing/gx1/grid_info/global_gx1.nc\"\n fileIn = Dataset(filenameGx1Grid,\"r\")\n\n nx = len(fileIn.dimensions[\"nx\"])\n ny = len(fileIn.dimensions[\"ny\"])\n\n ULONin = fileIn.variables[\"ULON\"][:]\n ULATin = fileIn.variables[\"ULAT\"][:]\n\n KMT = fileIn.variables[\"KMT\"][:]\n\n fileIn.close()\n\n nCells = nx * ny\n gridDims = [nx, ny]\n\n gridImask = np.ones(nCells,dtype=\"i\")\n\n ULAT = np.zeros((ny+1,nx+1))\n ULON = np.zeros((ny+1,nx+1))\n\n ULAT[1:,1:] = ULATin[:,:]\n ULON[1:,1:] = ULONin[:,:]\n\n ULAT[:,0] = ULAT[:,-1]\n ULON[:,0] = ULON[:,-1]\n\n ULON[0,:] = ULON[1,:]\n ULAT[0,:] = ULAT[1,:] - math.pi / 180.0\n\n cornerLat = np.zeros((4,nCells))\n cornerLon = np.zeros((4,nCells))\n\n for i in range(0,nx):\n for j in range(0,ny):\n\n ii = i + 1\n jj = j + 1\n\n iCell = ii + nx * (jj-1) - 1\n\n i1 = ii-1 ; j1 = jj-1\n i2 = ii ; j2 = jj-1\n i3 = ii ; j3 = jj\n i4 = ii-1 ; j4 = jj\n\n cornerLat[0,iCell] = ULAT[j1,i1]\n cornerLat[1,iCell] = ULAT[j2,i2]\n cornerLat[2,iCell] = ULAT[j3,i3]\n cornerLat[3,iCell] = ULAT[j4,i4]\n\n cornerLon[0,iCell] = ULON[j1,i1]\n cornerLon[1,iCell] = ULON[j2,i2]\n cornerLon[2,iCell] = ULON[j3,i3]\n cornerLon[3,iCell] = ULON[j4,i4]\n\n centerLat = np.zeros(nCells)\n centerLon = np.zeros(nCells)\n\n for i in range(0,nx):\n for j in range(0,ny):\n\n ii = i + 1\n jj = j + 1\n\n iCell = ii + nx * (jj-1) - 1\n\n x1,y1,z1 = latlon_to_xyz(cornerLat[0,iCell],cornerLon[0,iCell])\n x2,y2,z2 = latlon_to_xyz(cornerLat[1,iCell],cornerLon[1,iCell])\n x3,y3,z3 = latlon_to_xyz(cornerLat[2,iCell],cornerLon[2,iCell])\n x4,y4,z4 = latlon_to_xyz(cornerLat[3,iCell],cornerLon[3,iCell])\n\n x0 = 0.25 * (x1 + x2 + x3 + x4)\n y0 = 0.25 * (y1 + y2 + y3 + y4)\n z0 = 0.25 * (z1 + z2 + z3 + z4)\n\n centerLat[iCell], centerLon[iCell] = xyz_to_latlon(x0, y0, z0)\n\n create_scrip_grid_file(filenameScrip, nCells, 4, 2, gridDims, centerLat, centerLon, gridImask, cornerLat, cornerLon, \"gx1\")\n\n#-------------------------------------------------------------------------------\n\ndef fill_array(arrayIn):\n\n nTimes = arrayIn.shape[0]\n nx = arrayIn.shape[1]\n ny = arrayIn.shape[2]\n\n arrayOut = np.zeros((nTimes,nx,ny))\n arrayOut[:] = arrayIn[:]\n\n grid_x, grid_y = np.mgrid[0:nx, 0:ny]\n\n for iTime in range(0,nTimes):\n\n array = np.zeros((nx,3*ny))\n\n array[:, 0: ny] = arrayIn[iTime,:,:]\n array[:, ny:2*ny] = arrayIn[iTime,:,:]\n array[:,2*ny:3*ny] = arrayIn[iTime,:,:]\n\n pointsGood = []\n valuesGood = []\n\n pointsBad = []\n\n for i in range(0,nx):\n for j in range(0,ny):\n if 
(array[i,j] > -900.0):\n pointsGood.append((i,j))\n valuesGood.append(array[i,j])\n else:\n pointsBad.append((i,j))\n\n pointsGood = np.array(pointsGood)\n valuesGood = np.array(valuesGood)\n pointsBad = np.array(pointsBad)\n\n valuesBad = griddata(pointsGood, valuesGood, (grid_x, grid_y), method='nearest')\n\n for iBad in range(0,pointsBad.shape[0]):\n i = pointsBad[iBad,0]\n j = pointsBad[iBad,1]\n arrayOut[iTime,i,j] = valuesBad[i,j]\n\n return arrayOut\n\n#-------------------------------------------------------------------------------\n\ndef interpolate_array(nCells, remapMatrix, arrayIn):\n\n arrayOut = np.zeros((12,nCells))\n\n for iTime in range(0,12):\n\n arrayInTime = arrayIn[iTime,:,:].flatten()\n\n arrayOut[iTime,:] = remapMatrix.dot(arrayInTime)\n\n return arrayOut\n\n#-------------------------------------------------------------------------------\n\ndef create_forcing(\\\n filenameIn, \\\n filenameOut, \\\n nCells, \\\n remapMatrix):\n\n fileIn = Dataset(filenameIn,\"r\")\n\n fileOut = Dataset(filenameOut,\"w\",format=\"NETCDF3_CLASSIC\")\n\n fileOut.createDimension(\"nCells\",nCells)\n fileOut.createDimension(\"StrLen\",64)\n fileOut.createDimension(\"Time\",None)\n\n # time\n xtimes = create_output_times(12, 0)\n varXtime = fileOut.createVariable(\"xtime\",\"c\",dimensions=[\"Time\",\"StrLen\"])\n for iTime in range(0,12):\n varXtime[iTime,0:19] = netCDF4.stringtochar(np.array(xtimes[iTime], 'S19'))\n varXtime[iTime,19:] = \" \"*45\n\n varSST = fileOut.createVariable(\"seaSurfaceTemperature\", \"d\",dimensions=[\"Time\",\"nCells\"])\n varSSS = fileOut.createVariable(\"seaSurfaceSalinity\", \"d\",dimensions=[\"Time\",\"nCells\"])\n varU = fileOut.createVariable(\"uOceanVelocity\", \"d\",dimensions=[\"Time\",\"nCells\"])\n varV = fileOut.createVariable(\"vOceanVelocity\", \"d\",dimensions=[\"Time\",\"nCells\"])\n varDhdx = fileOut.createVariable(\"seaSurfaceTiltU\", \"d\",dimensions=[\"Time\",\"nCells\"])\n varDhdy = fileOut.createVariable(\"seaSurfaceTiltV\", \"d\",dimensions=[\"Time\",\"nCells\"])\n varHblt = fileOut.createVariable(\"oceanMixedLayerDepth\", \"d\",dimensions=[\"Time\",\"nCells\"])\n varQdp = fileOut.createVariable(\"oceanHeatFluxConvergence\",\"d\",dimensions=[\"Time\",\"nCells\"])\n\n print(\"Interpolate seaSurfaceTemperature\")\n arrayIn = fileIn.variables[\"T\"][:]\n arrayIn = fill_array(arrayIn)\n arrayOut = interpolate_array(nCells, remapMatrix, arrayIn)\n varSST[:] = arrayOut[:]\n\n print(\"Interpolate seaSurfaceSalinity\")\n arrayIn = fileIn.variables[\"S\"][:]\n arrayIn = fill_array(arrayIn)\n arrayOut = interpolate_array(nCells, remapMatrix, arrayIn)\n varSSS[:] = arrayOut[:]\n\n print(\"Interpolate uOceanVelocity\")\n arrayIn = fileIn.variables[\"U\"][:,0,:,:]\n arrayIn = fill_array(arrayIn)\n arrayOut = interpolate_array(nCells, remapMatrix, arrayIn)\n varU[:] = arrayOut[:]\n\n print(\"Interpolate vOceanVelocity\")\n arrayIn = fileIn.variables[\"V\"][:,0,:,:]\n arrayIn = fill_array(arrayIn)\n arrayOut = interpolate_array(nCells, remapMatrix, arrayIn)\n varV[:] = arrayOut[:]\n\n print(\"Interpolate seaSurfaceTiltU\")\n arrayIn = fileIn.variables[\"dhdx\"][:]\n arrayIn = fill_array(arrayIn)\n arrayOut = interpolate_array(nCells, remapMatrix, arrayIn)\n varDhdx[:] = arrayOut[:]\n\n print(\"Interpolate seaSurfaceTiltV\")\n arrayIn = fileIn.variables[\"dhdy\"][:]\n arrayIn = fill_array(arrayIn)\n arrayOut = interpolate_array(nCells, remapMatrix, arrayIn)\n varDhdy[:] = arrayOut[:]\n\n print(\"Interpolate oceanMixedLayerDepth\")\n arrayIn = 
fileIn.variables[\"hblt\"][:]\n arrayIn = fill_array(arrayIn)\n arrayOut = interpolate_array(nCells, remapMatrix, arrayIn)\n varHblt[:] = arrayOut[:]\n\n print(\"Interpolate oceanHeatFluxConvergence\")\n arrayIn = fileIn.variables[\"qdp\"][:]\n arrayIn = fill_array(arrayIn)\n arrayOut = interpolate_array(nCells, remapMatrix, arrayIn)\n varQdp[:] = arrayOut[:]\n\n fileIn.close()\n fileOut.close()\n\n#-------------------------------------------------------------------------------\n\ndef perform_remapping(\\\n filenameMPASGrid, \\\n filenameGx1Grid, \\\n filenameGx1OceanMixed, \\\n filenameMPASOceanMixed, \\\n scripDir):\n\n # create MPAS scrip grid file\n print(\"create_scrip_file_MPAS\")\n scripGridFilename = \"remap_grid_MPAS_tmp.nc\"\n create_scrip_file_MPAS(filenameMPASGrid, scripGridFilename)\n\n # create gx1 scrip grid file\n print(\"create_scrip_file_gx1\")\n scripGx1Filename = \"remap_grid_gx1_tmp.nc\"\n create_scrip_file_gx1(scripGx1Filename, filenameGx1Grid)\n\n # create input scrip file\n print(\"write_scrip_in_file\")\n write_scrip_in_file(\"gx1\")\n\n # run scrip to generate weights\n print(\"SCRIP\")\n cmd = scripDir + \"/scrip\"\n os.system(cmd)\n\n # get remapping weights\n print(\"get_remapping_data\")\n filenameRemapping = \"remap_gx1_to_MPAS_tmp.nc\"\n remapMatrix, dstGridSize = get_remapping_data(filenameRemapping)\n\n print(\"create_forcing ocean climatology\")\n # combined ocean climatology\n create_forcing(\\\n filenameGx1OceanMixed, \\\n filenameMPASOceanMixed, \\\n dstGridSize, \\\n remapMatrix)\n\n#-------------------------------------------------------------------------------\n\n'''\ncreate_ocean_forcing.py\n=======================\n\nUsage\n-----\n\nThis script creates ocean forcing using CESM output.\n\nUsage: python create_ocean_forcing.py configFilename\n\nwhere configFilename is a python config file with the following example format:\n\n[forcing_generation]\nfilenameMPASGrid = /location/of/MPAS/grid\nfilenameGx1Grid = /location/of/gx1/grid\nfilenameGx1OceanMixed = /location/of/gx1/ocean_mixed_file\nfilenameMPASOceanMixed = /location/of/output/ocean_mixed_file\nscripDir = /location/of/SCRIP/executable\n\nSCRIP\n-----\n\nThis script requires the SCRIP package to be installed.\nSCRIP is a software package which computes addresses and weights for remapping\nand interpolating fields between grids in spherical coordinates. It can be\nobtained from https://github.com/SCRIP-Project/SCRIP\n\ngx1 input data\n--------------\n\nThis script requires a gx1 grid file and ocean mixed file as input. These can be\nobtained from:\nhttps://web.lcrc.anl.gov/public/e3sm/mpas_standalonedata/mpas-seaice/forcing/\nMPAS-Seaice_clim_data.tar.gz\n'''\n\nif (len(sys.argv) != 2):\n print(\"Usage: python create_ocean_forcing.py configFilename\")\n sys.exit()\n\nconfig = ConfigParser.ConfigParser()\nconfig.read(sys.argv[1])\n\nfilenameMPASGrid = config.get('forcing_generation','filenameMPASGrid')\nfilenameGx1Grid = config.get('forcing_generation','filenameGx1Grid')\nfilenameGx1OceanMixed = config.get('forcing_generation','filenameGx1OceanMixed')\nfilenameMPASOceanMixed = config.get('forcing_generation','filenameMPASOceanMixed')\nscripDir = config.get('forcing_generation','scripDir')\n\nperform_remapping(\\\n filenameMPASGrid, \\\n filenameGx1Grid, \\\n filenameGx1OceanMixed, \\\n filenameMPASOceanMixed, \\\n scripDir)\n"
] | [
[
"numpy.array",
"numpy.ones",
"scipy.interpolate.griddata",
"numpy.zeros"
]
] |
minkowski0125/multilayer-gcn-simulation | [
"15a4cd29d819246549148e3a32c99f3b8589f3b4"
] | [
"main.py"
] | [
"import json\nfrom utils import *\nfrom config import args\nfrom train import train\nfrom torch.utils.tensorboard import SummaryWriter\n\nif __name__ == '__main__':\n set_seed(args.seed)\n\n series = []\n if args.dataset == 'pubmed':\n graphs, features, adjs, labels = load_pubmed_data({\n 'deg_num': args.deg,\n 'sample_num': 1,\n })\n elif args.dataset == 'random':\n graphs, features, adjs, labels = load_pubmed_data({\n 'deg_num': args.deg,\n 'feat_dim': args.feat_dim,\n 'sample_num': 1,\n })\n\n writer = SummaryWriter(f'./log_pubmed')\n\n hiddens = [50, 100, 200, 500, 1000, 1500, 2000, 3000]\n for hidden in hiddens:\n series.append(train(data = (graphs, features, adjs, labels), deg = args.deg, feat_dim = args.feat_dim, hidden_dim = hidden, layer_num = args.layer_num, o = 0, writer=writer))\n print()\n \n visualize(series, hiddens, 'hidden')\n # print(series)\n"
] | [
[
"torch.utils.tensorboard.SummaryWriter"
]
] |
SatyaSiddharthDash/headlinegen | [
"ec11cb4b4dd4e6dce553c787cf31670a83f1c650"
] | [
"data_preprocessing_scripts/preprocess.py"
] | [
"import pandas as pd\nfrom sklearn.model_selection import train_test_split\n\n\nrandom_state = 100\n\ndata = pd.read_csv(\"~/headlinegen/data/nytime_front_page.csv\")\ndata['title'] = data['title'].apply(lambda x: ' '.join(x.split(' ')[:-5]))\n\nlens = data[\"content\"].apply(lambda x: len(x.split(\" \"))).nlargest(10)\n\nprint(\n f'max_input_len = {data[\"content\"].apply(lambda x: len(x.split(\" \"))).min()}')\nprint(\n f'max_output_len = {data[\"title\"].apply(lambda x: len(x.split(\" \"))).max()}')\n\nprint(lens)\n\n# train, valid_test = train_test_split(data,\n# test_size=0.2,\n# random_state=random_state,\n# shuffle=True)\n# valid, test = train_test_split(valid_test,\n# test_size=0.5,\n# random_state=random_state,\n# shuffle=True)\n\n# print(train.shape, valid.shape, test.shape)\n\n# for dataset, prefix in zip([train, valid, test], ['train', 'val', 'test']):\n# for columnname, suffix in zip(['content', 'title'], ['source', 'target']):\n# filename = \"/Users/satyasiddharthdash/headlinegen/data/nytimes/\" + prefix + '.' + suffix\n# with open(filename, 'w') as outfile:\n# outfile.write(dataset[columnname].str.cat(sep='\\n'))\n"
] | [
[
"pandas.read_csv"
]
] |
duypham2108/stLearn | [
"91b6bae91b29aba8b4f055bf92da13f1558ddbe8"
] | [
"stlearn/tools/microenv/cci/base_grouping.py"
] | [
"\"\"\" Performs LR analysis by grouping LR pairs which having hotspots across\n similar tissues.\n\"\"\"\n\nfrom stlearn.pl import het_plot\nfrom sklearn.cluster import DBSCAN, AgglomerativeClustering\nfrom anndata import AnnData\nfrom tqdm import tqdm\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sb\n\n\ndef get_hotspots(\n adata: AnnData,\n lr_scores: np.ndarray,\n lrs: np.array,\n eps: float,\n quantile=0.05,\n verbose=True,\n plot_diagnostics: bool = False,\n show_plot: bool = False,\n):\n \"\"\"Determines the hotspots for the inputted scores by progressively setting more stringent cutoffs & clustering in space, chooses point which maximises number of clusters.\n Parameters\n ----------\n adata: AnnData The data object\n lr_scores: np.ndarray LR_pair*Spots containing the LR scores.\n lrs: np.array The LR_pairs, in-line with the rows of scores.\n eps: float The eps parameter used in DBScan to get the number of clusters.\n quantile: float The quantiles to use for the cutoffs, if 0.05 then will take non-zero quantiles of 0.05, 0.1,..., 1 quantiles to cluster.\n\n Returns\n -------\n lr_hot_scores: np.ndarray, lr_cutoffs: np.array First is the LR scores for just the hotspots, second is the cutoff used to get those LR_scores.\n \"\"\"\n coors = adata.obs[[\"imagerow\", \"imagecol\"]].values\n lr_summary, lr_hot_scores = hotspot_core(\n lr_scores, lrs, coors, eps, quantile, plot_diagnostics, adata\n )\n\n if plot_diagnostics and show_plot: # Showing the diagnostic plotting #\n plt.show()\n\n if verbose:\n print(\"Clustering LRs to help with ranking/interpretation...\")\n # Clustering the LR pairs to obtain a set of clusters so to order within\n # each cluster\n clusterer = AgglomerativeClustering(\n affinity=\"euclidean\", linkage=\"ward\", distance_threshold=10, n_clusters=None\n )\n clusterer.fit(lr_hot_scores > 0)\n dist_cutoff = np.quantile(clusterer.distances_, 0.98)\n clusterer = AgglomerativeClustering(\n affinity=\"euclidean\",\n linkage=\"ward\",\n distance_threshold=dist_cutoff,\n n_clusters=None,\n )\n clusters = clusterer.fit_predict(lr_hot_scores > 0)\n cluster_set = np.unique(clusters)\n\n if verbose:\n print(\"Ranking LRs...\")\n\n # Determining the ordering of the clusters so is useful to user #\n cluster_mean_spots = []\n for cluster in cluster_set:\n cluster_bool = clusters == cluster\n cluster_mean_spots.append(np.mean(lr_summary[cluster_bool, 2]))\n cluster_order = np.argsort(-np.array(cluster_mean_spots))\n\n # Determining order of lrs in cluster & also overall cluster scores #\n lr_order = []\n new_clusters = []\n cluster_scores = np.zeros((adata.shape[0], len(cluster_set)))\n for i, index in enumerate(cluster_order):\n cluster = cluster_set[index]\n cluster_indices = np.where(clusters == cluster)[0]\n lr_order_ = np.argsort(-lr_summary[cluster_indices, 2])\n lr_order.extend(cluster_indices[lr_order_])\n\n new_clusters += [i] * len(cluster_indices)\n\n cluster_scores[:, i] = lr_hot_scores[cluster_indices, :].mean(axis=0)\n\n if verbose:\n print(\"Saving results:\")\n\n # Re-ordering the summary & the scores #\n lrs = lrs[lr_order]\n lr_summary = lr_summary[lr_order, :]\n lr_summary[:, 3] = new_clusters\n lr_summary = pd.DataFrame(\n lr_summary,\n index=lrs,\n columns=[\"spot_counts\", \"cutoff\", \"hotspot_counts\", \"lr_cluster\"],\n )\n lr_scores = lr_scores[lr_order, :].transpose()\n lr_hot_scores = lr_hot_scores[lr_order, :].transpose()\n\n # Adding all this information to the AnnData #\n adata.uns[\"lr_summary\"] = 
lr_summary\n adata.obsm[\"lr_scores\"] = lr_scores\n adata.obsm[\"lr_hot_scores\"] = lr_hot_scores\n adata.obsm[\"cluster_scores\"] = cluster_scores\n\n if verbose:\n print(f\"\\tSummary values of lrs in adata.uns['lr_summary'].\")\n print(\n f\"\\tMatrix of lr scores in same order as the summary in adata.obsm['lr_scores'].\"\n )\n print(f\"\\tMatrix of the hotspot scores in adata.obsm['lr_hot_scores'].\")\n print(\n f\"\\tMatrix of the mean LR cluster scores in adata.obsm['cluster_scores'].\"\n )\n\n\ndef hotspot_core(\n lr_scores,\n lrs,\n coors,\n eps,\n quantile,\n plot_diagnostics=False,\n adata=None,\n verbose=True,\n max_score=False,\n):\n \"\"\"Made code for getting the hotspot information.\"\"\"\n score_copy = lr_scores.copy()\n quantiles = [quantile * i for i in range(int(1 / quantile))]\n\n # Values to return #\n lr_hot_scores = np.zeros(score_copy.shape)\n # cols: spot_counts, cutoff, hotspot_counts, lr_cluster\n lr_summary = np.zeros((score_copy.shape[0], 4))\n\n ### Also creating grouping lr_pairs by quantiles to plot diagnostics ###\n if plot_diagnostics:\n lr_quantiles = [(i / 6) for i in range(1, 7)][::-1]\n lr_mean_scores = np.apply_along_axis(non_zero_mean, 1, score_copy)\n lr_quant_values = np.quantile(lr_mean_scores, lr_quantiles)\n quant_lrs = np.array(\n [lrs[lr_mean_scores == quant] for quant in lr_quant_values]\n )\n fig, axes = plt.subplots(6, 4, figsize=(20, 15))\n\n # Determining the cutoffs for hotspots #\n with tqdm(\n total=len(lrs),\n desc=\"Removing background lr scores...\",\n bar_format=\"{l_bar}{bar}\",\n disable=verbose == False,\n ) as pbar:\n for i, lr_ in enumerate(lrs):\n lr_score_ = score_copy[i, :]\n lr_summary[i, 0] = len(np.where(lr_score_ > 0)[0])\n\n cutoff_scores = []\n cutoffs = np.quantile(lr_score_[lr_score_ > 0], quantiles)\n for cutoff in cutoffs:\n spot_bool = lr_score_ >= cutoff\n if len(np.where(spot_bool)[0]) == 0:\n cutoff_scores.append(0)\n continue\n\n coor_ = coors[spot_bool, :]\n clusters = DBSCAN(\n min_samples=2, eps=eps, metric=\"manhattan\"\n ).fit_predict(coor_)\n score = len(np.unique(clusters)) * (np.mean(lr_score_[spot_bool])) ** 2\n cutoff_scores.append(score)\n\n # Cutoff point where maximum number of clusters occurs #\n best_cutoff = cutoffs[np.argmax(cutoff_scores)]\n if not max_score:\n lr_summary[i, 1] = best_cutoff\n else:\n lr_summary[i, 1] = cutoff_scores[np.argmax(cutoff_scores)]\n\n lr_score_[lr_score_ < best_cutoff] = 0\n lr_hot_scores[i, :] = lr_score_\n lr_summary[i, 2] = len(np.where(lr_score_ > 0)[0])\n\n # Adding the diagnostic plots #\n if plot_diagnostics and lr_ in quant_lrs and type(adata) != type(None):\n add_diagnostic_plots(\n adata,\n i,\n lr_,\n quant_lrs,\n lr_quantiles,\n lr_scores,\n lr_hot_scores,\n axes,\n cutoffs,\n cutoff_scores,\n best_cutoff,\n )\n\n pbar.update(1)\n\n return lr_summary, lr_hot_scores\n\n\ndef non_zero_mean(vals):\n \"\"\"Gives the non-zero mean of the values.\"\"\"\n return vals[vals > 0].mean()\n\n\ndef add_diagnostic_plots(\n adata,\n i,\n lr_,\n quant_lrs,\n lr_quantiles,\n lr_scores,\n lr_hot_scores,\n axes,\n cutoffs,\n n_clusters,\n best_cutoff,\n):\n \"\"\"Adds diagnostic plots for the quantile LR pair to a figure to illustrate \\\n how the cutoff is functioning.\n \"\"\"\n q_i = np.where(quant_lrs == lr_)[0][0]\n\n # Scatter plot #\n axes[q_i][0].scatter(cutoffs, n_clusters)\n axes[q_i][0].set_title(f\"n_clusts*mean_spot_score vs cutoff\")\n axes[q_i][0].set_xlabel(\"cutoffs\")\n axes[q_i][0].set_ylabel(\"n_clusts*mean_spot_score\")\n\n # Distribution of scores 
with cutoff #\n scores_ = lr_scores[i, :]\n sb.distplot(\n scores_[scores_ > 0],\n ax=axes[q_i][1],\n hist=True,\n kde=False,\n color=\"red\",\n norm_hist=True,\n )\n v_height = 0.5\n axes[q_i][1].vlines(best_cutoff, 0, v_height)\n axes[q_i][1].text(best_cutoff, v_height, str(round(best_cutoff, 2)))\n axes[q_i][1].set_title(f\"Distrib {round(lr_quantiles[q_i], 2)}({lr_})\")\n\n # Showing before & after filtering spots #\n adata.obsm[\"lr_scores\"] = scores_\n het_plot(\n adata,\n use_het=\"lr_scores\",\n ax=axes[q_i][2],\n show_color_bar=False,\n )\n axes[q_i][2].set_title(\"scores\")\n\n adata.obsm[\"lr_scores\"] = lr_hot_scores[i, :]\n het_plot(\n adata,\n use_het=\"lr_scores\",\n ax=axes[q_i][3],\n show_color_bar=False,\n )\n axes[q_i][3].set_title(\"hotspot scores\")\n"
] | [
[
"numpy.zeros",
"numpy.quantile",
"numpy.mean",
"sklearn.cluster.DBSCAN",
"pandas.DataFrame",
"numpy.argsort",
"matplotlib.pyplot.subplots",
"numpy.argmax",
"matplotlib.pyplot.show",
"numpy.apply_along_axis",
"numpy.array",
"numpy.where",
"numpy.unique",
"sklearn.cluster.AgglomerativeClustering"
]
] |
MarkDaoust/agents | [
"00ddf75a8a35a26a03a9323b78d95c06211b5b3f"
] | [
"tf_agents/bandits/agents/utils_test.py"
] | [
"# coding=utf-8\n# Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for tf_agents.bandits.agents.utils.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import\nimport tensorflow_probability as tfp\n\nfrom tf_agents.bandits.agents import utils\nfrom tf_agents.specs import tensor_spec\n\ntfd = tfp.distributions\ntf.compat.v1.enable_v2_behavior()\n\n\ndef test_cases():\n return parameterized.named_parameters(\n {\n 'testcase_name': '_batch1_contextdim10',\n 'batch_size': 1,\n 'context_dim': 10,\n }, {\n 'testcase_name': '_batch4_contextdim5',\n 'batch_size': 4,\n 'context_dim': 5,\n })\n\n\nclass UtilsTest(tf.test.TestCase, parameterized.TestCase):\n\n def testNumActionsFromTensorSpecGoodSpec(self):\n action_spec = tensor_spec.BoundedTensorSpec(\n dtype=tf.int32, shape=(), minimum=0, maximum=15)\n num_actions = utils.get_num_actions_from_tensor_spec(action_spec)\n self.assertEqual(num_actions, 16)\n\n def testNumActionsFromTensorSpecWrongRank(self):\n action_spec = tensor_spec.BoundedTensorSpec(\n dtype=tf.int32, shape=(2, 3), minimum=0, maximum=15)\n\n with self.assertRaisesRegexp(ValueError, r'Action spec must be a scalar'):\n utils.get_num_actions_from_tensor_spec(action_spec)\n\n @test_cases()\n def testBUpdate(self, batch_size, context_dim):\n b_array = np.array(range(context_dim))\n r_array = np.array(range(batch_size)).reshape((batch_size, 1))\n x_array = np.array(range(batch_size * context_dim)).reshape(\n (batch_size, context_dim))\n rx = r_array * x_array\n expected_b_updated_array = b_array + np.sum(rx, axis=0)\n\n b = tf.constant(b_array, dtype=tf.float32, shape=[context_dim])\n r = tf.constant(r_array, dtype=tf.float32, shape=[batch_size])\n x = tf.constant(x_array, dtype=tf.float32, shape=[batch_size, context_dim])\n b_update = utils.sum_reward_weighted_observations(r, x)\n self.assertAllClose(expected_b_updated_array, self.evaluate(b + b_update))\n\n @test_cases()\n def testBUpdateEmptyObservations(self, batch_size, context_dim):\n r = tf.constant([], dtype=tf.float32, shape=[0, 1])\n x = tf.constant([], dtype=tf.float32, shape=[0, context_dim])\n b_update = utils.sum_reward_weighted_observations(r, x)\n expected_b_update_array = np.zeros([context_dim], dtype=np.float32)\n self.assertAllClose(expected_b_update_array, self.evaluate(b_update))\n\n def testLaplacian1D(self):\n action_spec = tensor_spec.BoundedTensorSpec(\n dtype=tf.int32, shape=(), minimum=0, maximum=4)\n num_actions = utils.get_num_actions_from_tensor_spec(action_spec)\n laplacian_matrix = tf.convert_to_tensor(\n utils.build_laplacian_over_ordinal_integer_actions(action_spec),\n dtype=tf.float32)\n res = tf.matmul(\n laplacian_matrix, tf.ones([num_actions, 1], dtype=tf.float32))\n # The vector of ones is in the null space of the Laplacian matrix.\n 
self.assertAllClose(0.0, self.evaluate(tf.norm(res)))\n\n # The row sum is zero.\n row_sum = tf.reduce_sum(laplacian_matrix, 1)\n self.assertAllClose(0.0, self.evaluate(tf.norm(row_sum)))\n\n # The column sum is zero.\n column_sum = tf.reduce_sum(laplacian_matrix, 0)\n self.assertAllClose(0.0, self.evaluate(tf.norm(column_sum)))\n\n # The diagonal elements are 2.0.\n self.assertAllClose(2.0, laplacian_matrix[1, 1])\n\n laplacian_matrix_expected = np.array(\n [[1.0, -1.0, 0.0, 0.0, 0.0],\n [-1.0, 2.0, -1.0, 0.0, 0.0],\n [0.0, -1.0, 2.0, -1.0, 0.0],\n [0.0, 0.0, -1.0, 2.0, -1.0],\n [0.0, 0.0, 0.0, -1.0, 1.0]])\n self.assertAllClose(laplacian_matrix_expected,\n self.evaluate(laplacian_matrix))\n\n def testComputePairwiseDistances(self):\n input_vects = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n pdist_matrix = np.array(\n [[0.0, 27.0, 108.0,],\n [27.0, 0.0, 27.0],\n [108.0, 27.0, 0.0]])\n tf_dist_matrix = utils.compute_pairwise_distances(\n tf.constant(input_vects, dtype=tf.float32))\n self.assertAllClose(pdist_matrix, self.evaluate(tf_dist_matrix))\n\n def testBuildLaplacianNearestNeighborGraph(self):\n input_vects = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9],\n [10, 11, 12], [13, 14, 15]])\n num_actions = input_vects.shape[0]\n laplacian_matrix = utils.build_laplacian_nearest_neighbor_graph(\n tf.constant(input_vects, dtype=tf.float32), k=2)\n\n # The vector of ones is in the null space of the Laplacian matrix.\n res = tf.matmul(\n laplacian_matrix, tf.ones([num_actions, 1], dtype=tf.float32))\n self.assertAllClose(0.0, self.evaluate(tf.norm(res)))\n\n # The row sum is zero.\n row_sum = tf.reduce_sum(laplacian_matrix, 1)\n self.assertAllClose(0.0, self.evaluate(tf.norm(row_sum)))\n\n # The column sum is zero.\n column_sum = tf.reduce_sum(laplacian_matrix, 0)\n self.assertAllClose(0.0, self.evaluate(tf.norm(column_sum)))\n\n self.assertAllClose(2.0, laplacian_matrix[0, 0])\n self.assertAllClose(4.0, laplacian_matrix[2, 2])\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"numpy.sum",
"tensorflow.norm",
"numpy.zeros",
"tensorflow.ones",
"tensorflow.compat.v1.enable_v2_behavior",
"numpy.array",
"tensorflow.constant",
"tensorflow.reduce_sum",
"tensorflow.test.main"
]
] |
abhinavralhan/dask | [
"e840ba38eadfa93c3b9959347f0a43c1279a94ab"
] | [
"dask/dataframe/tests/test_hashing.py"
] | [
"import numpy as np\nimport pandas as pd\nimport pandas.util.testing as tm\n\nimport pytest\n\nfrom dask.dataframe.hashing import hash_pandas_object\nfrom dask.dataframe.utils import assert_eq\n\n\[email protected]('obj', [\n pd.Series([1, 2, 3]),\n pd.Series([1.0, 1.5, 3.2]),\n pd.Series([1.0, 1.5, 3.2], index=[1.5, 1.1, 3.3]),\n pd.Series(['a', 'b', 'c']),\n pd.Series([True, False, True]),\n pd.Index([1, 2, 3]),\n pd.Index([True, False, True]),\n pd.DataFrame({'x': ['a', 'b', 'c'], 'y': [1, 2, 3]}),\n pd.util.testing.makeMissingDataframe(),\n pd.util.testing.makeMixedDataFrame(),\n pd.util.testing.makeTimeDataFrame(),\n pd.util.testing.makeTimeSeries(),\n pd.util.testing.makeTimedeltaIndex()])\ndef test_hash_pandas_object(obj):\n a = hash_pandas_object(obj)\n b = hash_pandas_object(obj)\n if isinstance(a, np.ndarray):\n np.testing.assert_equal(a, b)\n else:\n assert_eq(a, b)\n\n\ndef test_categorical_consistency():\n # Check that categoricals hash consistent with their values, not codes\n # This should work for categoricals of any dtype\n for s1 in [pd.Series(['a', 'b', 'c', 'd']),\n pd.Series([1000, 2000, 3000, 4000]),\n pd.Series(pd.date_range(0, periods=4))]:\n s2 = s1.astype('category').cat.set_categories(s1)\n s3 = s2.cat.set_categories(list(reversed(s1)))\n for categorize in [True, False]:\n # These should all hash identically\n h1 = hash_pandas_object(s1, categorize=categorize)\n h2 = hash_pandas_object(s2, categorize=categorize)\n h3 = hash_pandas_object(s3, categorize=categorize)\n tm.assert_series_equal(h1, h2)\n tm.assert_series_equal(h1, h3)\n\n\ndef test_object_missing_values():\n # Check that the presence of missing values doesn't change how object dtype\n # is hashed.\n s = pd.Series(['a', 'b', 'c', None])\n h1 = hash_pandas_object(s).iloc[:3]\n h2 = hash_pandas_object(s.iloc[:3])\n tm.assert_series_equal(h1, h2)\n"
] | [
[
"pandas.util.testing.makeTimeDataFrame",
"pandas.util.testing.makeTimeSeries",
"pandas.Series",
"pandas.date_range",
"numpy.testing.assert_equal",
"pandas.DataFrame",
"pandas.util.testing.makeMixedDataFrame",
"pandas.util.testing.makeTimedeltaIndex",
"pandas.util.testing.assert_series_equal",
"pandas.Index",
"pandas.util.testing.makeMissingDataframe"
]
] |
alechfho/dog_breed | [
"2e2f7083c859fdb250f5ba920246b9d2f8168b4d"
] | [
"dataset_processing.py"
] | [
"import numpy as np\nimport pandas as pd\n\n\ndef partition_images(df_labels, identifier_label=None, label_postfix='postfix', target_dir='./', filter_identity=[],\n dev_portion=0.20, encoding_strategy='vgg19_4096'):\n if np.size(filter_identity) == 0:\n filter_identity = df_labels[identifier_label].unique()\n\n df_filter_labels = df_labels[df_labels.breed.isin(filter_identity)]\n df_filter_identifier_label_count = df_filter_labels.groupby([identifier_label]).agg(['count'])\n df_filter_identifier_label_count['dev_count'] = np.ceil(\n df_filter_identifier_label_count[df_filter_identifier_label_count.columns[0]] * dev_portion).astype(int)\n\n df_result_train = pd.DataFrame()\n df_result_dev = pd.DataFrame()\n\n for ident_label, row in df_filter_identifier_label_count.iterrows():\n total = row[0]\n dev_count = row[1]\n train_count = total - dev_count\n df_train, df_dev = filter_images_by_label(df_filter_labels, ident_label, train_count, dev_count)\n df_result_train = df_result_train.append(df_train)\n df_result_dev = df_result_dev.append(df_dev)\n\n train_label = '{target_dir}/labels_train_{label_postfix}.csv'.format(target_dir=target_dir,\n label_postfix=label_postfix)\n dev_label = '{target_dir}/labels_dev_{label_postfix}.csv'.format(target_dir=target_dir, label_postfix=label_postfix)\n\n print('Split into training and dev sets')\n print('Training set in ' + train_label)\n print(df_result_train.groupby([identifier_label]).agg(['count']))\n print('Dev set in ' + dev_label)\n print(df_result_dev.groupby([identifier_label]).agg(['count']))\n\n df_result_train.to_csv(train_label, index=False)\n df_result_dev.to_csv(dev_label, index=False)\n return\n\n\ndef filter_images_by_label(df_labels, label, train_count, dev_count):\n df_selected_label = df_labels[df_labels.breed.isin([label])]\n df_selected_label_train = df_selected_label.head(train_count)\n df_selected_label_vaidation = df_selected_label.tail(dev_count)\n return df_selected_label_train, df_selected_label_vaidation\n"
] | [
[
"pandas.DataFrame",
"numpy.ceil",
"numpy.size"
]
] |
ebothmann/heppyplot | [
"dab969879391f70a91c34f71482a9691b9c80141"
] | [
"heppyplot/plot_helpers.py"
] | [
"import math\n\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport matplotlib.transforms as mtransforms\nfrom mpl_toolkits.axes_grid.anchored_artists import AnchoredText\n\ndef setup_axes(diff=False):\n fig = plt.figure()\n axes = []\n if diff:\n gs = gridspec.GridSpec(2, 1, height_ratios=[2,1])\n main_axis = plt.subplot(gs[0])\n axes.append(plt.subplot(gs[0]))\n axes.append(plt.subplot(gs[1], sharex=main_axis))\n else:\n axes.append(plt.subplot())\n return fig, axes\n\ndef layout_main_and_diff_axis(fig, axes):\n main_axis, diff_axis = axes\n fig.subplots_adjust(hspace=0.0)\n main_axis.spines['bottom'].set_visible(False)\n plt.setp(main_axis.get_xticklabels(), visible=False)\n main_axis.set_xlabel('')\n diff_axis.xaxis.tick_bottom()\n\ndef configure_legend_on_axis(axis, title='', loc='best', borderpad=1.2, draws_background=True):\n legend = axis.legend(loc=loc,\n title=title,\n borderaxespad=borderpad,\n framealpha=0.8,\n frameon=draws_background,\n fancybox=draws_background)\n legend.get_frame().set_color((0.96,0.96,0.96))\n for line in legend.get_lines():\n line.set_alpha(1.0)\n\ndef add_annotation_on_axis(axis, annotation, loc='upper right', borderpad=1.2):\n codes = {'upper right': 1, 'upper left': 2, 'lower left': 3, 'lower right': 4,\n 'right': 5, 'center left': 6,'center right': 7,\n 'lower center': 8, 'upper center': 9, 'center': 10}\n at = AnchoredText(annotation,\n codes[loc],\n frameon=False,\n borderpad=borderpad,\n prop=dict(linespacing=2.5))\n axis.add_artist(at)\n\ndef get_major_ticks_within_view_interval(axis):\n interval = axis.get_view_interval()\n ticks_in_view_interval = []\n for tick, loc in zip(axis.get_major_ticks(),\n axis.get_major_locator()()):\n if mtransforms.interval_contains(interval, loc):\n ticks_in_view_interval.append(tick)\n return ticks_in_view_interval\n\ndef set_figure_size_with_width(width):\n params = {'figure.figsize': figure_size_from_width(width)}\n plt.rcParams.update(params)\n\ndef figure_size_from_width(width):\n \"\"\"Returns a single plot figure size in inches given a width in points\"\"\"\n inches_per_point = 1.0/72.27\n golden_mean = (math.sqrt(5)-1.0)/2.0\n inches_width = width * inches_per_point\n fig_height = inches_width*golden_mean\n return [inches_width,fig_height]\n"
] | [
[
"matplotlib.pyplot.figure",
"matplotlib.transforms.interval_contains",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.subplot",
"matplotlib.gridspec.GridSpec"
]
] |
SeanNaren/transformers | [
"8d43c71a1ca3ad322cc45008eb66a5611f1e017e"
] | [
"examples/tensorflow/text-classification/run_text_classification.py"
] | [
"#!/usr/bin/env python\n# coding=utf-8\n# Copyright 2021 The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Fine-tuning the library models for sequence classification.\"\"\"\n# You can also adapt this script on your own text classification task. Pointers for this are left as comments.\n\nimport logging\nimport os\nimport random\nimport sys\nfrom dataclasses import dataclass, field\nfrom math import ceil\nfrom pathlib import Path\nfrom typing import Optional\n\nimport numpy as np\nfrom datasets import load_dataset\n\nfrom transformers import (\n AutoConfig,\n AutoTokenizer,\n HfArgumentParser,\n PretrainedConfig,\n TFAutoModelForSequenceClassification,\n TrainingArguments,\n set_seed,\n)\nfrom transformers.file_utils import CONFIG_NAME, TF2_WEIGHTS_NAME\n\n\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"1\" # Reduce the amount of console output from TF\nimport tensorflow as tf # noqa: E402\n\n\nlogger = logging.getLogger(__name__)\n\n\n# region Helper classes\nclass DataSequence(tf.keras.utils.Sequence):\n # We use a Sequence object to load the data. Although it's completely possible to load your data as Numpy/TF arrays\n # and pass those straight to the Model, this constrains you in a couple of ways. Most notably, it requires all\n # the data to be padded to the length of the longest input example, and it also requires the whole dataset to be\n # loaded into memory. 
If these aren't major problems for you, you can skip the sequence object in your own code!\n def __init__(self, dataset, non_label_column_names, batch_size, labels, shuffle=True):\n super().__init__()\n # Retain all of the columns not present in the original data - these are the ones added by the tokenizer\n self.data = {\n key: dataset[key]\n for key in dataset.features.keys()\n if key not in non_label_column_names and key != \"label\"\n }\n data_lengths = {len(array) for array in self.data.values()}\n assert len(data_lengths) == 1, \"Dataset arrays differ in length!\"\n self.data_length = data_lengths.pop()\n self.num_batches = ceil(self.data_length / batch_size)\n if labels:\n self.labels = np.array(dataset[\"label\"])\n assert len(self.labels) == self.data_length, \"Labels not the same length as input arrays!\"\n else:\n self.labels = None\n self.batch_size = batch_size\n self.shuffle = shuffle\n if self.shuffle:\n # Shuffle the data order\n self.permutation = np.random.permutation(self.data_length)\n else:\n self.permutation = None\n\n def on_epoch_end(self):\n # If we're shuffling, reshuffle the data order after each epoch\n if self.shuffle:\n self.permutation = np.random.permutation(self.data_length)\n\n def __getitem__(self, item):\n # Note that this yields a batch, not a single sample\n batch_start = item * self.batch_size\n batch_end = (item + 1) * self.batch_size\n if self.shuffle:\n data_indices = self.permutation[batch_start:batch_end]\n else:\n data_indices = np.arange(batch_start, batch_end)\n # We want to pad the data as little as possible, so we only pad each batch\n # to the maximum length within that batch. We do that by stacking the variable-\n # length inputs into a ragged tensor and then densifying it.\n batch_input = {\n key: tf.ragged.constant([data[i] for i in data_indices]).to_tensor() for key, data in self.data.items()\n }\n if self.labels is None:\n return batch_input\n else:\n batch_labels = self.labels[data_indices]\n return batch_input, batch_labels\n\n def __len__(self):\n return self.num_batches\n\n\nclass SavePretrainedCallback(tf.keras.callbacks.Callback):\n # Hugging Face models have a save_pretrained() method that saves both the weights and the necessary\n # metadata to allow them to be loaded as a pretrained model in future. This is a simple Keras callback\n # that saves the model with this method after each epoch.\n def __init__(self, output_dir, **kwargs):\n super().__init__()\n self.output_dir = output_dir\n\n def on_epoch_end(self, epoch, logs=None):\n self.model.save_pretrained(self.output_dir)\n\n\n# endregion\n\n# region Command-line arguments\n@dataclass\nclass DataTrainingArguments:\n \"\"\"\n Arguments pertaining to what data we are going to input our model for training and eval.\n\n Using `HfArgumentParser` we can turn this class\n into argparse arguments to be able to specify them on\n the command line.\n \"\"\"\n\n train_file: Optional[str] = field(\n default=None, metadata={\"help\": \"A csv or a json file containing the training data.\"}\n )\n validation_file: Optional[str] = field(\n default=None, metadata={\"help\": \"A csv or a json file containing the validation data.\"}\n )\n test_file: Optional[str] = field(default=None, metadata={\"help\": \"A csv or a json file containing the test data.\"})\n\n max_seq_length: int = field(\n default=128,\n metadata={\n \"help\": \"The maximum total input sequence length after tokenization. 
Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\"\n },\n )\n overwrite_cache: bool = field(\n default=False, metadata={\"help\": \"Overwrite the cached preprocessed datasets or not.\"}\n )\n pad_to_max_length: bool = field(\n default=False,\n metadata={\n \"help\": \"Whether to pad all samples to `max_seq_length`. \"\n \"If False, will pad the samples dynamically when batching to the maximum length in the batch.\"\n },\n )\n max_train_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"For debugging purposes or quicker training, truncate the number of training examples to this \"\n \"value if set.\"\n },\n )\n max_eval_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"For debugging purposes or quicker training, truncate the number of evaluation examples to this \"\n \"value if set.\"\n },\n )\n max_predict_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"For debugging purposes or quicker training, truncate the number of predict examples to this \"\n \"value if set.\"\n },\n )\n\n def __post_init__(self):\n train_extension = self.train_file.split(\".\")[-1].lower() if self.train_file is not None else None\n validation_extension = (\n self.validation_file.split(\".\")[-1].lower() if self.validation_file is not None else None\n )\n test_extension = self.test_file.split(\".\")[-1].lower() if self.test_file is not None else None\n extensions = {train_extension, validation_extension, test_extension}\n extensions.discard(None)\n assert len(extensions) != 0, \"Need to supply at least one of --train_file, --validation_file or --test_file!\"\n assert len(extensions) == 1, \"All input files should have the same file extension, either csv or json!\"\n assert \"csv\" in extensions or \"json\" in extensions, \"Input files should have either .csv or .json extensions!\"\n self.input_file_extension = extensions.pop()\n\n\n@dataclass\nclass ModelArguments:\n \"\"\"\n Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.\n \"\"\"\n\n model_name_or_path: str = field(\n metadata={\"help\": \"Path to pretrained model or model identifier from huggingface.co/models\"}\n )\n config_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained config name or path if not the same as model_name\"}\n )\n tokenizer_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained tokenizer name or path if not the same as model_name\"}\n )\n cache_dir: Optional[str] = field(\n default=None,\n metadata={\"help\": \"Where do you want to store the pretrained models downloaded from huggingface.co\"},\n )\n model_revision: str = field(\n default=\"main\",\n metadata={\"help\": \"The specific model version to use (can be a branch name, tag name or commit id).\"},\n )\n use_auth_token: bool = field(\n default=False,\n metadata={\n \"help\": \"Will use the token generated when running `transformers-cli login` (necessary to use this script \"\n \"with private models).\"\n },\n )\n\n\n# endregion\n\n\ndef main():\n # region Argument parsing\n # See all possible arguments in src/transformers/training_args.py\n # or by passing the --help flag to this script.\n # We now keep distinct sets of args, for a cleaner separation of concerns.\n\n parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))\n if len(sys.argv) == 2 and sys.argv[1].endswith(\".json\"):\n # If we pass only one argument to the script and it's the path to a json file,\n 
# let's parse it to get our arguments.\n model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))\n else:\n model_args, data_args, training_args = parser.parse_args_into_dataclasses()\n output_dir = Path(training_args.output_dir)\n output_dir.mkdir(parents=True, exist_ok=True)\n # endregion\n\n # region Checkpoints\n # Detecting last checkpoint.\n checkpoint = None\n if len(os.listdir(training_args.output_dir)) > 0 and not training_args.overwrite_output_dir:\n if (output_dir / CONFIG_NAME).is_file() and (output_dir / TF2_WEIGHTS_NAME).is_file():\n checkpoint = output_dir\n logger.info(\n f\"Checkpoint detected, resuming training from checkpoint in {training_args.output_dir}. To avoid this\"\n \" behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.\"\n )\n else:\n raise ValueError(\n f\"Output directory ({training_args.output_dir}) already exists and is not empty. \"\n \"Use --overwrite_output_dir to continue regardless.\"\n )\n\n # endregion\n\n # region Logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n handlers=[logging.StreamHandler(sys.stdout)],\n )\n logger.setLevel(logging.INFO)\n\n logger.info(f\"Training/evaluation parameters {training_args}\")\n # endregion\n\n # region Loading data\n # For CSV/JSON files, this script will use the 'label' field as the label and the 'sentence1' and optionally\n # 'sentence2' fields as inputs if they exist. If not, the first two fields not named label are used if at least two\n # columns are provided. Note that the term 'sentence' can be slightly misleading, as they often contain more than\n # a single grammatical sentence, when the task requires it.\n #\n # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this\n # single column. You can easily tweak this behavior (see below)\n #\n # In distributed training, the load_dataset function guarantee that only one local process can concurrently\n # download the dataset.\n data_files = {\"train\": data_args.train_file, \"validation\": data_args.validation_file, \"test\": data_args.test_file}\n data_files = {key: file for key, file in data_files.items() if file is not None}\n\n for key in data_files.keys():\n logger.info(f\"Loading a local file for {key}: {data_files[key]}\")\n\n if data_args.input_file_extension == \"csv\":\n # Loading a dataset from local csv files\n datasets = load_dataset(\"csv\", data_files=data_files, cache_dir=model_args.cache_dir)\n else:\n # Loading a dataset from local json files\n datasets = load_dataset(\"json\", data_files=data_files, cache_dir=model_args.cache_dir)\n # See more about loading any type of standard or custom dataset at\n # https://huggingface.co/docs/datasets/loading_datasets.html.\n # endregion\n\n # region Label preprocessing\n # If you've passed us a training set, we try to infer your labels from it\n if \"train\" in datasets:\n # By default we assume that if your label column looks like a float then you're doing regression,\n # and if not then you're doing classification. 
This is something you may want to change!\n is_regression = datasets[\"train\"].features[\"label\"].dtype in [\"float32\", \"float64\"]\n if is_regression:\n num_labels = 1\n else:\n # A useful fast method:\n # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique\n label_list = datasets[\"train\"].unique(\"label\")\n label_list.sort() # Let's sort it for determinism\n num_labels = len(label_list)\n # If you haven't passed a training set, we read label info from the saved model (this happens later)\n else:\n num_labels = None\n label_list = None\n is_regression = None\n # endregion\n\n # region Load pretrained model and tokenizer\n # Set seed before initializing model\n set_seed(training_args.seed)\n #\n # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently\n # download model & vocab.\n if checkpoint is not None:\n config_path = training_args.output_dir\n elif model_args.config_name:\n config_path = model_args.config_name\n else:\n config_path = model_args.model_name_or_path\n if num_labels is not None:\n config = AutoConfig.from_pretrained(\n config_path,\n num_labels=num_labels,\n cache_dir=model_args.cache_dir,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n else:\n config = AutoConfig.from_pretrained(\n config_path,\n cache_dir=model_args.cache_dir,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n tokenizer = AutoTokenizer.from_pretrained(\n model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,\n cache_dir=model_args.cache_dir,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n if checkpoint is None:\n model_path = model_args.model_name_or_path\n else:\n model_path = checkpoint\n model = TFAutoModelForSequenceClassification.from_pretrained(\n model_path,\n config=config,\n cache_dir=model_args.cache_dir,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n # endregion\n\n # region Optimizer, loss and compilation\n optimizer = tf.keras.optimizers.Adam(\n learning_rate=training_args.learning_rate,\n beta_1=training_args.adam_beta1,\n beta_2=training_args.adam_beta2,\n epsilon=training_args.adam_epsilon,\n clipnorm=training_args.max_grad_norm,\n )\n if is_regression:\n loss = tf.keras.losses.MeanSquaredError()\n metrics = []\n else:\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n metrics = [\"accuracy\"]\n model.compile(optimizer=optimizer, loss=loss, metrics=metrics)\n # endregion\n\n # region Dataset preprocessing\n # Again, we try to have some nice defaults but don't hesitate to tweak to your use case.\n column_names = {col for cols in datasets.column_names.values() for col in cols}\n non_label_column_names = [name for name in column_names if name != \"label\"]\n if \"sentence1\" in non_label_column_names and \"sentence2\" in non_label_column_names:\n sentence1_key, sentence2_key = \"sentence1\", \"sentence2\"\n elif \"sentence1\" in non_label_column_names:\n sentence1_key, sentence2_key = \"sentence1\", None\n else:\n if len(non_label_column_names) >= 2:\n sentence1_key, sentence2_key = non_label_column_names[:2]\n else:\n sentence1_key, sentence2_key = non_label_column_names[0], None\n\n # Padding strategy\n if data_args.pad_to_max_length:\n padding = \"max_length\"\n else:\n # We will pad later, 
dynamically at batch creation, to the max sequence length in each batch\n padding = False\n\n if data_args.max_seq_length > tokenizer.model_max_length:\n logger.warning(\n f\"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the\"\n f\"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.\"\n )\n max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)\n\n # Ensure that our labels match the model's, if it has some pre-specified\n if \"train\" in datasets:\n if not is_regression and model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id:\n label_name_to_id = model.config.label2id\n if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):\n label_to_id = label_name_to_id # Use the model's labels\n else:\n logger.warning(\n \"Your model seems to have been trained with labels, but they don't match the dataset: \",\n f\"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}.\"\n \"\\nIgnoring the model labels as a result.\",\n )\n label_to_id = {v: i for i, v in enumerate(label_list)}\n elif not is_regression:\n label_to_id = {v: i for i, v in enumerate(label_list)}\n else:\n label_to_id = None\n # Now we've established our label2id, let's overwrite the model config with it.\n model.config.label2id = label_to_id\n if model.config.label2id is not None:\n model.config.id2label = {id: label for label, id in label_to_id.items()}\n else:\n model.config.id2label = None\n else:\n label_to_id = model.config.label2id # Just load the data from the model\n\n if \"validation\" in datasets and model.config.label2id is not None:\n validation_label_list = datasets[\"validation\"].unique(\"label\")\n for val_label in validation_label_list:\n assert val_label in label_to_id, f\"Label {val_label} is in the validation set but not the training set!\"\n\n def preprocess_function(examples):\n # Tokenize the texts\n args = (\n (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])\n )\n result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True)\n\n # Map labels to IDs\n if model.config.label2id is not None and \"label\" in examples:\n result[\"label\"] = [(model.config.label2id[l] if l != -1 else -1) for l in examples[\"label\"]]\n return result\n\n datasets = datasets.map(preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache)\n\n if \"train\" in datasets:\n train_dataset = datasets[\"train\"]\n if data_args.max_train_samples is not None:\n train_dataset = train_dataset.select(range(data_args.max_train_samples))\n # Log a few random samples from the training set so we can see that it's working as expected:\n for index in random.sample(range(len(train_dataset)), 3):\n logger.info(f\"Sample {index} of the training set: {train_dataset[index]}.\")\n\n if \"validation\" in datasets:\n eval_dataset = datasets[\"validation\"]\n if data_args.max_eval_samples is not None:\n eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))\n\n if \"test\" in datasets:\n predict_dataset = datasets[\"test\"]\n if data_args.max_predict_samples is not None:\n predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))\n\n # endregion\n\n # region Training\n if \"train\" in datasets:\n training_dataset = DataSequence(\n train_dataset, non_label_column_names, batch_size=training_args.per_device_train_batch_size, labels=True\n )\n 
if \"validation\" in datasets:\n eval_dataset = DataSequence(\n eval_dataset, non_label_column_names, batch_size=training_args.per_device_eval_batch_size, labels=True\n )\n else:\n eval_dataset = None\n\n callbacks = [SavePretrainedCallback(output_dir=training_args.output_dir)]\n model.fit(\n training_dataset,\n validation_data=eval_dataset,\n epochs=int(training_args.num_train_epochs),\n callbacks=callbacks,\n )\n elif \"validation\" in datasets:\n # If there's a validation dataset but no training set, just evaluate the metrics\n eval_dataset = DataSequence(\n eval_dataset, non_label_column_names, batch_size=training_args.per_device_eval_batch_size, labels=True\n )\n logger.info(\"Computing metrics on validation data...\")\n if is_regression:\n loss = model.evaluate(eval_dataset)\n logger.info(f\"Loss: {loss:.5f}\")\n else:\n loss, accuracy = model.evaluate(eval_dataset)\n logger.info(f\"Loss: {loss:.5f}, Accuracy: {accuracy * 100:.4f}%\")\n # endregion\n\n # region Prediction\n if \"test\" in datasets:\n logger.info(\"Doing predictions on Predict dataset...\")\n\n predict_dataset = DataSequence(\n predict_dataset, non_label_column_names, batch_size=training_args.per_device_eval_batch_size, labels=False\n )\n predictions = model.predict(predict_dataset)[\"logits\"]\n predictions = np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1)\n output_predict_file = os.path.join(training_args.output_dir, \"predict_results.txt\")\n with open(output_predict_file, \"w\") as writer:\n writer.write(\"index\\tprediction\\n\")\n for index, item in enumerate(predictions):\n if is_regression:\n writer.write(f\"{index}\\t{item:3.3f}\\n\")\n else:\n item = model.config.id2label[item]\n writer.write(f\"{index}\\t{item}\\n\")\n logger.info(f\"Wrote predictions to {output_predict_file}!\")\n # endregion\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"tensorflow.keras.losses.MeanSquaredError",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"numpy.squeeze",
"numpy.random.permutation",
"tensorflow.ragged.constant",
"numpy.argmax",
"numpy.arange",
"numpy.array"
]
] |
ParticularMiner/dask | [
"f40ef97ac802efb6d8bef03b03c6357cf871bc0a"
] | [
"dask/dataframe/io/parquet/fastparquet.py"
] | [
"import copy\nimport pickle\nimport threading\nimport warnings\nfrom collections import OrderedDict, defaultdict\nfrom contextlib import ExitStack\n\nimport numpy as np\nimport pandas as pd\nimport tlz as toolz\nfrom packaging.version import parse as parse_version\n\nfrom dask.core import flatten\n\ntry:\n import fastparquet\n from fastparquet import ParquetFile\n from fastparquet.util import ex_from_sep, get_file_scheme, groupby_types, val_to_num\n from fastparquet.writer import make_part_file, partition_on_columns\nexcept ImportError:\n pass\n\nfrom dask.base import tokenize\n\n#########################\n# Fastparquet interface #\n#########################\nfrom dask.dataframe.io.parquet.utils import (\n Engine,\n _get_aggregation_depth,\n _normalize_index_columns,\n _parse_pandas_metadata,\n _process_open_file_options,\n _row_groups_to_parts,\n _set_gather_statistics,\n _set_metadata_task_size,\n _sort_and_analyze_paths,\n _split_user_options,\n)\nfrom dask.dataframe.io.utils import _is_local_fs, _meta_from_dtypes, _open_input_files\nfrom dask.dataframe.utils import UNKNOWN_CATEGORIES\nfrom dask.delayed import Delayed\nfrom dask.utils import natural_sort_key\n\n# Thread lock required to reset row-groups\n_FP_FILE_LOCK = threading.RLock()\n\n\ndef _paths_to_cats(paths, file_scheme):\n \"\"\"\n Extract categorical fields and labels from hive- or drill-style paths.\n FixMe: This has been pasted from https://github.com/dask/fastparquet/pull/471\n Use fastparquet.api.paths_to_cats from fastparquet>0.3.2 instead.\n\n Parameters\n ----------\n paths (Iterable[str]): file paths relative to root\n file_scheme (str):\n\n Returns\n -------\n cats (OrderedDict[str, List[Any]]): a dict of field names and their values\n \"\"\"\n if file_scheme in [\"simple\", \"flat\", \"other\"]:\n cats = {}\n return cats\n\n cats = OrderedDict()\n raw_cats = OrderedDict()\n s = ex_from_sep(\"/\")\n paths = toolz.unique(paths)\n if file_scheme == \"hive\":\n partitions = toolz.unique((k, v) for path in paths for k, v in s.findall(path))\n for key, val in partitions:\n cats.setdefault(key, set()).add(val_to_num(val))\n raw_cats.setdefault(key, set()).add(val)\n else:\n i_val = toolz.unique(\n (i, val) for path in paths for i, val in enumerate(path.split(\"/\")[:-1])\n )\n for i, val in i_val:\n key = \"dir%i\" % i\n cats.setdefault(key, set()).add(val_to_num(val))\n raw_cats.setdefault(key, set()).add(val)\n\n for key, v in cats.items():\n # Check that no partition names map to the same value after transformation by val_to_num\n raw = raw_cats[key]\n if len(v) != len(raw):\n conflicts_by_value = OrderedDict()\n for raw_val in raw_cats[key]:\n conflicts_by_value.setdefault(val_to_num(raw_val), set()).add(raw_val)\n conflicts = [\n c for k in conflicts_by_value.values() if len(k) > 1 for c in k\n ]\n raise ValueError(\"Partition names map to the same value: %s\" % conflicts)\n vals_by_type = groupby_types(v)\n\n # Check that all partition names map to the same type after transformation by val_to_num\n if len(vals_by_type) > 1:\n examples = [x[0] for x in vals_by_type.values()]\n warnings.warn(\n \"Partition names coerce to values of different types, e.g. 
%s\"\n % examples\n )\n\n cats = OrderedDict([(key, list(v)) for key, v in cats.items()])\n return cats\n\n\npaths_to_cats = (\n _paths_to_cats # FixMe: use fastparquet.api.paths_to_cats for fastparquet>0.3.2\n)\n\n\nclass FastParquetEngine(Engine):\n @classmethod\n def _organize_row_groups(\n cls,\n pf,\n split_row_groups,\n gather_statistics,\n stat_col_indices,\n filters,\n dtypes,\n base_path,\n has_metadata_file,\n chunksize,\n aggregation_depth,\n ):\n \"\"\"Organize row-groups by file.\"\"\"\n\n # Get partitioning metadata\n pqpartitions = list(pf.cats)\n\n # Fastparquet does not use a natural sorting\n # order for partitioned data. Re-sort by path\n if (\n pqpartitions\n and aggregation_depth\n and pf.row_groups\n and pf.row_groups[0].columns[0].file_path\n ):\n pf.row_groups = sorted(\n pf.row_groups,\n key=lambda x: natural_sort_key(x.columns[0].file_path),\n )\n\n # Store types specified in pandas metadata\n pandas_type = {}\n if pf.row_groups and pf.pandas_metadata:\n for c in pf.pandas_metadata.get(\"columns\", []):\n if \"field_name\" in c:\n pandas_type[c[\"field_name\"]] = c.get(\"pandas_type\", None)\n\n # Get the number of row groups per file\n single_rg_parts = int(split_row_groups) == 1\n file_row_groups = defaultdict(list)\n file_row_group_stats = defaultdict(list)\n file_row_group_column_stats = defaultdict(list)\n cmax_last = {}\n for rg, row_group in enumerate(pf.row_groups):\n\n # We can filter partition columns here without dealing\n # with statistics\n if (\n pqpartitions\n and filters\n and fastparquet.api.filter_out_cats(row_group, filters)\n ):\n continue\n\n # NOTE: Here we assume that all column chunks are stored\n # in the same file. This is not strictly required by the\n # parquet spec.\n fp = row_group.columns[0].file_path\n fpath = fp.decode() if isinstance(fp, bytes) else fp\n if fpath is None:\n if not has_metadata_file:\n # There doesn't need to be a file_path if the\n # row group is in the same file as the metadata.\n # Assume this is a single-file dataset.\n fpath = pf.fn\n base_path = base_path or \"\"\n else:\n raise ValueError(\n \"Global metadata structure is missing a file_path string. \"\n \"If the dataset includes a _metadata file, that file may \"\n \"have one or more missing file_path fields.\"\n )\n\n # Append a tuple to file_row_groups. 
This tuple will\n # be structured as: `(<local-row-group-id>, <global-row-group-id>)`\n if file_row_groups[fpath]:\n file_row_groups[fpath].append((file_row_groups[fpath][-1][0] + 1, rg))\n else:\n file_row_groups[fpath].append((0, rg))\n\n if gather_statistics:\n if single_rg_parts:\n s = {\n \"file_path_0\": fpath,\n \"num-rows\": row_group.num_rows,\n \"total_byte_size\": row_group.total_byte_size,\n \"columns\": [],\n }\n else:\n s = {\n \"num-rows\": row_group.num_rows,\n \"total_byte_size\": row_group.total_byte_size,\n }\n cstats = []\n for name, i in stat_col_indices.items():\n column = row_group.columns[i]\n if column.meta_data.statistics:\n cmin = None\n cmax = None\n # TODO: Avoid use of `pf.statistics`\n if pf.statistics[\"min\"][name][0] is not None:\n cmin = pf.statistics[\"min\"][name][rg]\n cmax = pf.statistics[\"max\"][name][rg]\n elif dtypes[name] == \"object\":\n cmin = column.meta_data.statistics.min_value\n cmax = column.meta_data.statistics.max_value\n # Older versions may not have cmin/cmax_value\n if cmin is None:\n cmin = column.meta_data.statistics.min\n if cmax is None:\n cmax = column.meta_data.statistics.max\n # Decode bytes as long as \"bytes\" is not the\n # expected `pandas_type` for this column\n if (\n isinstance(cmin, (bytes, bytearray))\n and pandas_type.get(name, None) != \"bytes\"\n ):\n cmin = cmin.decode(\"utf-8\")\n cmax = cmax.decode(\"utf-8\")\n if isinstance(cmin, np.datetime64):\n tz = getattr(dtypes[name], \"tz\", None)\n cmin = pd.Timestamp(cmin, tz=tz)\n cmax = pd.Timestamp(cmax, tz=tz)\n last = cmax_last.get(name, None)\n\n if not (filters or chunksize or aggregation_depth):\n # Only think about bailing if we don't need\n # stats for filtering\n if cmin is None or (last and cmin < last):\n # We are collecting statistics for divisions\n # only (no filters) - Column isn't sorted, or\n # we have an all-null partition, so lets bail.\n #\n # Note: This assumes ascending order.\n #\n gather_statistics = False\n file_row_group_stats = {}\n file_row_group_column_stats = {}\n break\n\n if single_rg_parts:\n s[\"columns\"].append(\n {\n \"name\": name,\n \"min\": cmin,\n \"max\": cmax,\n }\n )\n else:\n cstats += [cmin, cmax]\n cmax_last[name] = cmax\n else:\n if (\n not (filters or chunksize or aggregation_depth)\n and column.meta_data.num_values > 0\n ):\n # We are collecting statistics for divisions\n # only (no filters) - Lets bail.\n gather_statistics = False\n file_row_group_stats = {}\n file_row_group_column_stats = {}\n break\n\n if single_rg_parts:\n s[\"columns\"].append({\"name\": name})\n else:\n cstats += [None, None, None]\n if gather_statistics:\n file_row_group_stats[fpath].append(s)\n if not single_rg_parts:\n file_row_group_column_stats[fpath].append(tuple(cstats))\n\n return (\n file_row_groups,\n file_row_group_stats,\n file_row_group_column_stats,\n gather_statistics,\n base_path,\n )\n\n @classmethod\n def _get_thrift_row_groups(\n cls,\n pf,\n filename,\n row_groups,\n ):\n \"\"\"Turn a set of row-groups into bytes-serialized form\n using thrift via pickle.\n \"\"\"\n\n real_row_groups = []\n for rg, rg_global in row_groups:\n row_group = pf.row_groups[rg_global]\n columns = row_group.columns\n for c, col in enumerate(columns):\n if c:\n col.file_path = None\n md = col.meta_data\n md.key_value_metadata = None\n # NOTE: Fastparquet may need the null count in the\n # statistics, so we cannot just set statistics\n # to none. 
Set attributes separately:\n st = md.statistics\n if st:\n st.distinct_count = None\n st.max = None\n st.min = None\n st.max_value = None\n st.min_value = None\n md.encodings = None\n md.total_uncompressed_size = None\n md.encoding_stats = None\n row_group.columns = columns\n real_row_groups.append(row_group)\n return real_row_groups\n\n @classmethod\n def _make_part(\n cls,\n filename,\n rg_list,\n fs=None,\n pf=None,\n base_path=None,\n partitions=None,\n ):\n \"\"\"Generate a partition-specific element of `parts`.\"\"\"\n\n if partitions:\n real_row_groups = cls._get_thrift_row_groups(\n pf,\n filename,\n rg_list,\n )\n part = {\"piece\": (real_row_groups,)}\n else:\n # Get full path (empty strings should be ignored)\n full_path = fs.sep.join([p for p in [base_path, filename] if p != \"\"])\n row_groups = [rg[0] for rg in rg_list] # Don't need global IDs\n part = {\"piece\": (full_path, row_groups)}\n\n return part\n\n @classmethod\n def _collect_dataset_info(\n cls,\n paths,\n fs,\n categories,\n index,\n gather_statistics,\n filters,\n split_row_groups,\n chunksize,\n aggregate_files,\n ignore_metadata_file,\n metadata_task_size,\n parquet_file_extension,\n kwargs,\n ):\n\n # Define the parquet-file (pf) object to use for metadata,\n # Also, initialize `parts`. If `parts` is populated here,\n # then each part will correspond to a file. Otherwise, each part will\n # correspond to a row group (populated later).\n\n # Extract \"supported\" key-word arguments from `kwargs`.\n # Split items into `dataset_kwargs` and `read_kwargs`\n dataset_kwargs, read_kwargs, user_kwargs = _split_user_options(**kwargs)\n\n parts = []\n _metadata_exists = False\n if len(paths) == 1 and fs.isdir(paths[0]):\n\n # This is a directory.\n # Check if _metadata and/or _common_metadata files exists\n base = paths[0]\n _metadata_exists = True\n if not ignore_metadata_file:\n _metadata_exists = fs.isfile(fs.sep.join([base, \"_metadata\"]))\n\n # Find all files if we are not using a _metadata file\n if ignore_metadata_file or not _metadata_exists:\n # For now, we need to discover every file under paths[0]\n paths, base, fns = _sort_and_analyze_paths(fs.find(base), fs)\n _update_paths = False\n for fn in [\"_metadata\", \"_common_metadata\"]:\n try:\n fns.remove(fn)\n _update_paths = True\n except ValueError:\n pass\n if _update_paths:\n paths = [fs.sep.join([base, fn]) for fn in fns]\n _metadata_exists = False\n if _metadata_exists:\n # Using _metadata file (best-case scenario)\n pf = ParquetFile(\n fs.sep.join([base, \"_metadata\"]),\n open_with=fs.open,\n **dataset_kwargs,\n )\n else:\n # Use 0th file\n # Note that \"_common_metadata\" can cause issues for\n # partitioned datasets.\n if parquet_file_extension:\n # Raise error if all files have been filtered by extension\n len0 = len(paths)\n paths = [\n path for path in paths if path.endswith(parquet_file_extension)\n ]\n if len0 and paths == []:\n raise ValueError(\n \"No files satisfy the `parquet_file_extension` criteria \"\n f\"(files must end with {parquet_file_extension}).\"\n )\n pf = ParquetFile(\n paths[:1], open_with=fs.open, root=base, **dataset_kwargs\n )\n scheme = get_file_scheme(fns)\n pf.file_scheme = scheme\n pf.cats = paths_to_cats(fns, scheme)\n if not gather_statistics:\n parts = [fs.sep.join([base, fn]) for fn in fns]\n else:\n # This is a list of files\n paths, base, fns = _sort_and_analyze_paths(paths, fs)\n\n # Check if _metadata is in paths, and\n # remove it if ignore_metadata_file=True\n _metadata_exists = \"_metadata\" in fns\n if 
_metadata_exists and ignore_metadata_file:\n fns.remove(\"_metadata\")\n _metadata_exists = False\n paths = [fs.sep.join([base, fn]) for fn in fns]\n\n if _metadata_exists:\n # We have a _metadata file, lets use it\n pf = ParquetFile(\n fs.sep.join([base, \"_metadata\"]),\n open_with=fs.open,\n **dataset_kwargs,\n )\n else:\n # Rely on metadata for 0th file.\n # Will need to pass a list of paths to read_partition\n scheme = get_file_scheme(fns)\n pf = ParquetFile(\n paths[:1], open_with=fs.open, root=base, **dataset_kwargs\n )\n pf.file_scheme = scheme\n pf.cats = paths_to_cats(fns, scheme)\n if not gather_statistics:\n parts = paths.copy()\n\n # Check the `aggregate_files` setting\n aggregation_depth = _get_aggregation_depth(\n aggregate_files,\n list(pf.cats),\n )\n\n # Ensure that there is no overlap between partition columns\n # and explicit columns in `pf`\n if pf.cats:\n _partitions = [p for p in pf.cats if p not in pf.columns]\n if not _partitions:\n pf.cats = {}\n elif len(_partitions) != len(pf.cats):\n raise ValueError(\n \"No partition-columns should be written in the \\n\"\n \"file unless they are ALL written in the file.\\n\"\n \"columns: {} | partitions: {}\".format(pf.columns, pf.cats.keys())\n )\n\n return {\n \"pf\": pf,\n \"paths\": paths,\n \"has_metadata_file\": _metadata_exists,\n \"parts\": parts,\n \"base\": base,\n \"fs\": fs,\n \"gather_statistics\": gather_statistics,\n \"categories\": categories,\n \"index\": index,\n \"filters\": filters,\n \"split_row_groups\": split_row_groups,\n \"chunksize\": chunksize,\n \"aggregate_files\": aggregate_files,\n \"aggregation_depth\": aggregation_depth,\n \"metadata_task_size\": metadata_task_size,\n \"kwargs\": {\n \"dataset\": dataset_kwargs,\n \"read\": read_kwargs,\n **user_kwargs,\n },\n }\n\n @classmethod\n def _create_dd_meta(cls, dataset_info):\n\n # Collect necessary information from dataset_info\n pf = dataset_info[\"pf\"]\n index = dataset_info[\"index\"]\n categories = dataset_info[\"categories\"]\n\n columns = None\n pandas_md = pf.pandas_metadata\n\n if pandas_md:\n (\n index_names,\n column_names,\n storage_name_mapping,\n column_index_names,\n ) = _parse_pandas_metadata(pandas_md)\n # auto-ranges should not be created by fastparquet\n column_names.extend(pf.cats)\n\n else:\n index_names = []\n column_names = pf.columns + list(pf.cats)\n storage_name_mapping = {k: k for k in column_names}\n column_index_names = [None]\n\n if index is None and len(index_names) > 0:\n if len(index_names) == 1 and index_names[0] is not None:\n index = index_names[0]\n else:\n index = index_names\n\n # Normalize user inputs\n column_names, index_names = _normalize_index_columns(\n columns, column_names, index, index_names\n )\n\n all_columns = index_names + column_names\n\n categories_dict = None\n if isinstance(categories, dict):\n categories_dict = categories\n\n if categories is None:\n categories = pf.categories\n elif isinstance(categories, str):\n categories = [categories]\n else:\n categories = list(categories)\n\n # Check that categories are included in columns\n if categories and not set(categories).intersection(all_columns):\n raise ValueError(\n \"categories not in available columns.\\n\"\n \"categories: {} | columns: {}\".format(categories, list(all_columns))\n )\n\n dtypes = pf._dtypes(categories)\n dtypes = {storage_name_mapping.get(k, k): v for k, v in dtypes.items()}\n\n index_cols = index or ()\n if isinstance(index_cols, str):\n index_cols = [index_cols]\n for ind in index_cols:\n if getattr(dtypes.get(ind), 
\"numpy_dtype\", None):\n # index does not support masked types\n dtypes[ind] = dtypes[ind].numpy_dtype\n for cat in categories:\n if cat in all_columns:\n dtypes[cat] = pd.CategoricalDtype(categories=[UNKNOWN_CATEGORIES])\n\n for catcol in pf.cats:\n if catcol in all_columns:\n dtypes[catcol] = pd.CategoricalDtype(categories=pf.cats[catcol])\n\n meta = _meta_from_dtypes(all_columns, dtypes, index_cols, column_index_names)\n\n # Update `dataset_info` and return `meta`\n dataset_info[\"dtypes\"] = dtypes\n dataset_info[\"index\"] = index\n dataset_info[\"index_cols\"] = index_cols\n dataset_info[\"categories\"] = categories\n dataset_info[\"categories_dict\"] = categories_dict\n\n return meta\n\n @classmethod\n def _construct_collection_plan(cls, dataset_info):\n\n # Collect necessary information from dataset_info\n fs = dataset_info[\"fs\"]\n parts = dataset_info[\"parts\"]\n paths = dataset_info[\"paths\"]\n filters = dataset_info[\"filters\"]\n pf = dataset_info[\"pf\"]\n split_row_groups = dataset_info[\"split_row_groups\"]\n chunksize = dataset_info[\"chunksize\"]\n gather_statistics = dataset_info[\"gather_statistics\"]\n base_path = dataset_info[\"base\"]\n aggregation_depth = dataset_info[\"aggregation_depth\"]\n index_cols = dataset_info[\"index_cols\"]\n categories = dataset_info[\"categories\"]\n dtypes = dataset_info[\"dtypes\"]\n categories_dict = dataset_info[\"categories_dict\"]\n has_metadata_file = dataset_info[\"has_metadata_file\"]\n metadata_task_size = dataset_info[\"metadata_task_size\"]\n kwargs = dataset_info[\"kwargs\"]\n\n # Ensure metadata_task_size is set\n # (Using config file or defaults)\n metadata_task_size = _set_metadata_task_size(\n dataset_info[\"metadata_task_size\"], fs\n )\n\n # Determine which columns need statistics.\n # At this point, gather_statistics is only True if\n # the user specified calculate_divisions=True\n filter_columns = {t[0] for t in flatten(filters or [], container=list)}\n stat_col_indices = {}\n _index_cols = index_cols if (gather_statistics and len(index_cols) == 1) else []\n for i, name in enumerate(pf.columns):\n if name in _index_cols or name in filter_columns:\n stat_col_indices[name] = i\n\n # Decide final `gather_statistics` setting.\n # NOTE: The \"fastparquet\" engine requires statistics for\n # filtering even if the filter is on a paritioned column\n gather_statistics = _set_gather_statistics(\n gather_statistics,\n chunksize,\n split_row_groups,\n aggregation_depth,\n filter_columns,\n set(stat_col_indices) | filter_columns,\n )\n\n # Define common_kwargs\n common_kwargs = {\n \"categories\": categories_dict or categories,\n \"root_cats\": pf.cats,\n \"root_file_scheme\": pf.file_scheme,\n \"base_path\": base_path,\n **kwargs,\n }\n\n # Check if this is a very simple case where we can just\n # return the path names. This requires that `parts`\n # already be a list of paths. 
Also, we cannot be splitting\n # by row-group or collecting statistics.\n if (\n gather_statistics is False\n and not split_row_groups\n and isinstance(parts, list)\n and len(parts)\n and isinstance(parts[0], str)\n ):\n return (\n [{\"piece\": (full_path, None)} for full_path in parts],\n [],\n common_kwargs,\n )\n\n dataset_info_kwargs = {\n \"fs\": fs,\n \"split_row_groups\": split_row_groups,\n \"gather_statistics\": gather_statistics,\n \"filters\": filters,\n \"dtypes\": dtypes,\n \"stat_col_indices\": stat_col_indices,\n \"aggregation_depth\": aggregation_depth,\n \"chunksize\": chunksize,\n \"root_cats\": pf.cats,\n \"root_file_scheme\": pf.file_scheme,\n \"base_path\": \"\" if base_path is None else base_path,\n \"has_metadata_file\": has_metadata_file,\n }\n\n if (\n has_metadata_file\n or metadata_task_size == 0\n or metadata_task_size > len(paths)\n ):\n # Construct the output-partitioning plan on the\n # client process (in serial). This means we have\n # a global _metadata file, or that `metadata_task_size`\n # is zero or larger than the number of files.\n pf_or_paths = pf if has_metadata_file else paths\n parts, stats = cls._collect_file_parts(pf_or_paths, dataset_info_kwargs)\n\n else:\n # We DON'T have a global _metadata file to work with.\n # We should loop over files in parallel\n parts, stats = [], []\n if paths:\n # Build and compute a task graph to construct stats/parts\n gather_parts_dsk = {}\n name = \"gather-pq-parts-\" + tokenize(paths, dataset_info_kwargs)\n finalize_list = []\n for task_i, file_i in enumerate(\n range(0, len(paths), metadata_task_size)\n ):\n finalize_list.append((name, task_i))\n gather_parts_dsk[finalize_list[-1]] = (\n cls._collect_file_parts,\n paths[file_i : file_i + metadata_task_size],\n dataset_info_kwargs,\n )\n\n def _combine_parts(parts_and_stats):\n parts, stats = [], []\n for part, stat in parts_and_stats:\n parts += part\n if stat:\n stats += stat\n return parts, stats\n\n gather_parts_dsk[\"final-\" + name] = (_combine_parts, finalize_list)\n parts, stats = Delayed(\"final-\" + name, gather_parts_dsk).compute()\n\n return parts, stats, common_kwargs\n\n @classmethod\n def _collect_file_parts(\n cls,\n pf_or_files,\n dataset_info_kwargs,\n ):\n\n # Collect necessary information from dataset_info\n fs = dataset_info_kwargs[\"fs\"]\n split_row_groups = dataset_info_kwargs[\"split_row_groups\"]\n gather_statistics = dataset_info_kwargs[\"gather_statistics\"]\n stat_col_indices = dataset_info_kwargs[\"stat_col_indices\"]\n filters = dataset_info_kwargs[\"filters\"]\n dtypes = dataset_info_kwargs[\"dtypes\"]\n chunksize = dataset_info_kwargs[\"chunksize\"]\n aggregation_depth = dataset_info_kwargs[\"aggregation_depth\"]\n base_path = dataset_info_kwargs.get(\"base_path\", None)\n root_cats = dataset_info_kwargs.get(\"root_cats\", None)\n root_file_scheme = dataset_info_kwargs.get(\"root_file_scheme\", None)\n has_metadata_file = dataset_info_kwargs[\"has_metadata_file\"]\n\n # Get ParquetFile\n if not isinstance(pf_or_files, fastparquet.api.ParquetFile):\n # Construct local `ParquetFile` object\n pf = ParquetFile(\n pf_or_files,\n open_with=fs.open,\n root=base_path,\n )\n # Update hive-partitioning to match global cats/scheme\n pf.cats = root_cats or {}\n if root_cats:\n pf.file_scheme = root_file_scheme\n else:\n # We already have a ParquetFile object to work with\n pf = pf_or_files\n\n # Organize row-groups by file\n (\n file_row_groups,\n file_row_group_stats,\n file_row_group_column_stats,\n gather_statistics,\n base_path,\n ) = 
cls._organize_row_groups(\n pf,\n split_row_groups,\n gather_statistics,\n stat_col_indices,\n filters,\n dtypes,\n base_path,\n has_metadata_file,\n chunksize,\n aggregation_depth,\n )\n\n # Convert organized row-groups to parts\n parts, stats = _row_groups_to_parts(\n gather_statistics,\n split_row_groups,\n aggregation_depth,\n file_row_groups,\n file_row_group_stats,\n file_row_group_column_stats,\n stat_col_indices,\n cls._make_part,\n make_part_kwargs={\n \"fs\": fs,\n \"pf\": pf,\n \"base_path\": base_path,\n \"partitions\": list(pf.cats),\n },\n )\n\n return parts, stats\n\n @classmethod\n def read_metadata(\n cls,\n fs,\n paths,\n categories=None,\n index=None,\n gather_statistics=None,\n filters=None,\n split_row_groups=False,\n chunksize=None,\n aggregate_files=None,\n ignore_metadata_file=False,\n metadata_task_size=None,\n parquet_file_extension=None,\n **kwargs,\n ):\n\n # Stage 1: Collect general dataset information\n dataset_info = cls._collect_dataset_info(\n paths,\n fs,\n categories,\n index,\n gather_statistics,\n filters,\n split_row_groups,\n chunksize,\n aggregate_files,\n ignore_metadata_file,\n metadata_task_size,\n parquet_file_extension,\n kwargs,\n )\n\n # Stage 2: Generate output `meta`\n meta = cls._create_dd_meta(dataset_info)\n\n # Stage 3: Generate parts and stats\n parts, stats, common_kwargs = cls._construct_collection_plan(dataset_info)\n\n # Cannot allow `None` in columns if the user has specified index=False\n index = dataset_info[\"index\"]\n if index is False and None in meta.columns:\n meta.drop(columns=[None], inplace=True)\n\n # Add `common_kwargs` to the first element of `parts`.\n # We can return as a separate element in the future, but\n # should avoid breaking the API for now.\n if len(parts):\n parts[0][\"common_kwargs\"] = common_kwargs\n parts[0][\"aggregation_depth\"] = dataset_info[\"aggregation_depth\"]\n\n if len(parts) and len(parts[0][\"piece\"]) == 1:\n\n # Strip all partition-dependent or unnecessary\n # data from the `ParquetFile` object\n pf = dataset_info[\"pf\"]\n pf.row_groups = None\n pf.fmd.row_groups = None\n pf._statistics = None\n parts[0][\"common_kwargs\"][\"parquet_file\"] = pf\n\n return (meta, stats, parts, index)\n\n @classmethod\n def multi_support(cls):\n return cls == FastParquetEngine\n\n @classmethod\n def read_partition(\n cls,\n fs,\n pieces,\n columns,\n index,\n categories=(),\n root_cats=None,\n root_file_scheme=None,\n base_path=None,\n **kwargs,\n ):\n\n null_index_name = False\n base_path = False if not root_cats else base_path\n if isinstance(index, list):\n if index == [None]:\n # Handling a None-labeled index...\n # The pandas metadata told us to read in an index\n # labeled `None`. If this corresponds to a `RangeIndex`,\n # fastparquet will need use the pandas metadata to\n # construct the index. Otherwise, the index will correspond\n # to a column named \"__index_level_0__\". We will need to\n # check the `ParquetFile` object for this column below.\n index = []\n null_index_name = True\n columns += index\n\n # Use global `parquet_file` object. 
Need to reattach\n # the desired row_group\n parquet_file = kwargs.pop(\"parquet_file\", None)\n\n # Always convert pieces to list\n if not isinstance(pieces, list):\n pieces = [pieces]\n\n sample = pieces[0]\n if isinstance(sample, tuple):\n if isinstance(sample[0], str):\n # We have paths to read from\n assert parquet_file is None\n\n row_groups = []\n rg_offset = 0\n parquet_file = ParquetFile(\n [p[0] for p in pieces],\n open_with=fs.open,\n root=base_path or False,\n **kwargs.get(\"dataset\", {}),\n )\n for piece in pieces:\n _pf = (\n parquet_file\n if len(pieces) == 1\n else ParquetFile(\n piece[0],\n open_with=fs.open,\n root=base_path or False,\n **kwargs.get(\"dataset\", {}),\n )\n )\n n_local_row_groups = len(_pf.row_groups)\n local_rg_indices = piece[1] or list(range(n_local_row_groups))\n row_groups += [\n parquet_file.row_groups[rg + rg_offset]\n for rg in local_rg_indices\n ]\n rg_offset += n_local_row_groups\n update_parquet_file = len(row_groups) < len(parquet_file.row_groups)\n\n elif parquet_file:\n\n row_groups = []\n for piece in pieces:\n # `piece[1]` will contain actual row-group objects,\n # but they may be pickled\n rgs = piece[0]\n if isinstance(rgs, bytes):\n rgs = pickle.loads(rgs)\n row_groups += rgs\n update_parquet_file = True\n\n else:\n raise ValueError(\"Neither path nor ParquetFile detected!\")\n\n if update_parquet_file:\n with _FP_FILE_LOCK:\n for rg in row_groups:\n for chunk in rg.columns:\n s = chunk.file_path\n if s and isinstance(s, bytes):\n chunk.file_path = s.decode()\n\n parquet_file.fmd.row_groups = row_groups\n # NOTE: May lose cats after `_set_attrs` call\n save_cats = parquet_file.cats\n parquet_file._set_attrs()\n parquet_file.cats = save_cats\n\n if null_index_name:\n if \"__index_level_0__\" in parquet_file.columns:\n # See \"Handling a None-labeled index\" comment above\n index = [\"__index_level_0__\"]\n columns += index\n\n # Update hive-partitioning information if necessary\n parquet_file.cats = root_cats or {}\n if root_cats:\n parquet_file.file_scheme = root_file_scheme\n\n parquet_file._dtypes = (\n lambda *args: parquet_file.dtypes\n ) # ugly patch, could be fixed\n\n # Convert ParquetFile to pandas\n return cls.pf_to_pandas(\n parquet_file,\n fs=fs,\n columns=columns,\n categories=categories,\n index=index,\n **kwargs.get(\"read\", {}),\n )\n\n else:\n # `sample` is NOT a tuple\n raise ValueError(f\"Expected tuple, got {type(sample)}\")\n\n @classmethod\n def pf_to_pandas(\n cls,\n pf,\n fs=None,\n columns=None,\n categories=None,\n index=None,\n open_file_options=None,\n **kwargs,\n ):\n # This method was mostly copied from the fastparquet\n # `ParquetFile.to_pandas` definition. 
We maintain our\n # own implmentation in Dask to enable better remote\n # file-handling control\n\n # Handle selected columns\n if columns is not None:\n columns = columns[:]\n else:\n columns = pf.columns + list(pf.cats)\n if index:\n columns += [i for i in index if i not in columns]\n\n # Extract row-groups and pre-allocate df\n rgs = pf.row_groups\n size = sum(rg.num_rows for rg in rgs)\n df, views = pf.pre_allocate(size, columns, categories, index)\n start = 0\n\n # Get a map of file names -> row-groups\n fn_rg_map = defaultdict(list)\n for rg in rgs:\n fn = pf.row_group_filename(rg)\n fn_rg_map[fn].append(rg)\n\n # Define file-opening options\n precache_options, open_file_options = _process_open_file_options(\n open_file_options,\n **(\n {\n \"allow_precache\": False,\n \"default_cache\": \"readahead\",\n }\n if _is_local_fs(fs)\n else {\n \"metadata\": pf,\n \"columns\": list(set(columns).intersection(pf.columns)),\n \"row_groups\": [rgs for rgs in fn_rg_map.values()],\n \"default_engine\": \"fastparquet\",\n \"default_cache\": \"readahead\",\n }\n ),\n )\n\n with ExitStack() as stack:\n\n for fn, infile in zip(\n fn_rg_map.keys(),\n _open_input_files(\n list(fn_rg_map.keys()),\n fs=fs,\n context_stack=stack,\n precache_options=precache_options,\n **open_file_options,\n ),\n ):\n for rg in fn_rg_map[fn]:\n thislen = rg.num_rows\n parts = {\n name: (\n v\n if name.endswith(\"-catdef\")\n else v[start : start + thislen]\n )\n for (name, v) in views.items()\n }\n\n # Add row-group data to df\n pf.read_row_group_file(\n rg,\n columns,\n categories,\n index,\n assign=parts,\n partition_meta=pf.partition_meta,\n infile=infile,\n **kwargs,\n )\n start += thislen\n return df\n\n @classmethod\n def initialize_write(\n cls,\n df,\n fs,\n path,\n append=False,\n partition_on=None,\n ignore_divisions=False,\n division_info=None,\n schema=None,\n object_encoding=\"utf8\",\n index_cols=None,\n custom_metadata=None,\n **kwargs,\n ):\n if index_cols is None:\n index_cols = []\n if append and division_info is None:\n ignore_divisions = True\n fs.mkdirs(path, exist_ok=True)\n if object_encoding == \"infer\" or (\n isinstance(object_encoding, dict) and \"infer\" in object_encoding.values()\n ):\n raise ValueError(\n '\"infer\" not allowed as object encoding, '\n \"because this required data in memory.\"\n )\n\n metadata_file_exists = False\n if append:\n try:\n # to append to a dataset without _metadata, need to load\n # _common_metadata or any data file here\n pf = fastparquet.api.ParquetFile(path, open_with=fs.open)\n metadata_file_exists = fs.exists(fs.sep.join([path, \"_metadata\"]))\n except (OSError, ValueError):\n # append for create\n append = False\n if append:\n if pf.file_scheme not in [\"hive\", \"empty\", \"flat\"]:\n raise ValueError(\n \"Requested file scheme is hive, but existing file scheme is not.\"\n )\n elif (set(pf.columns) != set(df.columns) - set(partition_on)) or (\n set(partition_on) != set(pf.cats)\n ):\n raise ValueError(\n \"Appended columns not the same.\\n\"\n \"Previous: {} | New: {}\".format(pf.columns, list(df.columns))\n )\n elif (pd.Series(pf.dtypes).loc[pf.columns] != df[pf.columns].dtypes).any():\n raise ValueError(\n \"Appended dtypes differ.\\n{}\".format(\n set(pf.dtypes.items()) ^ set(df.dtypes.items())\n )\n )\n else:\n df = df[pf.columns + partition_on]\n\n fmd = pf.fmd\n i_offset = fastparquet.writer.find_max_part(fmd.row_groups)\n if not ignore_divisions:\n if not set(index_cols).intersection([division_info[\"name\"]]):\n ignore_divisions = True\n if not 
ignore_divisions:\n minmax = fastparquet.api.sorted_partitioned_columns(pf)\n # If fastparquet detects that a partitioned column isn't sorted, it won't\n # appear in the resulting min/max dictionary\n old_end = (\n minmax[index_cols[0]][\"max\"][-1]\n if index_cols[0] in minmax\n else None\n )\n divisions = division_info[\"divisions\"]\n if old_end is None or divisions[0] <= old_end:\n raise ValueError(\n \"Appended divisions overlapping with previous ones.\"\n \"\\n\"\n \"Previous: {} | New: {}\".format(old_end, divisions[0])\n )\n else:\n fmd = fastparquet.writer.make_metadata(\n df._meta,\n object_encoding=object_encoding,\n index_cols=index_cols,\n ignore_columns=partition_on,\n **kwargs,\n )\n i_offset = 0\n if custom_metadata is not None:\n kvm = fmd.key_value_metadata or []\n kvm.extend(\n [\n fastparquet.parquet_thrift.KeyValue(key=key, value=value)\n for key, value in custom_metadata.items()\n ]\n )\n fmd.key_value_metadata = kvm\n\n extra_write_kwargs = {\"fmd\": fmd}\n return i_offset, fmd, metadata_file_exists, extra_write_kwargs\n\n @classmethod\n def write_partition(\n cls,\n df,\n path,\n fs,\n filename,\n partition_on,\n return_metadata,\n fmd=None,\n compression=None,\n custom_metadata=None,\n **kwargs,\n ):\n # Update key/value metadata if necessary\n fmd = copy.copy(fmd)\n for s in fmd.schema:\n if isinstance(s.name, bytes):\n # can be coerced to bytes on copy\n s.name = s.name.decode()\n if custom_metadata and fmd is not None:\n fmd.key_value_metadata = fmd.key_value_metadata + (\n [\n fastparquet.parquet_thrift.KeyValue(key=key, value=value)\n for key, value in custom_metadata.items()\n ]\n )\n\n if not len(df):\n # Write nothing for empty partitions\n rgs = []\n elif partition_on:\n mkdirs = lambda x: fs.mkdirs(x, exist_ok=True)\n if parse_version(fastparquet.__version__) >= parse_version(\"0.1.4\"):\n rgs = partition_on_columns(\n df, partition_on, path, filename, fmd, compression, fs.open, mkdirs\n )\n else:\n rgs = partition_on_columns(\n df,\n partition_on,\n path,\n filename,\n fmd,\n fs.sep,\n compression,\n fs.open,\n mkdirs,\n )\n else:\n with fs.open(fs.sep.join([path, filename]), \"wb\") as fil:\n fmd.num_rows = len(df)\n rg = make_part_file(\n fil, df, fmd.schema, compression=compression, fmd=fmd\n )\n for chunk in rg.columns:\n chunk.file_path = filename\n rgs = [rg]\n if return_metadata:\n return rgs\n else:\n return []\n\n @classmethod\n def write_metadata(cls, parts, meta, fs, path, append=False, **kwargs):\n _meta = copy.copy(meta)\n rgs = meta.row_groups\n if parts:\n for rg in parts:\n if rg is not None:\n if isinstance(rg, list):\n for r in rg:\n rgs.append(r)\n else:\n rgs.append(rg)\n _meta.row_groups = rgs\n fn = fs.sep.join([path, \"_metadata\"])\n fastparquet.writer.write_common_metadata(\n fn, _meta, open_with=fs.open, no_row_groups=False\n )\n\n # if appending, could skip this, but would need to check existence\n fn = fs.sep.join([path, \"_common_metadata\"])\n fastparquet.writer.write_common_metadata(fn, _meta, open_with=fs.open)\n"
] | [
[
"pandas.CategoricalDtype",
"pandas.Series",
"pandas.Timestamp"
]
] |
facebookresearch/pythia | [
"079740bee4b357a7b1b866d35e2f1fad6edba8a4"
] | [
"mmf/modules/encoders.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\nimport importlib\nimport logging\nimport os\nimport pickle\nimport re\nfrom collections import OrderedDict\nfrom copy import deepcopy\nfrom dataclasses import asdict, dataclass\nfrom enum import Enum\nfrom typing import Any\n\nimport torch\nimport torchvision\nfrom mmf.common.registry import registry\nfrom mmf.models.frcnn import GeneralizedRCNN\nfrom mmf.modules.embeddings import ProjectionEmbedding, TextEmbedding\nfrom mmf.modules.hf_layers import BertModelJit\nfrom mmf.modules.layers import Identity\nfrom mmf.utils.build import build_image_encoder, build_text_encoder\nfrom mmf.utils.download import download_pretrained_model\nfrom mmf.utils.file_io import PathManager\nfrom mmf.utils.general import get_absolute_path\nfrom mmf.utils.logger import log_class_usage\nfrom omegaconf import MISSING, OmegaConf\nfrom torch import nn, Tensor\nfrom transformers.configuration_auto import AutoConfig\nfrom transformers.modeling_auto import AutoModel\n\ntry:\n from detectron2.modeling import build_resnet_backbone, ShapeSpec\nexcept ImportError:\n pass\n\n\nlogger = logging.getLogger()\n\n\nclass Encoder(nn.Module):\n @dataclass\n class Config:\n name: str = MISSING\n\n def __init__(self):\n super().__init__()\n log_class_usage(\"Encoder\", self.__class__)\n\n @classmethod\n def from_params(cls, **kwargs):\n config = OmegaConf.structured(cls.Config(**kwargs))\n return cls(config)\n\n\nclass EncoderFactory(nn.Module):\n @dataclass\n class Config:\n type: str = MISSING\n params: Encoder.Config = MISSING\n\n\nclass ImageFeatureEncoderTypes(Enum):\n default = \"default\"\n identity = \"identity\"\n projection = \"projection\"\n frcnn_fc7 = \"finetune_faster_rcnn_fpn_fc7\"\n\n\nclass ImageFeatureEncoder(Encoder):\n @dataclass\n class Config(Encoder.Config):\n in_dim: int = MISSING\n\n\nclass ImageFeatureEncoderFactory(EncoderFactory):\n @dataclass\n class Config(EncoderFactory.Config):\n type: ImageFeatureEncoderTypes = MISSING\n params: ImageFeatureEncoder.Config = MISSING\n\n def __init__(self, config: Config, *args, **kwargs):\n super().__init__()\n encoder_type = config.type\n if isinstance(encoder_type, ImageFeatureEncoderTypes):\n encoder_type = encoder_type.value\n\n assert (\n \"in_dim\" in config.params\n ), \"ImageFeatureEncoder require 'in_dim' param in config\"\n params = config.params\n\n if encoder_type == \"default\" or encoder_type == \"identity\":\n self.module = Identity()\n self.module.in_dim = params.in_dim\n self.module.out_dim = params.in_dim\n elif encoder_type == \"projection\":\n if \"module\" not in params:\n params = deepcopy(params)\n params.module = \"linear\"\n self.module = ProjectionEmbedding(**params)\n elif encoder_type == \"finetune_faster_rcnn_fpn_fc7\":\n self.module = FinetuneFasterRcnnFpnFc7(params)\n else:\n raise NotImplementedError(\"Unknown Image Encoder: %s\" % encoder_type)\n\n self.out_dim = self.module.out_dim\n\n def forward(self, *args, **kwargs):\n return self.module(*args, **kwargs)\n\n\[email protected]_encoder(\"finetune_faster_rcnn_fpn_fc7\")\nclass FinetuneFasterRcnnFpnFc7(ImageFeatureEncoder):\n @dataclass\n class Config(ImageFeatureEncoder.Config):\n name: str = \"finetune_faster_rcnn_fpn_fc7\"\n in_dim: int = MISSING\n weights_file: str = \"fc7_w.pkl\"\n bias_file: str = \"fc7_b.pkl\"\n model_data_dir: str = MISSING\n\n def __init__(self, config: Config, *args, **kwargs):\n super().__init__()\n model_data_dir = get_absolute_path(config.model_data_dir)\n\n if not os.path.isabs(config.weights_file):\n 
weights_file = os.path.join(model_data_dir, config.weights_file)\n if not os.path.isabs(config.bias_file):\n bias_file = os.path.join(model_data_dir, config.bias_file)\n\n if not PathManager.exists(bias_file) or not PathManager.exists(weights_file):\n download_path = download_pretrained_model(\"detectron.vmb_weights\")\n weights_file = get_absolute_path(os.path.join(download_path, \"fc7_w.pkl\"))\n bias_file = get_absolute_path(os.path.join(download_path, \"fc7_b.pkl\"))\n\n with PathManager.open(weights_file, \"rb\") as w:\n weights = pickle.load(w)\n with PathManager.open(bias_file, \"rb\") as b:\n bias = pickle.load(b)\n out_dim = bias.shape[0]\n\n self.lc = nn.Linear(config.in_dim, out_dim)\n self.lc.weight.data.copy_(torch.from_numpy(weights))\n self.lc.bias.data.copy_(torch.from_numpy(bias))\n self.out_dim = out_dim\n\n def _load_from_state_dict(\n self,\n state_dict,\n prefix,\n local_metadata,\n strict,\n missing_keys,\n unexpected_keys,\n error_msgs,\n ):\n old_prefix = prefix + \"module.\"\n for k in list(state_dict.keys()):\n if k.startswith(old_prefix):\n new_k = k.replace(old_prefix, prefix)\n state_dict[new_k] = state_dict.pop(k)\n\n super()._load_from_state_dict(\n state_dict,\n prefix,\n local_metadata,\n strict,\n missing_keys,\n unexpected_keys,\n error_msgs,\n )\n\n def forward(self, image):\n i2 = self.lc(image)\n i3 = nn.functional.relu(i2)\n return i3\n\n\[email protected]_encoder(\"identity\")\nclass IdentityEncoder(Encoder):\n @dataclass\n class Config(Encoder.Config):\n name: str = \"identity\"\n # Random in_dim if not specified\n in_dim: int = 100\n\n def __init__(self, config: Config):\n super().__init__()\n self.module = nn.Identity()\n self.in_dim = config.get(\"in_dim\", 100)\n self.out_dim = self.in_dim\n\n def forward(self, x):\n return self.module(x)\n\n\nclass ImageEncoderTypes(Enum):\n default = \"default\"\n identity = \"identity\"\n torchvision_resnet = \"torchvision_resnet\"\n resnet152 = \"resnet152\"\n detectron2_resnet = \"detectron2_resnet\"\n\n\nclass ImageEncoderFactory(EncoderFactory):\n @dataclass\n class Config(EncoderFactory.Config):\n type: ImageEncoderTypes = MISSING\n\n def __init__(self, config: Config, *args, **kwargs):\n super().__init__()\n self._type = config.type\n\n if isinstance(self._type, ImageEncoderTypes):\n self._type = self._type.value\n\n params = config.params\n\n if self._type == \"default\" or self._type == \"identity\":\n self.module = nn.Identity()\n self.module.out_dim = params.in_dim\n elif self._type == \"resnet152\":\n self.module = ResNet152ImageEncoder(params)\n elif self._type == \"torchvision_resnet\":\n self.module = TorchvisionResNetImageEncoder(params)\n elif self._type == \"detectron2_resnet\":\n self.module = Detectron2ResnetImageEncoder(params)\n elif self._type == \"frcnn\":\n self.module = FRCNNImageEncoder(params)\n else:\n raise NotImplementedError(\"Unknown Image Encoder: %s\" % self._type)\n\n @property\n def out_dim(self):\n return self.module.out_dim\n\n def forward(self, image):\n return self.module(image)\n\n\n# Taken from facebookresearch/mmbt with some modifications\[email protected]_encoder(\"resnet152\")\nclass ResNet152ImageEncoder(Encoder):\n @dataclass\n class Config(Encoder.Config):\n name: str = \"resnet152\"\n pretrained: bool = True\n # \"avg\" or \"adaptive\"\n pool_type: str = \"avg\"\n num_output_features: int = 1\n\n def __init__(self, config: Config, *args, **kwargs):\n super().__init__()\n self.config = config\n model = 
torchvision.models.resnet152(pretrained=config.get(\"pretrained\", True))\n modules = list(model.children())[:-2]\n self.model = nn.Sequential(*modules)\n\n pool_func = (\n nn.AdaptiveAvgPool2d if config.pool_type == \"avg\" else nn.AdaptiveMaxPool2d\n )\n\n # -1 will keep the original feature size\n if config.num_output_features == -1:\n self.pool = nn.Identity()\n elif config.num_output_features in [1, 2, 3, 5, 7]:\n self.pool = pool_func((config.num_output_features, 1))\n elif config.num_output_features == 4:\n self.pool = pool_func((2, 2))\n elif config.num_output_features == 6:\n self.pool = pool_func((3, 2))\n elif config.num_output_features == 8:\n self.pool = pool_func((4, 2))\n elif config.num_output_features == 9:\n self.pool = pool_func((3, 3))\n\n self.out_dim = 2048\n\n def forward(self, x):\n # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048\n out = self.pool(self.model(x))\n out = torch.flatten(out, start_dim=2)\n out = out.transpose(1, 2).contiguous()\n return out # BxNx2048\n\n\[email protected]_encoder(\"torchvision_resnet\")\nclass TorchvisionResNetImageEncoder(Encoder):\n @dataclass\n class Config(Encoder.Config):\n name: str = \"resnet50\"\n pretrained: bool = False\n zero_init_residual: bool = True\n num_output_features: int = -1\n pool_type: str = \"avg\"\n\n def __init__(self, config: Config, *args, **kwargs):\n super().__init__()\n self.config = config\n\n model = getattr(torchvision.models, config.name)(\n pretrained=config.pretrained, zero_init_residual=config.zero_init_residual\n )\n\n # checks if use_avgpool exists to maintain the old logic\n self.use_avgpool = config.get(\"use_avgpool\", None)\n if self.use_avgpool: # use_avgpool is True\n config.num_output_features = 1\n config.pool_type = \"avg\"\n elif self.use_avgpool is False: # use_avgpool is False\n config.num_output_features = -1\n\n if config.pretrained:\n model = self._load_pretrained(model, config)\n\n modules = list(model.children())[:-2]\n self.model = nn.Sequential(*modules)\n self.pool = self._pool_func(config)\n self.out_dim = config.get(\"out_dim\", 2048)\n\n def _load_pretrained(self, model, config: Config):\n pretrained_model = config.get(\"pretrained_model\", \"supervised\")\n if pretrained_model == \"supervised\":\n pass # this is already loaded via torchvision using pretrained=True\n elif os.path.exists(pretrained_model):\n model.load_state_dict(torch.load(pretrained_model))\n else:\n try:\n with PathManager.open(pretrained_model, \"rb\") as f:\n model.load_state_dict(\n torch.load(f, map_location=lambda storage, loc: storage),\n strict=False,\n )\n except Exception:\n raise Exception(f\"unknown pretrained ResNet model: {pretrained_model}\")\n return model\n\n def _pool_func(self, config: Config):\n pool_func = (\n nn.AdaptiveAvgPool2d if config.pool_type == \"avg\" else nn.AdaptiveMaxPool2d\n )\n # -1 will keep the original feature size\n if config.num_output_features == -1:\n pool = nn.Identity()\n elif config.num_output_features in [1, 2, 3, 5, 7]:\n pool = pool_func((config.num_output_features, 1))\n elif config.num_output_features == 4:\n pool = pool_func((2, 2))\n elif config.num_output_features == 6:\n pool = pool_func((3, 2))\n elif config.num_output_features == 8:\n pool = pool_func((4, 2))\n elif config.num_output_features == 9:\n pool = pool_func((3, 3))\n\n return pool\n\n def forward(self, x):\n # B x 3 x 224 x 224 -> B x out_dim x 7 x 7\n out = self.pool(self.model(x))\n if self.use_avgpool is None:\n out = torch.flatten(out, start_dim=2)\n out = out.transpose(1, 
2).contiguous() # BxNxout_dim\n else:\n out = torch.flatten(out, start_dim=1) # BxN*out_dim\n return out\n\n\[email protected]_encoder(\"detectron2_resnet\")\nclass Detectron2ResnetImageEncoder(Encoder):\n @dataclass\n class Config(Encoder.Config):\n name: str = \"detectron2_resnet\"\n pretrained: bool = True\n pretrained_path: str = None\n\n def __init__(self, config: Config, *args, **kwargs):\n super().__init__()\n self.config = config\n pretrained = config.get(\"pretrained\", False)\n pretrained_path = config.get(\"pretrained_path\", None)\n\n self.resnet = build_resnet_backbone(config, ShapeSpec(channels=3))\n\n if pretrained:\n state_dict = torch.hub.load_state_dict_from_url(\n pretrained_path, progress=False\n )\n new_state_dict = OrderedDict()\n replace_layer = {\"backbone.\": \"\"}\n\n for key, value in state_dict[\"model\"].items():\n new_key = re.sub(\n r\"(backbone\\.)\", lambda x: replace_layer[x.groups()[0]], key\n )\n new_state_dict[new_key] = value\n self.resnet.load_state_dict(new_state_dict, strict=False)\n\n self.out_dim = 2048\n\n def forward(self, x):\n x = self.resnet(x)\n return x[\"res5\"]\n\n\[email protected]_encoder(\"frcnn\")\nclass FRCNNImageEncoder(Encoder):\n @dataclass\n class Config(Encoder.Config):\n name: str = \"frcnn\"\n pretrained: bool = True\n pretrained_path: str = None\n\n def __init__(self, config: Config, *args, **kwargs):\n super().__init__()\n self.config = config\n pretrained = config.get(\"pretrained\", False)\n pretrained_path = config.get(\"pretrained_path\", None)\n self.frcnn = GeneralizedRCNN(config)\n if pretrained:\n state_dict = torch.load(pretrained_path)\n self.frcnn.load_state_dict(state_dict)\n self.frcnn.eval()\n\n def forward(\n self,\n x: torch.Tensor,\n sizes: torch.Tensor = None,\n scales_yx: torch.Tensor = None,\n padding: torch.Tensor = None,\n max_detections: int = 0,\n return_tensors: str = \"pt\",\n ):\n x = self.frcnn(\n x,\n sizes,\n scales_yx=scales_yx,\n padding=padding,\n max_detections=max_detections,\n return_tensors=return_tensors,\n )\n return x\n\n\nclass TextEncoderTypes(Enum):\n identity = \"identity\"\n transformer = \"transformer\"\n embedding = \"embedding\"\n\n\nclass TextEncoderFactory(EncoderFactory):\n @dataclass\n class Config(EncoderFactory.Config):\n # identity, transformer or embedding as of now\n type: TextEncoderTypes = MISSING\n params: Encoder.Config = MISSING\n\n def __init__(self, config: Config, *args, **kwargs):\n super().__init__()\n self._type = config.type\n if isinstance(self._type, TextEncoderTypes):\n self._type = self._type.value\n\n if self._type == \"identity\":\n self.module = nn.Identity()\n elif self._type == \"transformer\":\n self._module = TransformerEncoder(config.params)\n self.module = self._module.module\n elif self._type == \"embedding\":\n self.module = TextEmbeddingEncoder(config.params)\n else:\n raise NotImplementedError(f\"Unknown Text Encoder {self._type}\")\n\n def forward(self, *args, **kwargs):\n return self.module(*args, **kwargs)\n\n\[email protected]_encoder(\"text_embedding\")\nclass TextEmbeddingEncoder(Encoder):\n @dataclass\n class Config(Encoder.Config):\n name: str = \"text_embedding\"\n operator: str = MISSING\n # Keeping this Any for now as this\n # needs a separate refactor PR.\n embedding_params: Any = MISSING\n\n def __init__(self, config: Config):\n super().__init__()\n self._operator = config.operator\n self._embedding_params = config.embedding_params\n\n self.module = TextEmbedding(\n self._embedding_params.type, **self._embedding_params.params\n 
)\n\n def forward(self, x):\n x = self.module(x)\n if self._operator == \"sum\":\n x = x.sum(dim=1)\n elif self._operator == \"concat\":\n x = torch.cat(x, dim=1)\n elif self._operator == \"mul\":\n x = torch.prod(x, dim=1)\n\n return x.squeeze()\n\n\[email protected]_encoder(\"transformer\")\nclass TransformerEncoder(Encoder):\n @dataclass\n class Config(Encoder.Config):\n name: str = \"transformer\"\n num_segments: int = 2\n bert_model_name: str = \"bert-base-uncased\"\n # Options below can be overridden to update the bert configuration used\n # to initialize the bert encoder. If some option is missing or\n # if you are using an encoder different then BERT, add extra parameters\n # by inheriting and extending this config\n # Those options will automatically override the options for your transformer\n # encoder's configuration. For e.g. vocab_size is missing here, just add\n # vocab_size: x to update the size of the vocabulary with which encoder is\n # initialized. If you update the default values, the transformer you\n # will get will be initialized from scratch.\n hidden_size: int = 768\n num_hidden_layers: int = 12\n num_attention_heads: int = 12\n output_attentions: bool = False\n output_hidden_states: bool = False\n random_init: bool = False\n\n def __init__(self, config: Config, *args, **kwargs):\n super().__init__()\n self.config = config\n hf_params = {\"config\": self._build_encoder_config(config)}\n should_random_init = self.config.get(\"random_init\", False)\n\n # For BERT models, initialize using Jit version\n if self.config.bert_model_name.startswith(\"bert-\"):\n if should_random_init:\n self.module = BertModelJit(**hf_params)\n else:\n self.module = BertModelJit.from_pretrained(\n self.config.bert_model_name, **hf_params\n )\n else:\n if should_random_init:\n self.module = AutoModel.from_config(**hf_params)\n else:\n self.module = AutoModel.from_pretrained(\n self.config.bert_model_name, **hf_params\n )\n\n self.embeddings = self.module.embeddings\n self.original_config = self.config\n self.config = self.module.config\n self._init_segment_embeddings()\n\n def _init_segment_embeddings(self):\n if self.original_config.get(\"num_segments\", None):\n num_segments = self.original_config.num_segments\n if hasattr(self.embeddings, \"token_type_embeddings\"):\n new_embeds = nn.Embedding(num_segments, self.config.hidden_size)\n new_embeds.weight.data[:2].copy_(\n self.embeddings.token_type_embeddings.weight\n )\n for idx in range(2, num_segments - 1):\n new_embeds.weight.data[idx].copy_(\n self.embeddings.token_type_embeddings.weight.data.mean(dim=0)\n )\n self.embeddings.token_type_embeddings = new_embeds\n\n def _build_encoder_config(self, config: Config):\n return AutoConfig.from_pretrained(\n config.bert_model_name, **OmegaConf.to_container(config)\n )\n\n def forward(self, *args, return_sequence=False, **kwargs) -> Tensor:\n # Only return pooled output\n output = self.module(*args, **kwargs)\n return output[0] if return_sequence else output[1]\n\n\nclass MultiModalEncoderBase(Encoder):\n __jit_unused_properties__ = [\"encoder_config\"]\n\n @dataclass\n class Config(Encoder.Config):\n # This actually is Union[ImageEncoderConfig, ImageFeatureEncoderConfig]\n modal_encoder: EncoderFactory.Config = ImageEncoderFactory.Config(\n type=ImageEncoderTypes.resnet152, params=ResNet152ImageEncoder.Config()\n )\n text_encoder: EncoderFactory.Config = TextEncoderFactory.Config(\n type=TextEncoderTypes.transformer, params=TransformerEncoder.Config()\n )\n direct_features_input: bool = False\n 
modal_hidden_size: int = 2048\n text_hidden_size: int = 768\n\n def __init__(self, config: Config, *args, **kwargs):\n super().__init__()\n self.config = config\n\n self._modal_encoder_config = self.config.get(\"modal_encoder\", None)\n\n self._is_direct_features_input = self.config.get(\"direct_features_input\", False)\n\n self.build()\n self.modal_hidden_size = self.config.get(\"modal_hidden_size\", None)\n self.text_hidden_size = self.config.get(\"text_hidden_size\", None)\n\n def build(self):\n encoders = self._build_encoders(self.config)\n self.text_encoder, self.modal_encoder = encoders[0], encoders[1]\n\n self._encoder_config = None\n if self.text_encoder:\n self._encoder_config = self.text_encoder.config\n\n @property\n def encoder_config(self):\n return self._encoder_config\n\n def _build_encoders(self, config):\n text_encoder = None\n if config.get(\"text_encoder\", None):\n text_encoder = build_text_encoder(config.text_encoder)\n\n modal_encoder = None\n if config.get(\"modal_encoder\", None):\n modal_encoder = self._build_modal_encoder(config.modal_encoder)\n\n return (text_encoder, modal_encoder)\n\n def _build_modal_encoder(self, config):\n return build_image_encoder(\n config, direct_features=self._is_direct_features_input\n )\n\n\nclass PooledEncoder(Encoder):\n \"\"\"\n Standard pooled encoder class which takes in an input, encodes it with an encoder\n implemented and returned from `self.build_encoder` function, pools it based\n `pool_type` and `num_output_features` specified, flattens it and returns it\n back as a tensor.\n \"\"\"\n\n @dataclass\n class Config(Encoder.Config):\n num_output_features: int = 1 # How many output features need to be returned.\n pool_type: str = \"avg\" # type of pooling to apply \"avg\" | \"adaptive\"\n out_dim: int = MISSING # size of out dim expected\n three_d: bool = False # if input requires 3D pooling (for video)\n\n def __init__(self, config: Config, *args, **kwargs):\n super().__init__()\n self.encoder = self.build_encoder(config)\n pool_func = (\n nn.AdaptiveAvgPool2d if config.pool_type == \"avg\" else nn.AdaptiveMaxPool2d\n )\n params = (config.num_output_features, 1)\n if config.three_d:\n pool_func = (\n nn.AdaptiveAvgPool3d\n if config.pool_type == \"avg\"\n else nn.AdaptiveMaxPool3d\n )\n params = (config.num_output_features, 1, 1)\n # -1 will keep the original feature size\n if config.num_output_features == -1:\n self.pool = nn.Identity()\n else:\n self.pool = pool_func(params)\n self.out_dim = config.out_dim\n\n def build_encoder(self, config: Config, *args, **kwargs):\n \"\"\"Build an encoder on whose output the pooling will be applied.\n\n Args:\n config (Config): Config parameter required to build the encoder.\n\n Raises:\n NotImplementedError: Not implemented by default.\n \"\"\"\n raise NotImplementedError()\n\n def forward(self, x: Tensor) -> Tensor:\n out = self.encoder(x)\n out = self.pool(out)\n out = torch.flatten(out, start_dim=2)\n out = out.transpose(1, 2).contiguous()\n return out\n\n\[email protected]_encoder(\"pytorchvideo\")\nclass PytorchVideoEncoder(Encoder):\n \"\"\"A thin wrapper around pytorchvideo models.\n This class is responsible for integrating pytorchvideo models as encoders.\n THis class attempts to construct a pytorchvideo model from torch hub.\n If this fails for a random weight model, and pytorchvideo package is available,\n build the model with random weights from pytorchvideo.models.\n\n Config:\n name (str): Always 'pytorchvideo' Used for builder_encoder()\n random_init (bool): Flag to load 
pretrained weights\n model_name (str): Name of the pytorchvideo model to use\n drop_last_n_layers (int):\n <=0 value for the number of layers to drop off the end\n pooler_name (str): Name of pooler used on model output\n\n Raises:\n ImportError:\n The constructor raises an ImportError if pytorchvideo is not installed.\n \"\"\"\n\n @dataclass\n class Config(Encoder.Config):\n name: str = \"pytorchvideo\"\n random_init: bool = False\n model_name: str = \"slowfast_r50\"\n drop_last_n_layers: int = -1\n pooler_name: str = \"identity\"\n\n PYTORCHVIDEO_REPO = \"facebookresearch/pytorchvideo:main\"\n\n def __init__(self, config: Config):\n super().__init__()\n config = OmegaConf.create({**asdict(self.Config()), **config})\n if config.random_init:\n params = dict(**OmegaConf.to_container(config))\n params = {\n k: v\n for k, v in params.items()\n if k not in PytorchVideoEncoder.Config().__dict__\n }\n try:\n model = torch.hub.load(\n PytorchVideoEncoder.PYTORCHVIDEO_REPO,\n model=config.model_name,\n pretrained=False,\n **params,\n )\n except BaseException as err:\n pytorchvideo_spec = importlib.util.find_spec(\"pytorchvideo\")\n if pytorchvideo_spec is None:\n raise err\n import pytorchvideo.models.hub as hub\n\n model_create_fn = getattr(hub, config.model_name)\n model = model_create_fn(pretrained=False, **params)\n else:\n # load weights from TorchHub\n model = torch.hub.load(\n PytorchVideoEncoder.PYTORCHVIDEO_REPO,\n model=config.model_name,\n pretrained=True,\n )\n encoder_list = []\n if config.drop_last_n_layers == 0:\n encoder_list += [model]\n else:\n modules_list = list(model.children())\n if len(modules_list) == 1:\n modules_list = list(modules_list[0].children())\n modules = modules_list[: config.drop_last_n_layers]\n encoder_list += modules\n\n pooler = registry.get_pool_class(config.pooler_name)()\n encoder_list += [pooler]\n self.encoder = nn.Sequential(*encoder_list)\n\n def forward(self, *args, **kwargs):\n # pass along input to model\n # assumes caller obeys the dynamic model signature\n return self.encoder(*args, **kwargs)\n\n\[email protected]_encoder(\"r2plus1d_18\")\nclass R2Plus1D18VideoEncoder(PooledEncoder):\n \"\"\"\n R2Plus1D based video encoder. Returns back a tensor of dim 2048.\n By default, pretrained version is used.\n See https://arxiv.org/abs/1711.11248.\n \"\"\"\n\n @dataclass\n class Config(PooledEncoder.Config):\n name: str = \"r2plus1d_18\"\n out_dim: int = 512 # out dim\n pretrained: bool = True # if should use pretrained version or not\n three_d: bool = True\n\n def build_encoder(self, config: Config, *args, **kwargs):\n model = torchvision.models.video.r2plus1d_18(\n pretrained=config.get(\"pretrained\", True)\n )\n modules = list(model.children())[:-2]\n return nn.Sequential(*modules)\n\n\[email protected]_encoder(\"resnet18_audio\")\nclass ResNet18AudioEncoder(PooledEncoder):\n \"\"\"\n Audio encoder based on ResNet18 used in various audio classification paper\n as a baseline. 
By default, not pretrained version is used.\n \"\"\"\n\n @dataclass\n class Config(PooledEncoder.Config):\n name: str = \"resnet18_audio\"\n out_dim: int = 512\n pretrained: bool = False\n\n def build_encoder(self, config: Config, *args, **kwargs):\n model = torchvision.models.resnet18(pretrained=config.get(\"pretrained\", False))\n model.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)\n modules = list(model.children())[:-2]\n return nn.Sequential(*modules)\n\n\[email protected]_encoder(\"vit\")\nclass ViTEncoder(Encoder):\n @dataclass\n class Config(Encoder.Config):\n name: str = \"vit\"\n # See https://huggingface.co/models?filter=vit for available options\n pretrained_model_name: str = \"google/vit-base-patch16-224\"\n random_init: bool = False\n gradient_checkpointing: bool = False\n\n def __init__(self, config: Config, *args, **kwargs):\n super().__init__()\n self.config = config\n self.module, self.hf_config = self._model_class.from_config(config)\n self.embeddings = self.module.embeddings\n self.out_dim = self.hf_config.hidden_size\n\n @property\n def _model_class(self):\n from mmf.modules.vit import ViTModel\n\n return ViTModel\n\n def forward(self, *args, **kwargs):\n if \"output_hidden_states\" not in kwargs:\n kwargs[\"output_hidden_states\"] = False\n output = self.module(*args, **kwargs)\n return output[\"last_hidden_state\"], output.get(\"hidden_states\", None)\n"
] | [
[
"torch.prod",
"torch.nn.Linear",
"torch.hub.load_state_dict_from_url",
"torch.load",
"torch.flatten",
"torch.nn.Embedding",
"torch.nn.functional.relu",
"torch.from_numpy",
"torch.nn.Conv2d",
"torch.hub.load",
"torch.nn.Sequential",
"torch.nn.Identity",
"torch.cat"
]
] |
ZPAVelocity/DataStructureExercise | [
"39b1cce859e5c46599b3a6e69ac80ade5920aa34"
] | [
"DynamicProgramming/matrixChainMultiplication.py"
] | [
"import sys\nimport numpy as np\n\n\ndef main():\n p = [30, 35, 15, 5, 10, 20, 25]\n m, s = matrixChainOrder(p)\n \n print('m')\n for i in m:\n print(i)\n print('s')\n for i in s:\n print(i)\n\n\ndef matrixMultiply(A, B):\n if A.shape[1] != B.shape[0]:\n print('incompatible dimensions')\n return np.array([[]])\n C = np.array([[0 for i in range(A.shape[0])] for i in range(B.shape[1])])\n for i in range(A.shape[0]):\n for j in range(B.shape[1]):\n C[i][j] = 0\n for k in range(A.shape[1]):\n C[i][j] += + A[i][k] * B[k][j]\n return C\n\n\ndef matrixChainOrder(p):\n n = len(p) - 1\n m = [[0 for i in range(n)] for j in range(n)]\n s = [[0 for i in range(n)] for j in range(n)]\n\n for i in range(0, n):\n m[i][i] = 0\n\n for l in range(2, n + 1): # l is the chain length\n for i in range(0, n - l + 1):\n j = i + l - 1\n m[i][j] = sys.maxsize\n for k in range(i, j):\n q = m[i][k] + m[k + 1][j] + p[i] * p[k + 1] * p[j + 1]\n if q < m[i][j]:\n m[i][j] = q\n s[i][j] = k + 1\n return m, s\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.array"
]
] |
IngrojShrestha/language | [
"674a3d016b1e17658e301e8d9bdfa63e3d3f5d15"
] | [
"language/bert_extraction/steal_bert_qa/data_generation/preprocess_thief_dev_squad.py"
] | [
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Construct a held-out / validation set from a large pool of WIKI / RANDOM queries ensuring there is no overlap with the train set.\"\"\"\nimport json\nimport random\n\nimport numpy as np\n\nimport tensorflow.compat.v1 as tf\n\napp = tf.compat.v1.app\nflags = tf.flags\ngfile = tf.gfile\nlogging = tf.logging\n\nflags.DEFINE_string(\"pool_dataset\", None,\n \"Large pool of queries having training set distribution.\")\nflags.DEFINE_string(\"train_dataset\", None,\n \"Training set of queries used for model extraction.\")\nflags.DEFINE_integer(\"dev_dataset_size\", 10570,\n \"Number of QAs in held-out set. (default: SQuAD 1.1 size\")\nflags.DEFINE_string(\"output_path\", None, \"Output path for the held-out set.\")\nflags.DEFINE_integer(\"random_seed\", 42, \"Random seed for determinism.\")\n\nFLAGS = flags.FLAGS\n\n\ndef main(_):\n random.seed(FLAGS.random_seed)\n np.random.seed(FLAGS.random_seed)\n\n with gfile.Open(FLAGS.pool_dataset, \"r\") as f:\n pool_data = json.loads(f.read())[\"data\"]\n\n with gfile.Open(FLAGS.train_dataset, \"r\") as f:\n train_data = json.loads(f.read())[\"data\"]\n\n all_train_paras = {}\n\n for inst in train_data:\n for para in inst[\"paragraphs\"]:\n all_train_paras[para[\"context\"]] = 1\n\n num_dev_questions = FLAGS.dev_dataset_size\n\n # sanity check to verify all pool dataset question IDs are unique\n num_pool_questions = 0\n pool_qids = {}\n\n for inst in pool_data:\n for para in inst[\"paragraphs\"]:\n for qa in para[\"qas\"]:\n num_pool_questions += 1\n pool_qids[qa[\"id\"]] = 1\n\n assert len(pool_qids) == num_pool_questions\n\n random.shuffle(pool_data)\n\n output_data = {\"data\": [], \"version\": FLAGS.version}\n\n for instance in pool_data:\n curr_instance = {\"title\": \"Random dev data\", \"paragraphs\": []}\n for para in instance[\"paragraphs\"]:\n # Even if there is a paragraph overlap, do not consider it for the\n # held-out set since we want to minimize overlap\n if para[\"context\"] in all_train_paras:\n continue\n # Assume different paragraphs have different questions\n curr_instance[\"paragraphs\"].append(para)\n num_dev_questions = num_dev_questions - len(para[\"qas\"])\n if num_dev_questions <= 0:\n break\n if curr_instance[\"paragraphs\"]:\n output_data[\"data\"].append(curr_instance)\n if num_dev_questions <= 0:\n break\n\n total_questions = 0\n for instance in output_data[\"data\"]:\n for para in instance[\"paragraphs\"]:\n for qa in para[\"qas\"]:\n total_questions += 1\n\n logging.info(\"Final dataset size = %d\", total_questions)\n\n with gfile.Open(FLAGS.output_path, \"w\") as f:\n f.write(json.dumps(output_data))\n\n\nif __name__ == \"__main__\":\n app.run(main)\n"
] | [
[
"numpy.random.seed"
]
] |
tangzhiyi11/Paddle | [
"790cadd1f06fabeadc4b9aeca5622ea50985b990"
] | [
"python/paddle/fluid/tests/unittests/test_egr_python_api.py"
] | [
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport paddle.fluid.core as core\nimport paddle.fluid.eager.eager_tensor_patch_methods as eager_tensor_patch_methods\nimport paddle\nimport numpy as np\nfrom paddle.fluid.framework import _test_eager_guard, EagerParamBase, _in_eager_mode\nfrom paddle.fluid.data_feeder import convert_dtype\nimport unittest\nimport copy\n\n\nclass EagerScaleTestCase(unittest.TestCase):\n def test_scale_base(self):\n with _test_eager_guard():\n paddle.set_device(\"cpu\")\n arr = np.ones([4, 16, 16, 32]).astype('float32')\n tensor = paddle.to_tensor(arr, 'float32', core.CPUPlace())\n print(tensor)\n tensor = core.eager.scale(tensor, 2.0, 0.9, True, False)\n for i in range(0, 100):\n tensor = core.eager.scale(tensor, 2.0, 0.9, True, False)\n print(tensor)\n self.assertEqual(tensor.shape, [4, 16, 16, 32])\n self.assertEqual(tensor.stop_gradient, True)\n\n def test_retain_grad_and_run_backward(self):\n with _test_eager_guard():\n paddle.set_device(\"cpu\")\n\n input_data = np.ones([4, 16, 16, 32]).astype('float32')\n data_eager = paddle.to_tensor(input_data, 'float32',\n core.CPUPlace(), False)\n\n grad_data = np.ones([4, 16, 16, 32]).astype('float32')\n grad_eager = paddle.to_tensor(grad_data, 'float32', core.CPUPlace())\n\n data_eager.retain_grads()\n\n out_eager = core.eager.scale(data_eager, 1.0, 0.9, True, True)\n self.assertFalse(data_eager.grad._is_initialized())\n out_eager.backward(grad_eager, False)\n self.assertTrue(data_eager.grad._is_initialized())\n self.assertTrue(np.array_equal(data_eager.grad.numpy(), input_data))\n\n def test_retain_grad_and_run_backward_raises(self):\n with _test_eager_guard():\n paddle.set_device(\"cpu\")\n\n input_data = np.ones([4, 16, 16, 32]).astype('float32')\n data_eager = paddle.to_tensor(input_data, 'float32',\n core.CPUPlace(), False)\n\n grad_data = np.ones([4, 16, 16, 32]).astype('float32')\n grad_data2 = np.ones([4, 16]).astype('float32')\n grad_eager = paddle.to_tensor(grad_data, 'float32', core.CPUPlace())\n grad_eager2 = paddle.to_tensor(grad_data2, 'float32',\n core.CPUPlace())\n\n data_eager.retain_grads()\n\n out_eager = core.eager.scale(data_eager, 1.0, 0.9, True, True)\n self.assertFalse(data_eager.grad._is_initialized())\n with self.assertRaisesRegexp(\n AssertionError,\n \"The type of grad_tensor must be paddle.Tensor\"):\n out_eager.backward(grad_data, False)\n\n with self.assertRaisesRegexp(\n AssertionError,\n \"Tensor shape not match, Tensor of grad_tensor /*\"):\n out_eager.backward(grad_eager2, False)\n\n\nclass EagerDtypeTestCase(unittest.TestCase):\n def check_to_tesnsor_and_numpy(self, dtype, proto_dtype):\n with _test_eager_guard():\n arr = np.random.random([4, 16, 16, 32]).astype(dtype)\n tensor = paddle.to_tensor(arr, dtype)\n self.assertEqual(tensor.dtype, proto_dtype)\n self.assertTrue(np.array_equal(arr, tensor.numpy()))\n\n def test_dtype_base(self):\n print(\"Test_dtype\")\n self.check_to_tesnsor_and_numpy('bool', 
core.VarDesc.VarType.BOOL)\n self.check_to_tesnsor_and_numpy('int8', core.VarDesc.VarType.INT8)\n self.check_to_tesnsor_and_numpy('uint8', core.VarDesc.VarType.UINT8)\n self.check_to_tesnsor_and_numpy('int16', core.VarDesc.VarType.INT16)\n self.check_to_tesnsor_and_numpy('int32', core.VarDesc.VarType.INT32)\n self.check_to_tesnsor_and_numpy('int64', core.VarDesc.VarType.INT64)\n self.check_to_tesnsor_and_numpy('float16', core.VarDesc.VarType.FP16)\n self.check_to_tesnsor_and_numpy('float32', core.VarDesc.VarType.FP32)\n self.check_to_tesnsor_and_numpy('float64', core.VarDesc.VarType.FP64)\n self.check_to_tesnsor_and_numpy('complex64',\n core.VarDesc.VarType.COMPLEX64)\n self.check_to_tesnsor_and_numpy('complex128',\n core.VarDesc.VarType.COMPLEX128)\n\n\nclass EagerTensorPropertiesTestCase(unittest.TestCase):\n def constructor(self, place):\n egr_tensor = core.eager.EagerTensor()\n self.assertEqual(egr_tensor.persistable, False)\n self.assertTrue(\"generated\" in egr_tensor.name)\n self.assertEqual(egr_tensor.shape, [])\n self.assertEqual(egr_tensor.dtype, core.VarDesc.VarType.FP32)\n self.assertEqual(egr_tensor.stop_gradient, True)\n\n egr_tensor0 = core.eager.EagerTensor(\n core.VarDesc.VarType.FP32, [4, 16, 16, 32], \"test_eager_tensor\",\n core.VarDesc.VarType.LOD_TENSOR, True)\n self.assertEqual(egr_tensor0.persistable, True)\n self.assertEqual(egr_tensor0.name, \"test_eager_tensor\")\n self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32])\n self.assertEqual(egr_tensor0.dtype, core.VarDesc.VarType.FP32)\n\n arr0 = np.random.rand(4, 16, 16, 32).astype('float32')\n egr_tensor1 = core.eager.EagerTensor(arr0, place, True, False,\n \"numpy_tensor1\", False)\n self.assertEqual(egr_tensor1.persistable, True)\n self.assertEqual(egr_tensor1.name, \"numpy_tensor1\")\n self.assertEqual(egr_tensor1.shape, [4, 16, 16, 32])\n self.assertEqual(egr_tensor1.dtype, core.VarDesc.VarType.FP32)\n self.assertEqual(egr_tensor1.stop_gradient, False)\n self.assertTrue(egr_tensor1.place._equals(place))\n self.assertTrue(np.array_equal(egr_tensor1.numpy(), arr0))\n\n arr1 = np.random.randint(100, size=(4, 16, 16, 32), dtype=np.int64)\n egr_tensor2 = core.eager.EagerTensor(arr1, place, False, True,\n \"numpy_tensor2\", True)\n self.assertEqual(egr_tensor2.persistable, False)\n self.assertEqual(egr_tensor2.name, \"numpy_tensor2\")\n self.assertEqual(egr_tensor2.shape, [4, 16, 16, 32])\n self.assertEqual(egr_tensor2.dtype, core.VarDesc.VarType.INT64)\n self.assertEqual(egr_tensor2.stop_gradient, True)\n self.assertTrue(egr_tensor2.place._equals(place))\n self.assertTrue(np.array_equal(egr_tensor2.numpy(), arr1))\n\n arr2 = np.random.rand(4, 16, 16, 32, 64).astype('float32')\n egr_tensor3 = core.eager.EagerTensor(arr2)\n self.assertEqual(egr_tensor3.persistable, False)\n self.assertTrue(\"generated_tensor\" in egr_tensor3.name)\n self.assertEqual(egr_tensor3.shape, [4, 16, 16, 32, 64])\n self.assertEqual(egr_tensor3.dtype, core.VarDesc.VarType.FP32)\n self.assertEqual(egr_tensor3.stop_gradient, True)\n self.assertTrue(\n egr_tensor3.place._equals(\n paddle.fluid.framework._current_expected_place()))\n self.assertTrue(np.array_equal(egr_tensor3.numpy(), arr2))\n\n egr_tensor3.stop_gradient = False\n egr_tensor4 = core.eager.EagerTensor(egr_tensor3)\n self.assertEqual(egr_tensor4.persistable, False)\n self.assertTrue(\"generated_tensor\" in egr_tensor4.name)\n self.assertEqual(egr_tensor4.shape, egr_tensor3.shape)\n self.assertEqual(egr_tensor4.dtype, egr_tensor3.dtype)\n self.assertEqual(egr_tensor4.stop_gradient, 
True)\n self.assertTrue(\n egr_tensor4.place._equals(\n paddle.fluid.framework._current_expected_place()))\n self.assertTrue(\n np.array_equal(egr_tensor4.numpy(), egr_tensor3.numpy()))\n\n arr4 = np.random.rand(4, 16, 16, 32).astype('float32')\n egr_tensor5 = core.eager.EagerTensor(arr4, place)\n self.assertEqual(egr_tensor5.persistable, False)\n self.assertTrue(\"generated_tensor\" in egr_tensor5.name)\n self.assertEqual(egr_tensor5.shape, [4, 16, 16, 32])\n self.assertEqual(egr_tensor5.dtype, core.VarDesc.VarType.FP32)\n self.assertEqual(egr_tensor5.stop_gradient, True)\n self.assertTrue(egr_tensor5.place._equals(place))\n self.assertTrue(np.array_equal(egr_tensor5.numpy(), arr4))\n\n egr_tensor6 = core.eager.EagerTensor(egr_tensor5, core.CPUPlace())\n self.assertEqual(egr_tensor6.persistable, False)\n self.assertTrue(\"generated_tensor\" in egr_tensor6.name)\n self.assertEqual(egr_tensor6.shape, [4, 16, 16, 32])\n self.assertEqual(egr_tensor6.dtype, core.VarDesc.VarType.FP32)\n self.assertEqual(egr_tensor6.stop_gradient, True)\n self.assertEqual(egr_tensor6.place.is_cpu_place(), True)\n self.assertTrue(\n np.array_equal(egr_tensor6.numpy(), egr_tensor5.numpy()))\n\n egr_tensor7 = core.eager.EagerTensor(arr4, place, True)\n self.assertEqual(egr_tensor7.persistable, True)\n self.assertTrue(\"generated_tensor\" in egr_tensor7.name)\n self.assertEqual(egr_tensor7.shape, [4, 16, 16, 32])\n self.assertEqual(egr_tensor7.dtype, core.VarDesc.VarType.FP32)\n self.assertEqual(egr_tensor7.stop_gradient, True)\n self.assertTrue(egr_tensor7.place._equals(place))\n self.assertTrue(np.array_equal(egr_tensor7.numpy(), arr4))\n\n egr_tensor8 = core.eager.EagerTensor(egr_tensor6, place, \"egr_tensor8\")\n self.assertEqual(egr_tensor8.persistable, False)\n self.assertEqual(egr_tensor8.name, \"egr_tensor8\")\n self.assertEqual(egr_tensor8.shape, [4, 16, 16, 32])\n self.assertEqual(egr_tensor8.dtype, core.VarDesc.VarType.FP32)\n self.assertEqual(egr_tensor8.stop_gradient, True)\n self.assertTrue(egr_tensor8.place._equals(place))\n self.assertTrue(\n np.array_equal(egr_tensor8.numpy(), egr_tensor5.numpy()))\n\n egr_tensor9 = core.eager.EagerTensor(arr4, place, True, True)\n self.assertEqual(egr_tensor9.persistable, True)\n self.assertTrue(\"generated_tensor\" in egr_tensor9.name)\n self.assertEqual(egr_tensor9.shape, [4, 16, 16, 32])\n self.assertEqual(egr_tensor9.dtype, core.VarDesc.VarType.FP32)\n self.assertEqual(egr_tensor9.stop_gradient, True)\n self.assertTrue(egr_tensor9.place._equals(place))\n self.assertTrue(np.array_equal(egr_tensor9.numpy(), arr4))\n\n x = np.random.rand(3, 3).astype('float32')\n t = paddle.fluid.Tensor()\n t.set(x, paddle.fluid.CPUPlace())\n egr_tensor10 = core.eager.EagerTensor(t, place)\n self.assertEqual(egr_tensor10.persistable, False)\n self.assertTrue(\"generated_tensor\" in egr_tensor10.name)\n self.assertEqual(egr_tensor10.shape, [3, 3])\n self.assertEqual(egr_tensor10.dtype, core.VarDesc.VarType.FP32)\n self.assertEqual(egr_tensor10.stop_gradient, True)\n self.assertTrue(egr_tensor10.place._equals(place))\n self.assertTrue(np.array_equal(egr_tensor10.numpy(), x))\n\n egr_tensor11 = core.eager.EagerTensor(t, place, \"framework_constructed\")\n self.assertEqual(egr_tensor11.persistable, False)\n self.assertTrue(\"framework_constructed\" in egr_tensor11.name)\n self.assertEqual(egr_tensor11.shape, [3, 3])\n self.assertEqual(egr_tensor11.dtype, core.VarDesc.VarType.FP32)\n self.assertEqual(egr_tensor11.stop_gradient, True)\n 
self.assertTrue(egr_tensor11.place._equals(place))\n self.assertTrue(np.array_equal(egr_tensor11.numpy(), x))\n\n egr_tensor12 = core.eager.EagerTensor(t)\n self.assertEqual(egr_tensor12.persistable, False)\n self.assertTrue(\"generated_tensor\" in egr_tensor12.name)\n self.assertEqual(egr_tensor12.shape, [3, 3])\n self.assertEqual(egr_tensor12.dtype, core.VarDesc.VarType.FP32)\n self.assertEqual(egr_tensor12.stop_gradient, True)\n self.assertTrue(egr_tensor12.place._equals(paddle.fluid.CPUPlace()))\n self.assertTrue(np.array_equal(egr_tensor12.numpy(), x))\n\n with self.assertRaisesRegexp(\n ValueError, \"The shape of Parameter should not be None\"):\n eager_param = EagerParamBase(shape=None, dtype=\"float32\")\n\n with self.assertRaisesRegexp(\n ValueError, \"The dtype of Parameter should not be None\"):\n eager_param = EagerParamBase(shape=[1, 1], dtype=None)\n\n with self.assertRaisesRegexp(\n ValueError,\n \"The dimensions of shape for Parameter must be greater than 0\"):\n eager_param = EagerParamBase(shape=[], dtype=\"float32\")\n\n with self.assertRaisesRegexp(\n ValueError,\n \"Each dimension of shape for Parameter must be greater than 0, but received /*\"\n ):\n eager_param = EagerParamBase(shape=[-1], dtype=\"float32\")\n\n eager_param = EagerParamBase(shape=[1, 1], dtype=\"float32\")\n self.assertTrue(eager_param.trainable)\n eager_param.trainable = False\n self.assertFalse(eager_param.trainable)\n with self.assertRaisesRegexp(\n ValueError,\n \"The type of trainable MUST be bool, but the type is /*\"):\n eager_param.trainable = \"False\"\n\n def test_constructor(self):\n print(\"Test_constructor\")\n paddle.set_device(\"cpu\")\n place_list = [core.CPUPlace()]\n if core.is_compiled_with_cuda():\n place_list.append(core.CUDAPlace(0))\n with _test_eager_guard():\n for p in place_list:\n self.constructor(p)\n\n def test_copy_and_copy_to(self):\n print(\"Test_copy_and_copy_to\")\n with _test_eager_guard():\n paddle.set_device(\"cpu\")\n arr = np.ones([4, 16, 16, 32]).astype('float32')\n arr1 = np.zeros([4, 16]).astype('float32')\n arr2 = np.ones([4, 16, 16, 32]).astype('float32') + np.ones(\n [4, 16, 16, 32]).astype('float32')\n tensor = paddle.to_tensor(arr, core.VarDesc.VarType.FP32,\n core.CPUPlace())\n self.assertEqual(tensor.stop_gradient, True)\n tensor.stop_gradient = False\n print(\"Set persistable\")\n tensor.persistable = False\n tensor1 = paddle.to_tensor(arr1, core.VarDesc.VarType.FP32,\n core.CPUPlace())\n tensor1.persistable = True\n self.assertEqual(tensor1.stop_gradient, True)\n self.assertTrue(np.array_equal(tensor.numpy(), arr))\n print(\"Test copy_\")\n tensor.copy_(tensor1, True)\n self.assertEqual(tensor.persistable, True)\n self.assertEqual(tensor.shape, [4, 16])\n self.assertEqual(tensor.dtype, core.VarDesc.VarType.FP32)\n self.assertTrue(np.array_equal(tensor.numpy(), arr1))\n\n print(\"Test _copy_to\")\n tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32,\n core.CPUPlace())\n self.assertTrue(np.array_equal(tensor2.numpy(), arr2))\n self.assertTrue(tensor2.place.is_cpu_place())\n tensor2.persistable = True\n tensor2.stop_gradient = False\n if core.is_compiled_with_cuda():\n tensor3 = tensor2._copy_to(True, core.CUDAPlace(0))\n self.assertTrue(np.array_equal(tensor3.numpy(), arr2))\n self.assertTrue(tensor3.persistable, True)\n self.assertTrue(tensor3.stop_gradient, True)\n self.assertTrue(tensor3.place.is_gpu_place())\n else:\n tensor3 = tensor2._copy_to(True, core.CPUPlace())\n self.assertTrue(np.array_equal(tensor3.numpy(), arr2))\n 
self.assertTrue(tensor3.persistable, True)\n self.assertTrue(tensor3.stop_gradient, True)\n self.assertTrue(tensor3.place.is_cpu_place())\n\n def test_properties(self):\n print(\"Test_properties\")\n with _test_eager_guard():\n paddle.set_device(\"cpu\")\n arr = np.ones([4, 16, 16, 32]).astype('float32')\n tensor = paddle.to_tensor(arr, core.VarDesc.VarType.FP32,\n core.CPUPlace())\n self.assertEqual(tensor.shape, [4, 16, 16, 32])\n tensor.name = 'tensor_name_test'\n self.assertEqual(tensor.name, 'tensor_name_test')\n self.assertEqual(tensor.persistable, False)\n tensor.persistable = True\n self.assertEqual(tensor.persistable, True)\n tensor.persistable = False\n self.assertEqual(tensor.persistable, False)\n self.assertTrue(tensor.place.is_cpu_place())\n self.assertEqual(tensor._place_str, 'CPUPlace')\n self.assertEqual(tensor.stop_gradient, True)\n tensor.stop_gradient = False\n self.assertEqual(tensor.stop_gradient, False)\n tensor.stop_gradient = True\n self.assertEqual(tensor.stop_gradient, True)\n\n def test_global_properties(self):\n print(\"Test_global_properties\")\n self.assertFalse(core._in_eager_mode())\n with _test_eager_guard():\n self.assertTrue(core._in_eager_mode())\n self.assertFalse(core._in_eager_mode())\n\n def test_place_guard(self):\n core._enable_eager_mode()\n if core.is_compiled_with_cuda():\n paddle.set_device(\"gpu:0\")\n with paddle.fluid.framework._dygraph_place_guard(core.CPUPlace()):\n self.assertTrue(core.eager._get_expected_place().is_cpu_place())\n else:\n paddle.set_device(\"cpu\")\n with paddle.fluid.framework._dygraph_place_guard(core.CPUPlace()):\n self.assertTrue(core.eager._get_expected_place().is_cpu_place())\n core._disable_eager_mode()\n\n\nclass EagerParamBaseUsageTestCase(unittest.TestCase):\n def test_print(self):\n with _test_eager_guard():\n linear = paddle.nn.Linear(3, 3, bias_attr=False)\n print(linear.weight)\n\n def test_copy(self):\n with _test_eager_guard():\n linear = paddle.nn.Linear(1, 3)\n linear_copy = copy.deepcopy(linear)\n linear_copy2 = linear.weight._copy_to(core.CPUPlace(), True)\n self.assertTrue(\n np.array_equal(linear.weight.numpy(),\n linear_copy.weight.numpy()))\n self.assertTrue(\n np.array_equal(linear.weight.numpy(), linear_copy2.numpy()))\n\n def func_fp16_initilaizer(self):\n paddle.set_default_dtype(\"float16\")\n linear1 = paddle.nn.Linear(1, 3, bias_attr=False)\n linear2 = paddle.nn.Linear(\n 1,\n 3,\n bias_attr=False,\n weight_attr=paddle.fluid.initializer.Uniform())\n linear3 = paddle.nn.Linear(\n 1,\n 3,\n bias_attr=False,\n weight_attr=paddle.fluid.initializer.TruncatedNormalInitializer())\n linear4 = paddle.nn.Linear(\n 1,\n 3,\n bias_attr=False,\n weight_attr=paddle.fluid.initializer.MSRAInitializer())\n res = [\n linear1.weight.numpy(), linear2.weight.numpy(),\n linear3.weight.numpy(), linear4.weight.numpy()\n ]\n paddle.set_default_dtype(\"float32\")\n return res\n\n def test_fp16_initializer(self):\n res1 = list()\n res2 = list()\n paddle.seed(102)\n paddle.framework.random._manual_program_seed(102)\n with _test_eager_guard():\n res1 = self.func_fp16_initilaizer()\n res2 = self.func_fp16_initilaizer()\n\n for i in range(len(res1)):\n self.assertTrue(np.array_equal(res1[i], res2[i]))\n\n def func_layer_helper_base(self, value):\n base = paddle.fluid.layer_helper_base.LayerHelperBase(\"test_layer\",\n \"test_layer\")\n return base.to_variable(value).numpy()\n\n def func_base_to_variable(self, value):\n paddle.fluid.dygraph.base.to_variable(value)\n\n def test_to_variable(self):\n value = np.random.rand(4, 
16, 16, 32).astype('float32')\n res1 = None\n res3 = None\n with _test_eager_guard():\n res1 = self.func_layer_helper_base(value)\n res3 = self.func_base_to_variable(value)\n res2 = self.func_layer_helper_base(value)\n res4 = self.func_base_to_variable(value)\n self.assertTrue(np.array_equal(res1, res2))\n self.assertTrue(np.array_equal(res3, res4))\n\n def test_backward_with_single_tensor(self):\n arr4 = np.random.rand(4, 16, 16, 32).astype('float32')\n egr_tensor12 = core.eager.EagerTensor(arr4, core.CPUPlace())\n egr_tensor12.retain_grads()\n arr = np.ones([4, 16, 16, 32]).astype('float32')\n self.assertEqual(egr_tensor12.persistable, False)\n self.assertTrue(\"generated_tensor\" in egr_tensor12.name)\n self.assertEqual(egr_tensor12.shape, [4, 16, 16, 32])\n self.assertEqual(egr_tensor12.dtype, core.VarDesc.VarType.FP32)\n self.assertEqual(egr_tensor12.stop_gradient, True)\n self.assertTrue(egr_tensor12.place._equals(paddle.fluid.CPUPlace()))\n self.assertTrue(np.array_equal(egr_tensor12.numpy(), arr4))\n self.assertTrue(np.array_equal(egr_tensor12.gradient(), None))\n egr_tensor12.backward()\n self.assertTrue(np.array_equal(egr_tensor12.gradient(), arr))\n\n\nclass EagerGuardTestCase(unittest.TestCase):\n def test__test_eager_guard(self):\n tracer = paddle.fluid.dygraph.tracer.Tracer()\n with _test_eager_guard(tracer):\n self.assertTrue(_in_eager_mode())\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"numpy.ones",
"numpy.zeros",
"numpy.random.random",
"numpy.random.rand",
"numpy.array_equal",
"numpy.random.randint"
]
] |
Efreeto/face-alignment | [
"d496866ac3d66c8353ba3e0305f16ac8a2ccc017"
] | [
"face_alignment/FaceLandmarksDataset.py"
] | [
"import torch\nfrom torch.utils.data import Dataset\nfrom skimage import io, color, transform\nimport torchvision\nimport os, glob\nimport numpy as np\nimport random\nfrom scipy import ndimage\nfrom PIL import Image\nimport torch.nn.functional as F\n\nfrom . import utils\n\n######################################################################\n# Transforms\n# ----------\n#\n# One issue we can see from the above is that the samples are not of the\n# same size. Most neural networks expect the images of a fixed size.\n# Therefore, we will need to write some prepocessing code.\n# Let's create three transforms:\n#\n# - ``Rescale``: to scale the image\n# - ``RandomCrop``: to crop from image randomly. This is data\n# augmentation.\n# - ``ToTensor``: to convert the numpy images to torch images (we need to\n# swap axes).\n#\n# We will write them as callable classes instead of simple functions so\n# that parameters of the transform need not be passed everytime it's\n# called. For this, we just need to implement ``__call__`` method and\n# if required, ``__init__`` method. We can then use a transform like this:\n#\n# ::\n#\n# tsfm = Transform(params)\n# transformed_sample = tsfm(sample)\n#\n# Observe below how these transforms had to be applied both on the image and\n# landmarks.\n#\n\n\nclass Rescale(object):\n \"\"\"Rescale the image in a sample to a given size.\n\n Args:\n output_size (tuple or tuple): Desired output size. If tuple, output is\n matched to output_size. If int, smaller of image edges is matched\n to output_size keeping aspect ratio the same.\n \"\"\"\n\n def __init__(self, output_size):\n assert isinstance(output_size, (int, tuple))\n self.output_size = output_size\n\n def __call__(self, sample):\n image, landmarks = sample['image'], sample['landmarks']\n\n h, w = image.shape[:2]\n if isinstance(self.output_size, int):\n if h > w:\n new_h, new_w = self.output_size * h / w, self.output_size\n else:\n new_h, new_w = self.output_size, self.output_size * w / h\n else:\n new_h, new_w = self.output_size\n\n new_h, new_w = int(new_h), int(new_w)\n\n img = transform.resize(image, (new_h, new_w))\n\n # h and w are swapped for landmarks because for images,\n # x and y axes are axis 1 and 0 respectively\n landmarks = landmarks * [new_w / w, new_h / h]\n\n img = img.astype('float32')\n landmarks = landmarks.astype('float32')\n\n return {'image': img, 'landmarks': landmarks}\n\nclass RandomHorizFlip(object):\n def __call__(self, sample):\n image, landmarks = sample['image'], sample['landmarks']\n if random.random() < 0.5:\n image = np.fliplr(image).copy()\n landmarks = landmarks.transpose()\n landmarks[0] = image.shape[1] - landmarks[0]\n landmarks = landmarks.transpose()\n landmarks = utils.shuffle_lr(landmarks)\n\n return {'image': image, 'landmarks': landmarks}\n\n\n__imagenet_stats = {'mean': [0.485, 0.456, 0.406],\n 'std': [0.229, 0.224, 0.225]}\n\nimagenet_pca = {\n 'eigval': torch.Tensor([0.2175, 0.0188, 0.0045]),\n 'eigvec': torch.Tensor([\n [-0.5675, 0.7192, 0.4009],\n [-0.5808, -0.0045, -0.8140],\n [-0.5836, -0.6948, 0.4203],\n ])\n}\n\nclass Lighting(object):\n \"\"\"Lighting noise(AlexNet - style PCA - based noise)\"\"\"\n\n def __init__(self, alphastd=0.1, eigval=imagenet_pca['eigval'], eigvec=imagenet_pca['eigvec']):\n self.alphastd = alphastd\n self.eigval = eigval\n self.eigvec = eigvec\n\n def __call__(self, sample):\n image, landmarks = sample['image'], sample['landmarks']\n if self.alphastd == 0:\n return image\n\n alpha = image.new().resize_(3).normal_(0, self.alphastd)\n rgb 
= self.eigvec.type_as(image).clone()\\\n .mul(alpha.view(1, 3).expand(3, 3))\\\n .mul(self.eigval.view(1, 3).expand(3, 3))\\\n .sum(1).squeeze()\n\n return {'image': image.add(rgb.view(3, 1, 1).expand_as(image)), 'landmarks': landmarks}\n\n\nclass FaceColorJitter(object):\n\n def __init__(self, brightness=0.4, contrast=0.4, saturation=0.4):\n self.color_jitter = torchvision.transforms.ColorJitter(brightness, contrast, saturation)\n\n def __call__(self, sample):\n image, landmarks = sample['image'], sample['landmarks'].copy()\n\n to_pil = torchvision.transforms.ToPILImage()\n img = to_pil(image)\n img = self.color_jitter(img)\n to_tensor = torchvision.transforms.ToTensor()\n image = to_tensor(img).numpy().transpose(1,2,0)\n return {'image': image, 'landmarks': landmarks}\n\n\nclass RandomRotation(object):\n def __init__(self, maximum_angle=50., minimum_angle=5.):\n self.maximum_angle = maximum_angle - minimum_angle\n self.minimum_angle = minimum_angle\n\n def __call__(self, sample):\n image, landmarks = sample['image'], sample['landmarks']\n rotation_angle = (random.random() - 0.5) * 2 * self.maximum_angle\n if rotation_angle > 0:\n rotation_angle += self.minimum_angle\n else:\n rotation_angle -= self.minimum_angle\n manual_theta = utils.transformation_matrix(-rotation_angle)\n manual_theta_inv = utils.transformation_matrix(rotation_angle)\n\n image_rot = ndimage.rotate(image, rotation_angle, reshape=True)\n origin_org = ((image.shape[1] / 2.0, image.shape[0] / 2.0))\n origin_rot = ((image_rot.shape[1] / 2.0, image_rot.shape[0] / 2.0))\n\n landmarks_rot = landmarks - origin_org\n landmarks_rot = np.asarray(np.dot(landmarks_rot, manual_theta_inv)[:, :2])\n landmarks_rot = landmarks_rot + origin_rot\n\n sample['image_rot'] = image_rot\n sample['landmarks_rot'] = landmarks_rot\n sample['theta'] = manual_theta\n sample['angle'] = rotation_angle\n\n return sample\n\n\nclass LandmarkCrop(object):\n def __init__(self, resolution):\n self.resolution = resolution\n\n def __call__(self, sample):\n image, landmarks = sample['image'], sample['landmarks']\n bbox = utils.bounding_box(landmarks)\n center, scale = utils.center_scale_from_bbox(bbox)\n image = utils.crop(image, center, scale, self.resolution)\n # landmarks = landmarks - (bbox[0], bbox[1])\n sample['image'] = image\n sample['landmarks'] = landmarks\n\n if 'image_rot' in sample: # if RandomRotation, crop around the rotated image\n image, landmarks = sample['image_rot'], sample['landmarks_rot']\n bbox = utils.bounding_box(landmarks)\n center, scale = utils.center_scale_from_bbox(bbox)\n image = utils.crop(image, center, scale, self.resolution)\n # landmarks = landmarks - (bbox[0], bbox[1])\n sample['image_rot'] = image\n sample['landmarks_rot'] = landmarks\n\n return sample\n\n\nclass CreateHeatmaps(object):\n def __init__(self, output_size=64, n_features=68):\n self.output_size = output_size\n self.n_features = n_features\n\n def __call__(self, sample):\n landmarks = sample['landmarks']\n center, scale = utils.center_scale_from_bbox(utils.bounding_box(landmarks))\n heatmap = np.zeros((self.n_features, self.output_size, self.output_size))\n for i in range(self.n_features):\n new_pts = utils.transform(landmarks[i], center, scale, self.output_size)\n heatmap[i] = utils.draw_gaussian(heatmap[i], new_pts, 1)\n sample['heatmaps'] = torch.from_numpy(heatmap).view(self.n_features, self.output_size, self.output_size).float()\n\n if 'image_rot' in sample: # if RandomRotation, crop around the rotated image\n landmarks = sample['landmarks_rot']\n center, 
scale = utils.center_scale_from_bbox(utils.bounding_box(landmarks))\n heatmap = np.zeros((self.n_features, self.output_size, self.output_size))\n for i in range(self.n_features):\n new_pts = utils.transform(landmarks[i], center, scale, self.output_size)\n heatmap[i] = utils.draw_gaussian(heatmap[i], new_pts, 1)\n sample['heatmaps_rot'] = torch.from_numpy(heatmap).view(self.n_features, self.output_size, self.output_size).float()\n\n return sample\n\nclass CreateHeatmaps2(object):\n def __init__(self, output_size=64, n_features=68):\n self.output_size = output_size\n self.n_features = n_features\n if self.n_features==68:\n self.neigbor_list = [[2],[1,3],[2,4],[3,5],[4,6],[5,7],[6,8],[7,9],[8,10],\n [9,11],[10,12],[11,13],[12,14],[13,15],[14,16],[15,17],\n [16], [19], [18,20], [19,21], [20,22], [21],[24],[23,25],\n [24,26],[25,27],[26],[29],[28,30],[29,31],[30,34],[33],\n [32,34],[33,35],[34,36],[35],[],[37,39],[38,40],[],[40,42],\n [37,41],[],[43,45],[44,46],[],[46,48],[43,47],[],[49,51],\n [50,52],[51,53],[52,54],[53,55],[],[55,57],[56,58],[57,59],\n [58,60],[59,49],[49],[61,63],[62,64],[63,65],[55],[65,67],\n [66,68],[61,67]]\n elif self.n_features==108:\n self.neigbor_list = [[2],[1,3],[2,4],[3,5],[4,6],[5,7],[6,8],[7,9],[8,10],\n [9,11],[10,12],[11,13],[12,14],[13,15],[14,16],[15,17],\n [16,18],[17,19],[18,20],[19,21],[20,22],[21,23],[22,24],\n [23,25],[24,26],[25,27],[26,28],[27,29],[28,30],[29,31],\n [30,32],[31,33],[32],[],[34,36],[35,37],[36,38],[], [39,41],\n [40,42],[41,43], [],[45],[44,46], [45,47], [46], [49],[48,50],\n [],[50,52],[51],[],[53,55],[54,56],[],[56,58], [],[],[59,61],\n [60,62],[],[62,64],[],[],[65,67],[66,68],[],[],[69,71],[70,72],[]\n [54,55],[58,57],[],[60,61],[63,64],[],[81],[82],[79,83],[80,84],\n [81,85],[82,86],[83,87],[84,88],[48],[52],[],[89,91],[90,92],\n [91,93],[92,94],[93,95],[],[95,97],[96,98],[97,99],[98,100],[89,99],\n [],[101,103],[102,104],[103,105],[],[105,107],[106,108],[101,107]]\n\n def __call__(self, sample):\n landmarks = sample['landmarks']\n center, scale = center_scale_from_landmark(landmarks)\n heatmap = np.zeros((self.n_features, self.output_size, self.output_size))\n foo = np.zeros((self.output_size, self.output_size))\n\n for i in range(self.n_features):\n neighbors = self.get_neighbors(i)\n num_neighbors = len(neighbors)\n if num_neighbors == 0:\n heatmap[i] = utils.draw_gaussian(heatmap[i], utils.transform(landmarks[i], center, scale, self.output_size), 1)\n foo = utils.draw_gaussian(foo, utils.transform(landmarks[i], center, scale, self.output_size), 1)\n else:\n if num_neighbors == 2:\n points = np.zeros((3,2))\n points[0] = utils.transform(landmarks[neighbors[0]-1], center, scale, self.output_size).numpy()\n points[1] = utils.transform(landmarks[i], center, scale, self.output_size).numpy()\n points[2] = utils.transform(landmarks[neighbors[1]-1], center, scale, self.output_size).numpy()\n else:\n points = np.zeros((2,2))\n points[0] = utils.transform(landmarks[neighbors[0]-1], center, scale, self.output_size).numpy()\n points[1] = utils.transform(landmarks[i], center, scale, self.output_size).numpy()\n\n heatmap[i] = utils.draw_gaussian2(heatmap[i], points, 1)\n # foo = utils.draw_gaussian(foo, utils.transform(landmarks[i], center, scale, self.output_size), 1)\n foo = utils.draw_gaussian2(foo, points, 1)\n \"\"\"\n from PIL import Image\n im = Image.fromarray(foo*255)\n im.show()\n \"\"\"\n\n heatmaps = torch.from_numpy(heatmap).view(1, self.n_features, self.output_size, self.output_size).float()\n\n return {'image': 
sample['image'], 'landmarks': heatmaps}\n\n def get_neighbors(self, landmark):\n return self.neigbor_list[landmark]\n\n\nclass RandomCrop(object):\n \"\"\"Crop randomly the image in a sample.\n\n Args:\n output_size (tuple or int): Desired output size. If int, square crop\n is made.\n \"\"\"\n\n def __init__(self, output_size):\n assert isinstance(output_size, (int, tuple))\n if isinstance(output_size, int):\n self.output_size = (output_size, output_size)\n else:\n assert len(output_size) == 2\n self.output_size = output_size\n\n def __call__(self, sample):\n image, landmarks = sample['image'], sample['landmarks']\n\n h, w = image.shape[:2]\n new_h, new_w = self.output_size\n\n top = np.random.randint(0, h - new_h)\n left = np.random.randint(0, w - new_w)\n\n image = image[top: top + new_h,\n left: left + new_w]\n\n landmarks = landmarks - [left, top]\n\n return {'image': image, 'landmarks': landmarks}\n\nclass ToTensor(object):\n \"\"\"Convert ndarrays in sample to Tensors.\"\"\"\n\n def __call__(self, sample):\n for key in sample:\n if key in ['image', 'image_rot']:\n sample[key] = torchvision.transforms.ToTensor()(sample[key])\n elif key in ['filename', 'angle', 'heatmaps', 'heatmaps_rot']:\n continue\n else:\n sample[key] = torch.from_numpy(sample[key]).float()\n return sample\n\nclass FaceLandmarksDataset(Dataset):\n \"\"\"Face Landmarks dataset.\"\"\"\n\n def __init__(self, path, type=1, transforms=None):\n \"\"\"\n Args:\n path (string): Directory with all the images and landmarks.\n transforms (callable, optional): Optional transform to be applied\n on a sample.\n \"\"\"\n self.type = type\n self.transforms = transforms\n\n image_exts = ('*.jpg', '*.png')\n self.images_list = []\n for ext in image_exts:\n self.images_list.extend(sorted(glob.glob(os.path.join(path, ext))))\n assert self.images_list, \"path does not contain images\"\n\n def __len__(self):\n return len(self.images_list)\n\n def __getitem__(self, idx):\n image = io.imread(self.images_list[idx])\n image = color.grey2rgb(image) # For some gray scale images\n\n filename = self.images_list[idx]\n basename = os.path.splitext(filename)[0]\n if self.type == 1: # 300W, lfpw\n landmarks = np.loadtxt(basename + '.pts', skiprows=3, comments='}')\n elif self.type == 2: # land110\n landmarks = np.loadtxt(basename + '.land', skiprows=1)\n # landmarks = np.vstack((landmarks[0:32:2], landmarks[32:64], landmarks[88:108]))\n elif self.type == 3: # FEI\n landmarks = np.ones((68,2))\n elif self.type == 4: # 8W\n landmarks = np.loadtxt(basename + '.pts')\n\n sample = {'image': image, 'landmarks': landmarks, 'filename': filename}\n if self.transforms:\n sample = self.transforms(sample)\n\n return sample\n"
] | [
[
"numpy.ones",
"numpy.zeros",
"numpy.fliplr",
"torch.from_numpy",
"numpy.loadtxt",
"numpy.dot",
"numpy.random.randint",
"scipy.ndimage.rotate",
"torch.Tensor"
]
] |
PNNL-Comp-Mass-Spec/CRNT4SBML | [
"20406f452863f35f766b504fe2b3f3ab034b62fe"
] | [
"crnt4sbml/safety_wrap.py"
] | [
"import os\nimport pickle\nimport numpy\nimport antimony\nimport roadrunner\nimport rrplugins\nimport sys\n\nroadrunner.Logger.setLevel(roadrunner.Logger.LOG_ERROR)\nroadrunner.Logger.disableLogging()\nroadrunner.Logger.disableConsoleLogging()\nroadrunner.Logger.disableFileLogging()\nrrplugins.setLogLevel('error')\n\nstderr_fileno = sys.stderr.fileno()\nstderr_save = os.dup(stderr_fileno)\nstderr_pipe = os.pipe()\nos.dup2(stderr_pipe[1], stderr_fileno)\nos.close(stderr_pipe[1])\n\n\n# functions taken from Tellurium!! Give them\n# credit, they deserve it!\n#################################################\ndef __check_antimony_return_code(code):\n if code < 0:\n raise Exception('Antimony: {}'.format(antimony.getLastError()))\n\n\ndef __antimony_to_sbml(ant):\n try:\n isfile = os.path.isfile(ant)\n except ValueError:\n isfile = False\n if isfile:\n code = antimony.loadAntimonyFile(ant)\n else:\n code = antimony.loadAntimonyString(ant)\n __check_antimony_return_code(code)\n mid = antimony.getMainModuleName()\n return antimony.getSBMLString(mid)\n\n\ndef __loada(ant):\n return __load_antimony_model(ant)\n\n\ndef __load_antimony_model(ant):\n sbml = __antimony_to_sbml(ant)\n return roadrunner.RoadRunner(sbml)\n\n\nwith open('input_arguments.pickle', 'rb') as pickle_file:\n input_arguments = pickle.loads(pickle_file.read())\n\nant_str = input_arguments[0]\ndirection = input_arguments[1]\nauto = rrplugins.Plugin(\"tel_auto2000\")\nauto_parameters = input_arguments[2]\n\nantimony_r = __loada(ant_str)\n\n# # making the directory auto_fort_files if is does not exist\n# if not os.path.isdir(\"./auto_fort_files\"):\n# os.mkdir(\"./auto_fort_files\")\n\nauto.setProperty(\"SBML\", antimony_r.getCurrentSBML())\nauto.setProperty(\"ScanDirection\", direction)\nauto.setProperty(\"PreSimulation\", \"True\")\nauto.setProperty(\"PreSimulationDuration\", 1.0)\nauto.setProperty('KeepTempFiles', True)\nauto.setProperty(\"TempFolder\", \"auto_fort_files\")\n\n# assigning values provided by the user\nfor i in auto_parameters.keys():\n auto.setProperty(i, auto_parameters[i])\n\ntry:\n auto.execute()\n # indices where special points are\n pts = auto.BifurcationPoints\n # labeling of special points\n lbls = auto.BifurcationLabels\n # all data for parameters and species found by continuation\n bi_data = auto.BifurcationData\n\n # convertes bi_data to numpy array, where first\n # column is the principal continuation parameter and\n # the rest of the columns are the species\n bi_data_np = bi_data.toNumpy\n flag = True\n\nexcept Exception as e:\n flag = False\n pts = []\n lbls = []\n bi_data_np = numpy.zeros(2)\n\nant_float_ids = antimony_r.model.getFloatingSpeciesIds()\nnumpy.save('bi_data_np.npy', bi_data_np)\n\noutput_arguments = [pts, lbls, ant_float_ids, flag]\n\nif os.path.exists(\"output_arguments.pickle\"):\n os.remove(\"output_arguments.pickle\")\n with open('output_arguments.pickle', 'wb') as outf:\n outf.write(pickle.dumps(output_arguments))\nelse:\n with open('output_arguments.pickle', 'wb') as outf:\n outf.write(pickle.dumps(output_arguments))\n\nos.close(stderr_pipe[0])\nos.dup2(stderr_save, stderr_fileno)\nos.close(stderr_save)\nos.close(stderr_fileno)\n"
] | [
[
"numpy.save",
"numpy.zeros"
]
] |
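
The descriptor plumbing that brackets safety_wrap.py is a generic pattern for muting C-level stderr output that reassigning Python's sys.stderr cannot catch; in isolation it is just:

import os
import sys

stderr_fd = sys.stderr.fileno()
saved_fd = os.dup(stderr_fd)         # keep a copy so stderr can be restored
read_end, write_end = os.pipe()
os.dup2(write_end, stderr_fd)        # fd 2 now points into the pipe
os.close(write_end)

os.write(stderr_fd, b"swallowed\n")  # lands in the pipe, not on the console

os.close(read_end)
os.dup2(saved_fd, stderr_fd)         # restore the original stderr
os.close(saved_fd)

The pickle files play the same role in the other direction: the script reads input_arguments.pickle from its parent process and writes output_arguments.pickle back, a crude but robust cross-process handoff.
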
TimeTraveller-San/FairGAN | [
"526c2937714fc322714db54dc6a3f392f2c88e18"
] | [
"we.py"
] | [
"from __future__ import print_function, division\nimport re\nimport sys\nimport numpy as np\nimport scipy.sparse\nimport codecs\nfrom sklearn.decomposition import PCA\nif sys.version_info[0] < 3:\n import io\n open = io.open\nelse:\n unicode = str\n\"\"\"\nTools for debiasing word embeddings\n\nMan is to Computer Programmer as Woman is to Homemaker? Debiasing Word Embeddings\nTolga Bolukbasi, Kai-Wei Chang, James Zou, Venkatesh Saligrama, and Adam Kalai\n2016\n\"\"\"\n\nDEFAULT_NUM_WORDS = 27000\nFILENAMES = {\"g_wiki\": \"glove.6B.300d.small.txt\",\n \"g_twitter\": \"glove.twitter.27B.200d.small.txt\",\n \"g_crawl\": \"glove.840B.300d.small.txt\",\n \"w2v\": \"GoogleNews-word2vec.small.txt\",\n \"w2v_large\": \"GoogleNews-word2vec.txt\"}\n\n\ndef dedup(seq):\n seen = set()\n return [x for x in seq if not (x in seen or seen.add(x))]\n\n\ndef safe_word(w):\n # ignore words with numbers, etc.\n # [a-zA-Z\\.'_\\- :;\\(\\)\\]] for emoticons\n return (re.match(r\"^[a-z_]*$\", w) and len(w) < 20 and not re.match(r\"^_*$\", w))\n\n\ndef to_utf8(text, errors='strict', encoding='utf8'):\n \"\"\"Convert a string (unicode or bytestring in `encoding`), to bytestring in utf8.\"\"\"\n if isinstance(text, unicode):\n return text.encode('utf8')\n # do bytestring -> unicode -> utf8 full circle, to ensure valid utf8\n return unicode(text, encoding, errors=errors).encode('utf8')\n\n\ndef load_embeddings_from_np(filename):\n print('loading ...')\n with codecs.open(filename + '.vocab', 'r', 'utf-8') as f_embed:\n vocab = [line.strip() for line in f_embed]\n wv = np.load(filename + '.wv.npy')\n return vocab, wv\n\nclass WordEmbedding:\n def __init__(self, fname):\n self.thresh = None\n self.max_words = None\n self.desc = fname\n print(\"*** Reading data from \" + fname)\n if fname.endswith(\".bin\"):\n import gensim.models\n model = gensim.models.KeyedVectors.load_word2vec_format(fname, binary=True)\n words = sorted([w for w in model.vocab], key=lambda w: model.vocab[w].index)\n vecs = [model[w] for w in words]\n elif fname.endswith(\".txt\"):\n print(\"Loading w2vec format\")\n vecs = []\n words = []\n with open(fname, \"r\") as f:\n lines = f.readlines()\n for line in lines:\n tokens = line.split()\n v = np.array([float(x) for x in tokens[-300:]])\n w = \"_\".join([str(x) for x in tokens[:-300]])\n if len(v) != 300:\n print(f\"Weird line: {tokens} | {len(v)}\")\n continue\n words.append(w)\n vecs.append(v)\n else:\n print(\"Loading numpy format\")\n words, vecs = load_embeddings_from_np(fname)\n\n self.vecs = np.array(vecs, dtype='float32')\n print(self.vecs.shape)\n self.words = words\n self.reindex()\n norms = np.linalg.norm(self.vecs, axis=1)\n if max(norms)-min(norms) > 0.0001:\n self.normalize()\n\n def reindex(self):\n self.index = {w: i for i, w in enumerate(self.words)}\n self.rindex = {i: w for i, w in enumerate(self.words)}\n self.n, self.d = self.vecs.shape\n assert self.n == len(self.words) == len(self.index)\n self._neighbors = None\n print(self.n, \"words of dimension\", self.d, \":\", \", \".join(self.words[:4] + [\"...\"] + self.words[-4:]))\n\n def v(self, word):\n return self.vecs[self.index[word]]\n\n def diff(self, word1, word2):\n v = self.vecs[self.index[word1]] - self.vecs[self.index[word2]]\n return v/np.linalg.norm(v)\n\n def normalize(self):\n self.desc += \", normalize\"\n self.vecs /= np.linalg.norm(self.vecs, axis=1)[:, np.newaxis]\n self.reindex()\n\n def shrink(self, numwords):\n self.desc += \", shrink \" + str(numwords)\n self.filter_words(lambda w: self.index[w]<numwords)\n\n 
def filter_words(self, test):\n \"\"\"\n Keep some words based on test, e.g. lambda x: x.lower()==x\n \"\"\"\n self.desc += \", filter\"\n kept_indices, words = zip(*[[i, w] for i, w in enumerate(self.words) if test(w)])\n self.words = list(words)\n self.vecs = self.vecs[kept_indices, :]\n self.reindex()\n\n def save(self, filename):\n with open(filename, \"w\") as f:\n f.write(\"\\n\".join([w+\" \" + \" \".join([str(x) for x in v]) for w, v in zip(self.words, self.vecs)]))\n print(\"Wrote\", self.n, \"words to\", filename)\n\n def save_w2v(self, filename, binary=True):\n with open(filename, 'wb') as fout:\n fout.write(to_utf8(\"%s %s\\n\" % self.vecs.shape))\n # store in sorted order: most frequent words at the top\n for i, word in enumerate(self.words):\n row = self.vecs[i]\n if binary:\n fout.write(to_utf8(word) + b\" \" + row.tostring())\n else:\n fout.write(to_utf8(\"%s %s\\n\" % (word, ' '.join(\"%f\" % val for val in row))))\n\n def remove_directions(self, directions): #directions better be orthogonal\n self.desc += \", removed\"\n for direction in directions:\n self.desc += \" \"\n if type(direction) is np.ndarray:\n v = direction / np.linalg.norm(direction)\n self.desc += \"vector \"\n else:\n w1, w2 = direction\n v = self.diff(w1, w2)\n self.desc += w1 + \"-\" + w2\n self.vecs = self.vecs - self.vecs.dot(v)[:, np.newaxis].dot(v[np.newaxis, :])\n self.normalize()\n\n def compute_neighbors_if_necessary(self, thresh, max_words):\n thresh = float(thresh) # dang python 2.7!\n if self._neighbors is not None and self.thresh == thresh and self.max_words == max_words:\n return\n print(\"Computing neighbors\")\n self.thresh = thresh\n self.max_words = max_words\n vecs = self.vecs[:max_words]\n dots = vecs.dot(vecs.T)\n dots = scipy.sparse.csr_matrix(dots * (dots >= 1-thresh/2))\n from collections import Counter\n rows, cols = dots.nonzero()\n nums = list(Counter(rows).values())\n print(\"Mean:\", np.mean(nums)-1)\n print(\"Median:\", np.median(nums)-1)\n rows, cols, vecs = zip(*[(i, j, vecs[i]-vecs[j]) for i, j, x in zip(rows, cols, dots.data) if i<j])\n self._neighbors = rows, cols, np.array([v/np.linalg.norm(v) for v in vecs])\n\n def neighbors(self, word, thresh=1):\n dots = self.vecs.dot(self.v(word))\n dd = dict(zip([abs(dot) for dot in dots], [i for i in range(len(dots))]))\n ns=[]\n for dot in sorted(dd, reverse=True):\n if dot>1-thresh/2:\n ns.append(self.words[int(dd[dot])])\n return ns[1:] #Since first word is the word itself\n\n def neighborsNoSort(self, word, thresh=1):\n dots = self.vecs.dot(self.v(word))\n dd = dict(zip([abs(dot) for dot in dots], [i for i in range(len(dots))]))\n ns=[]\n for dot in sorted(dd, reverse=True):\n if dot>1-thresh/2:\n ns.append(self.words[int(dd[dot])])\n return ns[1:] #Since first word is the word itself\n\n def more_words_like_these(self, words, topn=50, max_freq=100000):\n v = sum(self.v(w) for w in words)\n dots = self.vecs[:max_freq].dot(v)\n thresh = sorted(dots)[-topn]\n words = [w for w, dot in zip(self.words, dots) if dot>=thresh]\n return sorted(words, key=lambda w: self.v(w).dot(v))[-topn:][::-1]\n\n def best_analogies_dist_thresh(self, v, thresh=1, topn=500, max_words=50000):\n \"\"\"Metric is cos(a-c, b-d) if |b-d|^2 < thresh, otherwise 0\n \"\"\"\n vecs, vocab = self.vecs[:max_words], self.words[:max_words]\n self.compute_neighbors_if_necessary(thresh, max_words)\n rows, cols, vecs = self._neighbors\n scores = vecs.dot(v/np.linalg.norm(v))\n pi = np.argsort(-abs(scores))\n\n ans = []\n usedL = set()\n usedR = set()\n for i in pi:\n 
if abs(scores[i])<0.001:\n break\n row = rows[i] if scores[i] > 0 else cols[i]\n col = cols[i] if scores[i] > 0 else rows[i]\n if row in usedL or col in usedR:\n continue\n usedL.add(row)\n usedR.add(col)\n ans.append((vocab[row], vocab[col], abs(scores[i])))\n if len(ans)==topn:\n break\n\n return ans\n\n\ndef viz(analogies):\n print(\"\\n\".join(str(i).rjust(4)+a[0].rjust(29) + \" | \" + a[1].ljust(29) + (str(a[2]))[:4] for i, a in enumerate(analogies)))\n\n\ndef text_plot_words(xs, ys, words, width = 90, height = 40, filename=None):\n PADDING = 10 # num chars on left and right in case words spill over\n res = [[' ' for i in range(width)] for j in range(height)]\n def rescale(nums):\n a = min(nums)\n b = max(nums)\n return [(x-a)/(b-a) for x in nums]\n print(\"x:\", (min(xs), max(xs)), \"y:\",(min(ys),max(ys)))\n xs = rescale(xs)\n ys = rescale(ys)\n for (x, y, word) in zip(xs, ys, words):\n i = int(x*(width - 1 - PADDING))\n j = int(y*(height-1))\n row = res[j]\n z = list(row[i2] != ' ' for i2 in range(max(i-1, 0), min(width, i + len(word) + 1)))\n if any(z):\n continue\n for k in range(len(word)):\n if i+k>=width:\n break\n row[i+k] = word[k]\n string = \"\\n\".join(\"\".join(r) for r in res)\n# return string\n if filename:\n with open(filename, \"w\", encoding=\"utf8\") as f:\n f.write(string)\n print(\"Wrote to\", filename)\n else:\n print(string)\n\n\ndef doPCA(pairs, embedding, num_components = 10):\n matrix = []\n for a, b in pairs:\n center = (embedding.v(a) + embedding.v(b))/2\n matrix.append(embedding.v(a) - center)\n matrix.append(embedding.v(b) - center)\n matrix = np.array(matrix)\n pca = PCA(n_components = num_components)\n pca.fit(matrix)\n # bar(range(num_components), pca.explained_variance_ratio_)\n return pca\n\n\ndef drop(u, v):\n return u - v * u.dot(v) / v.dot(v)\n"
] | [
[
"numpy.load",
"sklearn.decomposition.PCA",
"numpy.median",
"numpy.array",
"numpy.linalg.norm",
"numpy.mean"
]
] |
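
The drop() helper at the bottom of we.py is the vector-rejection step that hard debiasing builds on: remove from u its component along the bias direction v. A quick numeric check with toy vectors:

import numpy as np

def drop(u, v):
    # Subtract the projection of u onto v; the result is orthogonal to v.
    return u - v * u.dot(v) / v.dot(v)

u = np.array([3.0, 4.0])
v = np.array([1.0, 0.0])
print(drop(u, v))         # [0. 4.]
print(drop(u, v).dot(v))  # 0.0 -- no component left along v
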
seanlam97/PDK_Generator | [
"15c1f4f56575f8e21ea874443d06ef740ccb5aa5"
] | [
"PDK_Generator/inverse_design_y_branch/lumopt/geometries/parameterized_geometry.py"
] | [
"import numpy as np\nimport inspect\n\nfrom lumopt.geometries.geometry import Geometry\n\nclass ParameterizedGeometry(Geometry):\n \"\"\" \n Defines a parametrized geometry using any of the built-in geometric structures available in the FDTD CAD.\n Users must provide a Python function with the signature ('params', 'fdtd', 'only_update'). The function\n must take the optimization parameters and a handle to the FDTD CAD to build the geometry under optimization\n (material assignments included). The flag 'only_update' is used to avoid frequent recreations of the parameterized\n geometry: when the flag is true, it is assumed that the geometry was already added at least once to the CAD.\n\n Parameters\n ----------\n :param func: function with the signature ('params', 'fdtd', 'only_update', **kwargs).\n :param initial_params: flat array with the initial optimization parameter values.\n :param bounds: bounding ranges (min/max pairs) for each optimization parameter.\n :param dx: step size for computing the figure of merit gradient using permittivity perturbations.\n \"\"\"\n \n def __init__(self, func, initial_params, bounds, dx, deps_num_threads=1):\n self.deps_num_threads=deps_num_threads\n self.func = func\n self.current_params = np.array(initial_params).flatten()\n self.bounds = bounds\n self.dx = float(dx)\n\n if inspect.isfunction(self.func):\n bound_args = inspect.signature(self.func).bind('params', 'fdtd', 'only_update')\n if bound_args.args != ('params', 'fdtd', 'only_update'):\n raise UserWarning(\"user defined function does not take three positional arguments.\")\n else:\n raise UserWarning(\"argument 'func' must be a Python function.\")\n if self.dx <= 0.0:\n raise UserWarning(\"step size must be positive.\")\n\n self.params_hist = list(self.current_params)\n\n def update_geometry(self, params, sim):\n self.current_params = params\n self.params_hist.append(params)\n\n def get_current_params(self):\n return self.current_params\n\n def calculate_gradients(self, gradient_fields):\n raise UserWarning(\"unsupported gradient calculation method.\")\n\n def add_geo(self, sim, params, only_update):\n sim.fdtd.switchtolayout()\n if params is None:\n return self.func(self.current_params, sim.fdtd, only_update)\n else:\n return self.func(params, sim.fdtd, only_update)\n"
] | [
[
"numpy.array"
]
] |
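
ParameterizedGeometry only checks that the callback takes the three positional arguments ('params', 'fdtd', 'only_update'); a sketch of a conforming function, with hypothetical addrect/setnamed CAD calls standing in for whatever commands the real geometry needs:

def make_rect(params, fdtd, only_update):
    # params: flat array of optimization parameters (here, one width value).
    width = float(params[0])
    if not only_update:
        # First invocation: create the structure in the CAD.
        fdtd.addrect(name='rect')
    # Every invocation: push the current parameter value.
    fdtd.setnamed('rect', 'x span', width)

# geometry = ParameterizedGeometry(func=make_rect, initial_params=[1.0e-6],
#                                  bounds=[(0.2e-6, 2.0e-6)], dx=1.0e-9)
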
szhaofelicia/sgan | [
"ead42d4bb3b1278c4c9ffcae8fa9c2dc036a52ff"
] | [
"vis/visualize_court.py"
] | [
"import numpy as np\n# import plotly\nimport plotly.graph_objects as go\n\n\n\ndef draw_plotly_half_court(fig, fig_width=600, margins=10):\n # From: https://community.plot.ly/t/arc-shape-with-path/7205/5\n def ellipse_arc(x_center=0.0, y_center=0.0, a=10.5, b=10.5, start_angle=0.0, end_angle=2 * np.pi, N=200, closed=False):\n t = np.linspace(start_angle, end_angle, N)\n x = x_center + a * np.cos(t)\n y = y_center + b * np.sin(t)\n path = f'M {x[0]}, {y[0]}'\n for k in range(1, len(t)):\n path += f'L{x[k]}, {y[k]}'\n if closed:\n path += ' Z'\n return path\n\n fig_height = fig_width * (470 + 2 * margins) / (500 + 2 * margins)\n fig.update_layout(width=fig_width, height=fig_height)\n\n # Set axes ranges\n fig.update_xaxes(range=[-250 - margins, 250 + margins])\n fig.update_yaxes(range=[-52.5 - margins, 417.5 + margins])\n\n threept_break_y = 89.47765084\n three_line_col = \"#777777\"\n main_line_col = \"#777777\"\n\n fig.update_layout(\n # Line Horizontal\n margin=dict(l=20, r=20, t=20, b=20),\n paper_bgcolor=\"white\",\n plot_bgcolor=\"white\",\n yaxis=dict(\n scaleanchor=\"x\",\n scaleratio=1,\n showgrid=False,\n zeroline=False,\n showline=False,\n ticks='',\n showticklabels=False,\n fixedrange=True,\n ),\n xaxis=dict(\n showgrid=False,\n zeroline=False,\n showline=False,\n ticks='',\n showticklabels=False,\n fixedrange=True,\n ),\n\n shapes=[\n # half_layout=[\n dict(\n type=\"rect\", x0=-250, y0=-52.5, x1=250, y1=417.5,\n line=dict(color=main_line_col, width=1),\n # fillcolor='#333333',\n layer='below'\n ), ## sideline rect\n dict(\n type=\"rect\", x0=-80, y0=-52.5, x1=80, y1=137.5,\n line=dict(color=main_line_col, width=1),\n # fillcolor='#333333',\n layer='below'\n ),# lane line rect\n dict(\n type=\"rect\", x0=-60, y0=-52.5, x1=60, y1=137.5,\n line=dict(color=main_line_col, width=1),\n # fillcolor='#333333',\n layer='below'\n ), # foul line rect\n dict(\n type=\"circle\", x0=-60, y0=77.5, x1=60, y1=197.5, xref=\"x\", yref=\"y\",\n line=dict(color=main_line_col, width=1),\n # fillcolor='#dddddd',\n layer='below'\n ), # free-throw circle\n dict(\n type=\"line\", x0=-60, y0=137.5, x1=60, y1=137.5,\n line=dict(color=main_line_col, width=1),\n layer='below'\n ), # foul line\n\n dict(\n type=\"rect\", x0=-2, y0=-7.25, x1=2, y1=-12.5,\n line=dict(color=\"#ec7607\", width=1),\n fillcolor='#ec7607',\n ), # hoop rect\n dict(\n type=\"circle\", x0=-7.5, y0=-7.5, x1=7.5, y1=7.5, xref=\"x\", yref=\"y\",\n line=dict(color=\"#ec7607\", width=1),\n ), # hoop circle\n dict(\n type=\"line\", x0=-30, y0=-12.5, x1=30, y1=-12.5,\n line=dict(color=\"#ec7607\", width=1),\n ), # backboard\n\n dict(type=\"path\",\n path=ellipse_arc(a=40, b=40, start_angle=0, end_angle=np.pi),\n line=dict(color=main_line_col, width=1), layer='below'), # no-change semi-circle\n dict(type=\"path\",\n path=ellipse_arc(a=237.5, b=237.5, start_angle=0.386283101, end_angle=np.pi - 0.386283101),\n line=dict(color=main_line_col, width=1), layer='below'), # three-point line:arc\n dict(\n type=\"line\", x0=-220, y0=-52.5, x1=-220, y1=threept_break_y,\n line=dict(color=three_line_col, width=1), layer='below'\n ), # three-point line:left edge\n # dict(\n # type=\"line\", x0=-220, y0=-52.5, x1=-220, y1=threept_break_y,\n # line=dict(color=three_line_col, width=1), layer='below'\n # ),\n dict(\n type=\"line\", x0=220, y0=-52.5, x1=220, y1=threept_break_y,\n line=dict(color=three_line_col, width=1), layer='below'\n ), # three-point line:right edge\n\n dict(\n type=\"line\", x0=-250, y0=227.5, x1=-220, y1=227.5,\n 
line=dict(color=main_line_col, width=1), layer='below'\n ), # midcourt area marker:left\n dict(\n type=\"line\", x0=250, y0=227.5, x1=220, y1=227.5,\n line=dict(color=main_line_col, width=1), layer='below'\n ), # midcourt area marker:right\n dict(\n type=\"line\", x0=-90, y0=17.5, x1=-80, y1=17.5,\n line=dict(color=main_line_col, width=1), layer='below'\n ), # lane line marker\n dict(\n type=\"line\", x0=-90, y0=27.5, x1=-80, y1=27.5,\n line=dict(color=main_line_col, width=1), layer='below'\n ), # lane line marker\n dict(\n type=\"line\", x0=-90, y0=57.5, x1=-80, y1=57.5,\n line=dict(color=main_line_col, width=1), layer='below'\n ), # lane line marker\n dict(\n type=\"line\", x0=-90, y0=87.5, x1=-80, y1=87.5,\n line=dict(color=main_line_col, width=1), layer='below'\n ), # lane line marker\n dict(\n type=\"line\", x0=90, y0=17.5, x1=80, y1=17.5,\n line=dict(color=main_line_col, width=1), layer='below'\n ), # lane line marker\n dict(\n type=\"line\", x0=90, y0=27.5, x1=80, y1=27.5,\n line=dict(color=main_line_col, width=1), layer='below'\n ), # lane line marker\n dict(\n type=\"line\", x0=90, y0=57.5, x1=80, y1=57.5,\n line=dict(color=main_line_col, width=1), layer='below'\n ), # lane line marker\n dict(\n type=\"line\", x0=90, y0=87.5, x1=80, y1=87.5,\n line=dict(color=main_line_col, width=1), layer='below'\n ), # lane line marker\n\n dict(type=\"path\",\n path=ellipse_arc(y_center=417.5, a=60, b=60, start_angle=-0, end_angle=-np.pi),\n line=dict(color=main_line_col, width=1), layer='below'), # center circle: half\n\n ]\n )\n return True\n\ndef draw_plotly_whole_court(fig, fig_width=600, margins=10):\n # From: https://community.plot.ly/t/arc-shape-with-path/7205/5\n def ellipse_arc(x_center=0.0, y_center=0.0, a=10.5, b=10.5, start_angle=0.0, end_angle=2 * np.pi, N=200, closed=False):\n t = np.linspace(start_angle, end_angle, N)\n x = x_center + a * np.cos(t)\n y = y_center + b * np.sin(t)\n path = f'M {x[0]}, {y[0]}'\n for k in range(1, len(t)):\n path += f'L{x[k]}, {y[k]}'\n if closed:\n path += ' Z'\n return path\n\n fig_height = fig_width * (470*2 + 2 * margins) / (500 + 2 * margins)\n fig.update_layout(width=fig_width, height=fig_height)\n\n # Set axes ranges\n fig.update_xaxes(range=[-250 - margins, 250 + margins])\n fig.update_yaxes(range=[-52.5 - margins, 417.5+470 + margins])\n\n # fig.update_xaxes(range=[ margins, 500 + margins])\n # fig.update_yaxes(range=[margins, 470*2 + margins])\n\n threept_break_y = 89.47765084\n three_line_col = \"#777777\"\n main_line_col = \"#777777\"\n\n fig.update_layout(\n # Line Horizontal\n margin=dict(l=20, r=20, t=20, b=20),\n paper_bgcolor=\"white\",\n plot_bgcolor=\"white\",\n yaxis=dict(\n scaleanchor=\"x\",\n scaleratio=1,\n showgrid=False,\n zeroline=False,\n showline=False,\n ticks='',\n showticklabels=False,\n fixedrange=True,\n ),\n xaxis=dict(\n showgrid=False,\n zeroline=False,\n showline=False,\n ticks='',\n showticklabels=False,\n fixedrange=True,\n ),\n\n # width:500, height: 470\n shapes=[\n dict(\n type=\"rect\", x0=-250, y0=-52.5, x1=250, y1=417.5+470,\n line=dict(color=main_line_col, width=1),\n # fillcolor='#333333',\n layer='below'\n ), ## sideline rect\n # dict(\n # type=\"rect\", x0=-250, y0=-52.5, x1=250, y1=417.5,\n # line=dict(color=main_line_col, width=1),\n # # fillcolor='#333333',\n # layer='below'\n # ), ## sideline rect\n dict(\n type=\"rect\", x0=-80, y0=-52.5, x1=80, y1=137.5,\n line=dict(color=main_line_col, width=1),\n # fillcolor='#333333',\n layer='below'\n ),# lane line rect\n dict(\n type=\"rect\", x0=-60, 
y0=-52.5, x1=60, y1=137.5,\n line=dict(color=main_line_col, width=1),\n # fillcolor='#333333',\n layer='below'\n ), # foul line rect\n dict(\n type=\"circle\", x0=-60, y0=77.5, x1=60, y1=197.5, xref=\"x\", yref=\"y\",\n line=dict(color=main_line_col, width=1),\n # fillcolor='#dddddd',\n layer='below'\n ), # free-throw circle\n dict(\n type=\"line\", x0=-60, y0=137.5, x1=60, y1=137.5,\n line=dict(color=main_line_col, width=1),\n layer='below'\n ), # foul line\n\n dict(\n type=\"rect\", x0=-2, y0=-7.25, x1=2, y1=-12.5,\n line=dict(color=\"#ec7607\", width=1),\n fillcolor='#ec7607',\n ), # hoop rect\n dict(\n type=\"circle\", x0=-7.5, y0=-7.5, x1=7.5, y1=7.5, xref=\"x\", yref=\"y\",\n line=dict(color=\"#ec7607\", width=1),\n ), # hoop circle\n dict(\n type=\"line\", x0=-30, y0=-12.5, x1=30, y1=-12.5,\n line=dict(color=\"#ec7607\", width=1),\n ), # backboard\n\n dict(type=\"path\",\n path=ellipse_arc(a=40, b=40, start_angle=0, end_angle=np.pi),\n line=dict(color=main_line_col, width=1), layer='below'), # no-change semi-circle\n dict(type=\"path\",\n path=ellipse_arc(a=237.5, b=237.5, start_angle=0.386283101, end_angle=np.pi - 0.386283101),\n line=dict(color=main_line_col, width=1), layer='below'), # three-point line:arc\n dict(\n type=\"line\", x0=-220, y0=-52.5, x1=-220, y1=threept_break_y,\n line=dict(color=three_line_col, width=1), layer='below'\n ), # three-point line:left edge\n # dict(\n # type=\"line\", x0=-220, y0=-52.5, x1=-220, y1=threept_break_y,\n # line=dict(color=three_line_col, width=1), layer='below'\n # ),\n dict(\n type=\"line\", x0=220, y0=-52.5, x1=220, y1=threept_break_y,\n line=dict(color=three_line_col, width=1), layer='below'\n ), # three-point line:right edge\n\n dict(\n type=\"line\", x0=-250, y0=227.5, x1=-220, y1=227.5,\n line=dict(color=main_line_col, width=1), layer='below'\n ), # midcourt area marker:left\n dict(\n type=\"line\", x0=250, y0=227.5, x1=220, y1=227.5,\n line=dict(color=main_line_col, width=1), layer='below'\n ), # midcourt area marker:right\n dict(\n type=\"line\", x0=-90, y0=17.5, x1=-80, y1=17.5,\n line=dict(color=main_line_col, width=1), layer='below'\n ), # lane line marker\n dict(\n type=\"line\", x0=-90, y0=27.5, x1=-80, y1=27.5,\n line=dict(color=main_line_col, width=1), layer='below'\n ), # lane line marker\n dict(\n type=\"line\", x0=-90, y0=57.5, x1=-80, y1=57.5,\n line=dict(color=main_line_col, width=1), layer='below'\n ), # lane line marker\n dict(\n type=\"line\", x0=-90, y0=87.5, x1=-80, y1=87.5,\n line=dict(color=main_line_col, width=1), layer='below'\n ), # lane line marker\n dict(\n type=\"line\", x0=90, y0=17.5, x1=80, y1=17.5,\n line=dict(color=main_line_col, width=1), layer='below'\n ), # lane line marker\n dict(\n type=\"line\", x0=90, y0=27.5, x1=80, y1=27.5,\n line=dict(color=main_line_col, width=1), layer='below'\n ), # lane line marker\n dict(\n type=\"line\", x0=90, y0=57.5, x1=80, y1=57.5,\n line=dict(color=main_line_col, width=1), layer='below'\n ), # lane line marker\n dict(\n type=\"line\", x0=90, y0=87.5, x1=80, y1=87.5,\n line=dict(color=main_line_col, width=1), layer='below'\n ), # lane line marker\n\n dict(type=\"path\",\n path=ellipse_arc(y_center=417.5, a=60, b=60, start_angle=-0, end_angle=-np.pi),\n line=dict(color=main_line_col, width=1), layer='below'), # center circle: half\n\n\n ## upper\n # dict(\n # type=\"rect\", x0=-250, y0=-52.5, x1=250, y1=417.5,\n # line=dict(color=main_line_col, width=1),\n # # fillcolor='#333333',\n # layer='below'\n # ), ## sideline rect\n # dict(\n # type=\"rect\", x0=-80, 
y0=-52.5, x1=80, y1=137.5,\n # line=dict(color=main_line_col, width=1),\n # # fillcolor='#333333',\n # layer='below'\n # ), # lane line rect\n # dict(\n # type=\"rect\", x0=-60, y0=-52.5, x1=60, y1=137.5,\n # line=dict(color=main_line_col, width=1),\n # # fillcolor='#333333',\n # layer='below'\n # ), # foul line rect\n # dict(\n # type=\"circle\", x0=-60, y0=77.5, x1=60, y1=197.5, xref=\"x\", yref=\"y\",\n # line=dict(color=main_line_col, width=1),\n # # fillcolor='#dddddd',\n # layer='below'\n # ), # free-throw circle\n # dict(\n # type=\"line\", x0=-60, y0=137.5, x1=60, y1=137.5,\n # line=dict(color=main_line_col, width=1),\n # layer='below'\n # ), # foul line\n #\n # dict(\n # type=\"rect\", x0=-2, y0=-7.25, x1=2, y1=-12.5,\n # line=dict(color=\"#ec7607\", width=1),\n # fillcolor='#ec7607',\n # ), # hoop rect\n # dict(\n # type=\"circle\", x0=-7.5, y0=-7.5, x1=7.5, y1=7.5, xref=\"x\", yref=\"y\",\n # line=dict(color=\"#ec7607\", width=1),\n # ), # hoop circle\n # dict(\n # type=\"line\", x0=-30, y0=-12.5, x1=30, y1=-12.5,\n # line=dict(color=\"#ec7607\", width=1),\n # ), # backboard\n #\n # dict(type=\"path\",\n # path=ellipse_arc(a=40, b=40, start_angle=0, end_angle=np.pi),\n # line=dict(color=main_line_col, width=1), layer='below'), # no-change semi-circle\n # dict(type=\"path\",\n # path=ellipse_arc(a=237.5, b=237.5, start_angle=0.386283101, end_angle=np.pi - 0.386283101),\n # line=dict(color=main_line_col, width=1), layer='below'), # three-point line:arc\n # dict(\n # type=\"line\", x0=-220, y0=-52.5, x1=-220, y1=threept_break_y,\n # line=dict(color=three_line_col, width=1), layer='below'\n # ), # three-point line:left edge\n # # dict(\n # # type=\"line\", x0=-220, y0=-52.5, x1=-220, y1=threept_break_y,\n # # line=dict(color=three_line_col, width=1), layer='below'\n # # ),\n # dict(\n # type=\"line\", x0=220, y0=-52.5, x1=220, y1=threept_break_y,\n # line=dict(color=three_line_col, width=1), layer='below'\n # ), # three-point line:right edge\n #\n # dict(\n # type=\"line\", x0=-250, y0=227.5, x1=-220, y1=227.5,\n # line=dict(color=main_line_col, width=1), layer='below'\n # ), # midcourt area marker:left\n # dict(\n # type=\"line\", x0=250, y0=227.5, x1=220, y1=227.5,\n # line=dict(color=main_line_col, width=1), layer='below'\n # ), # midcourt area marker:right\n # dict(\n # type=\"line\", x0=-90, y0=17.5, x1=-80, y1=17.5,\n # line=dict(color=main_line_col, width=1), layer='below'\n # ), # lane line marker\n # dict(\n # type=\"line\", x0=-90, y0=27.5, x1=-80, y1=27.5,\n # line=dict(color=main_line_col, width=1), layer='below'\n # ), # lane line marker\n # dict(\n # type=\"line\", x0=-90, y0=57.5, x1=-80, y1=57.5,\n # line=dict(color=main_line_col, width=1), layer='below'\n # ), # lane line marker\n # dict(\n # type=\"line\", x0=-90, y0=87.5, x1=-80, y1=87.5,\n # line=dict(color=main_line_col, width=1), layer='below'\n # ), # lane line marker\n # dict(\n # type=\"line\", x0=90, y0=17.5, x1=80, y1=17.5,\n # line=dict(color=main_line_col, width=1), layer='below'\n # ), # lane line marker\n # dict(\n # type=\"line\", x0=90, y0=27.5, x1=80, y1=27.5,\n # line=dict(color=main_line_col, width=1), layer='below'\n # ), # lane line marker\n # dict(\n # type=\"line\", x0=90, y0=57.5, x1=80, y1=57.5,\n # line=dict(color=main_line_col, width=1), layer='below'\n # ), # lane line marker\n # dict(\n # type=\"line\", x0=90, y0=87.5, x1=80, y1=87.5,\n # line=dict(color=main_line_col, width=1), layer='below'\n # ), # lane line marker\n #\n # dict(type=\"path\",\n # path=ellipse_arc(y_center=417.5, a=60, 
b=60, start_angle=-0, end_angle=-np.pi),\n # line=dict(color=main_line_col, width=1), layer='below'), # center circle: half\n\n ]\n )\n return True\n\n\n\n\n\n\nmax_freq = 0.002\n# freq_by_hex = np.array([min(max_freq, i) for i in league_hexbin_stats['freq_by_hex']])\ncolorscale = 'YlOrRd'\nmarker_cmin = 0.1\nmarker_cmax = 0.6\nticktexts = [str(marker_cmin*100)+'%-', \"\", str(marker_cmax*100)+'%+']\n\nfig = go.Figure()\n# draw_plotly_half_court(fig)\ndraw_plotly_whole_court(fig)\n\n# fig.add_trace(go.Scatter(\n# x=xlocs, y=ylocs, mode='markers', name='markers',\n# marker=dict(\n# size=freq_by_hex, sizemode='area', sizeref=2. * max(freq_by_hex) / (11. ** 2), sizemin=2.5,\n# color=accs_by_hex, colorscale=colorscale,\n# colorbar=dict(\n# thickness=15,\n# x=0.84,\n# y=0.87,\n# yanchor='middle',\n# len=0.2,\n# title=dict(\n# text=\"<B>Accuracy</B>\",\n# font=dict(\n# size=11,\n# color='#4d4d4d'\n# ),\n# ),\n# tickvals=[marker_cmin, (marker_cmin + marker_cmax) / 2, marker_cmax],\n# ticktext=ticktexts,\n# tickfont=dict(\n# size=11,\n# color='#4d4d4d'\n# )\n# ),\n# cmin=marker_cmin, cmax=marker_cmax,\n# line=dict(width=1, color='#333333'), symbol='hexagon',\n# ),\n# ))\n# fig.show(config=dict(displayModeBar=False))\n\n# fig.show()\n\nvis_dir='/media/felicia/Data/sgan_results/vis/'\nfig.write_image(vis_dir+\"court.svg\")\n"
] | [
[
"numpy.sin",
"numpy.linspace",
"numpy.cos"
]
] |
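
Both court functions rely on the same inner helper: sample points along an ellipse and join them into an SVG path string that plotly accepts as a layout shape. Stripped down:

import numpy as np

def ellipse_arc(x_center=0.0, y_center=0.0, a=1.0, b=1.0,
                start_angle=0.0, end_angle=2 * np.pi, N=5):
    t = np.linspace(start_angle, end_angle, N)
    x = x_center + a * np.cos(t)
    y = y_center + b * np.sin(t)
    path = f'M {x[0]}, {y[0]}'        # move to the first sample
    for k in range(1, len(t)):
        path += f'L{x[k]}, {y[k]}'    # straight segment to the next one
    return path

print(ellipse_arc(a=2, b=1, end_angle=np.pi, N=3))
# 'M 2.0, 0.0L1.2246467991473532e-16, 1.0L-2.0, 1.2246467991473532e-16'

With N=200 the polyline is indistinguishable from a true arc at figure resolution, which is why the file uses it for the three-point line and both circles.
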
ChristophReich1996/Mode_Collapse | [
"937ee8bf96510fbf4070fc7e14b78276ab036b8c"
] | [
"utils.py"
] | [
"from typing import Optional\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn.utils import spectral_norm\nimport numpy as np\n\n\ndef get_generator(latent_size: int, use_spectral_norm: bool) -> nn.Module:\n \"\"\"\n Returns the generator network.\n :param latent_size: (int) Size of the latent input vector\n :param use_spectral_norm: (bool) If true spectral norm is utilized\n :return: (nn.Module) Simple feed forward neural network with three layers,\n \"\"\"\n if use_spectral_norm:\n return nn.Sequential(spectral_norm(nn.Linear(latent_size, 256, bias=True)),\n nn.LeakyReLU(),\n spectral_norm(nn.Linear(256, 256, bias=True)),\n nn.LeakyReLU(),\n spectral_norm(nn.Linear(256, 256, bias=True)),\n nn.LeakyReLU(),\n spectral_norm(nn.Linear(256, 256, bias=True)),\n nn.Tanh(),\n spectral_norm(nn.Linear(256, 2, bias=True)))\n return nn.Sequential(nn.Linear(latent_size, 256, bias=True),\n nn.LeakyReLU(),\n nn.Linear(256, 256, bias=True),\n nn.LeakyReLU(),\n nn.Linear(256, 256, bias=True),\n nn.LeakyReLU(),\n nn.Linear(256, 256, bias=True),\n nn.Tanh(),\n nn.Linear(256, 2, bias=True))\n\n\ndef get_discriminator(use_spectral_norm: bool) -> nn.Module:\n \"\"\"\n Returns the discriminator network.\n :param use_spectral_norm: (bool) If true spectral norm is utilized\n :return: (nn.Module) Simple feed forward neural network with three layers and probability output.\n \"\"\"\n if use_spectral_norm:\n return nn.Sequential(spectral_norm(nn.Linear(2, 256, bias=True)),\n nn.LeakyReLU(),\n spectral_norm(nn.Linear(256, 256, bias=True)),\n nn.LeakyReLU(),\n spectral_norm(nn.Linear(256, 256, bias=True)),\n nn.LeakyReLU(),\n spectral_norm(nn.Linear(256, 256, bias=True)),\n nn.LeakyReLU(),\n spectral_norm(nn.Linear(256, 1, bias=True)))\n return nn.Sequential(nn.Linear(2, 256, bias=True),\n nn.LeakyReLU(),\n nn.Linear(256, 256, bias=True),\n nn.LeakyReLU(),\n nn.Linear(256, 256, bias=True),\n nn.LeakyReLU(),\n nn.Linear(256, 256, bias=True),\n nn.LeakyReLU(),\n nn.Linear(256, 1, bias=True))\n\n\ndef get_data(samples: Optional[int] = 400, variance: Optional[float] = 0.05) -> torch.Tensor:\n \"\"\"\n Function generates a 2d ring of 8 Gaussians\n :param samples: (Optional[int]) Number of samples including in the resulting dataset. Must be a multiple of 8.\n :param variance: (Optional[float]) Variance of the gaussian\n :return: (torch.Tensor) generated data\n \"\"\"\n assert samples % 8 == 0 and samples > 0, \"Number of samples must be a multiple of 8 and bigger than 0\"\n # Init angels of the means\n angels = torch.cumsum((2 * np.pi / 8) * torch.ones((8)), dim=0)\n # Convert angles to 2D coordinates\n means = torch.stack([torch.cos(angels), torch.sin(angels)], dim=0)\n # Generate data\n data = torch.empty((2, samples))\n counter = 0\n for gaussian in range(means.shape[1]):\n for sample in range(int(samples / 8)):\n data[:, counter] = torch.normal(means[:, gaussian], variance)\n counter += 1\n # Reshape data\n data = data.T\n # Shuffle data\n data = data[torch.randperm(data.shape[0])]\n # Convert numpy array to tensor\n return data.float()\n"
] | [
[
"torch.empty",
"torch.ones",
"torch.nn.Linear",
"torch.cos",
"torch.nn.Tanh",
"torch.normal",
"torch.sin",
"torch.randperm",
"torch.nn.LeakyReLU"
]
] |
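
get_data() above is the classic 8-Gaussian-ring benchmark for mode collapse: eight means spaced evenly on the unit circle, an equal sample count per mode. A quick check of its contract (note the docstring says "variance", but torch.normal's second argument is a standard deviation):

import torch

data = get_data(samples=80, variance=0.05)  # 10 samples per mode
print(data.shape)               # torch.Size([80, 2])
print(data.norm(dim=1).mean())  # close to 1.0: the points hug the ring
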
galad-loth/LearnDescriptor | [
"30552a699597415a13793eb85d21b5e33a296a99"
] | [
"symbols/symbol_ssdh.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Mar 07 21:00:11 2017\r\n\r\n@author: galad-loth\r\n\"\"\"\r\nimport numpy as npy\r\nimport mxnet as mx\r\n\r\nclass HashLossLayer(mx.operator.NumpyOp):\r\n def __init__(self, w_bin,w_balance):\r\n super(HashLossLayer, self).__init__(False)\r\n self.w_bin=w_bin\r\n self.w_balance=w_balance\r\n \r\n def list_arguments(self):\r\n return ['data']\r\n \r\n def list_outputs(self):\r\n return ['output']\r\n \r\n def infer_shape(self, in_shape):\r\n data_shape=in_shape[0]\r\n return [data_shape],[data_shape]\r\n \r\n def forward(self, in_data, out_data):\r\n x=in_data[0]\r\n# l=in_data[1]\r\n y=out_data[0]\r\n xs=x-0.5\r\n y[:]=1\r\n y[xs<0]=0\r\n# y[:]=npy.ones((x.shape[0],1))-l.reshape((x.shape[0],1))*x \r\n \r\n def backward(self, out_grad, in_data, out_data, in_grad):\r\n x=in_data[0]\r\n dx=in_grad[0]\r\n \r\n grad1=-2*(x-0.5)/x.shape[1]\r\n mu=npy.mean(x,axis=1)\r\n grad2=2*(mu-0.5)/x.shape[1]\r\n \r\n grad=self.w_bin*grad1+self.w_balance*grad2\r\n dx[:]=grad\r\n\r\n\r\ndef get_finetune_symbol(net_pre,arg_params, \r\n num_latent, num_class,layer_name='flatten'):\r\n \"\"\"\r\n net_pre: the pre-trained network symbol\r\n arg_params: the argument parameters of the pre-trained model\r\n num_latent: the number of latent layer units for the fine-tune datasets\r\n layer_name: the layer name before the last fully-connected layer\r\n \"\"\"\r\n all_layers = net_pre.get_internals()\r\n load_net = all_layers[layer_name+'_output']\r\n latent = mx.symbol.FullyConnected(data=load_net, num_hidden=num_latent, name='latent_ssdh')\r\n latent = mx.sym.Activation(data=latent, act_type=\"sigmoid\", name=\"sigmoid_ssdh\")\r\n class_net = mx.symbol.FullyConnected(data=latent, num_hidden=num_class, name='fc_ssdh')\r\n class_net = mx.symbol.SoftmaxOutput(data=class_net, name='softmax')\r\n hash_loss=HashLossLayer(0.1,0.1)\r\n hash_net=hash_loss(data=latent, name=\"hash\")\r\n net = mx.sym.Group([class_net,hash_net])\r\n new_args = dict({k:arg_params[k] for k in arg_params if 'fc' not in k})\r\n return (net, new_args)\r\n "
] | [
[
"numpy.mean"
]
] |
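
HashLossLayer.backward() hand-codes the gradients of SSDH's two regularizers on the sigmoid codes: drive each unit away from 0.5 (binary-like outputs) and drive the per-sample mean toward 0.5 (balanced bits). One caveat: npy.mean(x, axis=1) returns shape (batch,), so grad1 + grad2 only broadcasts if the mean keeps its axis, e.g. with keepdims=True as in this sketch of the same math:

import numpy as np

x = np.array([[0.9, 0.6, 0.1, 0.4]])             # one sample, 4 latent units
binarization = -np.mean((x - 0.5) ** 2, axis=1)  # decreases as x approaches 0 or 1
balance = (np.mean(x, axis=1) - 0.5) ** 2        # zero when bits are balanced

grad1 = -2 * (x - 0.5) / x.shape[1]              # d(binarization)/dx
grad2 = 2 * (np.mean(x, axis=1, keepdims=True) - 0.5) / x.shape[1]  # d(balance)/dx
grad = 0.1 * grad1 + 0.1 * grad2                 # w_bin = w_balance = 0.1
print(grad.shape)                                # (1, 4)
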
stevenvandenberghe/pandas | [
"8cbee356da1161c56c64f6f89cb5548bcadc3e44"
] | [
"pandas/tests/reshape/test_tile.py"
] | [
"import os\nimport pytest\n\nimport numpy as np\nfrom pandas.compat import zip\n\nfrom pandas import (Series, isna, to_datetime, DatetimeIndex,\n Timestamp, Interval, IntervalIndex, Categorical,\n cut, qcut, date_range)\nimport pandas.util.testing as tm\nfrom pandas.api.types import CategoricalDtype as CDT\n\nfrom pandas.core.algorithms import quantile\nimport pandas.core.reshape.tile as tmod\n\n\nclass TestCut(object):\n\n def test_simple(self):\n data = np.ones(5, dtype='int64')\n result = cut(data, 4, labels=False)\n expected = np.array([1, 1, 1, 1, 1])\n tm.assert_numpy_array_equal(result, expected,\n check_dtype=False)\n\n def test_bins(self):\n data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1])\n result, bins = cut(data, 3, retbins=True)\n\n intervals = IntervalIndex.from_breaks(bins.round(3))\n intervals = intervals.take([0, 0, 0, 1, 2, 0])\n expected = Categorical(intervals, ordered=True)\n tm.assert_categorical_equal(result, expected)\n tm.assert_almost_equal(bins, np.array([0.1905, 3.36666667,\n 6.53333333, 9.7]))\n\n def test_right(self):\n data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575])\n result, bins = cut(data, 4, right=True, retbins=True)\n intervals = IntervalIndex.from_breaks(bins.round(3))\n expected = Categorical(intervals, ordered=True)\n expected = expected.take([0, 0, 0, 2, 3, 0, 0])\n tm.assert_categorical_equal(result, expected)\n tm.assert_almost_equal(bins, np.array([0.1905, 2.575, 4.95,\n 7.325, 9.7]))\n\n def test_noright(self):\n data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575])\n result, bins = cut(data, 4, right=False, retbins=True)\n intervals = IntervalIndex.from_breaks(bins.round(3), closed='left')\n intervals = intervals.take([0, 0, 0, 2, 3, 0, 1])\n expected = Categorical(intervals, ordered=True)\n tm.assert_categorical_equal(result, expected)\n tm.assert_almost_equal(bins, np.array([0.2, 2.575, 4.95,\n 7.325, 9.7095]))\n\n def test_arraylike(self):\n data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]\n result, bins = cut(data, 3, retbins=True)\n intervals = IntervalIndex.from_breaks(bins.round(3))\n intervals = intervals.take([0, 0, 0, 1, 2, 0])\n expected = Categorical(intervals, ordered=True)\n tm.assert_categorical_equal(result, expected)\n tm.assert_almost_equal(bins, np.array([0.1905, 3.36666667,\n 6.53333333, 9.7]))\n\n def test_bins_from_intervalindex(self):\n c = cut(range(5), 3)\n expected = c\n result = cut(range(5), bins=expected.categories)\n tm.assert_categorical_equal(result, expected)\n\n expected = Categorical.from_codes(np.append(c.codes, -1),\n categories=c.categories,\n ordered=True)\n result = cut(range(6), bins=expected.categories)\n tm.assert_categorical_equal(result, expected)\n\n # doc example\n # make sure we preserve the bins\n ages = np.array([10, 15, 13, 12, 23, 25, 28, 59, 60])\n c = cut(ages, bins=[0, 18, 35, 70])\n expected = IntervalIndex.from_tuples([(0, 18), (18, 35), (35, 70)])\n tm.assert_index_equal(c.categories, expected)\n\n result = cut([25, 20, 50], bins=c.categories)\n tm.assert_index_equal(result.categories, expected)\n tm.assert_numpy_array_equal(result.codes,\n np.array([1, 1, 2], dtype='int8'))\n\n def test_bins_not_monotonic(self):\n data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]\n pytest.raises(ValueError, cut, data, [0.1, 1.5, 1, 10])\n\n def test_wrong_num_labels(self):\n data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]\n pytest.raises(ValueError, cut, data, [0, 1, 10],\n labels=['foo', 'bar', 'baz'])\n\n def test_cut_corner(self):\n # h3h\n pytest.raises(ValueError, cut, [], 2)\n\n pytest.raises(ValueError, cut, [1, 2, 3], 0.5)\n\n 
def test_cut_out_of_range_more(self):\n # #1511\n s = Series([0, -1, 0, 1, -3], name='x')\n ind = cut(s, [0, 1], labels=False)\n exp = Series([np.nan, np.nan, np.nan, 0, np.nan], name='x')\n tm.assert_series_equal(ind, exp)\n\n def test_labels(self):\n arr = np.tile(np.arange(0, 1.01, 0.1), 4)\n\n result, bins = cut(arr, 4, retbins=True)\n ex_levels = IntervalIndex.from_breaks([-1e-3, 0.25, 0.5, 0.75, 1])\n tm.assert_index_equal(result.categories, ex_levels)\n\n result, bins = cut(arr, 4, retbins=True, right=False)\n ex_levels = IntervalIndex.from_breaks([0, 0.25, 0.5, 0.75, 1 + 1e-3],\n closed='left')\n tm.assert_index_equal(result.categories, ex_levels)\n\n def test_cut_pass_series_name_to_factor(self):\n s = Series(np.random.randn(100), name='foo')\n\n factor = cut(s, 4)\n assert factor.name == 'foo'\n\n def test_label_precision(self):\n arr = np.arange(0, 0.73, 0.01)\n\n result = cut(arr, 4, precision=2)\n ex_levels = IntervalIndex.from_breaks([-0.00072, 0.18, 0.36,\n 0.54, 0.72])\n tm.assert_index_equal(result.categories, ex_levels)\n\n def test_na_handling(self):\n arr = np.arange(0, 0.75, 0.01)\n arr[::3] = np.nan\n\n result = cut(arr, 4)\n\n result_arr = np.asarray(result)\n\n ex_arr = np.where(isna(arr), np.nan, result_arr)\n\n tm.assert_almost_equal(result_arr, ex_arr)\n\n result = cut(arr, 4, labels=False)\n ex_result = np.where(isna(arr), np.nan, result)\n tm.assert_almost_equal(result, ex_result)\n\n def test_inf_handling(self):\n data = np.arange(6)\n data_ser = Series(data, dtype='int64')\n\n bins = [-np.inf, 2, 4, np.inf]\n result = cut(data, bins)\n result_ser = cut(data_ser, bins)\n\n ex_uniques = IntervalIndex.from_breaks(bins)\n tm.assert_index_equal(result.categories, ex_uniques)\n assert result[5] == Interval(4, np.inf)\n assert result[0] == Interval(-np.inf, 2)\n assert result_ser[5] == Interval(4, np.inf)\n assert result_ser[0] == Interval(-np.inf, 2)\n\n def test_qcut(self):\n arr = np.random.randn(1000)\n\n # We store the bins as Index that have been rounded\n # to comparisons are a bit tricky.\n labels, bins = qcut(arr, 4, retbins=True)\n ex_bins = quantile(arr, [0, .25, .5, .75, 1.])\n result = labels.categories.left.values\n assert np.allclose(result, ex_bins[:-1], atol=1e-2)\n result = labels.categories.right.values\n assert np.allclose(result, ex_bins[1:], atol=1e-2)\n\n ex_levels = cut(arr, ex_bins, include_lowest=True)\n tm.assert_categorical_equal(labels, ex_levels)\n\n def test_qcut_bounds(self):\n arr = np.random.randn(1000)\n\n factor = qcut(arr, 10, labels=False)\n assert len(np.unique(factor)) == 10\n\n def test_qcut_specify_quantiles(self):\n arr = np.random.randn(100)\n\n factor = qcut(arr, [0, .25, .5, .75, 1.])\n expected = qcut(arr, 4)\n tm.assert_categorical_equal(factor, expected)\n\n def test_qcut_all_bins_same(self):\n tm.assert_raises_regex(ValueError, \"edges.*unique\", qcut,\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 3)\n\n def test_cut_out_of_bounds(self):\n arr = np.random.randn(100)\n\n result = cut(arr, [-1, 0, 1])\n\n mask = isna(result)\n ex_mask = (arr < -1) | (arr > 1)\n tm.assert_numpy_array_equal(mask, ex_mask)\n\n def test_cut_pass_labels(self):\n arr = [50, 5, 10, 15, 20, 30, 70]\n bins = [0, 25, 50, 100]\n labels = ['Small', 'Medium', 'Large']\n\n result = cut(arr, bins, labels=labels)\n exp = Categorical(['Medium'] + 4 * ['Small'] + ['Medium', 'Large'],\n categories=labels,\n ordered=True)\n tm.assert_categorical_equal(result, exp)\n\n result = cut(arr, bins, labels=Categorical.from_codes([0, 1, 2],\n labels))\n exp = 
Categorical.from_codes([1] + 4 * [0] + [1, 2], labels)\n tm.assert_categorical_equal(result, exp)\n\n # issue 16459\n labels = ['Good', 'Medium', 'Bad']\n result = cut(arr, 3, labels=labels)\n exp = cut(arr, 3, labels=Categorical(labels, categories=labels,\n ordered=True))\n tm.assert_categorical_equal(result, exp)\n\n def test_qcut_include_lowest(self):\n values = np.arange(10)\n\n ii = qcut(values, 4)\n\n ex_levels = IntervalIndex(\n [Interval(-0.001, 2.25),\n Interval(2.25, 4.5),\n Interval(4.5, 6.75),\n Interval(6.75, 9)])\n tm.assert_index_equal(ii.categories, ex_levels)\n\n def test_qcut_nas(self):\n arr = np.random.randn(100)\n arr[:20] = np.nan\n\n result = qcut(arr, 4)\n assert isna(result[:20]).all()\n\n def test_qcut_index(self):\n result = qcut([0, 2], 2)\n intervals = [Interval(-0.001, 1), Interval(1, 2)]\n expected = Categorical(intervals, ordered=True)\n tm.assert_categorical_equal(result, expected)\n\n def test_round_frac(self):\n # it works\n result = cut(np.arange(11.), 2)\n\n result = cut(np.arange(11.) / 1e10, 2)\n\n # #1979, negative numbers\n\n result = tmod._round_frac(-117.9998, precision=3)\n assert result == -118\n result = tmod._round_frac(117.9998, precision=3)\n assert result == 118\n\n result = tmod._round_frac(117.9998, precision=2)\n assert result == 118\n result = tmod._round_frac(0.000123456, precision=2)\n assert result == 0.00012\n\n def test_qcut_binning_issues(self):\n # #1978, 1979\n path = os.path.join(tm.get_data_path(), 'cut_data.csv')\n arr = np.loadtxt(path)\n\n result = qcut(arr, 20)\n\n starts = []\n ends = []\n for lev in np.unique(result):\n s = lev.left\n e = lev.right\n assert s != e\n\n starts.append(float(s))\n ends.append(float(e))\n\n for (sp, sn), (ep, en) in zip(zip(starts[:-1], starts[1:]),\n zip(ends[:-1], ends[1:])):\n assert sp < sn\n assert ep < en\n assert ep <= sn\n\n def test_cut_return_intervals(self):\n s = Series([0, 1, 2, 3, 4, 5, 6, 7, 8])\n res = cut(s, 3)\n exp_bins = np.linspace(0, 8, num=4).round(3)\n exp_bins[0] -= 0.008\n exp = Series(IntervalIndex.from_breaks(exp_bins, closed='right').take(\n [0, 0, 0, 1, 1, 1, 2, 2, 2])).astype(CDT(ordered=True))\n tm.assert_series_equal(res, exp)\n\n def test_qcut_return_intervals(self):\n s = Series([0, 1, 2, 3, 4, 5, 6, 7, 8])\n res = qcut(s, [0, 0.333, 0.666, 1])\n exp_levels = np.array([Interval(-0.001, 2.664),\n Interval(2.664, 5.328), Interval(5.328, 8)])\n exp = Series(exp_levels.take([0, 0, 0, 1, 1, 1, 2, 2, 2])).astype(\n CDT(ordered=True))\n tm.assert_series_equal(res, exp)\n\n def test_series_retbins(self):\n # GH 8589\n s = Series(np.arange(4))\n result, bins = cut(s, 2, retbins=True)\n expected = Series(IntervalIndex.from_breaks(\n [-0.003, 1.5, 3], closed='right').repeat(2)).astype(\n CDT(ordered=True))\n tm.assert_series_equal(result, expected)\n\n result, bins = qcut(s, 2, retbins=True)\n expected = Series(IntervalIndex.from_breaks(\n [-0.001, 1.5, 3], closed='right').repeat(2)).astype(\n CDT(ordered=True))\n tm.assert_series_equal(result, expected)\n\n def test_qcut_duplicates_bin(self):\n # GH 7751\n values = [0, 0, 0, 0, 1, 2, 3]\n expected = IntervalIndex([Interval(-0.001, 1), Interval(1, 3)])\n\n result = qcut(values, 3, duplicates='drop')\n tm.assert_index_equal(result.categories, expected)\n\n pytest.raises(ValueError, qcut, values, 3)\n pytest.raises(ValueError, qcut, values, 3, duplicates='raise')\n\n # invalid\n pytest.raises(ValueError, qcut, values, 3, duplicates='foo')\n\n def test_single_quantile(self):\n # issue 15431\n expected = Series([0, 0])\n\n 
s = Series([9., 9.])\n result = qcut(s, 1, labels=False)\n tm.assert_series_equal(result, expected)\n result = qcut(s, 1)\n intervals = IntervalIndex([Interval(8.999, 9.0),\n Interval(8.999, 9.0)], closed='right')\n expected = Series(intervals).astype(CDT(ordered=True))\n tm.assert_series_equal(result, expected)\n\n s = Series([-9., -9.])\n expected = Series([0, 0])\n result = qcut(s, 1, labels=False)\n tm.assert_series_equal(result, expected)\n result = qcut(s, 1)\n intervals = IntervalIndex([Interval(-9.001, -9.0),\n Interval(-9.001, -9.0)], closed='right')\n expected = Series(intervals).astype(CDT(ordered=True))\n tm.assert_series_equal(result, expected)\n\n s = Series([0., 0.])\n expected = Series([0, 0])\n result = qcut(s, 1, labels=False)\n tm.assert_series_equal(result, expected)\n result = qcut(s, 1)\n intervals = IntervalIndex([Interval(-0.001, 0.0),\n Interval(-0.001, 0.0)], closed='right')\n expected = Series(intervals).astype(CDT(ordered=True))\n tm.assert_series_equal(result, expected)\n\n s = Series([9])\n expected = Series([0])\n result = qcut(s, 1, labels=False)\n tm.assert_series_equal(result, expected)\n result = qcut(s, 1)\n intervals = IntervalIndex([Interval(8.999, 9.0)], closed='right')\n expected = Series(intervals).astype(CDT(ordered=True))\n tm.assert_series_equal(result, expected)\n\n s = Series([-9])\n expected = Series([0])\n result = qcut(s, 1, labels=False)\n tm.assert_series_equal(result, expected)\n result = qcut(s, 1)\n intervals = IntervalIndex([Interval(-9.001, -9.0)], closed='right')\n expected = Series(intervals).astype(CDT(ordered=True))\n tm.assert_series_equal(result, expected)\n\n s = Series([0])\n expected = Series([0])\n result = qcut(s, 1, labels=False)\n tm.assert_series_equal(result, expected)\n result = qcut(s, 1)\n intervals = IntervalIndex([Interval(-0.001, 0.0)], closed='right')\n expected = Series(intervals).astype(CDT(ordered=True))\n tm.assert_series_equal(result, expected)\n\n def test_single_bin(self):\n # issue 14652\n expected = Series([0, 0])\n\n s = Series([9., 9.])\n result = cut(s, 1, labels=False)\n tm.assert_series_equal(result, expected)\n\n s = Series([-9., -9.])\n result = cut(s, 1, labels=False)\n tm.assert_series_equal(result, expected)\n\n expected = Series([0])\n\n s = Series([9])\n result = cut(s, 1, labels=False)\n tm.assert_series_equal(result, expected)\n\n s = Series([-9])\n result = cut(s, 1, labels=False)\n tm.assert_series_equal(result, expected)\n\n # issue 15428\n expected = Series([0, 0])\n\n s = Series([0., 0.])\n result = cut(s, 1, labels=False)\n tm.assert_series_equal(result, expected)\n\n expected = Series([0])\n\n s = Series([0])\n result = cut(s, 1, labels=False)\n tm.assert_series_equal(result, expected)\n\n def test_datetime_cut(self):\n # GH 14714\n # testing for time data to be present as series\n data = to_datetime(Series(['2013-01-01', '2013-01-02', '2013-01-03']))\n\n result, bins = cut(data, 3, retbins=True)\n expected = (\n Series(IntervalIndex([\n Interval(Timestamp('2012-12-31 23:57:07.200000'),\n Timestamp('2013-01-01 16:00:00')),\n Interval(Timestamp('2013-01-01 16:00:00'),\n Timestamp('2013-01-02 08:00:00')),\n Interval(Timestamp('2013-01-02 08:00:00'),\n Timestamp('2013-01-03 00:00:00'))]))\n .astype(CDT(ordered=True)))\n\n tm.assert_series_equal(result, expected)\n\n # testing for time data to be present as list\n data = [np.datetime64('2013-01-01'), np.datetime64('2013-01-02'),\n np.datetime64('2013-01-03')]\n result, bins = cut(data, 3, retbins=True)\n 
tm.assert_series_equal(Series(result), expected)\n\n # testing for time data to be present as ndarray\n data = np.array([np.datetime64('2013-01-01'),\n np.datetime64('2013-01-02'),\n np.datetime64('2013-01-03')])\n result, bins = cut(data, 3, retbins=True)\n tm.assert_series_equal(Series(result), expected)\n\n # testing for time data to be present as datetime index\n data = DatetimeIndex(['2013-01-01', '2013-01-02', '2013-01-03'])\n result, bins = cut(data, 3, retbins=True)\n tm.assert_series_equal(Series(result), expected)\n\n def test_datetime_bin(self):\n data = [np.datetime64('2012-12-13'), np.datetime64('2012-12-15')]\n bin_data = ['2012-12-12', '2012-12-14', '2012-12-16']\n expected = (\n Series(IntervalIndex([\n Interval(Timestamp(bin_data[0]), Timestamp(bin_data[1])),\n Interval(Timestamp(bin_data[1]), Timestamp(bin_data[2]))]))\n .astype(CDT(ordered=True)))\n\n for conv in [Timestamp, Timestamp, np.datetime64]:\n bins = [conv(v) for v in bin_data]\n result = cut(data, bins=bins)\n tm.assert_series_equal(Series(result), expected)\n\n bin_pydatetime = [Timestamp(v).to_pydatetime() for v in bin_data]\n result = cut(data, bins=bin_pydatetime)\n tm.assert_series_equal(Series(result), expected)\n\n bins = to_datetime(bin_data)\n result = cut(data, bins=bin_pydatetime)\n tm.assert_series_equal(Series(result), expected)\n\n def test_datetime_nan(self):\n\n def f():\n cut(date_range('20130101', periods=3), bins=[0, 2, 4])\n pytest.raises(ValueError, f)\n\n result = cut(date_range('20130102', periods=5),\n bins=date_range('20130101', periods=2))\n mask = result.categories.isna()\n tm.assert_numpy_array_equal(mask, np.array([False]))\n mask = result.isna()\n tm.assert_numpy_array_equal(\n mask, np.array([False, True, True, True, True]))\n\n @pytest.mark.parametrize(\n \"array_1_writeable, array_2_writeable\",\n [(True, True), (True, False), (False, False)])\n def test_cut_read_only(self, array_1_writeable, array_2_writeable):\n # issue 18773\n array_1 = np.arange(0, 100, 10)\n array_1.flags.writeable = array_1_writeable\n\n array_2 = np.arange(0, 100, 10)\n array_2.flags.writeable = array_2_writeable\n\n hundred_elements = np.arange(100)\n\n tm.assert_categorical_equal(cut(hundred_elements, array_1),\n cut(hundred_elements, array_2))\n"
] | [
[
"numpy.ones",
"pandas.Series",
"numpy.asarray",
"pandas.Categorical",
"pandas.compat.zip",
"pandas.util.testing.get_data_path",
"pandas.util.testing.assert_series_equal",
"pandas.util.testing.assert_almost_equal",
"numpy.datetime64",
"pandas.util.testing.assert_categorical_equal",
"numpy.allclose",
"numpy.append",
"pandas.to_datetime",
"pandas.core.reshape.tile._round_frac",
"numpy.linspace",
"numpy.unique",
"pandas.Timestamp",
"pandas.util.testing.assert_numpy_array_equal",
"pandas.api.types.CategoricalDtype",
"pandas.date_range",
"pandas.IntervalIndex.from_breaks",
"numpy.arange",
"pandas.Categorical.from_codes",
"pandas.cut",
"pandas.util.testing.assert_index_equal",
"pandas.qcut",
"pandas.Interval",
"pandas.DatetimeIndex",
"pandas.util.testing.assert_raises_regex",
"numpy.random.randn",
"pandas.IntervalIndex.from_tuples",
"pandas.core.algorithms.quantile",
"numpy.array",
"pandas.isna",
"numpy.loadtxt"
]
] |
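
For orientation, the two functions this test module pins down: cut() bins by equal-width intervals over the data range, while qcut() bins by sample quantiles:

import numpy as np
import pandas as pd

arr = np.array([1, 7, 5, 4, 6, 3])
print(pd.cut(arr, bins=3))                # three equal-width intervals over [1, 7]
print(pd.cut(arr, bins=3, labels=False))  # integer bin codes: [0 2 1 1 2 0]
print(pd.qcut(arr, q=2))                  # median split: two equal-frequency bins
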
Jokeren/RzLinear | [
"d318d95254cd5c3dcf814774d22dc71179450aa0"
] | [
"python/rz_linear/impl/RzLinearBackward.py"
] | [
"from typing import Tuple\nimport torch\nimport triton\nimport triton.language as tl\n\n\ndef rz_linear_backward_tl(input: torch.tensor, hashed_weight: torch.tensor, output_grad: torch.tensor,\n M: int, K: int, N: int, H: int,\n R3: int, R2: int, R1: int, R0: int,\n allow_tf32: bool = True, allow_autotune: bool = False,\n BLOCK_SIZE_M: int = 64, BLOCK_SIZE_N: int = 64, BLOCK_SIZE_K: int = 32,\n GROUP_SIZE: int = 4) -> Tuple[torch.tensor, torch.tensor]:\n input_grad = rz_linear_backward_input_grad_tl(output_grad, hashed_weight, M, K, N, H, R3, R2, R1, R0, allow_tf32=allow_tf32, allow_autotune=allow_autotune,\n BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N, BLOCK_SIZE_K=BLOCK_SIZE_K,\n GROUP_SIZE=GROUP_SIZE)\n weight_grad = rz_linear_backward_weight_grad_tl(input, output_grad, M, K, N, H, R3, R2, R1, R0, allow_tf32=allow_tf32, allow_autotune=allow_autotune,\n BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N, BLOCK_SIZE_K=BLOCK_SIZE_K,\n GROUP_SIZE=GROUP_SIZE)\n return input_grad, weight_grad\n\n\ndef rz_linear_backward_weight_grad_tl(input: torch.tensor, output_grad: torch.tensor,\n M: int, K: int, N: int, H: int,\n R3: int, R2: int, R1: int, R0: int,\n allow_tf32: bool = True, allow_autotune: bool = True,\n BLOCK_SIZE_M: int = 64, BLOCK_SIZE_N: int = 64, BLOCK_SIZE_K: int = 32,\n GROUP_SIZE: int = 8) -> torch.tensor:\n '''\n Compute input^T x output_grad and return a weight_grad tensor\n\n Args:\n input (Tensor): A MxK tensor\n output_grad (Tensor): A MxN tensor\n M, K, N, H (int): Matrix dimensions\n R3, R2, R1, R0 (int): Random numbers\n allow_tf32 (bool): If tensor core is allowed\n BLOCK_SIZE_M, BLOCK_SIZE_N, BLOCK_SIZE_K, GROUP_SIZE: Matrix tiling parameters for performance tunning\n\n Returns:\n hashed_weight_grad (Tensor): A 1xH tensor\n '''\n assert (K % 4 == 0)\n assert (N % 4 == 0)\n # allocates output\n hashed_weight_grad = torch.zeros(\n (H), device=output_grad.device, dtype=output_grad.dtype)\n # 1D launch kernel where each block gets its own program.\n\n def grid(META): return (\n triton.cdiv(K, META['BLOCK_SIZE_K']) *\n triton.cdiv(N, META['BLOCK_SIZE_N']),\n )\n\n if allow_tf32:\n assert (M % 32 == 0)\n else:\n assert (M % 8 == 0)\n\n if allow_autotune:\n if allow_tf32:\n rz_linear_backward_weight_grad_kernel_tf32[grid](\n input, output_grad, hashed_weight_grad,\n M, N, K, H,\n input.stride(1), input.stride(0),\n output_grad.stride(0), output_grad.stride(1),\n R3=R3, R2=R2, R1=R1, R0=R0,\n GROUP_SIZE=GROUP_SIZE\n )\n else:\n rz_linear_backward_weight_grad_kernel_fp32[grid](\n input, output_grad, hashed_weight_grad,\n M, N, K, H,\n input.stride(1), input.stride(0),\n output_grad.stride(0), output_grad.stride(1),\n R3=R3, R2=R2, R1=R1, R0=R0,\n GROUP_SIZE=GROUP_SIZE\n )\n else:\n rz_linear_backward_weight_grad_kernel_notune[grid](\n input, output_grad, hashed_weight_grad,\n M, N, K, H,\n input.stride(1), input.stride(0),\n output_grad.stride(0), output_grad.stride(1),\n R3=R3, R2=R2, R1=R1, R0=R0,\n allow_tf32=allow_tf32,\n GROUP_SIZE=GROUP_SIZE,\n BLOCK_SIZE_K=BLOCK_SIZE_K,\n BLOCK_SIZE_M=BLOCK_SIZE_M,\n BLOCK_SIZE_N=BLOCK_SIZE_N\n )\n\n return hashed_weight_grad\n\n\[email protected](\n configs=[\n # basic configs for compute-bound matmuls\n triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 256,\n 'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=8),\n triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 128,\n 'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=8),\n triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 256,\n 'BLOCK_SIZE_M': 32}, num_stages=3, 
num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 128,\n 'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64,\n 'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 256,\n 'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 128,\n 'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 128,\n 'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 64,\n 'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 64,\n 'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 128,\n 'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=8),\n triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64,\n 'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 256,\n 'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 128,\n 'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 128,\n 'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 64,\n 'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 64,\n 'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32,\n 'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 64,\n 'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32,\n 'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 128,\n 'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),\n ],\n key=['M', 'N', 'K'],\n)\[email protected]\ndef rz_linear_backward_weight_grad_kernel_fp32(\n # Pointers to matrices\n a_ptr, b_ptr, c_ptr,\n # Matrix dimensions\n M, N, K, H,\n # The stride variables represent how much to increase the ptr by when moving by 1\n # element in a particular dimension.\n stride_am, stride_ak,\n stride_bm, stride_bn,\n # Random numbers\n R3, R2, R1, R0,\n # Meta-parameters\n BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,\n GROUP_SIZE: tl.constexpr\n):\n rz_linear_backward_weight_grad_core(a_ptr=a_ptr, b_ptr=b_ptr, c_ptr=c_ptr, M=M, N=N, K=K, H=H,\n stride_am=stride_am, stride_ak=stride_ak, stride_bm=stride_bm, stride_bn=stride_bn,\n R3=R3, R2=R2, R1=R1, R0=R0, allow_tf32=False,\n BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N, BLOCK_SIZE_K=BLOCK_SIZE_K,\n GROUP_SIZE=GROUP_SIZE)\n\n\[email protected](\n configs=[\n # basic configs for compute-bound matmuls\n triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 256,\n 'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=8),\n triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 128,\n 'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=8),\n triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 256,\n 'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 128,\n 'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64,\n 'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 256,\n 'BLOCK_SIZE_M': 32}, 
num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 128,\n 'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 128,\n 'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 64,\n 'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 64,\n 'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 256,\n 'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 128,\n 'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 256,\n 'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=8),\n triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 128,\n 'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=8),\n triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64,\n 'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 256,\n 'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 128,\n 'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 128,\n 'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 64,\n 'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 64,\n 'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 256,\n 'BLOCK_SIZE_M': 32}, num_stages=2, num_warps=8),\n triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 128,\n 'BLOCK_SIZE_M': 32}, num_stages=2, num_warps=8),\n triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64,\n 'BLOCK_SIZE_M': 32}, num_stages=2, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 256,\n 'BLOCK_SIZE_M': 32}, num_stages=2, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 128,\n 'BLOCK_SIZE_M': 32}, num_stages=2, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 128,\n 'BLOCK_SIZE_M': 32}, num_stages=2, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 64,\n 'BLOCK_SIZE_M': 32}, num_stages=2, num_warps=4),\n triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 64,\n 'BLOCK_SIZE_M': 32}, num_stages=2, num_warps=4),\n ], key=['M', 'N', 'K'],\n)\[email protected]\ndef rz_linear_backward_weight_grad_kernel_tf32(\n # Pointers to matrices\n a_ptr, b_ptr, c_ptr,\n # Matrix dimensions\n M, N, K, H,\n # The stride variables represent how much to increase the ptr by when moving by 1\n # element in a particular dimension.\n stride_am, stride_ak,\n stride_bm, stride_bn,\n # Random numbers\n R3, R2, R1, R0,\n # Meta-parameters\n BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,\n GROUP_SIZE: tl.constexpr\n):\n rz_linear_backward_weight_grad_core(a_ptr=a_ptr, b_ptr=b_ptr, c_ptr=c_ptr, M=M, N=N, K=K, H=H,\n stride_am=stride_am, stride_ak=stride_ak, stride_bm=stride_bm, stride_bn=stride_bn,\n R3=R3, R2=R2, R1=R1, R0=R0, allow_tf32=True,\n BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N, BLOCK_SIZE_K=BLOCK_SIZE_K,\n GROUP_SIZE=GROUP_SIZE)\n\n\[email protected]\ndef rz_linear_backward_weight_grad_kernel_notune(\n # Pointers to matrices\n a_ptr, b_ptr, c_ptr,\n # Matrix dimensions\n M, N, K, H,\n # The stride variables represent how much to increase the ptr by when moving by 1\n # element in a 
particular dimension.\n stride_am, stride_ak,\n stride_bm, stride_bn,\n # Random numbers\n R3, R2, R1, R0,\n allow_tf32: tl.constexpr,\n # Meta-parameters\n BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,\n GROUP_SIZE: tl.constexpr\n):\n rz_linear_backward_weight_grad_core(a_ptr=a_ptr, b_ptr=b_ptr, c_ptr=c_ptr, M=M, N=N, K=K, H=H,\n stride_am=stride_am, stride_ak=stride_ak, stride_bm=stride_bm, stride_bn=stride_bn,\n R3=R3, R2=R2, R1=R1, R0=R0, allow_tf32=allow_tf32,\n BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N, BLOCK_SIZE_K=BLOCK_SIZE_K,\n GROUP_SIZE=GROUP_SIZE)\n\n\[email protected]\ndef rz_linear_backward_weight_grad_core(\n # Pointers to matrices\n a_ptr, b_ptr, c_ptr,\n # Matrix dimensions\n M, N, K, H,\n # The stride variables represent how much to increase the ptr by when moving by 1\n # element in a particular dimension.\n stride_am, stride_ak,\n stride_bm, stride_bn,\n # Random numbers\n R3, R2, R1, R0,\n allow_tf32: tl.constexpr,\n # Meta-parameters\n BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,\n GROUP_SIZE: tl.constexpr\n):\n \"\"\"Kernel for computing the matmul C = A^T x B.\n A has shape (M, K), B has shape (M, N) and C has shape (K, N)\n \"\"\"\n pid = tl.program_id(axis=0)\n num_pid_k = tl.cdiv(K, BLOCK_SIZE_K)\n num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)\n num_pid_in_group = GROUP_SIZE * num_pid_n\n group_id = pid // num_pid_in_group\n first_pid_k = group_id * GROUP_SIZE\n group_size_k = min(num_pid_k - first_pid_k, GROUP_SIZE)\n pid_k = first_pid_k + (pid % group_size_k)\n pid_n = (pid % num_pid_in_group) // group_size_k\n\n # [BLOCK_SIZE_K, BLOCK_SIZE_M]\n offs_ak = pid_k * BLOCK_SIZE_K + tl.arange(0, BLOCK_SIZE_K)\n offs_am = tl.arange(0, BLOCK_SIZE_M)\n a_ptrs = a_ptr + offs_ak[:, None] * \\\n stride_am + offs_am[None, :] * stride_ak\n\n # [BLOCK_SIZE_M, BLOCK_SIZE_N]\n offs_bn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)\n offs_bm = tl.arange(0, BLOCK_SIZE_M)\n b_ptrs = b_ptr + offs_bm[:, None] * \\\n stride_bm + offs_bn[None, :] * stride_bn\n\n # [BLOCK_SIZE_K, BLOCK_SIZE_N]\n c = tl.zeros((BLOCK_SIZE_K, BLOCK_SIZE_N), dtype=tl.float32)\n for _ in range(0, M//BLOCK_SIZE_M):\n # Note that for simplicity, we don't apply a mask here.\n # This means that if M is not a multiple of BLOCK_SIZE_M,\n # this will access out-of-bounds memory and produce an\n # error or (worse!) 
incorrect results.\n # TODO(Keren): Add M checks\n a = tl.load(a_ptrs)\n b = tl.load(b_ptrs)\n # We accumulate along the M dimension\n c += tl.dot(a, b, allow_tf32=allow_tf32)\n # Advance the ptrs to the next M block\n a_ptrs += BLOCK_SIZE_M * stride_ak\n b_ptrs += BLOCK_SIZE_M * stride_bm\n\n # -----------------------------------------------------------\n # Write back the block of the output matrix C\n c_offset = c_ptr + tl.arange(0, BLOCK_SIZE_K)[:, None] * \\\n BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)[None, :]\n c_ptrs = c_offset + (pid_k * R3 + pid_n * R2 +\n R1) % R0 % (H - BLOCK_SIZE_K * BLOCK_SIZE_N)\n tl.atomic_add(c_ptrs, c)\n\n\ndef rz_linear_backward_input_grad_tl(output_grad: torch.tensor, hashed_weight: torch.tensor,\n M: int, K: int, N: int, H: int,\n R3: int, R2: int, R1: int, R0: int,\n allow_tf32: bool = True, allow_autotune: bool = True,\n BLOCK_SIZE_M: int = 64, BLOCK_SIZE_N: int = 64, BLOCK_SIZE_K: int = 32,\n GROUP_SIZE: int = 4) -> torch.tensor:\n '''\n Compute output_grad x hashed_weight^T and return an input_grad tensor\n\n Args:\n output_grad (Tensor): A MxN tensor\n hashed_weight (Tensor): A 1xH (KxN) tensor\n M, K, N, H (int): matrix dimensions\n R3, R2, R1, R0 (int): random numbers\n allow_tf32 (bool): If tensor core is allowed\n BLOCK_SIZE_M, BLOCK_SIZE_N, BLOCK_SIZE_K, GROUP_SIZE: Matrix tiling parameters for performance tunning\n\n Returns:\n input_grad (Tensor): A MxK tensor\n '''\n assert (M % 4 == 0)\n assert (K % 4 == 0)\n # allocates output\n input_grad = torch.empty(\n (M, K), device=output_grad.device, dtype=output_grad.dtype)\n\n if allow_tf32:\n assert (N % 32 == 0)\n else:\n assert (N % 8 == 0)\n\n # 1D launch kernel where each block gets its own program.\n def grid(META): return (\n triton.cdiv(M, META['BLOCK_SIZE_M']) *\n triton.cdiv(K, META['BLOCK_SIZE_K']),\n )\n\n if allow_autotune:\n if allow_tf32:\n rz_linear_backward_input_grad_kernel_tf32[grid](\n output_grad, hashed_weight, input_grad,\n M, N, K, H,\n output_grad.stride(0), output_grad.stride(1),\n input_grad.stride(0), input_grad.stride(1),\n R3=R3, R2=R2, R1=R1, R0=R0,\n GROUP_SIZE=GROUP_SIZE\n )\n else:\n rz_linear_backward_input_grad_kernel_fp32[grid](\n output_grad, hashed_weight, input_grad,\n M, N, K, H,\n output_grad.stride(0), output_grad.stride(1),\n input_grad.stride(0), input_grad.stride(1),\n R3=R3, R2=R2, R1=R1, R0=R0,\n GROUP_SIZE=GROUP_SIZE\n )\n else:\n rz_linear_backward_input_grad_kernel_notune[grid](\n output_grad, hashed_weight, input_grad,\n M, N, K, H,\n output_grad.stride(0), output_grad.stride(1),\n input_grad.stride(0), input_grad.stride(1),\n R3=R3, R2=R2, R1=R1, R0=R0,\n allow_tf32=allow_tf32,\n num_warps=4,\n num_stages=3,\n BLOCK_SIZE_M=BLOCK_SIZE_M,\n BLOCK_SIZE_N=BLOCK_SIZE_N,\n BLOCK_SIZE_K=BLOCK_SIZE_K,\n GROUP_SIZE=GROUP_SIZE\n )\n return input_grad\n\n\[email protected](\n configs=[\n # basic configs for compute-bound matmuls\n triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 256,\n 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=8),\n triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 256,\n 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 256,\n 'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 128,\n 'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 128,\n 'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 64,\n 'BLOCK_SIZE_N': 32}, 
num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 64,\n 'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 256,\n 'BLOCK_SIZE_N': 16}, num_stages=2, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 128,\n 'BLOCK_SIZE_N': 16}, num_stages=2, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 128,\n 'BLOCK_SIZE_N': 16}, num_stages=2, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 64,\n 'BLOCK_SIZE_N': 16}, num_stages=2, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 64,\n 'BLOCK_SIZE_N': 16}, num_stages=2, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 32,\n 'BLOCK_SIZE_N': 16}, num_stages=2, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_K': 64,\n 'BLOCK_SIZE_N': 16}, num_stages=2, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 32,\n 'BLOCK_SIZE_N': 16}, num_stages=2, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_K': 128,\n 'BLOCK_SIZE_N': 16}, num_stages=2, num_warps=4),\n ],\n key=['M', 'N', 'K'],\n)\[email protected]\ndef rz_linear_backward_input_grad_kernel_fp32(\n # Pointers to matrices\n a_ptr, b_ptr, c_ptr,\n # Matrix dimensions\n M, N, K, H,\n # The stride variables represent how much to increase the ptr by when moving by 1\n # element in a particular dimension.\n stride_am, stride_an,\n stride_cm, stride_ck,\n # Random numbers\n R3, R2, R1, R0,\n # Meta-parameters\n BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,\n GROUP_SIZE: tl.constexpr\n):\n rz_linear_backward_input_grad_core(a_ptr=a_ptr, b_ptr=b_ptr, c_ptr=c_ptr,\n M=M, N=N, K=K, H=H,\n stride_am=stride_am, stride_an=stride_an,\n stride_cm=stride_cm, stride_ck=stride_ck,\n R3=R3, R2=R2, R1=R1, R0=R0,\n allow_tf32=False,\n BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N, BLOCK_SIZE_K=BLOCK_SIZE_K,\n GROUP_SIZE=GROUP_SIZE)\n\n\[email protected](\n configs=[\n # basic configs for compute-bound matmuls\n triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 256,\n 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=8),\n triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_K': 128,\n 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=8),\n triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 256,\n 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_K': 128,\n 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_K': 64,\n 'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 256,\n 'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 128,\n 'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 128,\n 'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 64,\n 'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 64,\n 'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 256,\n 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_K': 128,\n 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 256,\n 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=8),\n triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_K': 128,\n 
'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=8),\n triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_K': 64,\n 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 256,\n 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 128,\n 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 128,\n 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 64,\n 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 64,\n 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 256,\n 'BLOCK_SIZE_N': 32}, num_stages=2, num_warps=8),\n triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_K': 128,\n 'BLOCK_SIZE_N': 32}, num_stages=2, num_warps=8),\n triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_K': 64,\n 'BLOCK_SIZE_N': 32}, num_stages=2, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 256,\n 'BLOCK_SIZE_N': 32}, num_stages=2, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 128,\n 'BLOCK_SIZE_N': 32}, num_stages=2, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 128,\n 'BLOCK_SIZE_N': 32}, num_stages=2, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 64,\n 'BLOCK_SIZE_N': 32}, num_stages=2, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 64,\n 'BLOCK_SIZE_N': 32}, num_stages=2, num_warps=4),\n ], key=['M', 'N', 'K'],\n)\[email protected]\ndef rz_linear_backward_input_grad_kernel_tf32(\n # Pointers to matrices\n a_ptr, b_ptr, c_ptr,\n # Matrix dimensions\n M, N, K, H,\n # The stride variables represent how much to increase the ptr by when moving by 1\n # element in a particular dimension.\n stride_am, stride_an,\n stride_cm, stride_ck,\n # Random numbers\n R3, R2, R1, R0,\n # Meta-parameters\n BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,\n GROUP_SIZE: tl.constexpr\n):\n rz_linear_backward_input_grad_core(a_ptr=a_ptr, b_ptr=b_ptr, c_ptr=c_ptr,\n M=M, N=N, K=K, H=H,\n stride_am=stride_am, stride_an=stride_an,\n stride_cm=stride_cm, stride_ck=stride_ck,\n R3=R3, R2=R2, R1=R1, R0=R0,\n allow_tf32=True,\n BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N, BLOCK_SIZE_K=BLOCK_SIZE_K,\n GROUP_SIZE=GROUP_SIZE)\n\n\[email protected]\ndef rz_linear_backward_input_grad_kernel_notune(\n # Pointers to matrices\n a_ptr, b_ptr, c_ptr,\n # Matrix dimensions\n M, N, K, H,\n # The stride variables represent how much to increase the ptr by when moving by 1\n # element in a particular dimension.\n stride_am, stride_an,\n stride_cm, stride_ck,\n # Random numbers\n R3, R2, R1, R0,\n allow_tf32: tl.constexpr,\n # Meta-parameters\n BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,\n GROUP_SIZE: tl.constexpr\n):\n rz_linear_backward_input_grad_core(a_ptr=a_ptr, b_ptr=b_ptr, c_ptr=c_ptr,\n M=M, N=N, K=K, H=H,\n stride_am=stride_am, stride_an=stride_an,\n stride_cm=stride_cm, stride_ck=stride_ck,\n R3=R3, R2=R2, R1=R1, R0=R0,\n allow_tf32=allow_tf32,\n BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N, BLOCK_SIZE_K=BLOCK_SIZE_K,\n GROUP_SIZE=GROUP_SIZE)\n\n\[email protected]\ndef rz_linear_backward_input_grad_core(\n # Pointers to matrices\n a_ptr, b_ptr, c_ptr,\n # Matrix dimensions\n M, N, K, H,\n # The stride variables represent how much to increase the ptr by when moving by 1\n 
# element in a particular dimension.\n stride_am, stride_an,\n stride_cm, stride_ck,\n # Random numbers\n R3, R2, R1, R0,\n allow_tf32: tl.constexpr,\n # Meta-parameters\n BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,\n GROUP_SIZE: tl.constexpr\n):\n \"\"\"Kernel for computing the matmul C = (A x B^T)\n A has shape (M, N), B has shape H->(K, N) and C has shape (M, K)\n \"\"\"\n pid = tl.program_id(axis=0)\n num_pid_k = tl.cdiv(K, BLOCK_SIZE_K)\n num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)\n pid_m = pid // num_pid_k\n pid_k = pid % num_pid_k\n\n # [BLOCK_SIZE_M, BLOCK_SIZE_N]\n offs_am = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)\n offs_an = tl.arange(0, BLOCK_SIZE_N)\n a_ptrs = a_ptr + offs_am[:, None] * \\\n stride_am + offs_an[None, :] * stride_an\n\n # [BLOCK_SIZE_N, BLOCK_SIZE_K]\n # Compute hash\n b_offset = b_ptr + \\\n tl.arange(0, BLOCK_SIZE_N)[\n :, None] + tl.arange(0, BLOCK_SIZE_K)[None, :] * BLOCK_SIZE_N\n b_ptrs = b_offset + (pid_k * R3 + 0 * R2 +\n R1) % R0 % (H - BLOCK_SIZE_K * BLOCK_SIZE_N)\n\n # [BLOCK_SIZE_M, BLOCK_SIZE_K]\n c = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_K), dtype=tl.float32)\n for n in range(0, N//BLOCK_SIZE_N):\n # Note that for simplicity, we don't apply a mask here.\n # This means that if N is not a multiple of BLOCK_SIZE_N,\n # this will access out-of-bounds memory and produce an\n # error or (worse!) incorrect results.\n # TODO(Keren): Add N checks\n a = tl.load(a_ptrs)\n b = tl.load(b_ptrs)\n # We accumulate along the N dimension\n c += tl.dot(a, b, allow_tf32=allow_tf32)\n # Advance the ptrs to the next N block\n a_ptrs += BLOCK_SIZE_N * stride_an\n b_ptrs = b_offset + (pid_k * R3 + (n + 1) * R2 +\n R1) % R0 % (H - BLOCK_SIZE_K * BLOCK_SIZE_N)\n\n # -----------------------------------------------------------\n # Write back the block of the output matrix C\n # [BLOCK_SIZE_M, BLOCK_SIZE_K]\n offs_ck = pid_k * BLOCK_SIZE_K + tl.arange(0, BLOCK_SIZE_K)\n offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)\n c_ptrs = c_ptr + stride_cm * \\\n offs_cm[:, None] + stride_ck * offs_ck[None, :]\n c_mask = (offs_cm[:, None] < M) & (offs_ck[None, :] < K)\n tl.store(c_ptrs, c, mask=c_mask)\n"
] | [
[
"torch.zeros",
"torch.empty"
]
] |
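In the rz_linear file above, the weight-gradient kernel never materializes the dense K x N gradient: each (BLOCK_SIZE_K, BLOCK_SIZE_N) tile of input^T x output_grad is scattered with tl.atomic_add into a 1-D hashed buffer of length H, at base offset (pid_k * R3 + pid_n * R2 + R1) % R0 % (H - BLOCK_SIZE_K * BLOCK_SIZE_N). A minimal pure-PyTorch sketch of that addressing scheme, assuming K and N divide evenly by the tile sizes; weight_grad_reference and its toy arguments are illustrative names, not part of the repository:

import torch

def weight_grad_reference(x, dy, H, R3, R2, R1, R0, BK=4, BN=4):
    """Reference for the hashed weight gradient: scatter each BK x BN
    tile of dW = x.T @ dy into a length-H buffer at a hashed offset."""
    K, N = x.shape[1], dy.shape[1]
    dw = x.t() @ dy                                  # dense (K, N) gradient
    out = torch.zeros(H, dtype=dw.dtype)
    # Per-tile element offsets, mirroring the kernel's c_offset layout.
    tile = torch.arange(BK)[:, None] * BN + torch.arange(BN)[None, :]
    for pk in range(K // BK):
        for pn in range(N // BN):
            base = (pk * R3 + pn * R2 + R1) % R0 % (H - BK * BN)
            idx = (base + tile).reshape(-1)
            block = dw[pk * BK:(pk + 1) * BK, pn * BN:(pn + 1) * BN].reshape(-1)
            out.index_add_(0, idx, block)            # atomic_add analogue
    return out

g = weight_grad_reference(torch.randn(6, 8), torch.randn(6, 12),
                          H=97, R3=11, R2=7, R1=5, R0=1021)

Because distinct tiles may hash to overlapping offsets, accumulating with index_add_ (like tl.atomic_add in the kernel) rather than plain assignment is what keeps the compressed 1 x H parameterization consistent.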
cdgreenidge/gpytorch | [
"d4cc610963bd812052e43e3aed84fb8b2ec94aa6"
] | [
"test/lazy/test_added_diag_lazy_tensor.py"
] | [
"#!/usr/bin/env python3\n\nimport torch\nimport unittest\nfrom gpytorch.lazy import NonLazyTensor, DiagLazyTensor, AddedDiagLazyTensor\nfrom test.lazy._lazy_tensor_test_case import LazyTensorTestCase\n\n\nclass TestAddedDiagLazyTensor(LazyTensorTestCase, unittest.TestCase):\n seed = 0\n should_test_sample = True\n\n def create_lazy_tensor(self):\n tensor = torch.randn(5, 5)\n tensor = tensor.transpose(-1, -2).matmul(tensor)\n tensor.requires_grad_(True)\n diag = torch.tensor([1.0, 2.0, 4.0, 2.0, 3.0], requires_grad=True)\n return AddedDiagLazyTensor(NonLazyTensor(tensor), DiagLazyTensor(diag))\n\n def evaluate_lazy_tensor(self, lazy_tensor):\n diag = lazy_tensor._diag_tensor._diag\n tensor = lazy_tensor._lazy_tensor.tensor\n return tensor + diag.diag()\n\n\nclass TestAddedDiagLazyTensorBatch(LazyTensorTestCase, unittest.TestCase):\n seed = 4\n should_test_sample = True\n\n def create_lazy_tensor(self):\n tensor = torch.randn(3, 5, 5)\n tensor = tensor.transpose(-1, -2).matmul(tensor)\n tensor.requires_grad_(True)\n diag = torch.tensor(\n [[1.0, 2.0, 4.0, 2.0, 3.0], [2.0, 1.0, 2.0, 1.0, 4.0], [1.0, 2.0, 2.0, 3.0, 4.0]], requires_grad=True\n )\n return AddedDiagLazyTensor(NonLazyTensor(tensor), DiagLazyTensor(diag))\n\n def evaluate_lazy_tensor(self, lazy_tensor):\n diag = lazy_tensor._diag_tensor._diag\n tensor = lazy_tensor._lazy_tensor.tensor\n return tensor + torch.cat([diag[i].diag().unsqueeze(0) for i in range(3)])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"torch.randn",
"torch.tensor"
]
] |
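The tests above delegate the heavy checks to LazyTensorTestCase; the core property they pin down is that the lazy sum of a dense PSD matrix and a diagonal agrees with the explicitly formed sum. A small standalone sketch against the same generation of the gpytorch API (evaluate() is the lazy-to-dense call in this era of the library):

import torch
from gpytorch.lazy import NonLazyTensor, DiagLazyTensor, AddedDiagLazyTensor

A = torch.randn(5, 5)
A = A.t() @ A                                   # symmetric PSD part
diag = torch.tensor([1.0, 2.0, 4.0, 2.0, 3.0])

lazy = AddedDiagLazyTensor(NonLazyTensor(A), DiagLazyTensor(diag))
dense = A + diag.diag()

# The lazy object never forms A + diag(d) up front, but evaluating it
# must reproduce the dense sum -- the same check evaluate_lazy_tensor makes.
assert torch.allclose(lazy.evaluate(), dense)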
UKPLab/emnlp2019-dualgraph | [
"0c58fb7f3ad3b9da3b92b2d2841558807fc79fd0"
] | [
"onmt/models/model_saver.py"
] | [
"import os\nimport torch\nimport torch.nn as nn\n\nfrom collections import deque\nfrom onmt.utils.logging import logger\n\nfrom copy import deepcopy\n\n\ndef build_model_saver(model_opt, opt, model, fields, optim):\n model_saver = ModelSaver(opt.save_model,\n model,\n model_opt,\n fields,\n optim,\n opt.keep_checkpoint)\n return model_saver\n\n\nclass ModelSaverBase(object):\n \"\"\"Base class for model saving operations\n\n Inherited classes must implement private methods:\n * `_save`\n * `_rm_checkpoint\n \"\"\"\n\n def __init__(self, base_path, model, model_opt, fields, optim,\n keep_checkpoint=-1):\n self.base_path = base_path\n self.model = model\n self.model_opt = model_opt\n self.fields = fields\n self.optim = optim\n self.last_saved_step = None\n self.keep_checkpoint = keep_checkpoint\n if keep_checkpoint > 0:\n self.checkpoint_queue = deque([], maxlen=keep_checkpoint)\n\n def save(self, step, moving_average=None):\n \"\"\"Main entry point for model saver\n\n It wraps the `_save` method with checks and apply `keep_checkpoint`\n related logic\n \"\"\"\n\n if self.keep_checkpoint == 0 or step == self.last_saved_step:\n return\n\n if moving_average:\n save_model = deepcopy(self.model)\n for avg, param in zip(moving_average, save_model.parameters()):\n param.data.copy_(avg.data)\n else:\n save_model = self.model\n\n chkpt, chkpt_name = self._save(step, save_model)\n self.last_saved_step = step\n\n if moving_average:\n del save_model\n\n if self.keep_checkpoint > 0:\n if len(self.checkpoint_queue) == self.checkpoint_queue.maxlen:\n todel = self.checkpoint_queue.popleft()\n self._rm_checkpoint(todel)\n self.checkpoint_queue.append(chkpt_name)\n\n def _save(self, step):\n \"\"\"Save a resumable checkpoint.\n\n Args:\n step (int): step number\n\n Returns:\n (object, str):\n\n * checkpoint: the saved object\n * checkpoint_name: name (or path) of the saved checkpoint\n \"\"\"\n\n raise NotImplementedError()\n\n def _rm_checkpoint(self, name):\n \"\"\"Remove a checkpoint\n\n Args:\n name(str): name that indentifies the checkpoint\n (it may be a filepath)\n \"\"\"\n\n raise NotImplementedError()\n\n\nclass ModelSaver(ModelSaverBase):\n \"\"\"Simple model saver to filesystem\"\"\"\n\n def _save(self, step, model):\n real_model = (model.module\n if isinstance(model, nn.DataParallel)\n else model)\n real_generator = (real_model.generator.module\n if isinstance(real_model.generator, nn.DataParallel)\n else real_model.generator)\n\n model_state_dict = real_model.state_dict()\n model_state_dict = {k: v for k, v in model_state_dict.items()\n if 'generator' not in k}\n generator_state_dict = real_generator.state_dict()\n\n # NOTE: We need to trim the vocab to remove any unk tokens that\n # were not originally here.\n\n vocab = deepcopy(self.fields)\n\n if hasattr(model.encoder, 'is_graph_encoder'):\n sides = [\"src\", \"node1\", \"node2\", \"tgt\"]\n else:\n sides = [\"src\", \"tgt\"]\n\n for side in sides:\n keys_to_pop = []\n if hasattr(vocab[side], \"fields\"):\n unk_token = vocab[side].fields[0][1].vocab.itos[0]\n for key, value in vocab[side].fields[0][1].vocab.stoi.items():\n if value == 0 and key != unk_token:\n keys_to_pop.append(key)\n for key in keys_to_pop:\n vocab[side].fields[0][1].vocab.stoi.pop(key, None)\n\n checkpoint = {\n 'model': model_state_dict,\n 'generator': generator_state_dict,\n 'vocab': vocab,\n 'opt': self.model_opt,\n 'optim': self.optim.state_dict(),\n }\n\n logger.info(\"Saving checkpoint %s_step_%d.pt\" % (self.base_path, step))\n checkpoint_path = '%s_step_%d.pt' % 
(self.base_path, step)\n torch.save(checkpoint, checkpoint_path)\n return checkpoint, checkpoint_path\n\n def _rm_checkpoint(self, name):\n os.remove(name)\n"
] | [
[
"torch.save"
]
] |
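ModelSaver.save above rotates checkpoints with a bounded deque: once keep_checkpoint names are queued, the oldest is deleted before the newest is appended. A stripped-down sketch of just that rotation logic; RotatingSaver is a hypothetical name and the actual serialization (torch.save in the original) is elided:

import os
from collections import deque

class RotatingSaver:
    """Keep only the newest `keep` checkpoint files on disk."""

    def __init__(self, keep):
        self.queue = deque([], maxlen=keep)

    def save(self, path):
        if len(self.queue) == self.queue.maxlen:
            oldest = self.queue.popleft()
            if os.path.exists(oldest):
                os.remove(oldest)        # mirrors _rm_checkpoint
        # ... write the new checkpoint to `path` here ...
        self.queue.append(path)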
hsk9767/mesh_rcnn_copy | [
"6dd4d9ea8af33c03a084e34c7d16eeaddfe924ae"
] | [
"meshrcnn/modeling/roi_heads/roi_heads.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom typing import Dict\nimport torch\nfrom detectron2.layers import ShapeSpec, cat\nfrom detectron2.modeling import ROI_HEADS_REGISTRY\nfrom detectron2.modeling.poolers import ROIPooler\nfrom detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers, FastRCNNOutputs\nfrom detectron2.modeling.roi_heads.roi_heads import StandardROIHeads, select_foreground_proposals\nfrom pytorch3d.ops import cubify\nfrom pytorch3d.structures import Meshes\nfrom pytorch3d.utils import ico_sphere\n\nfrom meshrcnn.modeling.roi_heads.mask_head import mask_rcnn_loss\nfrom meshrcnn.modeling.roi_heads.mesh_head import (\n build_mesh_head,\n mesh_rcnn_inference,\n mesh_rcnn_loss,\n)\nfrom meshrcnn.modeling.roi_heads.voxel_head import (\n build_voxel_head,\n voxel_rcnn_inference,\n voxel_rcnn_loss,\n)\nfrom meshrcnn.modeling.roi_heads.z_head import build_z_head, z_rcnn_inference, z_rcnn_loss\nfrom meshrcnn.utils import vis as vis_utils\n\n\n@ROI_HEADS_REGISTRY.register()\nclass MeshRCNNROIHeads(StandardROIHeads):\n \"\"\"\n The ROI specific heads for Mesh R-CNN\n \"\"\"\n\n def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):\n super().__init__(cfg, input_shape)\n self._init_z_head(cfg, input_shape)\n self._init_voxel_head(cfg, input_shape)\n self._init_mesh_head(cfg, input_shape)\n # If MODEL.VIS_MINIBATCH is True we store minibatch targets\n # for visualization purposes\n self._vis = cfg.MODEL.VIS_MINIBATCH\n self._misc = {}\n self._vis_dir = cfg.OUTPUT_DIR\n\n def _init_z_head(self, cfg, input_shape):\n # fmt: off\n self.zpred_on = cfg.MODEL.ZPRED_ON\n if not self.zpred_on:\n return\n z_pooler_resolution = cfg.MODEL.ROI_Z_HEAD.POOLER_RESOLUTION\n z_pooler_scales = tuple(1.0 / input_shape[k].stride for k in self.in_features)\n z_sampling_ratio = cfg.MODEL.ROI_Z_HEAD.POOLER_SAMPLING_RATIO\n z_pooler_type = cfg.MODEL.ROI_Z_HEAD.POOLER_TYPE\n # fmt: on\n\n self.z_loss_weight = cfg.MODEL.ROI_Z_HEAD.Z_REG_WEIGHT\n self.z_smooth_l1_beta = cfg.MODEL.ROI_Z_HEAD.SMOOTH_L1_BETA\n\n in_channels = [input_shape[f].channels for f in self.in_features][0]\n\n self.z_pooler = ROIPooler(\n output_size=z_pooler_resolution,\n scales=z_pooler_scales,\n sampling_ratio=z_sampling_ratio,\n pooler_type=z_pooler_type,\n )\n shape = ShapeSpec(\n channels=in_channels, width=z_pooler_resolution, height=z_pooler_resolution\n )\n self.z_head = build_z_head(cfg, shape)\n\n def _init_voxel_head(self, cfg, input_shape):\n # fmt: off\n self.voxel_on = cfg.MODEL.VOXEL_ON\n if not self.voxel_on:\n return\n voxel_pooler_resolution = cfg.MODEL.ROI_VOXEL_HEAD.POOLER_RESOLUTION\n voxel_pooler_scales = tuple(1.0 / input_shape[k].stride for k in self.in_features)\n voxel_sampling_ratio = cfg.MODEL.ROI_VOXEL_HEAD.POOLER_SAMPLING_RATIO\n voxel_pooler_type = cfg.MODEL.ROI_VOXEL_HEAD.POOLER_TYPE\n # fmt: on\n\n self.voxel_loss_weight = cfg.MODEL.ROI_VOXEL_HEAD.LOSS_WEIGHT\n self.cls_agnostic_voxel = cfg.MODEL.ROI_VOXEL_HEAD.CLS_AGNOSTIC_VOXEL\n self.cubify_thresh = cfg.MODEL.ROI_VOXEL_HEAD.CUBIFY_THRESH\n\n in_channels = [input_shape[f].channels for f in self.in_features][0]\n\n self.voxel_pooler = ROIPooler(\n output_size=voxel_pooler_resolution,\n scales=voxel_pooler_scales,\n sampling_ratio=voxel_sampling_ratio,\n pooler_type=voxel_pooler_type,\n )\n shape = ShapeSpec(\n channels=in_channels, width=voxel_pooler_resolution, height=voxel_pooler_resolution\n )\n self.voxel_head = build_voxel_head(cfg, shape)\n\n def _init_mesh_head(self, cfg, input_shape):\n # fmt: off\n 
self.mesh_on = cfg.MODEL.MESH_ON\n if not self.mesh_on:\n return\n mesh_pooler_resolution = cfg.MODEL.ROI_MESH_HEAD.POOLER_RESOLUTION\n mesh_pooler_scales = tuple(1.0 / input_shape[k].stride for k in self.in_features)\n mesh_sampling_ratio = cfg.MODEL.ROI_MESH_HEAD.POOLER_SAMPLING_RATIO\n mesh_pooler_type = cfg.MODEL.ROI_MESH_HEAD.POOLER_TYPE\n # fmt: on\n\n self.chamfer_loss_weight = cfg.MODEL.ROI_MESH_HEAD.CHAMFER_LOSS_WEIGHT\n self.normals_loss_weight = cfg.MODEL.ROI_MESH_HEAD.NORMALS_LOSS_WEIGHT\n self.edge_loss_weight = cfg.MODEL.ROI_MESH_HEAD.EDGE_LOSS_WEIGHT\n self.gt_num_samples = cfg.MODEL.ROI_MESH_HEAD.GT_NUM_SAMPLES\n self.pred_num_samples = cfg.MODEL.ROI_MESH_HEAD.PRED_NUM_SAMPLES\n self.gt_coord_thresh = cfg.MODEL.ROI_MESH_HEAD.GT_COORD_THRESH\n self.ico_sphere_level = cfg.MODEL.ROI_MESH_HEAD.ICO_SPHERE_LEVEL\n\n in_channels = [input_shape[f].channels for f in self.in_features][0]\n\n self.mesh_pooler = ROIPooler(\n output_size=mesh_pooler_resolution,\n scales=mesh_pooler_scales,\n sampling_ratio=mesh_sampling_ratio,\n pooler_type=mesh_pooler_type,\n )\n self.mesh_head = build_mesh_head(\n cfg,\n ShapeSpec(\n channels=in_channels, height=mesh_pooler_resolution, width=mesh_pooler_resolution\n ),\n )\n\n def forward(self, images, features, proposals, targets=None):\n \"\"\"\n See :class:`ROIHeads.forward`.\n \"\"\"\n if self._vis:\n self._misc[\"images\"] = images\n del images\n\n if self.training:\n proposals = self.label_and_sample_proposals(proposals, targets)\n del targets\n\n if self._vis:\n self._misc[\"proposals\"] = proposals\n\n if self.training:\n losses = self._forward_box(features, proposals)\n # During training the proposals used by the box head are\n # used by the z, mask, voxel & mesh head.\n losses.update(self._forward_z(features, proposals))\n losses.update(self._forward_mask(features, proposals))\n losses.update(self._forward_shape(features, proposals))\n # print minibatch examples\n if self._vis:\n vis_utils.visualize_minibatch(self._misc[\"images\"], self._misc, self._vis_dir, True)\n\n return [], losses\n else:\n pred_instances = self._forward_box(features, proposals)\n # During inference cascaded prediction is used: the mask and keypoints heads are only\n # applied to the top scoring box detections.\n pred_instances = self.forward_with_given_boxes(features, pred_instances)\n return pred_instances, {}\n\n def forward_with_given_boxes(self, features, instances):\n \"\"\"\n Use the given boxes in `instances` to produce other (non-box) per-ROI outputs.\n\n Args:\n features: same as in `forward()`\n instances (list[Instances]): instances to predict other outputs. 
Expect the keys\n \"pred_boxes\" and \"pred_classes\" to exist.\n\n Returns:\n instances (Instances): the same `Instances` object, with extra\n fields such as `pred_masks` or `pred_voxels`.\n \"\"\"\n assert not self.training\n assert instances[0].has(\"pred_boxes\") and instances[0].has(\"pred_classes\")\n\n instances = self._forward_z(features, instances)\n instances = self._forward_mask(features, instances)\n instances = self._forward_shape(features, instances)\n return instances\n\n def _forward_z(self, features, instances):\n \"\"\"\n Forward logic of the z prediction branch.\n \"\"\"\n if not self.zpred_on:\n return {} if self.training else instances\n features = [features[f] for f in self.in_features]\n\n if self.training:\n # The loss is only defined on positive proposals.\n proposals, _ = select_foreground_proposals(instances, self.num_classes)\n proposal_boxes = [x.proposal_boxes for x in proposals]\n z_features = self.z_pooler(features, proposal_boxes)\n z_pred = self.z_head(z_features)\n src_boxes = cat([p.tensor for p in proposal_boxes])\n loss_z_reg = z_rcnn_loss(\n z_pred,\n proposals,\n src_boxes,\n loss_weight=self.z_loss_weight,\n smooth_l1_beta=self.z_smooth_l1_beta,\n )\n return {\"loss_z_reg\": loss_z_reg}\n else:\n pred_boxes = [x.pred_boxes for x in instances]\n z_features = self.z_pooler(features, pred_boxes)\n z_pred = self.z_head(z_features)\n z_rcnn_inference(z_pred, instances)\n return instances\n\n def _forward_mask(self, features, instances):\n \"\"\"\n Forward logic of the mask prediction branch.\n\n Args:\n features (dict[str,Tensor]): mapping from names to backbone features\n instances (list[Instances]): the per-image instances to train/predict masks.\n In training, they can be the proposals.\n In inference, they can be the predicted boxes.\n\n Returns:\n In training, a dict of losses.\n In inference, update `instances` with new fields \"pred_masks\" and return it.\n \"\"\"\n if not self.mask_on:\n return {} if self.training else instances\n\n features = [features[f] for f in self.in_features]\n\n if self.training:\n # The loss is only defined on positive proposals.\n proposals, _ = select_foreground_proposals(instances, self.num_classes)\n proposal_boxes = [x.proposal_boxes for x in proposals]\n mask_features = self.mask_pooler(features, proposal_boxes)\n mask_logits = self.mask_head.layers(mask_features)\n loss_mask, target_masks = mask_rcnn_loss(mask_logits, proposals)\n if self._vis:\n self._misc[\"target_masks\"] = target_masks\n self._misc[\"fg_proposals\"] = proposals\n return {\"loss_mask\": loss_mask}\n else:\n pred_boxes = [x.pred_boxes for x in instances]\n mask_features = self.mask_pooler(features, pred_boxes)\n return self.mask_head(mask_features, instances)\n\n def _forward_shape(self, features, instances):\n \"\"\"\n Forward logic for the voxel and mesh refinement branch.\n\n Args:\n features (list[Tensor]): #level input features for voxel prediction\n instances (list[Instances]): the per-image instances to train/predict meshes.\n In training, they can be the proposals.\n In inference, they can be the predicted boxes.\n Returns:\n In training, a dict of losses.\n In inference, update `instances` with new fields \"pred_voxels\" & \"pred_meshes\" and return it.\n \"\"\"\n if not self.voxel_on and not self.mesh_on:\n return {} if self.training else instances\n\n features = [features[f] for f in self.in_features]\n if self.training:\n # The loss is only defined on positive proposals.\n proposals, _ = select_foreground_proposals(instances, 
self.num_classes)\n proposal_boxes = [x.proposal_boxes for x in proposals]\n\n losses = {}\n if self.voxel_on:\n voxel_features = self.voxel_pooler(features, proposal_boxes)\n voxel_logits = self.voxel_head(voxel_features)\n loss_voxel, target_voxels = voxel_rcnn_loss(\n voxel_logits, proposals, loss_weight=self.voxel_loss_weight\n )\n losses.update({\"loss_voxel\": loss_voxel})\n if self._vis:\n self._misc[\"target_voxels\"] = target_voxels\n if self.cls_agnostic_voxel:\n with torch.no_grad():\n vox_in = voxel_logits.sigmoid().squeeze(1) # (N, V, V, V)\n init_mesh = cubify(vox_in, self.cubify_thresh) # 1\n else:\n raise ValueError(\"No support for class specific predictions\")\n\n if self.mesh_on:\n mesh_features = self.mesh_pooler(features, proposal_boxes)\n if not self.voxel_on:\n if mesh_features.shape[0] > 0:\n init_mesh = ico_sphere(self.ico_sphere_level, mesh_features.device)\n init_mesh = init_mesh.extend(mesh_features.shape[0])\n else:\n init_mesh = Meshes(verts=[], faces=[])\n pred_meshes = self.mesh_head(mesh_features, init_mesh)\n\n # loss weights\n loss_weights = {\n \"chamfer\": self.chamfer_loss_weight,\n \"normals\": self.normals_loss_weight,\n \"edge\": self.edge_loss_weight,\n }\n\n if not pred_meshes[0].isempty():\n loss_chamfer, loss_normals, loss_edge, target_meshes = mesh_rcnn_loss(\n pred_meshes,\n proposals,\n loss_weights=loss_weights,\n gt_num_samples=self.gt_num_samples,\n pred_num_samples=self.pred_num_samples,\n gt_coord_thresh=self.gt_coord_thresh,\n )\n if self._vis:\n self._misc[\"init_meshes\"] = init_mesh\n self._misc[\"target_meshes\"] = target_meshes\n else:\n loss_chamfer = sum(k.sum() for k in self.mesh_head.parameters()) * 0.0\n loss_normals = sum(k.sum() for k in self.mesh_head.parameters()) * 0.0\n loss_edge = sum(k.sum() for k in self.mesh_head.parameters()) * 0.0\n\n losses.update(\n {\n \"loss_chamfer\": loss_chamfer,\n \"loss_normals\": loss_normals,\n \"loss_edge\": loss_edge,\n }\n )\n\n return losses\n else:\n pred_boxes = [x.pred_boxes for x in instances]\n\n if self.voxel_on:\n voxel_features = self.voxel_pooler(features, pred_boxes)\n voxel_logits = self.voxel_head(voxel_features)\n voxel_rcnn_inference(voxel_logits, instances)\n if self.cls_agnostic_voxel:\n with torch.no_grad():\n vox_in = voxel_logits.sigmoid().squeeze(1) # (N, V, V, V)\n init_mesh = cubify(vox_in, self.cubify_thresh) # 1\n else:\n raise ValueError(\"No support for class specific predictions\")\n\n if self.mesh_on:\n mesh_features = self.mesh_pooler(features, pred_boxes)\n if not self.voxel_on:\n if mesh_features.shape[0] > 0:\n init_mesh = ico_sphere(self.ico_sphere_level, mesh_features.device)\n init_mesh = init_mesh.extend(mesh_features.shape[0])\n else:\n init_mesh = Meshes(verts=[], faces=[])\n pred_meshes = self.mesh_head(mesh_features, init_mesh)\n mesh_rcnn_inference(pred_meshes[-1], instances)\n else:\n assert self.voxel_on\n mesh_rcnn_inference(init_mesh, instances)\n\n return instances\n"
] | [
[
"torch.no_grad"
]
] |
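In _forward_shape above, class-agnostic voxel logits become an initial mesh by thresholding sigmoid occupancies and running pytorch3d's cubify; the mesh head then refines that mesh. A toy sketch of the voxel-to-mesh step, where the 0.2 threshold stands in for cfg.MODEL.ROI_VOXEL_HEAD.CUBIFY_THRESH and the shapes are illustrative:

import torch
from pytorch3d.ops import cubify

voxel_logits = torch.randn(2, 1, 24, 24, 24)   # 2 ROIs, 24^3 class-agnostic grid
vox_in = voxel_logits.sigmoid().squeeze(1)     # (N, V, V, V) occupancy probabilities
init_mesh = cubify(vox_in, 0.2)                # Meshes object, one mesh per ROI
print(init_mesh.num_verts_per_mesh())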
J-Massey/postproc | [
"4552b0ad79072f5d217cf62632c08617ea3d2d82"
] | [
"circular_cylinder/figures/plot.py"
] | [
"import matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nfrom itertools import product\nimport os\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom matplotlib import ticker, cm\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom matplotlib.ticker import FormatStrFormatter\nfrom matplotlib.colors import LinearSegmentedColormap\nfrom matplotlib.colors import BoundaryNorm\n\n\ncolors = sns.color_palette(\"husl\", 4)\nplt.style.use(['science', 'grid'])\n\n\ndef plot_loss(epochs, cost, fn='cost.pdf'):\n fig, ax = plt.subplots(figsize=(5, 3))\n ax.tick_params(bottom=\"on\", top=\"on\", right=\"on\", which='both', direction='in', length=2)\n ax.set_xlabel(r\"Epochs\")\n ax.set_ylabel(r'$L_2$ loss')\n ax.plot_fill(np.linspace(0, epochs, len(cost)), cost, label=r'$L_{2}$')\n ax.legend()\n plt.savefig(fn)\n plt.show()\n\n\ndef plot_model(cd_hat, fos, Y, fn='model.pdf'):\n fig, ax = plt.subplots(figsize=(5, 3))\n ax.tick_params(bottom=\"on\", top=\"on\", right=\"on\", which='both', direction='in', length=2)\n ax.set_xlabel(r\"$t/D$\")\n ax.set_ylabel(r'$C_{D_f}$')\n ax.plot_fill(fos['t'], Y, label=r'Ground truth')\n ax.plot_fill(fos['t'], cd_hat, label=r'$\\hat{C_{D_f}}$')\n ax.legend()\n plt.savefig(fn)\n plt.show()\n\n\ndef plot_BL_corruption():\n fig, ax = plt.subplots(figsize=(5, 5))\n ax.set_xlabel(r'$x_n$')\n ax.set_ylabel(r'$y_n$', rotation=0)\n # Define grid\n D = 32\n eps = 2\n r = D / 2\n x, y = np.arange(-D, D + 1, 1), np.arange(-D, D + 1, 1)\n X, Y = np.meshgrid(x, y)\n\n # Body coordinates\n theta = np.linspace(0, 2 * np.pi, int(D * np.pi))\n\n Bx, By = r * np.cos(theta), r * np.sin(theta)\n ax.plot_fill(Bx, By, color='k', linewidth=2., label=r'Hard body boundary')\n\n Bepx, Bepy = (r + eps) * np.cos(theta), (r + eps) * np.sin(theta)\n ax.plot_fill(Bepx, Bepy, c='blue', linewidth=0.5, label=r'$D+\\epsilon$')\n\n # Distance function from eps away from body edge\n dis = np.sqrt(X ** 2 + Y ** 2)\n\n # Cmap definition\n bs = iter((np.array([14, 15.8, 18.7, 22]) - 4.5) / D)\n colours = [(0, 'midnightblue'),\n (next(bs), 'midnightblue'),\n (next(bs), 'red'),\n (next(bs), 'green'),\n (next(bs), 'royalblue'),\n (1, 'royalblue')]\n cmap = LinearSegmentedColormap.from_list('corruption', colours, 256)\n\n cs = ax.imshow(dis, zorder=0, aspect=\"auto\", extent=(-D, D, -D, D),\n cmap=cmap, interpolation='bicubic')\n make_axes_locatable(ax)\n divider = make_axes_locatable(ax)\n ax_cb = divider.new_horizontal(size=\"5%\", pad=0.05)\n fig.add_axes(ax_cb)\n cbar = plt.colorbar(cs, cax=ax_cb, ticks=[8, 16.4, 21, 32], extend='max')\n # ax_cb.yaxis.tick_right()\n cbar.ax.set_yticklabels([r'$\\vec{b}$', r'$\\vec{b}*\\vec{f}$', r'$d|_{n \\approx 0}$', r'$\\vec{f}$'])\n cbar.ax.tick_params(which='both', size=0)\n ax.legend()\n plt.savefig('../figures/bl_corruption.pdf', dpi=300)\n plt.close()\n\n\ndef plot_pressure():\n data_root = '/home/masseyjmo/Workspace/Lotus/projects/cylinder_dns/validation'\n p = np.loadtxt(os.path.join(data_root, 'fort.10'), unpack=True)\n p = np.mean(p, axis=1)\n\n fig, ax = plt.subplots(figsize=(4, 4))\n ax.tick_params(bottom=\"on\", top=\"on\", right=\"on\", which='both', direction='in', length=2)\n ax.set_xlabel(r\"$\\theta$\")\n ax.set_ylabel(r'$C_{p}$')\n ax.scatter(np.linspace(0, np.pi / 2, len(p)), p * 2, label=r'Pressure distribution', color='k', marker='+')\n ax.set_ylim(-2, 1)\n ax.legend()\n plt.savefig('pressure_theta.pdf')\n plt.show()\n\n\nif __name__ == \"__main__\":\n plot_BL_corruption()\n"
] | [
[
"matplotlib.pyplot.style.use",
"numpy.sin",
"matplotlib.pyplot.savefig",
"numpy.cos",
"matplotlib.pyplot.subplots",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"numpy.arange",
"matplotlib.pyplot.show",
"matplotlib.pyplot.close",
"numpy.sqrt",
"matplotlib.pyplot.colorbar",
"numpy.meshgrid",
"numpy.array",
"numpy.mean"
]
] |
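plot_BL_corruption above builds its colormap from explicitly positioned stops, so each band of the distance field gets its own colour. A minimal sketch of that LinearSegmentedColormap pattern with made-up stop positions (the original derives them from the body diameter D and epsilon):

import numpy as np
from matplotlib.colors import LinearSegmentedColormap

# (position, colour) stops; positions must increase from 0.0 to 1.0.
colours = [(0.0, 'midnightblue'), (0.3, 'red'), (0.45, 'green'),
           (0.55, 'royalblue'), (1.0, 'royalblue')]
cmap = LinearSegmentedColormap.from_list('corruption', colours, 256)
print(cmap(np.linspace(0.0, 1.0, 5)))          # RGBA values at sample points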
zhengzx-nlp/REDER | [
"7035e089e4d30b8090a2c3caa937b1e0ba27cedc"
] | [
"fairseq/modules/fairseq_dropout.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport logging\nfrom typing import List, Optional\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass FairseqDropout(nn.Module):\n\n def __init__(self, p, module_name=None):\n super().__init__()\n self.p = p\n self.module_name = module_name\n self.apply_during_inference = False\n\n def forward(self, x, inplace: bool = False):\n if self.training or self.apply_during_inference:\n return F.dropout(x, p=self.p, training=True, inplace=inplace)\n else:\n return x\n\n def extra_repr(self) -> str:\n return 'p={}'.format(self.p)\n\n def make_generation_fast_(\n self,\n name: str,\n retain_dropout: bool = False,\n retain_dropout_modules: Optional[List[str]] = None,\n **kwargs\n ):\n if retain_dropout:\n if retain_dropout_modules is not None and self.module_name is None:\n logger.warning(\n 'Cannot enable dropout during inference for module {} '\n 'because module_name was not set'.format(name)\n )\n elif (\n retain_dropout_modules is None # if None, apply to all modules\n or self.module_name in retain_dropout_modules\n ):\n logger.info(\n 'Enabling dropout during inference for module: {}'.format(name)\n )\n self.apply_during_inference = True\n else:\n logger.info('Disabling dropout for module: {}'.format(name))\n"
] | [
[
"torch.nn.functional.dropout"
]
] |
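make_generation_fast_ above can leave dropout active at inference by flipping apply_during_inference, which is how selected modules retain stochasticity during generation (e.g. Monte-Carlo-style sampling). A self-contained sketch of the same pattern under the hypothetical name AlwaysOnDropout:

import torch
import torch.nn as nn
import torch.nn.functional as F

class AlwaysOnDropout(nn.Module):
    """Dropout that can stay active in eval mode, like FairseqDropout
    after make_generation_fast_ sets apply_during_inference=True."""

    def __init__(self, p):
        super().__init__()
        self.p = p
        self.apply_during_inference = False

    def forward(self, x):
        if self.training or self.apply_during_inference:
            return F.dropout(x, p=self.p, training=True)
        return x

m = AlwaysOnDropout(0.5)
m.eval()
m.apply_during_inference = True
print(m(torch.ones(4)))    # still stochastic despite eval()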
EricPedley/FCRN-DepthPrediction | [
"93aaed329e9e071c6d5c5a59e77a73a09684b156"
] | [
"tensorflow/network.py"
] | [
"import numpy as np\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\n\n# ----------------------------------------------------------------------------------\n# Commonly used layers and operations based on ethereon's implementation \n# https://github.com/ethereon/caffe-tensorflow\n# Slight modifications may apply. FCRN-specific operations have also been appended. \n# ----------------------------------------------------------------------------------\n# Thanks to *Helisa Dhamo* for the model conversion and integration into TensorFlow.\n# ----------------------------------------------------------------------------------\n\nDEFAULT_PADDING = 'SAME'\n\n\ndef get_incoming_shape(incoming):\n \"\"\" Returns the incoming data shape \"\"\"\n if isinstance(incoming, tf.Tensor):\n return incoming.get_shape().as_list()\n elif type(incoming) in [np.array, list, tuple]:\n return np.shape(incoming)\n else:\n raise Exception(\"Invalid incoming layer.\")\n\n\ndef interleave(tensors, axis):\n old_shape = get_incoming_shape(tensors[0])[1:]\n new_shape = [-1] + old_shape\n new_shape[axis] *= len(tensors)\n return tf.reshape(tf.stack(tensors, axis + 1), new_shape)\n\ndef layer(op):\n '''Decorator for composable network layers.'''\n\n def layer_decorated(self, *args, **kwargs):\n # Automatically set a name if not provided.\n name = kwargs.setdefault('name', self.get_unique_name(op.__name__))\n\n # Figure out the layer inputs.\n if len(self.terminals) == 0:\n raise RuntimeError('No input variables found for layer %s.' % name)\n elif len(self.terminals) == 1:\n layer_input = self.terminals[0]\n else:\n layer_input = list(self.terminals)\n # Perform the operation and get the output.\n layer_output = op(self, layer_input, *args, **kwargs)\n # Add to layer LUT.\n self.layers[name] = layer_output\n # This output is now the input for the next layer.\n self.feed(layer_output)\n # Return self for chained calls.\n return self\n\n return layer_decorated\n\n\nclass Network(object):\n\n def __init__(self, inputs, batch, keep_prob, is_training, trainable = True):\n # The input nodes for this network\n self.inputs = inputs\n # The current list of terminal nodes\n self.terminals = []\n # Mapping from layer names to layers\n self.layers = dict(inputs)\n # If true, the resulting variables are set as trainable\n self.trainable = trainable\n self.batch_size = batch\n self.keep_prob = keep_prob\n self.is_training = is_training\n self.setup()\n\n\n def setup(self):\n '''Construct the network. 
'''\n raise NotImplementedError('Must be implemented by the subclass.')\n\n def load(self, data_path, session, ignore_missing=False):\n '''Load network weights.\n data_path: The path to the numpy-serialized network weights\n session: The current TensorFlow session\n ignore_missing: If true, serialized weights for missing layers are ignored.\n '''\n data_dict = np.load(data_path, encoding='latin1').item()\n for op_name in data_dict: \n with tf.variable_scope(op_name, reuse=True):\n for param_name, data in iter(data_dict[op_name].items()): \n try:\n var = tf.get_variable(param_name)\n session.run(var.assign(data))\n\n except ValueError:\n if not ignore_missing:\n raise\n\n def feed(self, *args):\n '''Set the input(s) for the next operation by replacing the terminal nodes.\n The arguments can be either layer names or the actual layers.\n '''\n assert len(args) != 0\n self.terminals = []\n for fed_layer in args:\n if isinstance(fed_layer, str):\n try:\n fed_layer = self.layers[fed_layer]\n except KeyError:\n raise KeyError('Unknown layer name fed: %s' % fed_layer)\n self.terminals.append(fed_layer)\n return self\n\n def get_output(self):\n '''Returns the current network output.'''\n return self.terminals[-1]\n\n def get_layer_output(self, name):\n return self.layers[name]\n\n def get_unique_name(self, prefix):\n '''Returns an index-suffixed unique name for the given prefix.\n This is used for auto-generating layer names based on the type-prefix.\n '''\n ident = sum(t.startswith(prefix) for t, _ in self.layers.items()) + 1\n return '%s_%d' % (prefix, ident)\n\n def make_var(self, name, shape):\n '''Creates a new TensorFlow variable.'''\n return tf.get_variable(name, shape, dtype = 'float32', trainable=self.trainable)\n\n def validate_padding(self, padding):\n '''Verifies that the padding is one of the supported ones.'''\n assert padding in ('SAME', 'VALID')\n\n @layer\n def conv(self,\n input_data,\n k_h,\n k_w,\n c_o,\n s_h,\n s_w,\n name,\n relu=True,\n padding=DEFAULT_PADDING,\n group=1,\n biased=True):\n\n # Verify that the padding is acceptable\n self.validate_padding(padding)\n # Get the number of channels in the input\n c_i = input_data.get_shape()[-1]\n\n if (padding == 'SAME'):\n input_data = tf.pad(input_data, [[0, 0], [(k_h - 1)//2, (k_h - 1)//2], [(k_w - 1)//2, (k_w - 1)//2], [0, 0]], \"CONSTANT\")\n \n # Verify that the grouping parameter is valid\n assert c_i % group == 0\n assert c_o % group == 0\n # Convolution for a given input and kernel\n convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding='VALID')\n \n with tf.variable_scope(name) as scope:\n kernel = self.make_var('weights', shape=[k_h, k_w, c_i // group, c_o])\n\n if group == 1:\n # This is the common-case. 
Convolve the input without any further complications.\n output = convolve(input_data, kernel)\n else:\n # Split the input into groups and then convolve each of them independently\n\n input_groups = tf.split(3, group, input_data)\n kernel_groups = tf.split(3, group, kernel)\n output_groups = [convolve(i, k) for i, k in zip(input_groups, kernel_groups)]\n # Concatenate the groups\n output = tf.concat(3, output_groups)\n\n # Add the biases\n if biased:\n biases = self.make_var('biases', [c_o])\n output = tf.nn.bias_add(output, biases)\n if relu:\n # ReLU non-linearity\n output = tf.nn.relu(output, name=scope.name)\n\n return output\n\n @layer\n def relu(self, input_data, name):\n return tf.nn.relu(input_data, name=name)\n\n @layer\n def max_pool(self, input_data, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):\n self.validate_padding(padding)\n return tf.nn.max_pool(input_data,\n ksize=[1, k_h, k_w, 1],\n strides=[1, s_h, s_w, 1],\n padding=padding,\n name=name)\n\n @layer\n def avg_pool(self, input_data, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):\n self.validate_padding(padding)\n return tf.nn.avg_pool(input_data,\n ksize=[1, k_h, k_w, 1],\n strides=[1, s_h, s_w, 1],\n padding=padding,\n name=name)\n\n @layer\n def lrn(self, input_data, radius, alpha, beta, name, bias=1.0):\n return tf.nn.local_response_normalization(input_data,\n depth_radius=radius,\n alpha=alpha,\n beta=beta,\n bias=bias,\n name=name)\n\n @layer\n def concat(self, inputs, axis, name):\n return tf.concat(concat_dim=axis, values=inputs, name=name)\n\n @layer\n def add(self, inputs, name):\n return tf.add_n(inputs, name=name)\n\n @layer\n def fc(self, input_data, num_out, name, relu=True):\n with tf.variable_scope(name) as scope:\n input_shape = input_data.get_shape()\n if input_shape.ndims == 4:\n # The input is spatial. 
Vectorize it first.\n dim = 1\n for d in input_shape[1:].as_list():\n dim *= d\n feed_in = tf.reshape(input_data, [-1, dim])\n else:\n feed_in, dim = (input_data, input_shape[-1].value)\n weights = self.make_var('weights', shape=[dim, num_out])\n biases = self.make_var('biases', [num_out])\n op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b\n fc = op(feed_in, weights, biases, name=scope.name)\n return fc\n\n @layer\n def softmax(self, input_data, name):\n input_shape = map(lambda v: v.value, input_data.get_shape())\n if len(input_shape) > 2:\n # For certain models (like NiN), the singleton spatial dimensions\n # need to be explicitly squeezed, since they're not broadcast-able\n # in TensorFlow's NHWC ordering (unlike Caffe's NCHW).\n if input_shape[1] == 1 and input_shape[2] == 1:\n input_data = tf.squeeze(input_data, squeeze_dims=[1, 2])\n else:\n raise ValueError('Rank 2 tensor input expected for softmax!')\n return tf.nn.softmax(input_data, name)\n\n @layer\n def batch_normalization(self, input_data, name, scale_offset=True, relu=False):\n\n with tf.variable_scope(name) as scope:\n shape = [input_data.get_shape()[-1]]\n pop_mean = tf.get_variable(\"mean\", shape, initializer = tf.constant_initializer(0.0), trainable=False)\n pop_var = tf.get_variable(\"variance\", shape, initializer = tf.constant_initializer(1.0), trainable=False)\n epsilon = 1e-4\n decay = 0.999\n if scale_offset:\n scale = tf.get_variable(\"scale\", shape, initializer = tf.constant_initializer(1.0))\n offset = tf.get_variable(\"offset\", shape, initializer = tf.constant_initializer(0.0))\n else:\n scale, offset = (None, None)\n if self.is_training:\n batch_mean, batch_var = tf.nn.moments(input_data, [0, 1, 2])\n\n train_mean = tf.assign(pop_mean,\n pop_mean * decay + batch_mean * (1 - decay))\n train_var = tf.assign(pop_var,\n pop_var * decay + batch_var * (1 - decay))\n with tf.control_dependencies([train_mean, train_var]):\n output = tf.nn.batch_normalization(input_data,\n batch_mean, batch_var, offset, scale, epsilon, name = name)\n else:\n output = tf.nn.batch_normalization(input_data,\n pop_mean, pop_var, offset, scale, epsilon, name = name)\n\n if relu:\n output = tf.nn.relu(output)\n\n return output\n\n @layer\n def dropout(self, input_data, keep_prob, name):\n return tf.nn.dropout(input_data, keep_prob, name=name)\n \n\n def unpool_as_conv(self, size, input_data, id, stride = 1, ReLU = False, BN = True):\n\n\t\t# Model upconvolutions (unpooling + convolution) as interleaving feature\n\t\t# maps of four convolutions (A,B,C,D). Building block for up-projections. 
\n\n\n # Convolution A (3x3)\n # --------------------------------------------------\n layerName = \"layer%s_ConvA\" % (id)\n self.feed(input_data)\n self.conv( 3, 3, size[3], stride, stride, name = layerName, padding = 'SAME', relu = False)\n outputA = self.get_output()\n\n # Convolution B (2x3)\n # --------------------------------------------------\n layerName = \"layer%s_ConvB\" % (id)\n padded_input_B = tf.pad(input_data, [[0, 0], [1, 0], [1, 1], [0, 0]], \"CONSTANT\")\n self.feed(padded_input_B)\n self.conv(2, 3, size[3], stride, stride, name = layerName, padding = 'VALID', relu = False)\n outputB = self.get_output()\n\n # Convolution C (3x2)\n # --------------------------------------------------\n layerName = \"layer%s_ConvC\" % (id)\n padded_input_C = tf.pad(input_data, [[0, 0], [1, 1], [1, 0], [0, 0]], \"CONSTANT\")\n self.feed(padded_input_C)\n self.conv(3, 2, size[3], stride, stride, name = layerName, padding = 'VALID', relu = False)\n outputC = self.get_output()\n\n # Convolution D (2x2)\n # --------------------------------------------------\n layerName = \"layer%s_ConvD\" % (id)\n padded_input_D = tf.pad(input_data, [[0, 0], [1, 0], [1, 0], [0, 0]], \"CONSTANT\")\n self.feed(padded_input_D)\n self.conv(2, 2, size[3], stride, stride, name = layerName, padding = 'VALID', relu = False)\n outputD = self.get_output()\n\n # Interleaving elements of the four feature maps\n # --------------------------------------------------\n left = interleave([outputA, outputB], axis=1) # columns\n right = interleave([outputC, outputD], axis=1) # columns\n Y = interleave([left, right], axis=2) # rows\n \n if BN:\n layerName = \"layer%s_BN\" % (id)\n self.feed(Y)\n self.batch_normalization(name = layerName, scale_offset = True, relu = False)\n Y = self.get_output()\n\n if ReLU:\n Y = tf.nn.relu(Y, name = layerName)\n \n return Y\n\n\n def up_project(self, size, id, stride = 1, BN = True):\n \n # Create residual upsampling layer (UpProjection)\n\n input_data = self.get_output()\n\n # Branch 1\n id_br1 = \"%s_br1\" % (id)\n\n # Interleaving Convs of 1st branch\n out = self.unpool_as_conv(size, input_data, id_br1, stride, ReLU=True, BN=True)\n\n # Convolution following the upProjection on the 1st branch\n layerName = \"layer%s_Conv\" % (id)\n self.feed(out)\n self.conv(size[0], size[1], size[3], stride, stride, name = layerName, relu = False)\n\n if BN:\n layerName = \"layer%s_BN\" % (id)\n self.batch_normalization(name = layerName, scale_offset=True, relu = False)\n\n # Output of 1st branch\n branch1_output = self.get_output()\n\n \n # Branch 2\n id_br2 = \"%s_br2\" % (id)\n # Interleaving convolutions and output of 2nd branch\n branch2_output = self.unpool_as_conv(size, input_data, id_br2, stride, ReLU=False)\n\n \n # sum branches\n layerName = \"layer%s_Sum\" % (id)\n output = tf.add_n([branch1_output, branch2_output], name = layerName)\n # ReLU\n layerName = \"layer%s_ReLU\" % (id)\n output = tf.nn.relu(output, name=layerName)\n\n self.feed(output)\n return self\n"
] | [
[
"tensorflow.compat.v1.constant_initializer",
"tensorflow.compat.v1.control_dependencies",
"tensorflow.compat.v1.nn.avg_pool",
"tensorflow.compat.v1.get_variable",
"tensorflow.compat.v1.nn.max_pool",
"tensorflow.compat.v1.stack",
"tensorflow.compat.v1.nn.relu",
"tensorflow.compat.v1.nn.bias_add",
"tensorflow.compat.v1.pad",
"tensorflow.compat.v1.reshape",
"tensorflow.compat.v1.add_n",
"numpy.load",
"tensorflow.compat.v1.nn.softmax",
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.split",
"tensorflow.compat.v1.disable_v2_behavior",
"tensorflow.compat.v1.nn.conv2d",
"tensorflow.compat.v1.nn.local_response_normalization",
"tensorflow.compat.v1.nn.batch_normalization",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.squeeze",
"tensorflow.compat.v1.nn.moments",
"numpy.shape",
"tensorflow.compat.v1.assign",
"tensorflow.compat.v1.nn.dropout"
]
] |
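The up-projection block above (unpool_as_conv) replaces unpooling followed by a 5x5 convolution with four smaller convolutions (A: 3x3, B: 2x3, C: 3x2, D: 2x2) whose outputs are interleaved into a map of twice the spatial size. A PyTorch sketch of that interleaving under the hypothetical name UnpoolAsConv; the F.pad tuples mirror the tf.pad calls in the original (F.pad order is left, right, top, bottom), and the batch-norm/ReLU plumbing is omitted:

import torch
import torch.nn as nn
import torch.nn.functional as F

def interleave(tensors, dim):
    """Interleave equal-shaped NCHW tensors along `dim`."""
    stacked = torch.stack(tensors, dim=dim + 1)   # new axis right after dim
    shape = list(tensors[0].shape)
    shape[dim] *= len(tensors)
    return stacked.reshape(shape)

class UnpoolAsConv(nn.Module):
    def __init__(self, c_in, c_out):
        super().__init__()
        self.a = nn.Conv2d(c_in, c_out, (3, 3), padding=1)
        self.b = nn.Conv2d(c_in, c_out, (2, 3))
        self.c = nn.Conv2d(c_in, c_out, (3, 2))
        self.d = nn.Conv2d(c_in, c_out, (2, 2))

    def forward(self, x):
        oa = self.a(x)
        ob = self.b(F.pad(x, (1, 1, 1, 0)))
        oc = self.c(F.pad(x, (1, 0, 1, 1)))
        od = self.d(F.pad(x, (1, 0, 1, 0)))
        left = interleave([oa, ob], dim=2)        # interleave rows
        right = interleave([oc, od], dim=2)
        return interleave([left, right], dim=3)   # then columns

out = UnpoolAsConv(8, 16)(torch.randn(1, 8, 10, 12))
print(out.shape)    # torch.Size([1, 16, 20, 24])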
arjunchandra/continuous-rl | [
"8f3c655c6a4b2e9d15a6b052e5466c0a75191a08"
] | [
"code/nn.py"
] | [
"\"\"\"Some nn utilities.\"\"\"\nimport torch\nfrom abstract import ParametricFunction\n\ndef copy_buffer(net: ParametricFunction, target_net: ParametricFunction):\n \"\"\"Copy all buffers from net to target_net.\"\"\"\n with torch.no_grad():\n for target_buf, buf in zip(target_net.buffers(), net.buffers()): # type: ignore\n target_buf.copy_(buf)\n\ndef soft_update(net: ParametricFunction, target_net: ParametricFunction, tau: float):\n \"\"\"Soft update of the parameters of target_net with those of net.\n\n Precisely\n theta_targetnet <- tau * theta_targetnet + (1 - tau) * theta_net\n \"\"\"\n copy_buffer(net, target_net)\n with torch.no_grad():\n for target_param, param in zip(target_net.parameters(), net.parameters()):\n target_param.add_(1 - tau, param - target_param)\n\ndef hard_update(net: ParametricFunction, target_net: ParametricFunction):\n \"\"\"Hard update (i.e. copy) of the parameters of target_net with those of net.\"\"\"\n copy_buffer(net, target_net)\n with torch.no_grad():\n for target_param, param in zip(target_net.parameters(), net.parameters()):\n target_param.copy_(param)\n"
] | [
[
"torch.no_grad"
]
] |
sotudian/Natural-Language-Processing | [
"61ba2ac78e440683519d2121ca2b29a17277e46b"
] | [
"LSTM for language modeling/Question2_Part_1_To_2.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nTrain the language model on texts from the file pride And Prejudice. Before using it to train the language model,\nyou need to first sentence segment, then tokenize, then lower case each line of the file using Spacy. Append \nstart-of-sentence token ’<s>’ and end-of-sentence ’</s>’ token to each sentence and put each sentence in its own line.\n Use only words that appear more than once in this corpus and assign UNK tokens for the rest; you may also need to\n pad sentences that are shorter than 5. Train the language model and save the trained model. Generate 10 examples\n of text from it, starting from ’<s>’ token and ending at ’</s>’ token.\n\n\n\n\n@author: shahab Sotudian\n\"\"\"\n\nimport re\nimport pickle\nimport random\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport nltk\nfrom nltk.tokenize import sent_tokenize, word_tokenize \nfrom nltk.lm.preprocessing import pad_both_ends\nfrom collections import Counter\nimport math\n# Functions ###########================-------------------\n'''\n############################################################\n#### Piazza calculate Preplexity\nnet.cuda()\nnet.eval()\nH = 0\nTOTAL_PROBs = 1\nwith torch.no_grad():\n for Test_Sentence in Test_1_Preprocessed_Pride_Text_Perplexity:\n H += len(Test_Sentence)\n # Calculate for each sentence\n Total_prob_Sentence = 1\n for i,word in enumerate(Test_Sentence):\n if i == len(Test_Sentence)-1:\n continue\n else:\n if i==0:\n h = net.init_hidden(1)\n h = tuple([each.data for each in h])\n else:\n h = h_new\n \n x = np.array([[word2idx[word]]])\n inputs = torch.from_numpy(x)\n inputs = inputs.cuda()\n \n out, h_new = net(inputs, h)\n # get the token probabilities\n p = F.softmax(out, dim=1).data\n p = p.cpu()\n p = p.numpy()\n p = p.reshape(p.shape[1],)\n Prob_next_Word = p[word2idx[Test_Sentence[i+1]]] # P(w4|w1,w2,w3)\n Total_prob_Sentence = Prob_next_Word * Total_prob_Sentence\n \n TOTAL_PROBs = TOTAL_PROBs * Total_prob_Sentence\n\nPreplexity = (1/TOTAL_PROBs)**(1/float(H)) \n############################################################\n''' \ndef NLP_PreProcessing(text_main): \n # sentence segmenting \n sentences = nltk.sent_tokenize(text_main) \n # Tokenization + lower casing \n Tokenized_sentences = [word_tokenize(S.lower()) for S in sentences] \n # Padding \n Pad_Tokenized_sentences = [list(pad_both_ends(TS, n=2)) for TS in Tokenized_sentences]\n \n return Pad_Tokenized_sentences\n\ndef NLP_PreProcessing_Test(text_main): \n # Tokenization + lower casing \n Tokenized_sentences = word_tokenize(text_main.lower())\n # Padding \n Pad_Tokenized_sentences = [list(pad_both_ends(Tokenized_sentences, n=2))]\n \n return Pad_Tokenized_sentences \n \ndef Equal_seq(text, seq_len):\n sequences = []\n if len(text) > seq_len:\n for i in range(seq_len, (len(text)+1)):\n seq = text[i-seq_len:i]\n sequences.append(seq)\n else:\n sequences = [['_PAD']*(seq_len-len(text)) + text ]\n \n return sequences \n \n\n\n\ndef get_batches(arr_x, arr_y, batch_size):\n \n # iterate through the arrays\n prv = 0\n for n in range(batch_size, arr_x.shape[0], batch_size):\n x = arr_x[prv:n,:]\n y = arr_y[prv:n,:]\n prv = n\n yield x, y\n \n \nclass WordLSTM(nn.Module):\n \n def __init__(self, n_hidden=256, n_layers=4, drop_prob=0.3, lr=0.001):\n super().__init__()\n\n self.drop_prob = drop_prob\n self.n_layers = n_layers\n self.n_hidden = n_hidden\n self.lr = lr\n \n self.emb_layer = nn.Embedding(vocab_size, 200)\n\n 
## define the LSTM\n self.lstm = nn.LSTM(200, n_hidden, n_layers, \n dropout=drop_prob, batch_first=True)\n \n ## define a dropout layer\n self.dropout = nn.Dropout(drop_prob)\n \n ## define the fully-connected layer\n self.fc = nn.Linear(n_hidden, vocab_size) \n \n def forward(self, x, hidden):\n ''' Forward pass through the network. \n These inputs are x, and the hidden/cell state `hidden`. '''\n\n ## pass input through embedding layer\n embedded = self.emb_layer(x) \n \n ## Get the outputs and the new hidden state from the lstm\n lstm_output, hidden = self.lstm(embedded, hidden)\n \n ## pass through a dropout layer\n out = self.dropout(lstm_output)\n \n #out = out.contiguous().view(-1, self.n_hidden) \n out = out.reshape(-1, self.n_hidden) \n\n ## put \"out\" through the fully-connected layer\n out = self.fc(out)\n\n # return the final output and the hidden state\n return out, hidden\n \n \n def init_hidden(self, batch_size):\n ''' initializes hidden state '''\n # Create two new tensors with sizes n_layers x batch_size x n_hidden,\n # initialized to zero, for hidden state and cell state of LSTM\n weight = next(self.parameters()).data\n\n # if GPU is available\n if (torch.cuda.is_available()):\n hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda(),\n weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda())\n \n # if GPU is not available\n else:\n hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_(),\n weight.new(self.n_layers, batch_size, self.n_hidden).zero_())\n \n return hidden \n \ndef train(net, epochs, batch_size, lr, clip, print_every,XX,YY):\n \n # optimizer\n opt = torch.optim.Adam(net.parameters(), lr=lr)\n \n # loss\n criterion = nn.CrossEntropyLoss()\n \n # push model to GPU\n net.cuda()\n \n counter = 0\n\n net.train()\n\n for e in range(epochs):\n\n # initialize hidden state\n h = net.init_hidden(batch_size)\n \n for x, y in get_batches(XX, YY, batch_size):\n counter+= 1\n \n # convert numpy arrays to PyTorch arrays\n inputs, targets = torch.from_numpy(x), torch.from_numpy(y)\n \n # push tensors to GPU\n inputs, targets = inputs.cuda(), targets.cuda()\n\n # detach hidden states\n h = tuple([each.data for each in h])\n\n # zero accumulated gradients\n net.zero_grad()\n \n # get the output from the model\n output, h = net(inputs, h)\n \n # calculate the loss and perform backprop\n loss = criterion(output, targets.view(-1))\n\n # back-propagate error\n loss.backward()\n\n # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.\n nn.utils.clip_grad_norm_(net.parameters(), clip)\n\n # update weigths\n opt.step() \n \n if counter % print_every == 0:\n \n print(\"Epoch: {}/{}...\".format(e+1, epochs),\n \"Step: {}...\".format(counter)) \n \ndef predict(net, tkn, h=None, word2idx_Inp = None, idx2word_Inp =None ): \n # tensor inputs\n x = np.array([[word2idx_Inp[tkn]]])\n inputs = torch.from_numpy(x)\n \n # push to GPU\n inputs = inputs.cuda()\n\n # detach hidden state from history\n h = tuple([each.data for each in h])\n\n # get the output of the model\n out, h = net(inputs, h)\n\n # get the token probabilities\n p = F.softmax(out, dim=1).data\n\n p = p.cpu()\n\n p = p.numpy()\n p = p.reshape(p.shape[1],)\n\n # get indices of top 3 values\n top_n_idx = p.argsort()[-3:][::-1]\n\n # randomly select one of the three indices\n sampled_token_index = top_n_idx[random.sample([0,1,2],1)[0]]\n\n # return the encoded value of the predicted char and the hidden state\n return idx2word_Inp[sampled_token_index], 
h\n\n\n# function to generate text\ndef sample(net, size, prime=\"<s>\",word2idx_Inp = None, idx2word_Inp =None ):\n \n # push to GPU\n net.cuda()\n \n net.eval()\n\n # batch size is 1\n h = net.init_hidden(1)\n\n toks = prime.split()\n\n # predict next token\n for t in prime.split():\n token, h = predict(net, t, h,word2idx_Inp,idx2word_Inp)\n \n toks.append(token)\n\n # predict subsequent tokens\n if size == '</s>':\n while(token!='</s>'):\n token, h = predict(net, toks[-1], h,word2idx_Inp,idx2word_Inp)\n toks.append(token)\n else: \n for i in range(size-1):\n token, h = predict(net, toks[-1], h,word2idx_Inp,idx2word_Inp)\n toks.append(token)\n\n return ' '.join(toks) \n\n\n\n\n\ndef Testing(net, batch_size,Test_X,Test_Y):\n net.eval()\n criterion = nn.CrossEntropyLoss()\n # initialize hidden state\n h = net.init_hidden(batch_size)\n test_loss = 0.\n \n with torch.no_grad():\n for x, y in get_batches(Test_X, Test_Y, batch_size):\n # convert numpy arrays to PyTorch arrays\n inputs, targets = torch.from_numpy(x), torch.from_numpy(y) \n # push tensors to GPU\n inputs, targets = inputs.cuda(), targets.cuda() \n # detach hidden states\n h = tuple([each.data for each in h])\n # get the output from the model\n output, h = net(inputs, h) \n test_loss += criterion(output, targets.view(-1)).item()\n\n test_loss = test_loss / ((len(Test_X) // batch_size)+1)\n print('-' * 40)\n print('Test loss {:5.2f} ------ Test perplexity {:8.2f}'.format(test_loss, math.exp(test_loss)))\n print('-' * 40)\n\n\n \nclass WordLSTM_with_Glove(nn.Module): \n \n def __init__(self, n_hidden=256, n_layers=4, drop_prob=0.3, lr=0.001):\n super().__init__()\n\n self.drop_prob = drop_prob\n self.n_layers = n_layers\n self.n_hidden = n_hidden\n self.lr = lr\n \n self.emb_layer = nn.Embedding(vocab_size_Q6,100, padding_idx=0) \n self.emb_layer.weight.data.copy_(torch.from_numpy(embedding_matrix))\n self.emb_layer.weight.requires_grad = False ## freeze embeddings \n '''\n self.emb_layer = nn.Embedding(vocab_size_Q6,100) \n self.emb_layer.weight = nn.Parameter(torch.from_numpy(embedding_matrix).float())\n '''\n ## define the LSTM\n self.lstm = nn.LSTM(100, n_hidden, n_layers, \n dropout=drop_prob, batch_first=True)\n \n ## define a dropout layer\n self.dropout = nn.Dropout(drop_prob)\n \n ## define the fully-connected layer\n self.fc = nn.Linear(n_hidden, vocab_size_Q6) \n \n def forward(self, x, hidden):\n ''' Forward pass through the network. \n These inputs are x, and the hidden/cell state `hidden`. 
'''\n\n ## pass input through embedding layer\n embedded = self.emb_layer(x) \n \n ## Get the outputs and the new hidden state from the lstm\n lstm_output, hidden = self.lstm(embedded, hidden)\n \n ## pass through a dropout layer\n out = self.dropout(lstm_output)\n \n #out = out.contiguous().view(-1, self.n_hidden) \n out = out.reshape(-1, self.n_hidden) \n\n ## put \"out\" through the fully-connected layer\n out = self.fc(out)\n\n # return the final output and the hidden state\n return out, hidden\n \n \n def init_hidden(self, batch_size):\n ''' initializes hidden state '''\n # Create two new tensors with sizes n_layers x batch_size x n_hidden,\n # initialized to zero, for hidden state and cell state of LSTM\n weight = next(self.parameters()).data\n\n # if GPU is available\n if (torch.cuda.is_available()):\n hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda(),\n weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda())\n \n # if GPU is not available\n else:\n hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_(),\n weight.new(self.n_layers, batch_size, self.n_hidden).zero_())\n \n return hidden \n \n \n \n\n\n# Data ###########================-------------------\nwith open('prideAndPrejudice.txt') as f:\n Pride_Text = [line.rstrip() for line in f]\n\n\n\n# Q2.1 ###########================-------------------\n\n# sentence segmenting + lower casing + Tokenization + Padding using function NLP_PreProcessing\nPreprocessed_Pride_Text = []\nfor t in range(len(Pride_Text)):\n Preprocessed_Pride_Text = Preprocessed_Pride_Text + NLP_PreProcessing(Pride_Text[t]) \n\nLength_of_Sequences = 5\n\nPride_Text_Equal_seqs_L5 = sum([Equal_seq(i,Length_of_Sequences) for i in Preprocessed_Pride_Text], [])\n\ndel t,f\n\n# Create Vocab\nwords = Counter() \nfor i, sentence in enumerate(Preprocessed_Pride_Text):\n for word in sentence: \n words.update([word]) \nwords = {k:v for k,v in words.items() if v>1} # Removing the words that only appear once\ndel i,sentence,word\nwords = sorted(words, key=words.get, reverse=True) # Sorting the words\nwords = ['_PAD','_UNK'] + words\nword2idx = {o:i for i,o in enumerate(words)}\nidx2word = {i:o for i,o in enumerate(words)}\n# Looking up the mapping dictionary and assigning the index to the respective words\nPride_Text_Equal_seqs_INDICES_L5 =[]\nfor i, sentence in enumerate(Pride_Text_Equal_seqs_L5):\n Pride_Text_Equal_seqs_INDICES_L5.append([word2idx[word] if word in word2idx else word2idx['_UNK'] for word in sentence])\ndel i, sentence\n\n\nX = []\nY = []\nfor S in Pride_Text_Equal_seqs_INDICES_L5:\n X.append(S[:-1])\n Y.append(S[1:])\n\nx_int_L5 = np.array(X)\ny_int_L5 = np.array(Y)\n\n\nvocab_size = len(word2idx)\n\n\n# Train Or Load LSTM\nDo_want_To_Train = 0\nbatch_size = 320\nepochs=20\nlr=0.001\nif Do_want_To_Train == 1:\n net1 = WordLSTM() # instantiate the model\n net1.cuda() # push the model to GPU\n train(net1, epochs, batch_size, lr, 1, 50,x_int_L5,y_int_L5) # train the model\n torch.save(net1, 'Q2_Part_1_Network.pt')\nelse:\n net1 = torch.load('Q2_Part_1_Network.pt')\n net1.eval()\n \n\n\n\nprint(net1)\n\n# Generate text\nfor i in range(10):\n print('=======================================')\n print(\"- Example \"+str(i+1)+\": \",sample(net1, size='</s>' , prime=\"<s>\", word2idx_Inp = word2idx, idx2word_Inp =idx2word ),'\\n')\n\n\n\ndel X,Y,i,S,Do_want_To_Train\n\n\n\nprint('=' * 60)\n\n\n\n\n# Q2.2 ###########================-------------------\n\nwith open('test_1.txt') as f:\n test_1 = [line.rstrip() for 
line in f]\n\n# sentence segmenting + lower casing + Tokenization + Padding using function NLP_PreProcessing_Test\nTest_1_Preprocessed_Pride_Text = []\nfor t in range(len(test_1)):\n Test_1_Preprocessed_Pride_Text = Test_1_Preprocessed_Pride_Text + NLP_PreProcessing_Test((test_1[t])[4:-5]) \n\nTest_1_Pride_Text_Equal_seqs = sum([Equal_seq(i,Length_of_Sequences) for i in Test_1_Preprocessed_Pride_Text], []) \n\ndel t,f\n# Looking up the mapping dictionary and assigning the index to the respective words\nTest_1_Pride_Text_Equal_seqs_INDICES =[]\nfor i, sentence in enumerate(Test_1_Pride_Text_Equal_seqs):\n Test_1_Pride_Text_Equal_seqs_INDICES.append([word2idx[word] if word in word2idx else word2idx['_UNK'] for word in sentence])\ndel i, sentence\n\n\nTest_1_X = []\nTest_1_Y = []\nfor S in Test_1_Pride_Text_Equal_seqs_INDICES:\n Test_1_X.append(S[:-1])\n Test_1_Y.append(S[1:])\n\nTest_1_x_int = np.array(Test_1_X)\nTest_1_y_int = np.array(Test_1_Y)\n\ndel Test_1_X,Test_1_Y,S\n# Calculate Perplexity\nTesting(net1, batch_size ,Test_1_x_int,Test_1_y_int) \n\n\ndel Pride_Text,Length_of_Sequences\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
] | [
[
"torch.nn.LSTM",
"torch.nn.Linear",
"torch.load",
"torch.nn.functional.softmax",
"torch.save",
"torch.no_grad",
"torch.nn.Embedding",
"torch.nn.CrossEntropyLoss",
"torch.from_numpy",
"torch.cuda.is_available",
"numpy.array",
"torch.nn.Dropout"
]
] |
ChenRocks/Distill-BERT-Textgen | [
"a3b0b22ce16febc4d3ffdbd8791ea3374110a892"
] | [
"dump_teacher_hiddens.py"
] | [
"\"\"\"\nCopyright (c) Microsoft Corporation.\nLicensed under the MIT license.\n\nprecompute hidden states of CMLM teacher to speedup KD training\n\"\"\"\nimport argparse\nimport io\nimport os\nimport shelve\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom tqdm import tqdm\nfrom pytorch_pretrained_bert import BertTokenizer\nfrom toolz.sandbox import unzip\n\nfrom cmlm.model import BertForSeq2seq\nfrom cmlm.data import convert_token_to_bert, CLS, SEP, MASK\n\n\ndef tensor_dumps(tensor):\n with io.BytesIO() as writer:\n np.save(writer, tensor.cpu().numpy().astype(np.float16),\n allow_pickle=False)\n dump = writer.getvalue()\n return dump\n\n\ndef gather_hiddens(hiddens, masks):\n outputs = []\n for hid, mask in zip(hiddens.split(1, dim=1), masks.split(1, dim=1)):\n if mask.sum().item() == 0:\n continue\n mask = mask.unsqueeze(-1).expand_as(hid)\n outputs.append(hid.masked_select(mask))\n output = torch.stack(outputs, dim=0)\n return output\n\n\nclass BertSampleDataset(Dataset):\n def __init__(self, corpus_path, tokenizer, num_samples=7):\n self.db = shelve.open(corpus_path, 'r')\n self.ids = []\n for i, ex in self.db.items():\n if len(ex['src']) + len(ex['tgt']) + 3 <= 512:\n self.ids.append(i)\n self.toker = tokenizer\n self.num_samples = num_samples\n\n def __len__(self):\n return len(self.ids)\n\n def __getitem__(self, i):\n id_ = self.ids[i]\n example = self.db[id_]\n features = convert_example(example['src'], example['tgt'],\n self.toker, self.num_samples)\n return (id_, ) + features\n\n\ndef convert_example(src, tgt, toker, num_samples):\n src = [convert_token_to_bert(tok) for tok in src]\n tgt = [convert_token_to_bert(tok) for tok in tgt] + [SEP]\n\n # build the random masks\n tgt_len = len(tgt)\n if tgt_len <= num_samples:\n masks = torch.eye(tgt_len).byte()\n num_samples = tgt_len\n else:\n mask_inds = [list(range(i, tgt_len, num_samples))\n for i in range(num_samples)]\n masks = torch.zeros(num_samples, tgt_len).byte()\n for i, indices in enumerate(mask_inds):\n for j in indices:\n masks.data[i, j] = 1\n assert (masks.sum(dim=0) != torch.ones(tgt_len).long()).sum().item() == 0\n assert masks.sum().item() == tgt_len\n masks = torch.cat([torch.zeros(num_samples, len(src)+2).byte(), masks],\n dim=1)\n\n # make BERT inputs\n input_ids = toker.convert_tokens_to_ids([CLS] + src + [SEP] + tgt)\n mask_id = toker.convert_tokens_to_ids([MASK])[0]\n input_ids = torch.tensor([input_ids for _ in range(num_samples)])\n input_ids.data.masked_fill_(masks, mask_id)\n token_ids = torch.tensor([[0] * (len(src) + 2) + [1] * len(tgt)\n for _ in range(num_samples)])\n return input_ids, token_ids, masks\n\n\ndef batch_features(features):\n ids, all_input_ids, all_token_ids, all_masks = map(list, unzip(features))\n batch_size = sum(input_ids.size(0) for input_ids in all_input_ids)\n max_len = max(input_ids.size(1) for input_ids in all_input_ids)\n input_ids = torch.zeros(batch_size, max_len).long()\n token_ids = torch.zeros(batch_size, max_len).long()\n attn_mask = torch.zeros(batch_size, max_len).long()\n i = 0\n for inp, tok in zip(all_input_ids, all_token_ids):\n block, len_ = inp.size()\n input_ids.data[i: i+block, :len_] = inp.data\n token_ids.data[i: i+block, :len_] = tok.data\n attn_mask.data[i: i+block, :len_].fill_(1)\n i += block\n return ids, input_ids, token_ids, attn_mask, all_masks\n\n\ndef process_batch(batch, bert, toker, num_samples=7):\n input_ids, token_ids, attn_mask, all_masks = batch\n input_ids = input_ids.cuda()\n token_ids = 
token_ids.cuda()\n attn_mask = attn_mask.cuda()\n hiddens, _ = bert.bert(input_ids, token_ids, attn_mask,\n output_all_encoded_layers=False)\n hiddens = bert.cls.predictions.transform(hiddens)\n i = 0\n outputs = []\n for masks in all_masks:\n block, len_ = masks.size()\n hids = hiddens[i:i+block, :len_, :]\n masks = masks.cuda()\n outputs.append(gather_hiddens(hids, masks))\n i += block\n return outputs\n\n\ndef build_db_batched(corpus, out_db, bert, toker, batch_size=8):\n dataset = BertSampleDataset(corpus, toker)\n loader = DataLoader(dataset, batch_size=batch_size,\n num_workers=4, collate_fn=batch_features)\n with tqdm(desc='computing BERT features', total=len(dataset)) as pbar:\n for ids, *batch in loader:\n outputs = process_batch(batch, bert, toker)\n for id_, output in zip(ids, outputs):\n out_db[id_] = tensor_dumps(output)\n pbar.update(len(ids))\n\n\ndef main(opts):\n # load BERT\n state_dict = torch.load(opts.ckpt)\n vsize = state_dict['cls.predictions.decoder.weight'].size(0)\n bert = BertForSeq2seq.from_pretrained(opts.bert).eval().half().cuda()\n bert.update_output_layer_by_size(vsize)\n bert.load_state_dict(state_dict)\n toker = BertTokenizer.from_pretrained(opts.bert,\n do_lower_case='uncased' in opts.bert)\n\n # save the final projection layer\n linear = torch.nn.Linear(bert.config.hidden_size, bert.config.vocab_size)\n linear.weight.data = state_dict['cls.predictions.decoder.weight']\n linear.bias.data = state_dict['cls.predictions.bias']\n os.makedirs(opts.output)\n torch.save(linear, f'{opts.output}/linear.pt')\n\n # create DB\n with shelve.open(f'{opts.output}/db') as out_db, \\\n torch.no_grad():\n build_db_batched(opts.db, out_db, bert, toker)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--bert', required=True,\n choices=['bert-base-uncased',\n 'bert-base-multilingual-cased'],\n help='BERT model')\n parser.add_argument('--ckpt', required=True, help='BERT checkpoint')\n parser.add_argument('--db', required=True, help='dataset to compute')\n parser.add_argument('--output', required=True, help='path to dump output')\n args = parser.parse_args()\n\n main(args)\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.ones",
"torch.stack",
"torch.nn.Linear",
"torch.load",
"torch.save",
"torch.no_grad",
"torch.zeros",
"torch.eye"
]
] |
mikimaus78/ml_monorepo | [
"b2c2627ff0e86e27f6829170d0dac168d8e5783b",
"b2c2627ff0e86e27f6829170d0dac168d8e5783b"
] | [
"trading-with-python/util/trendy.py",
"BiBloSA/exp_SQuAD_sim/src/nn_utils/baselines/block_attention.py"
] | [
"import numpy as np\nfrom filter import movingaverage\n\ndef gentrends(x, window=1/3.0, charts=True):\n \"\"\"\n Returns a Pandas dataframe with support and resistance lines.\n\n :param x: One-dimensional data set\n :param window: How long the trendlines should be. If window < 1, then it\n will be taken as a percentage of the size of the data\n :param charts: Boolean value saying whether to print chart to screen\n\n \"\"\"\n import numpy as np\n import pandas.io.data as pd\n\n x = np.array(x)\n\n if window < 1:\n window = int(window * len(x))\n\n max1 = np.where(x == max(x))[0][0] # find the index of the abs max\n min1 = np.where(x == min(x))[0][0] # find the index of the abs min\n\n # First the max\n if max1 + window > len(x):\n max2 = max(x[0:(max1 - window)])\n else:\n max2 = max(x[(max1 + window):])\n\n # Now the min\n if min1 - window < 0:\n min2 = min(x[(min1 + window):])\n else:\n min2 = min(x[0:(min1 - window)])\n\n # Now find the indices of the secondary extrema\n max2 = np.where(x == max2)[0][0] # find the index of the 2nd max\n min2 = np.where(x == min2)[0][0] # find the index of the 2nd min\n\n # Create & extend the lines\n maxslope = (x[max1] - x[max2]) / (max1 - max2) # slope between max points\n minslope = (x[min1] - x[min2]) / (min1 - min2) # slope between min points\n a_max = x[max1] - (maxslope * max1) # y-intercept for max trendline\n a_min = x[min1] - (minslope * min1) # y-intercept for min trendline\n b_max = x[max1] + (maxslope * (len(x) - max1)) # extend to last data pt\n b_min = x[min1] + (minslope * (len(x) - min1)) # extend to last data point\n maxline = np.linspace(a_max, b_max, len(x)) # Y values between max's\n minline = np.linspace(a_min, b_min, len(x)) # Y values between min's\n\n # OUTPUT\n trends = np.transpose(np.array((x, maxline, minline)))\n trends = pd.DataFrame(trends, index=np.arange(0, len(x)),\n columns=['Data', 'Max Line', 'Min Line'])\n\n if charts is True:\n from matplotlib.pyplot import plot, grid, show, figure\n figure()\n plot(trends)\n grid()\n show()\n\n return trends, maxslope, minslope\n\ndef segtrends(x, segments=2, charts=True, window=7):\n \"\"\"\n Turn minitrends to iterative process more easily adaptable to\n implementation in simple trading systems; allows backtesting functionality.\n\n :param x: One-dimensional data set\n :param window: How long the trendlines should be. 
If window < 1, then it\n will be taken as a percentage of the size of the data\n :param charts: Boolean value saying whether to print chart to screen\n \"\"\"\n\n import numpy as np\n y = np.array(x)\n n=len(y)\n movy = movingaverage(y, window)\n # Implement trendlines and Find the indexes of these maxima in the data\n segments = int(segments)\n maxima = np.ones(segments)\n minima = np.ones(segments) \n x_maxima = np.ones(segments)\n x_minima = np.ones(segments)\n segsize = int(len(y)/segments)\n for i in range(1, segments+1):\n ind2 = i*segsize\n ind1 = ind2 - segsize\n seg = y[ind1:ind2]\n maxima[i-1] = max(seg)\n minima[i-1] = min(seg)\n x_maxima[i-1] = ind1 + (np.where(seg == maxima[i-1])[0][0])\n x_minima[i-1] = ind1 + (np.where(seg == minima[i-1])[0][0])\n\n if charts:\n import matplotlib.pyplot as plt\n plt.plot(y)\n plt.grid(True)\n\n for i in range(0, segments-1):\n maxslope = (maxima[i+1] - maxima[i]) / (x_maxima[i+1] - x_maxima[i])\n a_max = maxima[i] - (maxslope * x_maxima[i])\n b_max = maxima[i] + (maxslope * (len(y) - x_maxima[i]))\n maxline = np.linspace(a_max, b_max, len(y))\n\n minslope = (minima[i+1] - minima[i]) / (x_minima[i+1] - x_minima[i])\n a_min = minima[i] - (minslope * x_minima[i])\n b_min = minima[i] + (minslope * (len(y) - x_minima[i]))\n minline = np.linspace(a_min, b_min, len(y))\n\n if charts:\n #plt.plot(maxline, 'g')\n #plt.plot(minline, 'r')\n pass\n\n if charts:\n plt.plot(range(n), movy, 'b')\n plt.plot(x_maxima, maxima, 'g')\n plt.plot(x_minima, minima, 'r')\n plt.show()\n\n # OUTPUT\n return x_maxima, maxima, x_minima, minima\n\ndef minitrends(x, window=20, charts=True):\n \"\"\"\n Turn minitrends to iterative process more easily adaptable to\n implementation in simple trading systems; allows backtesting functionality.\n\n :param x: One-dimensional data set\n :param window: How long the trendlines should be. 
If window < 1, then it\n will be taken as a percentage of the size of the data\n :param charts: Boolean value saying whether to print chart to screen\n \"\"\"\n\n import numpy as np\n\n y = np.array(x)\n if window < 1: # if window is given as fraction of data length\n window = float(window)\n window = int(window * len(y))\n x = np.arange(0, len(y))\n dy = y[window:] - y[:-window]\n crit = dy[:-1] * dy[1:] < 0\n\n # Find whether max's or min's\n maxi = (y[x[crit]] - y[x[crit] + window] > 0) & \\\n (y[x[crit]] - y[x[crit] - window] > 0) * 1\n mini = (y[x[crit]] - y[x[crit] + window] < 0) & \\\n (y[x[crit]] - y[x[crit] - window] < 0) * 1\n maxi = maxi.astype(float)\n mini = mini.astype(float)\n maxi[maxi == 0] = np.nan\n mini[mini == 0] = np.nan\n xmax = x[crit] * maxi\n xmax = xmax[~np.isnan(xmax)]\n xmax = xmax.astype(int)\n xmin = x[crit] * mini\n xmin = xmin[~np.isnan(xmin)]\n xmin = xmin.astype(int)\n\n # See if better max or min in region\n yMax = np.array([])\n xMax = np.array([])\n for i in xmax:\n indx = np.where(xmax == i)[0][0] + 1\n try:\n Y = y[i:xmax[indx]]\n yMax = np.append(yMax, Y.max())\n xMax = np.append(xMax, np.where(y == yMax[-1])[0][0])\n except:\n pass\n yMin = np.array([])\n xMin = np.array([])\n for i in xmin:\n indx = np.where(xmin == i)[0][0] + 1\n try:\n Y = y[i:xmin[indx]]\n yMin = np.append(yMin, Y.min())\n xMin = np.append(xMin, np.where(y == yMin[-1])[0][0])\n except:\n pass\n if y[-1] > yMax[-1]:\n yMax = np.append(yMax, y[-1])\n xMax = np.append(xMax, x[-1])\n if y[0] not in yMax:\n yMax = np.insert(yMax, 0, y[0])\n xMax = np.insert(xMax, 0, x[0])\n if y[-1] < yMin[-1]:\n yMin = np.append(yMin, y[-1])\n xMin = np.append(xMin, x[-1])\n if y[0] not in yMin:\n yMin = np.insert(yMin, 0, y[0])\n xMin = np.insert(xMin, 0, x[0])\n\n # Plot results if desired\n if charts is True:\n from matplotlib.pyplot import plot, show, grid\n plot(x, y)\n plot(xMax, yMax, '-o')\n plot(xMin, yMin, '-o')\n grid(True)\n show()\n # Return arrays of critical points\n return xMax, yMax, xMin, yMin\n\ndef iterlines(x, window=30, charts=True):\n \"\"\"\n Turn minitrends to iterative process more easily adaptable to\n implementation in simple trading systems; allows backtesting functionality.\n\n :param x: One-dimensional data set\n :param window: How long the trendlines should be. If window < 1, then it\n will be taken as a percentage of the size of the data\n :param charts: Boolean value saying whether to print chart to screen\n \"\"\"\n\n import numpy as np\n\n x = np.array(x)\n n = len(x)\n if window < 1:\n window = int(window * n)\n sigs = np.zeros(n, dtype=float)\n\n i = window\n while i != n:\n if x[i] > max(x[i-window:i]): sigs[i] = 1\n elif x[i] < min(x[i-window:i]): sigs[i] = -1\n i += 1\n\n xmin = np.where(sigs == -1.0)[0]\n xmax = np.where(sigs == 1.0)[0]\n ymin = x[xmin]\n ymax = x[xmax]\n if charts is True:\n from matplotlib.pyplot import plot, grid, show\n plot(x)\n plot(xmin, ymin, 'ro')\n plot(xmax, ymax, 'go')\n grid(True)\n show()\n\n return sigs\n",
"import tensorflow as tf\n\nfrom src.nn_utils.general import exp_mask_for_high_rank, mask_for_high_rank\nfrom src.nn_utils.integration_func import directional_attention_with_dense\nfrom src.nn_utils.nn import bn_dense_layer, linear\n\n\ndef bi_directional_simple_block_attention(\n rep_tensor, rep_mask, block_len=5, scope=None,\n keep_prob=1., is_train=None, wd=0., activation='elu', hn=None):\n with tf.variable_scope(scope or 'bi_directional_simple_block_attn'):\n\n fw_attn_res = simple_block_attention(\n rep_tensor, rep_mask, block_len, \"forward_attn\", \"forward\",\n keep_prob, is_train, wd, activation, hn)\n bw_attn_res = simple_block_attention(\n rep_tensor, rep_mask, block_len, \"backward_attn\", \"backward\",\n keep_prob, is_train, wd, activation, hn)\n attn_res = tf.concat([fw_attn_res, bw_attn_res], -1)\n return attn_res\n\n\ndef simple_block_attention(\n rep_tensor, rep_mask, block_len=5, scope=None, direction=None,\n keep_prob=1., is_train=None, wd=0., activation='elu', hn=None):\n assert direction is not None\n\n def scaled_tanh(x, scale=5.):\n return scale * tf.nn.tanh(1. / scale * x)\n\n bs, sl, vec = tf.shape(rep_tensor)[0], tf.shape(rep_tensor)[1], tf.shape(rep_tensor)[2]\n ivec = hn or rep_tensor.get_shape().as_list()[2]\n input_dim = rep_tensor.get_shape().as_list()[2]\n with tf.variable_scope(scope or 'block_simple'):\n # @1. split sequence\n with tf.variable_scope('split_seq'):\n block_num = tf.cast(tf.ceil(tf.divide(tf.cast(sl, tf.float32), tf.cast(block_len, tf.float32))), tf.int32)\n comp_len = block_num * block_len - sl\n\n rep_tensor_comp = tf.concat([rep_tensor, tf.zeros([bs, comp_len, input_dim], tf.float32)], 1)\n rep_mask_comp = tf.concat([rep_mask, tf.cast(tf.zeros([bs, comp_len], tf.int32), tf.bool)], 1)\n\n rep_tensor_split = tf.reshape(rep_tensor_comp, [bs, block_num, block_len, input_dim]) # bs,bn,bl,d\n rep_mask_split = tf.reshape(rep_mask_comp, [bs, block_num, block_len]) # bs,bn,bl\n\n # non-linear\n rep_map = bn_dense_layer(rep_tensor_split, ivec, True, 0., 'bn_dense_map', activation,\n False, wd, keep_prob, is_train) # bs,bn,bl,vec\n rep_map_tile = tf.tile(tf.expand_dims(rep_map, 2), [1, 1, block_len, 1, 1]) # bs,bn,bl,bl,vec\n # rep_map_dp = dropout(rep_map, keep_prob, is_train)\n bn = block_num\n bl = block_len\n\n with tf.variable_scope('self_attention'):\n # @2.self-attention in block\n # mask generation\n sl_indices = tf.range(block_len, dtype=tf.int32)\n sl_col, sl_row = tf.meshgrid(sl_indices, sl_indices)\n if direction == 'forward':\n direct_mask = tf.greater(sl_row, sl_col) # bl,bl\n else:\n direct_mask = tf.greater(sl_col, sl_row) # bl,bl\n direct_mask_tile = tf.tile(\n tf.expand_dims(tf.expand_dims(direct_mask, 0), 0), [bs, bn, 1, 1]) # bs,bn,bl,bl\n rep_mask_tile_1 = tf.tile(tf.expand_dims(rep_mask_split, 2), [1, 1, bl, 1]) # bs,bn,bl,bl\n rep_mask_tile_2 = tf.tile(tf.expand_dims(rep_mask_split, 3), [1, 1, 1, bl]) # bs,bn,bl,bl\n rep_mask_tile = tf.logical_and(rep_mask_tile_1, rep_mask_tile_2)\n attn_mask = tf.logical_and(direct_mask_tile, rep_mask_tile, name='attn_mask') # bs,bn,bl,bl\n\n # attention\n f_bias = tf.get_variable('f_bias', [ivec], tf.float32, tf.constant_initializer(0.))\n dependent_head = linear(\n rep_map, 2 * ivec, False, 0., 'linear_dependent_head', False, wd, keep_prob, is_train) # bs,bn,bl,2vec\n dependent, head = tf.split(dependent_head, 2, 3)\n dependent_etd = tf.expand_dims(dependent, 2) # bs,bn,1,bl,vec\n head_etd = tf.expand_dims(head, 3) # bs,bn,bl,1,vec\n logits = scaled_tanh(dependent_etd + head_etd + f_bias, 
5.0) # bs,bn,bl,bl,vec\n logits_masked = exp_mask_for_high_rank(logits, attn_mask)\n attn_score = tf.nn.softmax(logits_masked, 3) # bs,bn,bl,bl,vec\n attn_score = mask_for_high_rank(attn_score, attn_mask) # bs,bn,bl,bl,vec\n self_attn_result = tf.reduce_sum(attn_score * rep_map_tile, 3) # bs,bn,bl,vec\n\n with tf.variable_scope('source2token_self_attn'):\n inter_block_logits = bn_dense_layer(self_attn_result, ivec, True, 0., 'bn_dense_map', 'linear',\n False, wd, keep_prob, is_train) # bs,bn,bl,vec\n inter_block_logits_masked = exp_mask_for_high_rank(inter_block_logits, rep_mask_split) # bs,bn,bl,vec\n inter_block_soft = tf.nn.softmax(inter_block_logits_masked, 2) # bs,bn,bl,vec\n inter_block_attn_output = tf.reduce_sum(self_attn_result * inter_block_soft, 2) # bs,bn,vec\n\n with tf.variable_scope('self_attn_inter_block'):\n inter_block_attn_output_mask = tf.cast(tf.ones([bs, bn], tf.int32), tf.bool)\n block_ct_res = directional_attention_with_dense(\n inter_block_attn_output, inter_block_attn_output_mask, direction, 'disa',\n keep_prob, is_train, wd, activation\n ) # [bs,bn,vec]\n\n block_ct_res_tile = tf.tile(tf.expand_dims(block_ct_res, 2), [1, 1, bl, 1])#[bs,bn,vec]->[bs,bn,bl,vec]\n\n with tf.variable_scope('combination'):\n # input:1.rep_map[bs,bn,bl,vec]; 2.self_attn_result[bs,bn,bl,vec]; 3.rnn_res_tile[bs,bn,bl,vec]\n rep_tensor_with_ct = tf.concat([rep_map, self_attn_result, block_ct_res_tile], -1) # [bs,bn,bl,3vec]\n new_context_and_gate = linear(rep_tensor_with_ct, 2 * ivec, True, 0., 'linear_new_context_and_gate',\n False, wd, keep_prob, is_train) # [bs,bn,bl,2vec]\n new_context, gate = tf.split(new_context_and_gate, 2, 3) # bs,bn,bl,vec\n if activation == \"relu\":\n new_context_act = tf.nn.relu(new_context)\n elif activation == \"elu\":\n new_context_act = tf.nn.elu(new_context)\n elif activation == \"linear\":\n new_context_act = tf.identity(new_context)\n else:\n raise RuntimeError\n gate_sig = tf.nn.sigmoid(gate)\n combination_res = gate_sig * new_context_act + (1 - gate_sig) * rep_map # bs,bn,bl,vec\n\n with tf.variable_scope('restore_original_length'):\n combination_res_reshape = tf.reshape(combination_res, [bs, bn * bl, ivec]) # bs,bn*bl,vec\n output = combination_res_reshape[:, :sl, :]\n return output"
] | [
[
"numpy.ones",
"numpy.append",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.grid",
"numpy.insert",
"matplotlib.pyplot.show",
"numpy.isnan",
"numpy.array",
"matplotlib.pyplot.plot",
"numpy.where"
],
[
"tensorflow.nn.tanh",
"tensorflow.reshape",
"tensorflow.ones",
"tensorflow.variable_scope",
"tensorflow.logical_and",
"tensorflow.concat",
"tensorflow.identity",
"tensorflow.nn.softmax",
"tensorflow.reduce_sum",
"tensorflow.split",
"tensorflow.greater",
"tensorflow.meshgrid",
"tensorflow.constant_initializer",
"tensorflow.shape",
"tensorflow.expand_dims",
"tensorflow.nn.elu",
"tensorflow.cast",
"tensorflow.nn.sigmoid",
"tensorflow.zeros",
"tensorflow.range",
"tensorflow.nn.relu"
]
] |
jbushago/GamestonkTerminal | [
"73a2b419664bf62bbdc59aa8402c8cd6a913a518"
] | [
"gamestonk_terminal/stocks/insider/openinsider_view.py"
] | [
"import itertools\nimport logging\nimport os\nimport textwrap\nfrom typing import List\n\nimport numpy as np\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom gamestonk_terminal.decorators import log_start_end\nfrom gamestonk_terminal.helper_funcs import (\n export_data,\n patch_pandas_text_adjustment,\n print_rich_table,\n)\nfrom gamestonk_terminal.rich_config import console\nfrom gamestonk_terminal.stocks.insider.openinsider_model import (\n get_open_insider_data,\n get_open_insider_link,\n)\nfrom gamestonk_terminal import rich_config\n\nlogger = logging.getLogger(__name__)\n\nd_open_insider = {\n \"lcb\": \"latest-cluster-buys\",\n \"lpsb\": \"latest-penny-stock-buys\",\n \"lit\": \"latest-insider-trading\",\n \"lip\": \"insider-purchases\",\n \"blip\": \"latest-insider-purchases-25k\",\n \"blop\": \"latest-officer-purchases-25k\",\n \"blcp\": \"latest-ceo-cfo-purchases-25k\",\n \"lis\": \"insider-sales\",\n \"blis\": \"latest-insider-sales-100k\",\n \"blos\": \"latest-officer-sales-100k\",\n \"blcs\": \"latest-ceo-cfo-sales-100k\",\n \"topt\": \"top-officer-purchases-of-the-day\",\n \"toppw\": \"top-officer-purchases-of-the-week\",\n \"toppm\": \"top-officer-purchases-of-the-month\",\n \"tipt\": \"top-insider-purchases-of-the-day\",\n \"tippw\": \"top-insider-purchases-of-the-week\",\n \"tippm\": \"top-insider-purchases-of-the-month\",\n \"tist\": \"top-insider-sales-of-the-day\",\n \"tispw\": \"top-insider-sales-of-the-week\",\n \"tispm\": \"top-insider-sales-of-the-month\",\n}\n\nd_notes = {\n \"A\": \"A: Amended filing\",\n \"D\": \"D: Derivative transaction in filing (usually option exercise)\",\n \"E\": \"E: Error detected in filing\",\n \"M\": \"M: Multiple transactions in filing; earliest reported transaction date & weighted average transaction price\",\n}\n\nd_trade_types = {\n \"S - Sale\": \"[red]S - Sale: Sale of securities on an exchange or to another person[/red]\",\n \"S - Sale+OE\": \"[yellow]S - Sale+OE: Sale of securities \"\n \"on an exchange or to another person (after option exercise)[/yellow]\",\n \"F - Tax\": \"[magenta]F - Tax: Payment of exercise price or \"\n \"tax liability using portion of securities received from the company[/magenta]\",\n \"P - Purchase\": \"[green]P - Purchase: Purchase of securities on \"\n \"an exchange or from another person[/green]\",\n}\n\n\ndef lambda_red_highlight(values) -> List[str]:\n \"\"\"Red highlight\n\n Parameters\n ----------\n values : List[str]\n dataframe values to color\n\n Returns\n ----------\n List[str]\n colored dataframes values\n \"\"\"\n return [f\"[red]{val}[/red]\" for val in values]\n\n\ndef lambda_yellow_highlight(values) -> List[str]:\n \"\"\"Yellow highlight\n\n Parameters\n ----------\n values : List[str]\n dataframe values to color\n\n Returns\n ----------\n List[str]\n colored dataframes values\n \"\"\"\n return [f\"[yellow]{val}[/yellow]\" for val in values]\n\n\ndef lambda_magenta_highlight(values):\n \"\"\"Magenta highlight\n\n Parameters\n ----------\n values : List[str]\n dataframe values to color\n\n Returns\n ----------\n List[str]\n colored dataframes values\n \"\"\"\n return [f\"[magenta]{val}[/magenta]\" for val in values]\n\n\ndef lambda_green_highlight(values):\n \"\"\"Green highlight\n\n Parameters\n ----------\n values : List[str]\n dataframe values to color\n\n Returns\n ----------\n List[str]\n colored dataframes values\n \"\"\"\n return [f\"[green]{val}[/green]\" for val in values]\n\n\n@log_start_end(log=logger)\ndef print_insider_data(type_insider: str, 
limit: int = 10, export: str = \"\"):\n \"\"\"Print insider data\n\n Parameters\n ----------\n type_insider: str\n Insider type of data\n limit: int\n Limit of data rows to display\n export: str\n Export data format\n \"\"\"\n response = requests.get(f\"http://openinsider.com/{d_open_insider[type_insider]}\")\n soup = BeautifulSoup(response.text, \"html.parser\")\n table = soup.find(\"table\", {\"class\": \"tinytable\"})\n\n if not table:\n console.print(\"No insider information found\", \"\\n\")\n return\n\n table_rows = table.find_all(\"tr\")\n\n res = []\n for tr in table_rows:\n td = tr.find_all(\"td\")\n row = [tr.text.strip() for tr in td if tr.text.strip()]\n res.append(row)\n\n df = pd.DataFrame(res).dropna().head(n=limit)\n columns = [\n \"X\",\n \"Filing Date\",\n \"Trade Date\",\n \"Ticker\",\n \"Company Name\",\n \"Industry\" if type_insider == \"lcb\" else \"Insider Name\",\n \"Title\",\n \"Trade Type\",\n \"Price\",\n \"Qty\",\n \"Owned\",\n \"Diff Own\",\n \"Value\",\n ]\n\n if df.shape[1] == 13:\n df.columns = columns\n else:\n df.columns = columns[1:]\n\n df[\"Filing Date\"] = df[\"Filing Date\"].apply(\n lambda x: \"\\n\".join(textwrap.wrap(x, width=10)) if isinstance(x, str) else x\n )\n df[\"Company Name\"] = df[\"Company Name\"].apply(\n lambda x: \"\\n\".join(textwrap.wrap(x, width=20)) if isinstance(x, str) else x\n )\n df[\"Title\"] = df[\"Title\"].apply(\n lambda x: \"\\n\".join(textwrap.wrap(x, width=10)) if isinstance(x, str) else x\n )\n if type_insider == \"lcb\":\n df[\"Industry\"] = df[\"Industry\"].apply(\n lambda x: \"\\n\".join(textwrap.wrap(x, width=20)) if isinstance(x, str) else x\n )\n else:\n df[\"Insider Name\"] = df[\"Insider Name\"].apply(\n lambda x: \"\\n\".join(textwrap.wrap(x, width=20)) if isinstance(x, str) else x\n )\n\n print_rich_table(\n df,\n headers=[x.title() for x in df.columns],\n show_index=False,\n title=\"Insider Data\",\n )\n\n export_data(export, os.path.dirname(os.path.abspath(__file__)), type_insider, df)\n\n if df.shape[1] == 13:\n l_chars = [list(chars) for chars in df[\"X\"].values]\n l_uchars = np.unique(list(itertools.chain(*l_chars)))\n\n for char in l_uchars:\n console.print(d_notes[char])\n console.print(\"\")\n\n\n@log_start_end(log=logger)\ndef print_insider_filter(\n preset_loaded: str,\n ticker: str,\n limit: int = 10,\n links: bool = False,\n export: str = \"\",\n):\n \"\"\"Print insider filter based on loaded preset. 
[Source: OpenInsider]\n\n Parameters\n ----------\n preset_loaded : str\n Loaded preset filter\n ticker : str\n Stock ticker\n limit : int\n Limit of rows of data to display\n links : bool\n Flag to show hyperlinks\n export : str\n Format to export data\n \"\"\"\n if ticker:\n link = f\"http://openinsider.com/screener?s={ticker}\"\n else:\n link = get_open_insider_link(preset_loaded)\n\n if not link:\n console.print(\"\")\n return\n\n df_insider = get_open_insider_data(link, has_company_name=bool(not ticker))\n df_insider_orig = df_insider.copy()\n\n if df_insider.empty:\n console.print(\"No insider data found\\n\")\n return\n\n if links:\n df_insider = df_insider[[\"Ticker Link\", \"Insider Link\", \"Filing Link\"]].head(\n limit\n )\n else:\n df_insider = df_insider.drop(\n columns=[\"Filing Link\", \"Ticker Link\", \"Insider Link\"]\n ).head(limit)\n\n if rich_config.USE_COLOR and not links:\n if not df_insider[df_insider[\"Trade Type\"] == \"S - Sale\"].empty:\n df_insider[df_insider[\"Trade Type\"] == \"S - Sale\"] = df_insider[\n df_insider[\"Trade Type\"] == \"S - Sale\"\n ].apply(lambda_red_highlight)\n if not df_insider[df_insider[\"Trade Type\"] == \"S - Sale+OE\"].empty:\n df_insider[df_insider[\"Trade Type\"] == \"S - Sale+OE\"] = df_insider[\n df_insider[\"Trade Type\"] == \"S - Sale+OE\"\n ].apply(lambda_yellow_highlight)\n if not df_insider[df_insider[\"Trade Type\"] == \"F - Tax\"].empty:\n df_insider[df_insider[\"Trade Type\"] == \"F - Tax\"] = df_insider[\n df_insider[\"Trade Type\"] == \"F - Tax\"\n ].apply(lambda_magenta_highlight)\n if not df_insider[df_insider[\"Trade Type\"] == \"P - Purchase\"].empty:\n df_insider[df_insider[\"Trade Type\"] == \"P - Purchase\"] = df_insider[\n df_insider[\"Trade Type\"] == \"P - Purchase\"\n ].apply(lambda_green_highlight)\n\n patch_pandas_text_adjustment()\n pd.set_option(\"display.max_colwidth\", 0)\n pd.set_option(\"display.max_rows\", None)\n\n # needs to be done because table is too large :(\n df_insider = df_insider.drop(columns=[\"Filing Date\", \"Trade Type\"])\n\n else:\n # needs to be done because table is too large :(\n df_insider = df_insider.drop(columns=[\"Filing Date\"])\n\n console.print(\"\")\n print_rich_table(\n df_insider,\n headers=[x.title() for x in df_insider.columns],\n title=\"Insider filtered\",\n )\n\n if export:\n if preset_loaded:\n cmd = \"filter\"\n if ticker:\n cmd = \"lis\"\n\n export_data(export, os.path.dirname(os.path.abspath(__file__)), cmd, df_insider)\n\n if not links:\n l_chars = [list(chars) for chars in df_insider_orig[\"X\"].values]\n l_uchars = np.unique(list(itertools.chain(*l_chars)))\n console.print(\"\")\n for char in l_uchars:\n console.print(d_notes[char])\n\n l_tradetype = df_insider_orig[\"Trade Type\"].values\n l_utradetype = np.unique(l_tradetype)\n console.print(\"\")\n for tradetype in l_utradetype:\n console.print(d_trade_types[tradetype])\n\n console.print(\"\")\n"
] | [
[
"pandas.DataFrame",
"numpy.unique",
"pandas.set_option"
]
] |
drkostas/COSC525-Project2 | [
"a33c786621e6047b0a586c7c3a3b5b85cb51fd6d"
] | [
"main.py"
] | [
"import traceback\nimport argparse\nimport numpy as np\nfrom src import NeuralNetwork, generateExample, getTensorExample\nfrom typing import *\n\n\ndef get_args() -> argparse.Namespace:\n \"\"\"Set-up the argument parser\n\n Returns:\n argparse.Namespace:\n \"\"\"\n parser = argparse.ArgumentParser(\n description='Project 2 for the Deep Learning class (COSC 525). '\n 'Involves the development of a Convolutional Neural Network.',\n add_help=False)\n # Required Args\n required_args = parser.add_argument_group('Required Arguments')\n required_args.add_argument('-d', '--dataset', required=True,\n help=\"The datasets to train the network on. \"\n \"Options: [example1, example2, example3]\")\n # Optional args\n optional_args = parser.add_argument_group('Optional Arguments')\n optional_args.add_argument(\"-h\", \"--help\", action=\"help\", help=\"Show this help message and exit\")\n\n return parser.parse_args()\n\n\ndef main():\n \"\"\"This is the main function of main.py\n\n Example:\n python main.py --dataset example1\n \"\"\"\n\n # Initializing\n args = get_args()\n # Load the configurations\n dataset_type = args.dataset\n if dataset_type in ('example1', 'example2', 'example3'):\n example_num = int(dataset_type[-1])\n inputs, targets, layers = generateExample(example_num)\n getTensorExample(example_num)\n else:\n raise ValueError('Invalid dataset type')\n\n # ------- Start of Code ------- #\n # # Initialize the network # #\n netWork = NeuralNetwork(input_size=inputs.shape, loss_function=\"square_error\",\n learning_rate=100, input_channels=1)\n # Add layers\n for layer in layers:\n if layer['type'] == 'Conv':\n weights = []\n for k_ind in range(layer['num_kernels']):\n kernels = [k_w.flatten() for k_w in layer['weights'][k_ind]]\n kernel_weights = np.concatenate((*kernels,\n layer['biases'][k_ind]))\n weights.append(kernel_weights)\n weights = np.array(weights)\n netWork.addConvLayer(num_kernels=layer['num_kernels'],\n kernel_size=layer['kernel_size'],\n activation=layer['activation'],\n weights=weights)\n elif layer['type'] == 'Flat':\n netWork.addFlattenLayer()\n elif layer['type'] == 'MaxPool':\n netWork.addMaxPoolLayer(kernel_size=layer['kernel_size'])\n elif layer['type'] == 'Dense':\n weights = np.array([np.concatenate((layer['weights'].flatten(), layer['bias']))])\n netWork.addFCLayer(num_neurons=targets.shape[0],\n activation=layer['activation'],\n weights=weights)\n else:\n raise ValueError(f'Invalid layer type: {layer[\"type\"]}')\n\n # # Train the network # #\n # First Feed forward\n outputs = netWork.calculate(inputs=inputs)\n print(\"----------- Custom Model -----------\")\n print(f\"model output before:\\n{outputs}\")\n\n # Calculate Loss derivative\n loss_der = netWork.loss_derivative(outputs, targets)\n loss = netWork.calculate_loss(np.array([inputs]), targets)\n netWork.train(np.array([inputs]), targets) # Train the network\n\n outputs = netWork.calculate(inputs=inputs)\n print(f\"model output after: \\n{outputs}\")\n\n if example_num == 1:\n print('1st convolutional layer, kernel weights:')\n print(netWork.layers[0].kernels[0][0][0].weights[:-1].reshape((3, 3)))\n print('1st convolutional layer, kernel bias:')\n print(np.array([netWork.layers[0].kernels[0][0][0].weights[-1]]))\n print('fully connected layer weights:')\n\n print(netWork.layers[2].neurons[0].weights[:-1])\n print('fully connected layer bias:')\n print(np.array([netWork.layers[2].neurons[0].weights[-1]]))\n elif example_num == 2:\n print('1st convolutional layer, 1st kernel weights:')\n 
print(netWork.layers[0].kernels[0][0][0].weights[:-1].reshape((3, 3)))\n print('1st convolutional layer, 1st kernel bias:')\n print(np.array([netWork.layers[0].kernels[0][0][0].weights[-1]]))\n\n print('1st convolutional layer, 2st kernel weights:')\n print(netWork.layers[0].kernels[1][0][0].weights[:-1].reshape((3, 3)))\n print('1st convolutional layer, 2st kernel bias:')\n print(np.array([netWork.layers[0].kernels[1][0][0].weights[-1]]))\n\n print('2nd convolutional layer, 1st kernel weights:')\n print(netWork.layers[1].kernels[0][0][0].weights[:-1].reshape((2, 3, 3)))\n print('2nd convolutional layer, 1st kernel bias:')\n print(np.array([netWork.layers[1].kernels[0][0][0].weights[-1]]))\n\n print('fully connected layer weights:')\n\n print(netWork.layers[3].neurons[0].weights[:-1])\n print('fully connected layer bias:')\n print(np.array([netWork.layers[3].neurons[0].weights[-1]]))\n elif example_num == 3:\n print('1st convolutional layer, 1st kernel weights:')\n print(netWork.layers[0].kernels[0][0][0].weights[:-1].reshape((3, 3)))\n print('1st convolutional layer, 1st kernel bias:')\n print(np.array([netWork.layers[0].kernels[0][0][0].weights[-1]]))\n\n print('1st convolutional layer, 2st kernel weights:')\n print(netWork.layers[0].kernels[1][0][0].weights[:-1].reshape((3, 3)))\n print('1st convolutional layer, 2st kernel bias:')\n print(np.array([netWork.layers[0].kernels[1][0][0].weights[-1]]))\n\n print('fully connected layer weights:')\n\n print(netWork.layers[3].neurons[0].weights[:-1])\n print('fully connected layer bias:')\n print(np.array([netWork.layers[3].neurons[0].weights[-1]]))\n else:\n raise ValueError(f'Invalid example number: {example_num}')\n\n\nif __name__ == '__main__':\n try:\n main()\n except Exception as e:\n print(str(e) + '\\n' + str(traceback.format_exc()))\n raise e\n\n# # First Layer (Convolutional)\n# weights_L1 = np.array(\n# [np.concatenate((l1k1.flatten(), l1b1)), np.concatenate((l1k2.flatten(), l1b2))])\n# netWork.addConvLayer(num_kernels=2, kernel_size=3, activation=\"logistic\", weights=weights_L1)\n# # Second Layer (Convolutional)\n# weights_L2 = np.array([np.concatenate((l2c1.flatten(), l2c2.flatten(), l2b))])\n# netWork.addConvLayer(num_kernels=1, kernel_size=3, activation=\"logistic\", weights=weights_L2)\n# # Third Layer (Fully Connected)\n# netWork.addFlattenLayer()\n# weights_L3 = np.array([np.concatenate((l3.flatten(), l3b))])\n# netWork.addFCLayer(num_neurons=1, activation=\"logistic\", weights=weights_L3)\n"
] | [
[
"numpy.array",
"numpy.concatenate"
]
] |
suo/pytext | [
"400c80b4c040de12028970a85ce0af864931e0f4"
] | [
"pytext/trainers/trainer.py"
] | [
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport itertools\nimport time\nfrom contextlib import ExitStack as contextlib_ExitStack\nfrom typing import Any, Iterable, List, Optional, Tuple\n\nimport torch\nfrom pytext.common.constants import BatchContext, Stage\nfrom pytext.config import PyTextConfig\nfrom pytext.config.component import (\n Component,\n ComponentType,\n create_optimizer,\n create_scheduler,\n create_sparsifier,\n)\nfrom pytext.config.pytext_config import ConfigBase\nfrom pytext.data.data_handler import BatchIterator\nfrom pytext.metric_reporters import MetricReporter\nfrom pytext.models.distributed_model import DistributedModel\nfrom pytext.models.model import Model\nfrom pytext.optimizer import Adam, Optimizer, learning_rates\nfrom pytext.optimizer.scheduler import Scheduler\nfrom pytext.optimizer.sparsifier import Sparsifier\nfrom pytext.task.serialize import save\nfrom pytext.trainers.training_state import TrainingState\nfrom pytext.utils import cuda, precision, timing\n\n\nclass TrainerBase(Component):\n __COMPONENT_TYPE__ = ComponentType.TRAINER\n\n\ndef cycle(iterator: Iterable[Any]) -> Iterable[Any]:\n \"\"\"Like itertools.cycle, but will call iter on the original iterable instead.\n This limits it to not be able to run on say raw generators, but also doesn't\n store a copy of the iterable in memory for repetition.\"\"\"\n while True:\n yield from iterator\n\n\ndef maybe_accumulate_gradients(exit_stack, model, index, sample_size):\n # index == sample_size - 1 represents the last backward pass\n if (\n cuda.DISTRIBUTED_WORLD_SIZE > 1\n and hasattr(model, \"no_sync\")\n and index < sample_size - 1\n ):\n \"\"\"\n Whenever *samples* contains more than one mini-batch (e.g sample_size > 1),\n we want to accumulate gradients locally and only call all-reduce in the\n last backwards pass.\n \"\"\"\n exit_stack.enter_context(model.no_sync())\n\n if precision._FP16_ENABLED and index < sample_size - 1:\n \"\"\"\n Whenever *samples* contains more than one mini-batch (e.g sample_size > 1),\n we want to accumulate gradients in FP16 parameters (e.g delay unscale)\n and only unscale to FP32 parameters after the last backward pass.\n \"\"\"\n exit_stack.enter_context(precision.delay_unscale())\n\n\nclass Trainer(TrainerBase):\n \"\"\"\n Base Trainer class that provide ways to\n 1 Train model, compute metrics against eval set and use the metrics for\n model selection.\n 2 Test trained model, compute and publish metrics against a blind test set.\n\n Attributes:\n epochs (int): Training epochs\n early_stop_after (int): Stop after how many epochs when the eval metric\n is not improving\n max_clip_norm (Optional[float]): Clip gradient norm if set\n report_train_metrics (bool): Whether metrics on training data should be\n computed and reported.\n target_time_limit_seconds (float): Target time limit for training in seconds. 
If\n the expected time to train another epoch exceeds this limit, stop training.\n \"\"\"\n\n class Config(ConfigBase):\n #: Training epochs\n epochs: int = 10\n #: Stop after how many epochs when the eval metric is not improving\n early_stop_after: int = 0\n #: Clip gradient norm if set\n max_clip_norm: Optional[float] = None\n #: Whether metrics on training data should be computed and reported.\n report_train_metrics: bool = True\n #: Target time limit for training, default (None) to no time limit.\n target_time_limit_seconds: Optional[int] = None\n #: Whether to do evaluation and model selection based on it.\n do_eval: bool = True\n #: Number of samples for logging training progress.\n num_samples_to_log_progress: int = 1000\n #: Number of forward & backward per batch before update gradients, the\n #: actual_batch_size = batch_size x num_accumulated_batches\n num_accumulated_batches: int = 1\n #: Define epoch as a fixed number of batches. Subsequent epochs will continue\n #: to iterate through the data, cycling through it when they reach the end.\n #: If not set, use exactly one pass through the dataset as one epoch.\n #: This configuration only affects the train epochs, test and eval\n #: will always test their entire datasets.\n num_batches_per_epoch: Optional[int] = None\n #: config for optimizer, used in parameter update\n optimizer: Optimizer.Config = Adam.Config()\n scheduler: Optional[Scheduler.Config] = None\n sparsifier: Optional[Sparsifier.Config] = None\n\n def __init__(self, config: Config, model: torch.nn.Module):\n if config.early_stop_after > 0:\n assert config.do_eval, \"can't do early stopping when not running evalution\"\n optimizer: torch.optim.Optimizer = create_optimizer(config.optimizer, model)\n self.scheduler: torch.optim.lr_scheduler = (\n create_scheduler(config.scheduler, optimizer)\n if config.scheduler\n else Scheduler()\n )\n self.sparsifier: Sparsifier = (\n create_sparsifier(config.sparsifier) if config.sparsifier else Sparsifier()\n )\n model, self.optimizer = precision.initialize(model, optimizer)\n self.config = config\n\n @classmethod\n def from_config(cls, config: Config, model: torch.nn.Module, *args, **kwargs):\n return cls(config, model)\n\n @timing.time(\"Trainer.test\")\n def test(self, test_iter, model, metric_reporter: MetricReporter):\n state = TrainingState(stage=Stage.TEST, model=model, epoch=1)\n if cuda.CUDA_ENABLED:\n state.model.cuda()\n state.model.eval()\n with torch.no_grad():\n return self.run_epoch(state, test_iter, metric_reporter)\n\n @timing.time(\"pre-training\")\n def set_up_training(self, state: TrainingState, training_data: BatchIterator):\n if cuda.CUDA_ENABLED:\n state.model.cuda()\n state.scheduler.prepare(training_data, self.config.epochs)\n\n if cuda.DISTRIBUTED_WORLD_SIZE > 1:\n device_id = torch.cuda.current_device()\n state.model = DistributedModel(\n module=state.model,\n device_ids=[device_id],\n output_device=device_id,\n broadcast_buffers=False,\n find_unused_parameters=state.model.find_unused_parameters,\n )\n state.start_time = time.time()\n\n if self.config.num_batches_per_epoch:\n # Set the training_data iterator to cycle, so it will never run out,\n # but rather after reaching the end will loop back to the beginning.\n training_data = cycle(training_data)\n return training_data\n\n @timing.time(\"zero gradients\")\n def zero_grads(self, state):\n if state.stage != Stage.TRAIN:\n return\n state.optimizer.zero_grad()\n\n @timing.time(\"backprop\")\n def backprop(self, state, loss):\n if state.stage != 
Stage.TRAIN:\n return\n\n with timing.time(\"loss.backward\"):\n precision.backward(state.optimizer, loss)\n\n @timing.time(\"optimizer\")\n def optimizer_step(self, state):\n if state.stage != Stage.TRAIN:\n return\n\n state.scheduler.step_batch()\n\n if self.config.max_clip_norm is not None:\n grad_norm = precision.clip_grad_norm(\n state.model, state.optimizer, self.config.max_clip_norm\n )\n else:\n grad_norm = None\n\n with timing.time(\"optimizer.step\"):\n state.optimizer.step()\n\n state.step_counter += 1\n # grad_norm could be used to check grads sync in distributed training\n return grad_norm\n\n @timing.time(\"sparsifier\")\n def sparsification_step(self, state):\n # sparsification only if sparifier is used\n if not self.config.sparsifier:\n return\n\n if state.stage != Stage.TRAIN:\n return\n\n if state.sparsifier.sparsification_condition(state):\n state.sparsifier.sparsify(state)\n\n if state.rank == 0:\n current_sparsity = state.sparsifier.get_current_sparsity(state.model)\n print(f\"sparsity in the model: {current_sparsity}\")\n\n def continue_training(self, state: TrainingState) -> bool:\n # Are we done?\n if state.epoch >= self.config.epochs:\n return False\n\n # Check whether the model has improved recently enough\n # Only do this if we're bothering to evaluate the model\n if self.config.do_eval and state.epochs_since_last_improvement >= (\n self.config.early_stop_after or float(\"inf\")\n ):\n print(\n f\"Worker {state.rank}: Eval metric hasn't changed for \"\n + f\"{state.epochs_since_last_improvement} epochs. Stopping now.\"\n )\n return False\n\n # Check whether we think the next epoch will put us over the configured\n # time limit.\n epochs_run = state.epoch + 1\n time_elapsed = time.time() - state.start_time\n mean_epoch_time = time_elapsed / epochs_run\n expected_next_epoch_time = time_elapsed + mean_epoch_time\n target_time_limit = (\n float(\"inf\")\n if self.config.target_time_limit_seconds is None\n else self.config.target_time_limit_seconds\n )\n if expected_next_epoch_time > target_time_limit:\n print(\n f\"Worker {state.rank}: Stopping training after {epochs_run} epochs \"\n f\"and {int(time_elapsed)} seconds, due to the target max training \"\n f\"time of {self.config.target_time_limit_seconds} seconds.\"\n )\n return False\n\n return True\n\n def update_best_model(\n self, state: TrainingState, train_config: PyTextConfig, eval_metric\n ):\n # This should be updated by all workers so they agree on when to stop training\n # when `early_stop_after` is specified.\n state.epochs_since_last_improvement = 0\n state.best_model_metric = eval_metric\n print(f\"Found a better model!\")\n\n # Only one worker should save checkpoints\n if state.rank != 0:\n return\n\n model_state = state.model.state_dict()\n # save to cpu to avoid multiple model copies in gpu memory\n if cuda.CUDA_ENABLED:\n for key, parameter in model_state.items():\n model_state[key] = parameter.cpu()\n state.best_model_state = model_state\n\n @timing.time(\"save checkpoint\")\n def save_checkpoint(self, state: TrainingState, train_config: PyTextConfig) -> str:\n # Only one worker should save checkpoints\n if state.rank != 0:\n return\n\n if train_config.save_module_checkpoints or train_config.save_all_checkpoints:\n # saves per-epoch sub-modules when save_all_checkpoints or\n # save_module_checkpoints is enabled\n state.model.save_modules(\n base_path=train_config.modules_save_dir, suffix=f\"-ep{state.epoch}\"\n )\n if state.epochs_since_last_improvement == 0:\n # state.epochs_since_last_improvement 
== 0 means found a better\n # model in current epoch, thus update best model's sub-modules\n state.model.save_modules(base_path=train_config.modules_save_dir)\n\n # next to add new config and implementation of frequency on checkpointing\n if train_config.save_all_checkpoints:\n return save(\n config=train_config,\n model=state.model,\n meta=None,\n tensorizers=None,\n training_state=state,\n identifier=str(state.epoch),\n )\n\n def load_best_model(self, state: TrainingState):\n if cuda.CUDA_ENABLED:\n # Move current model to CPU to avoid multiple models in GPU memory\n state.model.cpu()\n state.model.load_state_dict(\n {k: v.cuda() for k, v in state.best_model_state.items()}\n )\n # Move model back to GPU\n state.model.cuda()\n else:\n state.model.load_state_dict(state.best_model_state)\n\n def train(\n self,\n training_data: BatchIterator,\n eval_data: BatchIterator,\n model: Model,\n metric_reporter: MetricReporter,\n train_config: PyTextConfig,\n rank: int = 0,\n ) -> Tuple[torch.nn.Module, Any]:\n \"\"\"\n Train and eval a model, the model states will be modified.\n Args:\n train_iter (BatchIterator): batch iterator of training data\n eval_iter (BatchIterator): batch iterator of evaluation data\n model (Model): model to be trained\n metric_reporter (MetricReporter): compute metric based on training\n output and report results to console, file.. etc\n train_config (PyTextConfig): training config\n training_result (Optional): only meaningful for Hogwild training. default\n is None\n rank (int): only used in distributed training, the rank of the current\n training thread, evaluation will only be done in rank 0\n\n Returns:\n model, best_metric: the trained model together with the best metric\n \"\"\"\n state = TrainingState(\n model=model,\n optimizer=self.optimizer,\n scheduler=self.scheduler,\n sparsifier=self.sparsifier,\n rank=rank,\n )\n return self.train_from_state(\n state, training_data, eval_data, metric_reporter, train_config\n )\n\n @timing.time(\"Trainer.train_from_state\")\n def train_from_state(\n self,\n state: TrainingState,\n training_data: BatchIterator,\n eval_data: BatchIterator,\n metric_reporter: MetricReporter,\n train_config: PyTextConfig,\n ) -> Tuple[torch.nn.Module, Any]:\n \"\"\"\n Train and eval a model from a given training state will be modified.\n This function iterates epochs specified in config, and for each epoch do:\n\n 1. Train model using training data, aggregate and report training results\n 2. Adjust learning rate if scheduler is specified\n 3. Evaluate model using evaluation data\n 4. Calculate metrics based on evaluation results and select best model\n\n Args:\n training_state (TrainingState): contrains stateful information to be\n able to restore a training job\n train_iter (BatchIterator): batch iterator of training data\n eval_iter (BatchIterator): batch iterator of evaluation data\n model (Model): model to be trained\n metric_reporter (MetricReporter): compute metric based on training\n output and report results to console, file.. 
etc\n train_config (PyTextConfig): training config\n\n Returns:\n model, best_metric: the trained model together with the best metric\n \"\"\"\n training_data = self.set_up_training(state, training_data)\n model = state.model\n rank = state.rank\n trainable_params = sum(\n p.numel() for p in state.model.parameters() if p.requires_grad\n )\n print(f\"Num trainable parameters: {trainable_params}\")\n\n while self.continue_training(state):\n state.epoch += 1\n state.epochs_since_last_improvement += 1\n lrs = learning_rates(state.optimizer)\n print(f\"\\nWorker {state.rank} starting epoch {state.epoch}\")\n print(f\"Learning rate(s): {', '.join(map(str, lrs))}\")\n\n with timing.time(\"train epoch\"):\n state.stage = Stage.TRAIN\n state.model.train()\n print(f\"start training epoch {state.epoch}\")\n epoch_data = training_data\n if self.config.num_batches_per_epoch:\n # We want to limit the number of batches in the epoch;\n # equivalent to epoch_data[:num_batches_per_epoch] for iterators.\n # In this case we set the training data iterator to cycle earlier\n # in the training process, so when it reaches the end it will\n # loop back to the beginning.\n epoch_data = itertools.islice(\n epoch_data, self.config.num_batches_per_epoch\n )\n self.run_epoch(state, epoch_data, metric_reporter)\n\n if not self.config.do_eval:\n continue\n\n with timing.time(\"eval epoch\"):\n state.stage = Stage.EVAL\n model.eval(Stage.EVAL)\n print(f\"start evaluating epoch {state.epoch}\")\n with torch.no_grad():\n eval_metric = self.run_epoch(state, eval_data, metric_reporter)\n\n # Step the learning rate scheduler(s)\n assert eval_metric is not None\n state.scheduler.step_epoch(\n metrics=metric_reporter.get_model_select_metric(eval_metric),\n epoch=state.epoch,\n )\n\n # Did we train a better model?\n better_model = metric_reporter.compare_metric(\n eval_metric, state.best_model_metric\n )\n if better_model:\n self.update_best_model(state, train_config, eval_metric)\n if better_model or train_config.save_all_checkpoints:\n self.save_checkpoint(state, train_config)\n\n if self.optimizer.finalize():\n state.stage = Stage.EVAL\n model.eval(Stage.EVAL)\n print(f\"start evaluating finalized state\")\n with torch.no_grad():\n eval_metric = self.run_epoch(state, eval_data, metric_reporter)\n better_model = metric_reporter.compare_metric(\n eval_metric, state.best_model_metric\n )\n if better_model:\n self.update_best_model(state, train_config, eval_metric)\n if better_model or train_config.save_all_checkpoints:\n self.save_checkpoint(state, train_config)\n # Only bother loading the best model for master worker\n if rank == 0 and state.best_model_state is not None:\n self.load_best_model(state)\n\n return state.model, state.best_model_metric\n\n @timing.report_snapshot\n def run_epoch(\n self, state: TrainingState, data: BatchIterator, metric_reporter: MetricReporter\n ):\n # This method is due for some refactoring, pushing it off because it interacts\n # with the metric reporter too much. Much of the logic here either changes in\n # the NewTaskTrainer or should change with a better metric reporter design.\n report_metric = state.stage != Stage.TRAIN or self.config.report_train_metrics\n model = state.model\n samples = []\n\n \"\"\"\n Sometimes, a batch of inputs is too large to fit into GPU, which has to\n be split into several micro-batches. 
However, to improve efficiency,\n it would be helpful to only apply params/gradients sync at original batch\n boundaries instead of micro-batch boundaries.\n num_accumulated_batches specified the number of accumulating gradients\n locally before sync gradients, total training_batch_size =\n train_batch_size x num_accumulated_batches and it will improve the system\n performance by reduce the total network transfer bytes.\n \"\"\"\n for sample in enumerate(data):\n samples.append(sample)\n if (\n state.stage != Stage.TRAIN\n or len(samples) == self.config.num_accumulated_batches\n ):\n self.run_step(samples, state, metric_reporter, report_metric)\n samples = []\n if samples:\n self.run_step(samples, state, metric_reporter, report_metric)\n samples = []\n\n metrics = None\n if report_metric:\n with timing.time(\"report metrics\"):\n metrics = metric_reporter.report_metric(\n model, state.stage, state.epoch, print_to_channels=(state.rank == 0)\n )\n else:\n metric_reporter._reset()\n\n return metrics\n\n @timing.time(\"run_step\")\n def run_step(\n self,\n samples: List[Any],\n state: TrainingState,\n metric_reporter: MetricReporter,\n report_metric: bool,\n ):\n sample_size = len(samples)\n assert sample_size <= self.config.num_accumulated_batches\n\n model = state.model\n self.zero_grads(state)\n for idx, (batch_id, (inputs, targets, context)) in enumerate(samples):\n with contextlib_ExitStack() as exit_stack:\n maybe_accumulate_gradients(exit_stack, model, idx, sample_size)\n # pass context to model to use in forward call if needed\n model.contextualize(context)\n with timing.time(\"model.forward\"):\n logits = model(*inputs)\n\n with timing.time(\"compute loss\"):\n loss = precision.maybe_float(\n model.get_loss(logits, targets, context)\n )\n if BatchContext.IGNORE_LOSS in context:\n loss *= 0\n elif sample_size > 1:\n # gradients averaged per batch and accumulated across samples.\n # divide sample_size to let gradients averaged per example\n loss = loss / sample_size\n\n self.backprop(state, loss)\n\n if report_metric:\n with timing.time(\"get pred\"):\n preds, scores = model.get_pred(\n logits, targets, context, state.stage, *inputs\n )\n\n with timing.time(\"add metrics\"):\n metric_reporter.add_batch_stats(\n batch_id, preds, targets, scores, loss.item(), inputs, **context\n )\n\n if batch_id % self.config.num_samples_to_log_progress == 0:\n print(\n f\"Running batch {batch_id} for epoch {state.epoch} in {state.stage} stage\",\n flush=True,\n )\n # update gradients after len(samples) forward & backward\n self.optimizer_step(state)\n self.sparsification_step(state)\n\n\nclass TaskTrainer(Trainer):\n __EXPANSIBLE__ = True\n\n class Config(Trainer.Config):\n \"\"\"Make mypy happy\"\"\"\n\n @timing.time(\"run_step\")\n def run_step(\n self,\n samples: List[Any],\n state: TrainingState,\n metric_reporter: MetricReporter,\n report_metric: bool,\n ):\n \"\"\"Our run_step is a bit different, because we're wrapping the model forward\n call with model.train_batch, which arranges tensors and gets loss, etc.\n\n Whenever \"samples\" contains more than one mini-batch (sample_size > 1),\n we want to accumulate gradients locally and only call all-reduce in the\n last backwards pass.\n \"\"\"\n sample_size = len(samples)\n assert sample_size <= self.config.num_accumulated_batches\n\n model = state.model\n self.zero_grads(state)\n for idx, (batch_id, (raw_batch, batch)) in enumerate(samples):\n with contextlib_ExitStack() as exit_stack:\n # enter ddp no_sync context and fp16 delay_scale context if 
needed\n maybe_accumulate_gradients(exit_stack, model, idx, sample_size)\n with timing.time(\"model.train_batch\"):\n loss, metric_data = model.train_batch(model, batch, state)\n if sample_size > 1:\n # gradients averaged per batch and accumulated across samples.\n # divide sample_size to let gradients averaged per example\n loss = loss / sample_size\n self.backprop(state, loss)\n\n if report_metric:\n with timing.time(\"add metrics\"):\n metric_reporter.add_batch_stats(\n batch_id,\n *metric_data,\n # TODO merge this step into add_batch_stats once all data\n # migration is done\n **metric_reporter.batch_context(raw_batch, batch),\n )\n if batch_id % self.config.num_samples_to_log_progress == 0:\n metric_reporter.report_realtime_metric(state.stage)\n # update gradients after #len(samples) forward & backward\n self.optimizer_step(state)\n self.sparsification_step(state)\n\n def _prepare_scheduler(self, training_batches, scheduler=None):\n \"\"\"Batch based schedulers require knowing the number of batches in\n the data. We're not supporting that yet with the Data api, need to figure out\n how to expose this info or restructure batch-based schedulers to not need it.\"\"\"\n if scheduler.batch_based_schedulers:\n raise Exception(\"New tasks don't yet support batch-based scheduling\")\n return scheduler\n"
] | [
[
"torch.no_grad",
"torch.cuda.current_device"
]
] |
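The `num_accumulated_batches` logic in the PyText trainer record above follows the standard gradient-accumulation pattern: each micro-batch loss is divided by the number of accumulated batches before backward, and a single optimizer step runs once all micro-batches have contributed gradients. A minimal self-contained sketch, assuming a toy model and random data (nothing here is PyText API):

import torch

# hypothetical stand-ins for the trainer's model/optimizer/micro-batches
model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
num_accumulated_batches = 4
micro_batches = [(torch.randn(8, 4), torch.randn(8, 1)) for _ in range(num_accumulated_batches)]

optimizer.zero_grad()
for inputs, targets in micro_batches:
    loss = torch.nn.functional.mse_loss(model(inputs), targets)
    # divide so the accumulated gradients stay averaged per example
    (loss / num_accumulated_batches).backward()
optimizer.step()  # one parameter update after all micro-batches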
a1600012888/fairseq | [
"dbd2cd08fc396f919d2e737513095fcb966896c0"
] | [
"fairseq/criterions/masked_adlm.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport math\n\nimport torch\nimport torch.nn.functional as F\n\nfrom fairseq import metrics, utils\nfrom fairseq.criterions import FairseqCriterion, register_criterion\n\n\n@register_criterion('masked_adlm')\nclass MaskedAdLmLoss(FairseqCriterion):\n \"\"\"\n Implementation for the loss used in masked language model (MLM) training.\n \"\"\"\n\n\n def __init__(self, args, task):\n super(MaskedAdLmLoss, self).__init__(args, task)\n\n self.vocab = self.task.source_dictionary\n print(len(self.vocab.count))\n self.register_buffer('margins', torch.zeros((len(self.vocab.count), 1)))\n self.margins.requires_grad = False\n\n self.margin_lambda = args.margin_lambda\n self.margin_lr = args.margin_lr\n self.margin_norm = args.margin_norm\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add criterion-specific arguments to the parser.\"\"\"\n super(MaskedAdLmLoss,\n MaskedAdLmLoss).add_args(parser)\n parser.add_argument('--margin_lambda', default=0.5, type=float, metavar='D',\n help='weight for the adaptive margin loss')\n parser.add_argument('--margin_lr', default=0.0001, type=float, metavar='D',\n help='weight for the adaptive margin loss')\n parser.add_argument('--margin-norm', default='l1', type=str,\n help='Type of margin norm in the loss')\n\n def forward(self, model, sample, reduce=True):\n \"\"\"Compute the loss for the given sample.\n\n Returns a tuple with three elements:\n 1) the loss\n 2) the sample size, which is used as the denominator for the gradient\n 3) logging outputs to display while training\n \"\"\"\n # compute MLM loss\n #self.margins.requires_grad = model.training\n\n masked_tokens = sample['target'].ne(self.padding_idx)\n sample_size = masked_tokens.int().sum().item()\n\n # (Rare case) When all tokens are masked, the model results in empty\n # tensor and gives CUDA error.\n if sample_size == 0:\n masked_tokens = None\n\n logits = model(**sample['net_input'], masked_tokens=masked_tokens)[0]\n targets = model.get_targets(sample, [logits])\n\n #import IPython\n #IPython.embed()\n if sample_size != 0:\n targets = targets[masked_tokens]\n\n\n # targets shape: [x]\n # logits.shape: [x, 32769]\n one_hot = F.one_hot(targets, len(self.vocab.count)) # [x, 32769]\n\n #import IPython\n #IPython.embed()\n\n m = F.embedding(targets, self.margins) # [x, 1]\n #m = self.margins(targets).squeeze(dim=-1)\n margin = m * one_hot # [x, 32769]\n\n #import IPython\n #IPython.embed()\n\n logits_minus_margin = logits - margin\n log_softmax = F.log_softmax(\n logits_minus_margin.view(-1, logits.size(-1)),\n dim=-1,\n dtype=torch.float32,\n ) # [x, 32769]\n\n\n adm_loss = F.nll_loss(\n log_softmax, \n targets.view(-1),\n reduction='sum',\n ignore_index=self.padding_idx,\n )\n\n # cal margin grad\n with torch.no_grad():\n margin_log_grad = torch.gather(log_softmax.detach(), dim=-1,\n index=targets.unsqueeze(-1)) # [x, 1]\n margin_grad_cross = torch.exp(margin_log_grad) - \\\n torch.ones_like(margin_log_grad)\n\n if self.margin_norm == 'l1':\n margin_grad = margin_grad_cross - torch.ones_like(m) * self.margin_lambda\n else:\n # l2 norm\n margin_grad = margin_grad_cross - m * self.margin_lambda * 2.0\n margin_update = -1.0 * margin_grad * self.margin_lr\n\n self.margins.scatter_add_(0, targets.unsqueeze(-1), margin_update.half())\n\n # for logging below! 
margin_norm; normal loss\n        margin_norm = torch.mean(self.margins) * sample['nsentences']  # used for log!\n\n        normal_loss = F.nll_loss(\n            F.log_softmax(\n                logits.view(-1, logits.size(-1)),\n                dim=-1,\n                dtype=torch.float32,\n            ),\n            targets.view(-1),\n            reduction='sum',\n            ignore_index=self.padding_idx,\n        )\n\n        logging_output = {\n            'loss': utils.item(normal_loss.data) if reduce else normal_loss.data,\n            'margin_n': utils.item(margin_norm.data) if reduce else margin_norm.data,\n            'ntokens': sample['ntokens'],\n            'nsentences': sample['nsentences'],\n            'sample_size': sample_size,\n            'admloss': utils.item(adm_loss.data) if reduce else adm_loss.data,\n        }\n        return adm_loss, sample_size, logging_output\n\n    @staticmethod\n    def reduce_metrics(logging_outputs) -> None:\n        \"\"\"Aggregate logging outputs from data parallel training.\"\"\"\n        loss_sum = sum(log.get('loss', 0) for log in logging_outputs)\n        admloss_sum = sum(log.get('admloss', 0) for log in logging_outputs)\n        margin_n = sum(log.get('margin_n', 0) for log in logging_outputs)\n        sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)\n        nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)\n\n        metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), sample_size, round=3)\n        metrics.log_scalar('admloss', admloss_sum / sample_size / math.log(2), sample_size, round=3)\n        metrics.log_scalar('margin_norm', margin_n / nsentences, 32, round=3)\n        metrics.log_derived('ppl', lambda meters: round(2**meters['loss'].avg, 3))\n\n    @staticmethod\n    def logging_outputs_can_be_summed() -> bool:\n        \"\"\"\n        Whether the logging outputs returned by `forward` can be summed\n        across workers prior to calling `reduce_metrics`. Setting this\n        to True will improve distributed training speed.\n        \"\"\"\n        return True\n"
] | [
[
"torch.ones_like",
"torch.nn.functional.embedding",
"torch.no_grad",
"torch.exp",
"torch.mean"
]
] |
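The adaptive-margin loss in the masked_adlm record above shifts the target logit down by a learned per-token margin and updates the margin table by hand rather than through autograd. A minimal sketch of the l1-penalty branch with toy shapes and values (the original additionally casts the update to fp16 and handles padding):

import torch
import torch.nn.functional as F

vocab, margin_lr, margin_lambda = 10, 1e-4, 0.5
margins = torch.zeros(vocab, 1)            # one learned margin per vocab entry
logits = torch.randn(3, vocab)             # 3 masked positions
targets = torch.tensor([1, 4, 7])

# subtract each token's margin from its own logit only
margin = F.embedding(targets, margins) * F.one_hot(targets, vocab)
log_probs = F.log_softmax(logits - margin, dim=-1)
loss = F.nll_loss(log_probs, targets, reduction='sum')

with torch.no_grad():
    p_target = torch.gather(log_probs, -1, targets.unsqueeze(-1)).exp()
    grad = (p_target - 1.0) - margin_lambda    # manual d(loss)/d(margin), l1 case
    margins.scatter_add_(0, targets.unsqueeze(-1), -margin_lr * grad)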
terra-submersa/opensfm-camera-coverage | [
"a9ad2bff799a5d0d07d7900fc7d1bf10bc489632"
] | [
"src/odm_report_shot_coverage/models/reconstruction.py"
] | [
"import json\nimport logging\n\nimport geojson\nimport numpy as np\nfrom tqdm import tqdm\nfrom scipy import stats\n\nfrom odm_report_shot_coverage.models.camera import Camera, json_parse_camera\nfrom odm_report_shot_coverage.models.shot import Shot, shot_boundaries_from_points, Boundaries\nfrom odm_report_shot_coverage.models.wavefront_25d import Wavefront25D, parse_wavefront_25d_obj\n\n\nclass Reconstruction:\n cameras: 'dict[str, Camera]' = {}\n _shots: 'list[Shot]' = []\n mesh = Wavefront25D\n orthophoto_boundaries: Boundaries\n\n @property\n def shots(self) -> 'list[Shot]':\n self._shots.sort(key=lambda s: s.image_name)\n return self._shots\n\n def add_camera(self, name: str, camera: Camera):\n self.cameras[name] = camera\n\n def add_shot(self, shot: Shot):\n self._shots.append(shot)\n\n def to_json(self) -> dict:\n return {\n 'cameras': {n: c.to_json() for n, c in self.cameras.items()},\n 'shots': [s.to_json() for s in self.shots],\n # 'mesh': self.mesh.to_json(),\n 'boundaries': self.mesh.boundaries.to_json(),\n 'orthophotoBoundaries': self.orthophoto_boundaries.to_json(),\n }\n\n def compute_shot_boundaries(self):\n \"\"\"\n From shots and points, fill the shot_boundaries\n :rtype: None\n \"\"\"\n\n for shot in tqdm(self.shots, desc='Computing shot boundaries'):\n points = []\n for i, point in enumerate(self.mesh.points):\n pixel = shot.camera_pixel(point)\n if shot.camera.in_frame(pixel):\n points.append(point)\n shot.boundaries = shot_boundaries_from_points(points)\n\n def find_camera_by_width_height(self, width: int, height: int) -> Camera:\n cs = [c for c in self.cameras.values() if c.width == width and c.height == height]\n if len(cs) != 1:\n raise Exception('Not exactly one camera found with size %s x %s' % (width, height))\n return cs[0]\n\n\nclass ReconstructionCollection:\n reconstructions: 'list[Reconstruction]' = []\n\n def append(self, reconstruction: Reconstruction):\n self.reconstructions.append(reconstruction)\n\n def __getitem__(self, i: int):\n return self.reconstructions[i]\n\n def __len__(self):\n return len(self.reconstructions)\n\n\ndef lin_reg(pairs: 'list[(float, float)]') -> (float, float, float, float):\n x = [p[0] for p in pairs]\n y = [p[1] for p in pairs]\n return stats.linregress(x, y)\n\n\ndef _parse_point_cloud_boundaries(path: str) -> Boundaries:\n with open('%s/odm_report/stats.json' % path, 'r') as fd:\n stats_json = json.load(fd)\n bbox = stats_json['point_cloud_statistics']['stats']['bbox']['native']['bbox']\n return Boundaries(\n x_min=bbox['minx'],\n x_max=bbox['maxx'],\n y_min=bbox['miny'],\n y_max=bbox['maxy'],\n z_min=bbox['minz'],\n z_max=bbox['maxz'],\n )\n\n\ndef _parse_camera_shotgeojson(path: str, reconstruction: Reconstruction, native_to_25d_coordinates):\n with open('%s/cameras.json' % path, 'r') as fd:\n cameras_json = json.load(fd)\n for n, j in cameras_json.items():\n camera = json_parse_camera(n, j)\n reconstruction.add_camera(n, camera)\n\n (tr_x, tr_y, tr_z) = native_to_25d_coordinates\n with open('%s/odm_report/shots.geojson' % path, 'r') as fd:\n shots_geojson = geojson.load(fd)\n for feat in shots_geojson['features']:\n shot = Shot()\n props = feat['properties']\n shot.camera = reconstruction.find_camera_by_width_height(props['width'], props['height'])\n shot.image_name = props['filename']\n translation = props['translation']\n shot.translation = (tr_x(translation[0]), tr_y(translation[1]), tr_z(translation[2]))\n shot.rotation = props['rotation']\n reconstruction.add_shot(shot)\n\n\ndef 
_native_to_model_25d_coordinates(native_boundaries: Boundaries, model_25d_boundaries: Boundaries):\n    width_25d = model_25d_boundaries.x_max - model_25d_boundaries.x_min\n    height_25d = model_25d_boundaries.y_max - model_25d_boundaries.y_min\n    elevation_25d = model_25d_boundaries.z_max - model_25d_boundaries.z_min\n    width_native = native_boundaries.x_max - native_boundaries.x_min\n    height_native = native_boundaries.y_max - native_boundaries.y_min\n    elevation_native = native_boundaries.z_max - native_boundaries.z_min\n    width_ratio = np.abs(1 - width_native / width_25d)\n    height_ratio = np.abs(1 - height_native / height_25d)\n    elevation_ratio = np.abs(1 - elevation_native / elevation_25d)\n    logging.info(\n        'native/25d model boundaries discrepancies width=%.2f%% height=%.2f%% elevation=%.2f%%' % (\n            width_ratio * 100, height_ratio * 100, elevation_ratio * 100))\n\n    return (\n        lambda x: (x - (native_boundaries.x_max + native_boundaries.x_min) / 2) + (\n                model_25d_boundaries.x_max + model_25d_boundaries.x_min) / 2,\n        lambda y: (y - (native_boundaries.y_max + native_boundaries.y_min) / 2) + (\n                model_25d_boundaries.y_max + model_25d_boundaries.y_min) / 2,\n        lambda z: (z - (native_boundaries.z_max + native_boundaries.z_min) / 2) + (\n                model_25d_boundaries.z_max + model_25d_boundaries.z_min) / 2\n    )\n\n\ndef parse_reconstruction(path: str) -> Reconstruction:\n    reconstruction = Reconstruction()\n\n    wf = parse_wavefront_25d_obj('%s/odm_texturing_25d/odm_textured_model_geo.obj' % path)\n    reconstruction.mesh = wf\n    reconstruction.orthophoto_boundaries = wf.boundaries\n\n    native_boundaries = _parse_point_cloud_boundaries(path)\n    _parse_camera_shotgeojson(path, reconstruction,\n                              _native_to_model_25d_coordinates(native_boundaries, wf.boundaries))\n\n    return reconstruction\n"
] | [
[
"scipy.stats.linregress",
"numpy.abs"
]
] |
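The `_native_to_model_25d_coordinates` helper in the record above returns three per-axis closures that translate points from the native point-cloud frame to the 2.5D model frame by aligning bounding-box centers; no scaling or rotation is applied. A minimal sketch of one axis, with made-up bounds:

def recenter(native_min, native_max, model_min, model_max):
    # shift by the difference between the two bounding-box centers
    native_center = (native_min + native_max) / 2
    model_center = (model_min + model_max) / 2
    return lambda v: v - native_center + model_center

tr_x = recenter(100.0, 120.0, -10.0, 10.0)  # center 110 maps to center 0
print(tr_x(115.0))  # 5.0: the offset from the center is preserved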
SeongSuKim95/ReID-Baseline-swin | [
"f30db86eb2690c20c4fbb0189eb52b57358705df"
] | [
"demo.py"
] | [
"import argparse\nimport scipy.io\nimport torch\nimport numpy as np\nimport os\nfrom torchvision import datasets\nimport matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\n#######################################################################\n# Evaluate\nparser = argparse.ArgumentParser(description='Demo')\nparser.add_argument('--query_index', default=777, type=int, help='test_image_index')\nparser.add_argument('--test_dir',default='/mnt/hdd_data/Dataset/market1501_ss/pytorch',type=str, help='./test_data')\nopts = parser.parse_args()\n\ndata_dir = opts.test_dir\nimage_datasets = {x: datasets.ImageFolder( os.path.join(data_dir,x) ) for x in ['gallery','query']}\n\n#####################################################################\n#Show result\ndef imshow(path, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n im = plt.imread(path)\n plt.imshow(im)\n if title is not None:\n plt.title(title)\n plt.pause(0.001) # pause a bit so that plots are updated\n\n######################################################################\nresult = scipy.io.loadmat('pytorch_result.mat')\nquery_feature = torch.FloatTensor(result['query_f'])\nquery_cam = result['query_cam'][0]\nquery_label = result['query_label'][0]\ngallery_feature = torch.FloatTensor(result['gallery_f'])\ngallery_cam = result['gallery_cam'][0]\ngallery_label = result['gallery_label'][0]\n\nmulti = os.path.isfile('multi_query.mat')\n\nif multi:\n m_result = scipy.io.loadmat('multi_query.mat')\n mquery_feature = torch.FloatTensor(m_result['mquery_f'])\n mquery_cam = m_result['mquery_cam'][0]\n mquery_label = m_result['mquery_label'][0]\n mquery_feature = mquery_feature.cuda()\n\nquery_feature = query_feature.cuda()\ngallery_feature = gallery_feature.cuda()\n\n#######################################################################\n# sort the images\ndef sort_img(qf, ql, qc, gf, gl, gc):\n query = qf.view(-1,1)\n # print(query.shape)\n score = torch.mm(gf,query)\n score = score.squeeze(1).cpu()\n score = score.numpy()\n # predict index\n index = np.argsort(score) #from small to large\n index = index[::-1]\n # index = index[0:2000]\n # good index\n query_index = np.argwhere(gl==ql)\n #same camera\n camera_index = np.argwhere(gc==qc)\n\n #good_index = np.setdiff1d(query_index, camera_index, assume_unique=True)\n junk_index1 = np.argwhere(gl==-1)\n junk_index2 = np.intersect1d(query_index, camera_index)\n junk_index = np.append(junk_index2, junk_index1) \n\n mask = np.in1d(index, junk_index, invert=True)\n index = index[mask]\n return index\n\ni = opts.query_index\nindex = sort_img(query_feature[i],query_label[i],query_cam[i],gallery_feature,gallery_label,gallery_cam)\n\n########################################################################\n# Visualize the rank result\n\nquery_path, _ = image_datasets['query'].imgs[i]\nquery_label = query_label[i]\nprint(query_path)\nprint('Top 10 images are as follow:')\ntry: # Visualize Ranking Result \n # Graphical User Interface is needed\n fig = plt.figure(figsize=(16,4))\n ax = plt.subplot(1,11,1)\n ax.axis('off')\n imshow(query_path,'query')\n for i in range(10):\n ax = plt.subplot(1,11,i+2)\n ax.axis('off')\n img_path, _ = image_datasets['gallery'].imgs[index[i]]\n label = gallery_label[index[i]]\n imshow(img_path)\n if label == query_label:\n ax.set_title('%d'%(i+1), color='green')\n else:\n ax.set_title('%d'%(i+1), color='red')\n print(img_path)\nexcept RuntimeError:\n for i in range(10):\n img_path = image_datasets.imgs[index[i]]\n print(img_path[0])\n print('If you want to see 
the visualization of the ranking result, graphical user interface is needed.')\n\nfig.savefig(\"show.png\")\n"
] | [
[
"matplotlib.pyplot.imread",
"torch.FloatTensor",
"matplotlib.pyplot.pause",
"numpy.append",
"numpy.argwhere",
"numpy.intersect1d",
"matplotlib.pyplot.figure",
"torch.mm",
"numpy.argsort",
"numpy.in1d",
"matplotlib.pyplot.title",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.subplot",
"matplotlib.use"
]
] |
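`sort_img` in the demo record above ranks gallery images by dot product with the query feature, then drops "junk" entries: distractors labeled -1 and images of the same identity captured by the same camera as the query. A minimal NumPy sketch with toy features:

import numpy as np

qf = np.array([1.0, 0.0]); ql, qc = 5, 1             # query feature/label/camera
gf = np.array([[0.9, 0.1], [0.2, 0.8], [0.7, 0.3]])  # gallery features
gl = np.array([5, -1, 5]); gc = np.array([2, 1, 1])  # gallery labels/cameras

index = np.argsort(gf @ qf)[::-1]                    # best match first
junk = np.union1d(np.argwhere(gl == -1),
                  np.intersect1d(np.argwhere(gl == ql), np.argwhere(gc == qc)))
ranked = index[np.in1d(index, junk, invert=True)]
print(ranked)  # [0]: entry 1 is a distractor, entry 2 shares id and camera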
erinaldi/MetaRL | [
"6dfb8d2e63a1802ca7ef9c28f6ab1a758d07f871"
] | [
"rlkit/envs/point_robot.py"
] | [
"import numpy as np\nfrom gym import spaces\nfrom gym import Env\n\nfrom . import register_env\n\n\n@register_env('point-robot')\nclass PointEnv(Env):\n \"\"\"\n point robot on a 2-D plane with position control\n tasks (aka goals) are positions on the plane\n\n - tasks sampled from unit square\n - reward is L2 distance\n \"\"\"\n\n def __init__(self, randomize_tasks=False, n_tasks=2):\n\n if randomize_tasks:\n np.random.seed(1337)\n goals = [[np.random.uniform(-1., 1.), np.random.uniform(-1., 1.)] for _ in range(n_tasks)]\n else:\n # some hand-coded goals for debugging\n goals = [np.array([10, -10]),\n np.array([10, 10]),\n np.array([-10, 10]),\n np.array([-10, -10]),\n np.array([0, 0]),\n\n np.array([7, 2]),\n np.array([0, 4]),\n np.array([-6, 9])\n ]\n goals = [g / 10. for g in goals]\n self.goals = goals\n\n self.reset_task(0)\n self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(2,))\n self.action_space = spaces.Box(low=-0.1, high=0.1, shape=(2,))\n\n def reset_task(self, idx):\n ''' reset goal AND reset the agent '''\n self._goal = self.goals[idx]\n self.reset()\n\n def get_all_task_idx(self):\n return range(len(self.goals))\n\n def reset_model(self):\n # reset to a random location on the unit square\n self._state = np.random.uniform(-1., 1., size=(2,))\n return self._get_obs()\n\n def reset(self):\n return self.reset_model()\n\n def _get_obs(self):\n return np.copy(self._state)\n\n def step(self, action):\n self._state = self._state + action\n x, y = self._state\n x -= self._goal[0]\n y -= self._goal[1]\n reward = - (x ** 2 + y ** 2) ** 0.5\n done = False\n ob = self._get_obs()\n return ob, reward, done, dict()\n\n def viewer_setup(self):\n print('no viewer')\n pass\n\n def render(self):\n print('current state:', self._state)\n\n\n@register_env('sparse-point-robot')\nclass SparsePointEnv(PointEnv):\n '''\n - tasks sampled from unit half-circle\n - reward is L2 distance given only within goal radius\n\n NOTE that `step()` returns the dense reward because this is used during meta-training\n the algorithm should call `sparsify_rewards()` to get the sparse rewards\n '''\n def __init__(self, randomize_tasks=False, n_tasks=2, goal_radius=0.2):\n super().__init__(randomize_tasks, n_tasks)\n self.goal_radius = goal_radius\n\n if randomize_tasks:\n np.random.seed(1337)\n radius = 1.0\n angles = np.linspace(0, np.pi, num=n_tasks)\n xs = radius * np.cos(angles)\n ys = radius * np.sin(angles)\n goals = np.stack([xs, ys], axis=1)\n np.random.shuffle(goals)\n goals = goals.tolist()\n\n self.goals = goals\n self.reset_task(0)\n\n def sparsify_rewards(self, r):\n ''' zero out rewards when outside the goal radius '''\n mask = (r >= -self.goal_radius).astype(np.float32)\n r = r * mask\n return r\n\n def reset_model(self):\n self._state = np.array([0, 0])\n return self._get_obs()\n\n def step(self, action):\n ob, reward, done, d = super().step(action)\n sparse_reward = self.sparsify_rewards(reward)\n # make sparse rewards positive\n if reward >= -self.goal_radius:\n sparse_reward += 1\n d.update({'sparse_reward': sparse_reward})\n return ob, reward, done, d\n"
] | [
[
"numpy.random.uniform",
"numpy.random.shuffle",
"numpy.stack",
"numpy.random.seed",
"numpy.cos",
"numpy.copy",
"numpy.array",
"numpy.sin",
"numpy.linspace"
]
] |
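SparsePointEnv in the record above returns the dense reward (negative L2 distance to the goal) from step() for meta-training, and sparsifies it separately: zero outside goal_radius, shifted positive inside. A minimal sketch with illustrative values:

goal_radius = 0.2

def sparsify(dense_reward):
    # dense reward is -distance, so "inside the radius" means >= -goal_radius
    if dense_reward >= -goal_radius:
        return dense_reward + 1.0  # make in-radius rewards positive
    return 0.0

print(sparsify(-0.05))  # 0.95: within the goal radius
print(sparsify(-0.70))  # 0.0: outside, no learning signal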
sejongjoa/openpilot_083 | [
"301500dff6bd53e64257898cac939b24f56befac"
] | [
"selfdrive/locationd/locationd.py"
] | [
"#!/usr/bin/env python3\nimport json\nimport numpy as np\nimport sympy as sp\nimport cereal.messaging as messaging\nfrom cereal import log\nfrom common.params import Params\nimport common.transformations.coordinates as coord\nfrom common.transformations.orientation import ecef_euler_from_ned, \\\n euler_from_quat, \\\n ned_euler_from_ecef, \\\n quat_from_euler, euler_from_rot, \\\n rot_from_quat, rot_from_euler\nfrom rednose.helpers import KalmanError\nfrom selfdrive.locationd.models.live_kf import LiveKalman, States, ObservationKind\nfrom selfdrive.locationd.models.constants import GENERATED_DIR\nfrom selfdrive.swaglog import cloudlog\n\n#from datetime import datetime\n#from laika.gps_time import GPSTime\n\nfrom sympy.utilities.lambdify import lambdify\nfrom rednose.helpers.sympy_helpers import euler_rotate\n\nSensorSource = log.SensorEventData.SensorSource\n\n\nVISION_DECIMATION = 2\nSENSOR_DECIMATION = 10\nPOSENET_STD_HIST = 40\n\n\ndef to_float(arr):\n return [float(arr[0]), float(arr[1]), float(arr[2])]\n\n\ndef get_H():\n # this returns a function to eval the jacobian\n # of the observation function of the local vel\n roll = sp.Symbol('roll')\n pitch = sp.Symbol('pitch')\n yaw = sp.Symbol('yaw')\n vx = sp.Symbol('vx')\n vy = sp.Symbol('vy')\n vz = sp.Symbol('vz')\n\n h = euler_rotate(roll, pitch, yaw).T*(sp.Matrix([vx, vy, vz]))\n H = h.jacobian(sp.Matrix([roll, pitch, yaw, vx, vy, vz]))\n H_f = lambdify([roll, pitch, yaw, vx, vy, vz], H)\n return H_f\n\n\nclass Localizer():\n def __init__(self, disabled_logs=None, dog=None):\n if disabled_logs is None:\n disabled_logs = []\n\n self.kf = LiveKalman(GENERATED_DIR)\n self.reset_kalman()\n self.max_age = .1 # seconds\n self.disabled_logs = disabled_logs\n self.calib = np.zeros(3)\n self.device_from_calib = np.eye(3)\n self.calib_from_device = np.eye(3)\n self.calibrated = False\n self.H = get_H()\n\n self.posenet_invalid_count = 0\n self.posenet_speed = 0\n self.car_speed = 0\n self.posenet_stds = 10*np.ones((POSENET_STD_HIST))\n\n self.converter = coord.LocalCoord.from_ecef(self.kf.x[States.ECEF_POS])\n\n self.unix_timestamp_millis = 0\n self.last_gps_fix = 0\n self.device_fell = False\n\n @staticmethod\n def msg_from_state(converter, calib_from_device, H, predicted_state, predicted_cov, calibrated):\n predicted_std = np.sqrt(np.diagonal(predicted_cov))\n\n fix_ecef = predicted_state[States.ECEF_POS]\n fix_ecef_std = predicted_std[States.ECEF_POS_ERR]\n vel_ecef = predicted_state[States.ECEF_VELOCITY]\n vel_ecef_std = predicted_std[States.ECEF_VELOCITY_ERR]\n fix_pos_geo = coord.ecef2geodetic(fix_ecef)\n #fix_pos_geo_std = np.abs(coord.ecef2geodetic(fix_ecef + fix_ecef_std) - fix_pos_geo)\n orientation_ecef = euler_from_quat(predicted_state[States.ECEF_ORIENTATION])\n orientation_ecef_std = predicted_std[States.ECEF_ORIENTATION_ERR]\n device_from_ecef = rot_from_quat(predicted_state[States.ECEF_ORIENTATION]).T\n calibrated_orientation_ecef = euler_from_rot(calib_from_device.dot(device_from_ecef))\n\n acc_calib = calib_from_device.dot(predicted_state[States.ACCELERATION])\n acc_calib_std = np.sqrt(np.diagonal(calib_from_device.dot(\n predicted_cov[States.ACCELERATION_ERR, States.ACCELERATION_ERR]).dot(\n calib_from_device.T)))\n ang_vel_calib = calib_from_device.dot(predicted_state[States.ANGULAR_VELOCITY])\n ang_vel_calib_std = np.sqrt(np.diagonal(calib_from_device.dot(\n predicted_cov[States.ANGULAR_VELOCITY_ERR, States.ANGULAR_VELOCITY_ERR]).dot(\n calib_from_device.T)))\n\n vel_device = device_from_ecef.dot(vel_ecef)\n 
device_from_ecef_eul = euler_from_quat(predicted_state[States.ECEF_ORIENTATION]).T\n idxs = list(range(States.ECEF_ORIENTATION_ERR.start, States.ECEF_ORIENTATION_ERR.stop)) + \\\n list(range(States.ECEF_VELOCITY_ERR.start, States.ECEF_VELOCITY_ERR.stop))\n condensed_cov = predicted_cov[idxs][:, idxs]\n HH = H(*list(np.concatenate([device_from_ecef_eul, vel_ecef])))\n vel_device_cov = HH.dot(condensed_cov).dot(HH.T)\n vel_device_std = np.sqrt(np.diagonal(vel_device_cov))\n\n vel_calib = calib_from_device.dot(vel_device)\n vel_calib_std = np.sqrt(np.diagonal(calib_from_device.dot(\n vel_device_cov).dot(calib_from_device.T)))\n\n orientation_ned = ned_euler_from_ecef(fix_ecef, orientation_ecef)\n #orientation_ned_std = ned_euler_from_ecef(fix_ecef, orientation_ecef + orientation_ecef_std) - orientation_ned\n ned_vel = converter.ecef2ned(fix_ecef + vel_ecef) - converter.ecef2ned(fix_ecef)\n #ned_vel_std = self.converter.ecef2ned(fix_ecef + vel_ecef + vel_ecef_std) - self.converter.ecef2ned(fix_ecef + vel_ecef)\n\n fix = messaging.log.LiveLocationKalman.new_message()\n\n # write measurements to msg\n measurements = [\n # measurement field, value, std, valid\n (fix.positionGeodetic, fix_pos_geo, np.nan*np.zeros(3), True),\n (fix.positionECEF, fix_ecef, fix_ecef_std, True),\n (fix.velocityECEF, vel_ecef, vel_ecef_std, True),\n (fix.velocityNED, ned_vel, np.nan*np.zeros(3), True),\n (fix.velocityDevice, vel_device, vel_device_std, True),\n (fix.accelerationDevice, predicted_state[States.ACCELERATION], predicted_std[States.ACCELERATION_ERR], True),\n (fix.orientationECEF, orientation_ecef, orientation_ecef_std, True),\n (fix.calibratedOrientationECEF, calibrated_orientation_ecef, np.nan*np.zeros(3), calibrated),\n (fix.orientationNED, orientation_ned, np.nan*np.zeros(3), True),\n (fix.angularVelocityDevice, predicted_state[States.ANGULAR_VELOCITY], predicted_std[States.ANGULAR_VELOCITY_ERR], True),\n (fix.velocityCalibrated, vel_calib, vel_calib_std, calibrated),\n (fix.angularVelocityCalibrated, ang_vel_calib, ang_vel_calib_std, calibrated),\n (fix.accelerationCalibrated, acc_calib, acc_calib_std, calibrated),\n ]\n\n for field, value, std, valid in measurements:\n # TODO: can we write the lists faster?\n field.value = to_float(value)\n field.std = to_float(std)\n field.valid = valid\n\n return fix\n\n def liveLocationMsg(self):\n fix = self.msg_from_state(self.converter, self.calib_from_device, self.H, self.kf.x, self.kf.P, self.calibrated)\n # experimentally found these values, no false positives in 20k minutes of driving\n old_mean, new_mean = np.mean(self.posenet_stds[:POSENET_STD_HIST//2]), np.mean(self.posenet_stds[POSENET_STD_HIST//2:])\n std_spike = new_mean/old_mean > 4 and new_mean > 7\n\n fix.posenetOK = not (std_spike and self.car_speed > 5)\n fix.deviceStable = not self.device_fell\n self.device_fell = False\n\n #fix.gpsWeek = self.time.week\n #fix.gpsTimeOfWeek = self.time.tow\n fix.unixTimestampMillis = self.unix_timestamp_millis\n\n if np.linalg.norm(fix.positionECEF.std) < 50 and self.calibrated:\n fix.status = 'valid'\n elif np.linalg.norm(fix.positionECEF.std) < 50:\n fix.status = 'uncalibrated'\n else:\n fix.status = 'uninitialized'\n return fix\n\n def update_kalman(self, time, kind, meas, R=None):\n try:\n self.kf.predict_and_observe(time, kind, meas, R)\n except KalmanError:\n cloudlog.error(\"Error in predict and observe, kalman reset\")\n self.reset_kalman()\n\n def handle_gps(self, current_time, log):\n # ignore the message if the fix is invalid\n if log.flags % 2 == 0:\n 
return\n\n self.last_gps_fix = current_time\n\n self.converter = coord.LocalCoord.from_geodetic([log.latitude, log.longitude, log.altitude])\n ecef_pos = self.converter.ned2ecef([0, 0, 0])\n ecef_vel = self.converter.ned2ecef(np.array(log.vNED)) - ecef_pos\n ecef_pos_R = np.diag([(3*log.verticalAccuracy)**2]*3)\n ecef_vel_R = np.diag([(log.speedAccuracy)**2]*3)\n\n #self.time = GPSTime.from_datetime(datetime.utcfromtimestamp(log.timestamp*1e-3))\n self.unix_timestamp_millis = log.timestamp\n gps_est_error = np.sqrt((self.kf.x[0] - ecef_pos[0])**2 +\n (self.kf.x[1] - ecef_pos[1])**2 +\n (self.kf.x[2] - ecef_pos[2])**2)\n\n orientation_ecef = euler_from_quat(self.kf.x[States.ECEF_ORIENTATION])\n orientation_ned = ned_euler_from_ecef(ecef_pos, orientation_ecef)\n orientation_ned_gps = np.array([0, 0, np.radians(log.bearingDeg)])\n orientation_error = np.mod(orientation_ned - orientation_ned_gps - np.pi, 2*np.pi) - np.pi\n initial_pose_ecef_quat = quat_from_euler(ecef_euler_from_ned(ecef_pos, orientation_ned_gps))\n if np.linalg.norm(ecef_vel) > 5 and np.linalg.norm(orientation_error) > 1:\n cloudlog.error(\"Locationd vs ubloxLocation orientation difference too large, kalman reset\")\n self.reset_kalman(init_pos=ecef_pos, init_orient=initial_pose_ecef_quat)\n self.update_kalman(current_time, ObservationKind.ECEF_ORIENTATION_FROM_GPS, initial_pose_ecef_quat)\n elif gps_est_error > 50:\n cloudlog.error(\"Locationd vs ubloxLocation position difference too large, kalman reset\")\n self.reset_kalman(init_pos=ecef_pos, init_orient=initial_pose_ecef_quat)\n\n self.update_kalman(current_time, ObservationKind.ECEF_POS, ecef_pos, R=ecef_pos_R)\n self.update_kalman(current_time, ObservationKind.ECEF_VEL, ecef_vel, R=ecef_vel_R)\n\n def handle_car_state(self, current_time, log):\n self.speed_counter += 1\n\n if self.speed_counter % SENSOR_DECIMATION == 0:\n self.update_kalman(current_time, ObservationKind.ODOMETRIC_SPEED, [log.vEgo])\n self.car_speed = abs(log.vEgo)\n if log.vEgo == 0:\n self.update_kalman(current_time, ObservationKind.NO_ROT, [0, 0, 0])\n\n def handle_cam_odo(self, current_time, log):\n self.cam_counter += 1\n\n if self.cam_counter % VISION_DECIMATION == 0:\n rot_device = self.device_from_calib.dot(log.rot)\n rot_device_std = self.device_from_calib.dot(log.rotStd)\n self.update_kalman(current_time,\n ObservationKind.CAMERA_ODO_ROTATION,\n np.concatenate([rot_device, 10*rot_device_std]))\n trans_device = self.device_from_calib.dot(log.trans)\n trans_device_std = self.device_from_calib.dot(log.transStd)\n self.posenet_speed = np.linalg.norm(trans_device)\n self.posenet_stds[:-1] = self.posenet_stds[1:]\n self.posenet_stds[-1] = trans_device_std[0]\n self.update_kalman(current_time,\n ObservationKind.CAMERA_ODO_TRANSLATION,\n np.concatenate([trans_device, 10*trans_device_std]))\n\n def handle_sensors(self, current_time, log):\n # TODO does not yet account for double sensor readings in the log\n for sensor_reading in log:\n sensor_time = 1e-9 * sensor_reading.timestamp\n # TODO: handle messages from two IMUs at the same time\n if sensor_reading.source == SensorSource.lsm6ds3:\n continue\n\n # Gyro Uncalibrated\n if sensor_reading.sensor == 5 and sensor_reading.type == 16:\n self.gyro_counter += 1\n if self.gyro_counter % SENSOR_DECIMATION == 0:\n v = sensor_reading.gyroUncalibrated.v\n self.update_kalman(sensor_time, ObservationKind.PHONE_GYRO, [-v[2], -v[1], -v[0]])\n\n # Accelerometer\n if sensor_reading.sensor == 1 and sensor_reading.type == 1:\n # check if device fell, estimate 10 for 
g\n # 40m/s**2 is a good filter for falling detection, no false positives in 20k minutes of driving\n self.device_fell = self.device_fell or (np.linalg.norm(np.array(sensor_reading.acceleration.v) - np.array([10, 0, 0])) > 40)\n\n self.acc_counter += 1\n if self.acc_counter % SENSOR_DECIMATION == 0:\n v = sensor_reading.acceleration.v\n self.update_kalman(sensor_time, ObservationKind.PHONE_ACCEL, [-v[2], -v[1], -v[0]])\n\n def handle_live_calib(self, current_time, log):\n if len(log.rpyCalib):\n self.calib = log.rpyCalib\n self.device_from_calib = rot_from_euler(self.calib)\n self.calib_from_device = self.device_from_calib.T\n self.calibrated = log.calStatus == 1\n\n def reset_kalman(self, current_time=None, init_orient=None, init_pos=None):\n self.filter_time = current_time\n init_x = LiveKalman.initial_x.copy()\n # too nonlinear to init on completely wrong\n if init_orient is not None:\n init_x[3:7] = init_orient\n if init_pos is not None:\n init_x[:3] = init_pos\n self.kf.init_state(init_x, covs=np.diag(LiveKalman.initial_P_diag), filter_time=current_time)\n\n self.observation_buffer = []\n\n self.gyro_counter = 0\n self.acc_counter = 0\n self.speed_counter = 0\n self.cam_counter = 0\n\n\ndef locationd_thread(sm, pm, disabled_logs=None):\n if disabled_logs is None:\n disabled_logs = []\n\n if sm is None:\n socks = ['gpsLocationExternal', 'sensorEvents', 'cameraOdometry', 'liveCalibration', 'carState']\n sm = messaging.SubMaster(socks, ignore_alive=['gpsLocationExternal'])\n if pm is None:\n pm = messaging.PubMaster(['liveLocationKalman'])\n\n params = Params()\n localizer = Localizer(disabled_logs=disabled_logs)\n\n while True:\n sm.update()\n\n for sock, updated in sm.updated.items():\n if updated and sm.valid[sock]:\n t = sm.logMonoTime[sock] * 1e-9\n if sock == \"sensorEvents\":\n localizer.handle_sensors(t, sm[sock])\n elif sock == \"gpsLocationExternal\":\n localizer.handle_gps(t, sm[sock])\n elif sock == \"carState\":\n localizer.handle_car_state(t, sm[sock])\n elif sock == \"cameraOdometry\":\n localizer.handle_cam_odo(t, sm[sock])\n elif sock == \"liveCalibration\":\n localizer.handle_live_calib(t, sm[sock])\n\n if sm.updated['cameraOdometry']:\n t = sm.logMonoTime['cameraOdometry']\n msg = messaging.new_message('liveLocationKalman')\n msg.logMonoTime = t\n\n msg.liveLocationKalman = localizer.liveLocationMsg()\n msg.liveLocationKalman.inputsOK = sm.all_alive_and_valid()\n msg.liveLocationKalman.sensorsOK = sm.alive['sensorEvents'] and sm.valid['sensorEvents']\n\n gps_age = (t / 1e9) - localizer.last_gps_fix\n msg.liveLocationKalman.gpsOK = gps_age < 1.0\n pm.send('liveLocationKalman', msg)\n\n if sm.frame % 1200 == 0 and msg.liveLocationKalman.gpsOK: # once a minute\n location = {\n 'latitude': msg.liveLocationKalman.positionGeodetic.value[0],\n 'longitude': msg.liveLocationKalman.positionGeodetic.value[1],\n 'altitude': msg.liveLocationKalman.positionGeodetic.value[2],\n }\n params.put(\"LastGPSPosition\", json.dumps(location))\n\n\ndef main(sm=None, pm=None):\n locationd_thread(sm, pm)\n\n\nif __name__ == \"__main__\":\n import os\n os.environ[\"OMP_NUM_THREADS\"] = \"1\"\n main()\n"
] | [
[
"numpy.eye",
"numpy.ones",
"numpy.array",
"numpy.zeros",
"numpy.diag",
"numpy.concatenate",
"numpy.mod",
"numpy.sqrt",
"numpy.diagonal",
"numpy.linalg.norm",
"numpy.radians",
"numpy.mean"
]
] |
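The posenetOK check in the locationd record above watches a rolling window of posenet translation stds and flags a spike when the mean of the newer half is both much larger than the older half and large in absolute terms; the thresholds (ratio > 4, mean > 7, speed > 5 m/s) come from the source. A minimal sketch on a synthetic window:

import numpy as np

POSENET_STD_HIST = 40
stds = np.ones(POSENET_STD_HIST)
stds[POSENET_STD_HIST // 2:] = 9.0   # pretend the stds just jumped

old_mean = np.mean(stds[:POSENET_STD_HIST // 2])
new_mean = np.mean(stds[POSENET_STD_HIST // 2:])
std_spike = new_mean / old_mean > 4 and new_mean > 7

car_speed = 10.0  # m/s, hypothetical
posenet_ok = not (std_spike and car_speed > 5)
print(posenet_ok)  # False: large std spike while the car is moving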
redhat6/cornac | [
"856cf0f546a0dc6b46f407128d89ef2534994c60"
] | [
"cornac/models/hft/recom_hft.py"
] | [
"# Copyright 2018 The Cornac Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport numpy as np\n\nfrom ..recommender import Recommender\nfrom ...exception import ScoreException\n\n\nclass HFT(Recommender):\n \"\"\"Hidden Factors and Hidden Topics\n\n Parameters\n ----------\n name: string, default: 'HFT'\n The name of the recommender model.\n\n k: int, optional, default: 10\n The dimension of the latent factors.\n\n max_iter: int, optional, default: 50\n Maximum number of iterations for EM.\n\n grad_iter: int, optional, default: 50\n Maximum number of iterations for L-BFGS.\n\n lambda_text: float, default: 0.1\n Weight of corpus likelihood in objective function.\n\n l2_reg: float, default: 0.001\n Regularization for user item latent factors.\n\n vocab_size: int, optional, default: 8000\n Size of vocabulary for review text.\n\n init_params: dictionary, optional, default: None\n List of initial parameters, e.g., init_params = {'alpha': alpha, 'beta_u': beta_u,\n 'beta_i': beta_i, 'gamma_u': gamma_u, 'gamma_v': gamma_v}\n\n alpha: float\n Model offset, optional initialization via init_params.\n\n beta_u: ndarray. shape (n_user, 1)\n User biases, optional initialization via init_params.\n\n beta_u: ndarray. shape (n_item, 1)\n Item biases, optional initialization via init_params.\n\n gamma_u: ndarray, shape (n_users,k)\n The user latent factors, optional initialization via init_params.\n\n gamma_v: ndarray, shape (n_items,k)\n The item latent factors, optional initialization via init_params.\n\n trainable: boolean, optional, default: True\n When False, the model will not be re-trained, and input of pre-trained parameters are required.\n\n verbose: boolean, optional, default: True\n When True, some running logs are displayed.\n \n seed: int, optional, default: None\n Random seed for weight initialization.\n\n References\n ----------\n Julian McAuley, Jure Leskovec. 
\"Hidden Factors and Hidden Topics: Understanding Rating Dimensions with Review Text\"\n RecSys '13 Proceedings of the 7th ACM conference on Recommender systems Pages 165-172\n \"\"\"\n\n def __init__(self, name='HFT', k=10, max_iter=50, grad_iter=50, \n lambda_text=0.1, l2_reg=0.001, vocab_size=8000,\n init_params=None, trainable=True, verbose=True, seed=None):\n super().__init__(name=name, trainable=trainable, verbose=verbose)\n \n self.k = k\n self.lambda_text = lambda_text\n self.l2_reg = l2_reg\n self.grad_iter = grad_iter\n self.name = name\n self.max_iter = max_iter\n self.verbose = verbose\n self.init_params = {} if not init_params else init_params\n self.seed = seed\n self.vocab_size = vocab_size\n\n def fit(self, train_set, val_set=None):\n \"\"\"Fit the model to observations.\n\n Parameters\n ----------\n train_set: :obj:`cornac.data.Dataset`, required\n User-Item preference data as well as additional modalities.\n\n val_set: :obj:`cornac.data.Dataset`, optional, default: None\n User-Item preference data for model selection purposes (e.g., early stopping).\n\n Returns\n -------\n self : object\n \"\"\"\n Recommender.fit(self, train_set, val_set)\n from ...utils.init_utils import normal\n\n self.n_item = self.train_set.num_items\n self.n_user = self.train_set.num_users\n\n self.alpha = self.init_params.get('alpha', train_set.global_mean)\n self.beta_u = self.init_params.get('beta_u', normal(self.n_user, std=0.01, random_state=self.seed))\n self.beta_i = self.init_params.get('beta_i', normal(self.n_item, std=0.01, random_state=self.seed))\n self.gamma_u = self.init_params.get('gamma_u', normal((self.n_user, self.k), std=0.01, random_state=self.seed))\n self.gamma_i = self.init_params.get('gamma_i', normal((self.n_item, self.k), std=0.01, random_state=self.seed))\n\n if self.trainable:\n self._fit_hft()\n\n return self\n\n @staticmethod\n def _build_data(csr_mat):\n index_list = []\n rating_list = []\n for i in range(csr_mat.shape[0]):\n j, k = csr_mat.indptr[i], csr_mat.indptr[i + 1]\n index_list.append(csr_mat.indices[j:k])\n rating_list.append(csr_mat.data[j:k])\n return index_list, rating_list\n\n def _fit_hft(self):\n from .hft import Model\n from tqdm import trange\n\n # document data\n bow_mat = self.train_set.item_text.batch_bow(np.arange(self.n_item), keep_sparse=True)\n documents, _ = self._build_data(bow_mat) # bag of word feature\n # Rating data\n user_data = self._build_data(self.train_set.matrix)\n item_data = self._build_data(self.train_set.matrix.T.tocsr())\n\n model = Model(n_user=self.n_user, n_item=self.n_item, alpha=self.alpha, beta_u=self.beta_u, beta_i=self.beta_i,\n gamma_u=self.gamma_u, gamma_i=self.gamma_i, n_vocab=self.vocab_size, k=self.k,\n lambda_text=self.lambda_text, l2_reg=self.l2_reg, grad_iter=self.grad_iter)\n\n model.init_count(docs=documents)\n\n # training\n loop = trange(self.max_iter, disable=not self.verbose)\n for _ in loop:\n model.assign_word_topics(docs=documents)\n loss = model.update_params(rating_data=(user_data, item_data))\n loop.set_postfix(loss=loss)\n\n self.alpha, self.beta_u, self.beta_i, self.gamma_u, self.gamma_i = model.get_parameter()\n\n if self.verbose:\n print('Learning completed!')\n\n def score(self, user_idx, item_idx=None):\n \"\"\"Predict the scores/ratings of a user for an item.\n\n Parameters\n ----------\n user_idx: int, required\n The index of the user for whom to perform score prediction.\n\n item_idx: int, optional, default: None\n The index of the item for that to perform score prediction.\n If None, scores 
for all known items will be returned.\n\n Returns\n -------\n res : A scalar or a Numpy array\n Relative scores that the user gives to the item or to all known items\n \"\"\"\n if item_idx is None:\n if self.train_set.is_unk_user(user_idx):\n raise ScoreException(\"Can't make score prediction for (user_id=%d)\" % user_idx)\n\n known_item_scores = self.alpha + self.beta_u[user_idx] + self.beta_i + self.gamma_i.dot(\n self.gamma_u[user_idx, :])\n return known_item_scores\n else:\n if self.train_set.is_unk_user(user_idx) or self.train_set.is_unk_item(item_idx):\n raise ScoreException(\"Can't make score prediction for (user_id=%d, item_id=%d)\" % (user_idx, item_idx))\n\n user_pred = self.alpha + self.beta_u[user_idx] + self.beta_i[item_idx] + self.gamma_i[item_idx, :].dot(\n self.gamma_u[user_idx, :])\n\n return user_pred\n"
] | [
[
"numpy.arange"
]
] |
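HFT.score in the record above predicts a rating as the global offset plus user and item biases plus the inner product of the user and item latent factors. A minimal sketch with small random parameters (not a trained model):

import numpy as np

k, n_users, n_items = 10, 3, 4
rng = np.random.default_rng(0)
alpha = 3.5                                   # global rating offset
beta_u = rng.normal(0, 0.01, n_users)         # user biases
beta_i = rng.normal(0, 0.01, n_items)         # item biases
gamma_u = rng.normal(0, 0.01, (n_users, k))   # user latent factors
gamma_i = rng.normal(0, 0.01, (n_items, k))   # item latent factors

u = 1
all_item_scores = alpha + beta_u[u] + beta_i + gamma_i.dot(gamma_u[u])
print(all_item_scores.shape)  # (4,): one score per known item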
ryanloney/openvino-1 | [
"4e0a740eb3ee31062ba0df88fcf438564f67edb7",
"4e0a740eb3ee31062ba0df88fcf438564f67edb7",
"4e0a740eb3ee31062ba0df88fcf438564f67edb7",
"4e0a740eb3ee31062ba0df88fcf438564f67edb7"
] | [
"tools/mo/unit_tests/mo/load/loader_test.py",
"tools/mo/unit_tests/mo/ops/dft_signal_size_canonicalization_test.py",
"tools/mo/unit_tests/mo/middle/InterpolateSequenceToInterpolate_test.py",
"tools/mo/openvino/tools/mo/ops/constant_fill.py"
] | [
"# Copyright (C) 2018-2022 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport unittest\n\nimport numpy as np\n\nfrom openvino.tools.mo.load.tf.loader import graph_or_sub_graph_has_nhwc_ops\nfrom unit_tests.utils.graph import build_graph, result, regular_op, const, connect_front\n\n\nclass TFLoaderTest(unittest.TestCase):\n @staticmethod\n def build_conv_graph():\n nodes = {\n **const('weights', np.random.randn(1, 1, 1, 1)),\n **regular_op('input', {'op': 'Parameter'}),\n **regular_op('conv', {'op': 'Conv2D', 'layout': 'NHWC'}),\n **result('result'),\n }\n edges = [*connect_front('input', '0:conv'),\n *connect_front('weights', '1:conv'),\n *connect_front('conv:0', 'result'),\n ]\n graph = build_graph(nodes, edges)\n\n graph.stage = 'front'\n return graph\n\n @staticmethod\n def build_parameter_result_graph():\n nodes = {\n **regular_op('input', {'op': 'Parameter'}),\n **result('result'),\n }\n edges = [*connect_front('input', '0:result'),\n ]\n graph = build_graph(nodes, edges)\n graph.stage = 'front'\n return graph\n\n @staticmethod\n def build_loop_graph(body_graph):\n # create fake Loop operation\n nodes = {\n **regular_op('input', {'op': 'Parameter'}),\n **regular_op('loop', {'op': 'Loop', 'body': body_graph, 'sub_graphs': ['body']}),\n **result('result'),\n }\n edges = [*connect_front('input', '0:loop'),\n *connect_front('loop:0', 'result'),\n ]\n graph = build_graph(nodes, edges)\n graph.stage = 'front'\n return graph\n\n def test_convolution_main_graph(self):\n self.assertTrue(graph_or_sub_graph_has_nhwc_ops(self.build_conv_graph()))\n\n def test_convolution_loop_body_graph(self):\n self.assertTrue(graph_or_sub_graph_has_nhwc_ops(self.build_loop_graph(self.build_conv_graph())))\n\n def test_no_convolution_main_graph(self):\n self.assertFalse(graph_or_sub_graph_has_nhwc_ops(self.build_parameter_result_graph()))\n\n def test_no_convolution_main_and_sub_graph(self):\n self.assertFalse(graph_or_sub_graph_has_nhwc_ops(self.build_loop_graph(self.build_parameter_result_graph())))\n",
"# Copyright (C) 2018-2022 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport unittest\n\nimport numpy as np\nfrom generator import generator, generate\n\nfrom openvino.tools.mo.ops.dft import FFTBase\nfrom openvino.tools.mo.front.common.partial_infer.utils import int64_array\n\n\n@generator\nclass DFTSignalSizeCanonicalizationTest(unittest.TestCase):\n @generate(*[\n (int64_array([-1, 77]), int64_array([1, 2]), int64_array([2, 180, 180, 2]), int64_array([180, 77])),\n (int64_array([390, 87]), int64_array([2, 0]), int64_array([2, 180, 180, 2]), int64_array([390, 87])),\n (int64_array([600, -1, 40]),\n int64_array([3, 0, 1]),\n int64_array([7, 50, 130, 400, 2]),\n int64_array([600, 7, 40])),\n (int64_array([-1, 16, -1]),\n int64_array([3, 0, 2]),\n int64_array([7, 50, 130, 400, 2]),\n int64_array([400, 16, 130])),\n (int64_array([16, -1, -1]),\n int64_array([3, 0, 2]),\n int64_array([7, 50, 130, 400, 2]),\n int64_array([16, 7, 130])),\n (int64_array([-1, -1, 16]),\n int64_array([3, 0, 2]),\n int64_array([7, 50, 130, 400, 2]),\n int64_array([400, 7, 16])),\n (int64_array([-1, -1, -1]),\n int64_array([3, 0, 2]),\n int64_array([7, 50, 130, 400, 2]),\n int64_array([400, 7, 130])),\n ])\n def test_canonicalization(self, signal_size, axes, input_shape, expected_result):\n canonicalized_signal_size = FFTBase.canonicalize_signal_size(signal_size, axes, input_shape)\n self.assertTrue(np.array_equal(canonicalized_signal_size, expected_result))\n",
"# Copyright (C) 2018-2022 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport numpy as np\nimport unittest\n\nfrom openvino.tools.mo.middle.InterpolateSequenceToInterpolate import InterpolateSequenceToInterpolate\nfrom openvino.tools.mo.front.common.partial_infer.utils import int64_array\nfrom openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs\nfrom unit_tests.utils.graph import build_graph\n\ngraph_node_attrs_for_2d_case_1_opset4_case = {\n 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},\n 'placeholder_data': {\n 'value': None,\n 'shape': int64_array([1, 4, 220, 350]),\n 'kind': 'data',\n 'data_type': None\n },\n 'size_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([660])\n },\n 'size_1_data': {'value': int64_array([660]), 'shape': [1], 'kind': 'data'},\n 'scale_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([3.0])\n },\n 'scale_1_data': {'value': np.array([3.0]), 'shape': [1], 'kind': 'data'},\n 'axes_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2])\n },\n 'axes_1_data': {'value': int64_array([2]), 'shape': [1], 'kind': 'data'},\n 'interpolate_1': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'mode': 'nearest',\n 'shape_calculation_mode': 'scales',\n 'version': 'opset4'\n },\n 'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 660, 350]), 'kind': 'data'},\n 'size_2': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([700])\n },\n 'size_2_data': {'value': int64_array([700]), 'shape': [1], 'kind': 'data'},\n 'scale_2': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([2.0])\n },\n 'scale_2_data': {'value': np.array([2.0]), 'shape': [1], 'kind': 'data'},\n 'axes_2': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([3])\n },\n 'axes_2_data': {'value': int64_array([3]), 'shape': [1], 'kind': 'data'},\n 'interpolate_2': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'mode': 'nearest',\n 'shape_calculation_mode': 'scales',\n 'version': 'opset4'\n },\n 'interpolate_2_data': {'value': None, 'shape': int64_array([1, 4, 660, 700]), 'kind': 'data'},\n 'size_3': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([1320])\n },\n 'size_3_data': {'value': int64_array([1320]), 'shape': [1], 'kind': 'data'},\n 'scale_3': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([2.0])\n },\n 'scale_3_data': {'value': np.array([2.0]), 'shape': [1], 'kind': 'data'},\n 'axes_3': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2])\n },\n 'axes_3_data': {'value': int64_array([2]), 'shape': [1], 'kind': 'data'},\n 'interpolate_3': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'mode': 'nearest',\n 'shape_calculation_mode': 'scales',\n 'version': 'opset4'\n },\n 'interpolate_3_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'},\n 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},\n 'abs_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'},\n 'output': {'kind': 'op', 'op': 'Result'},\n}\n\nedges_for_2d_case_1_opset4_case = [\n ('placeholder', 'placeholder_data'),\n\n ('placeholder_data', 'interpolate_1', {'in': 0}),\n ('size_1', 'size_1_data'),\n ('scale_1', 'scale_1_data'),\n ('axes_1', 'axes_1_data'),\n ('size_1_data', 'interpolate_1', {'in': 1}),\n ('scale_1_data', 'interpolate_1', {'in': 2}),\n ('axes_1_data', 
'interpolate_1', {'in': 3}),\n ('interpolate_1', 'interpolate_1_data'),\n\n ('interpolate_1_data', 'interpolate_2', {'in': 0}),\n ('size_2', 'size_2_data'),\n ('scale_2', 'scale_2_data'),\n ('axes_2', 'axes_2_data'),\n ('size_2_data', 'interpolate_2', {'in': 1}),\n ('scale_2_data', 'interpolate_2', {'in': 2}),\n ('axes_2_data', 'interpolate_2', {'in': 3}),\n ('interpolate_2', 'interpolate_2_data'),\n\n ('interpolate_2_data', 'interpolate_3', {'in': 0}),\n ('size_3', 'size_3_data'),\n ('scale_3', 'scale_3_data'),\n ('axes_3', 'axes_3_data'),\n ('size_3_data', 'interpolate_3', {'in': 1}),\n ('scale_3_data', 'interpolate_3', {'in': 2}),\n ('axes_3_data', 'interpolate_3', {'in': 3}),\n ('interpolate_3', 'interpolate_3_data'),\n\n ('interpolate_3_data', 'abs'),\n ('abs', 'abs_data'),\n ('abs_data', 'output'),\n]\n\n\nref_graph_node_attrs_for_2d_case_1_opset4_case = {\n 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},\n 'placeholder_data': {\n 'value': None,\n 'shape': int64_array([1, 4, 220, 350]),\n 'kind': 'data',\n 'data_type': None\n },\n 'size_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([660, 700])\n },\n 'size_1_data': {'value': None, 'shape': None, 'kind': 'data'},\n 'scale_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([3.0, 2.0])\n },\n 'scale_1_data': {'value': None, 'shape': None, 'kind': 'data'},\n 'axes_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2, 3])\n },\n 'axes_1_data': {'value': None, 'shape': None, 'kind': 'data'},\n 'interpolate_1': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'mode': 'nearest',\n 'shape_calculation_mode': 'scales',\n 'antialias': 0,\n 'pads_begin': int64_array([0]),\n 'pads_end': int64_array([0]),\n 'coordinate_transformation_mode': 'half_pixel',\n 'nearest_mode': 'round_prefer_floor',\n 'cube_coeff': -0.75,\n 'version': 'opset4'\n },\n 'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 660, 700]), 'kind': 'data'},\n 'size_3': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([1320])\n },\n 'size_3_data': {'value': None, 'shape': [1], 'kind': 'data'},\n 'scale_3': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([2.0])\n },\n 'scale_3_data': {'value': None, 'shape': [1], 'kind': 'data'},\n 'axes_3': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2])\n },\n 'axes_3_data': {'value': int64_array([2]), 'shape': [1], 'kind': 'data'},\n 'interpolate_3': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'mode': 'nearest',\n 'shape_calculation_mode': 'scales',\n 'version': 'opset4'\n },\n 'interpolate_3_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'},\n 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},\n 'abs_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'},\n 'output': {'kind': 'op', 'op': 'Result'},\n}\n\nref_edges_for_2d_case_1_opset4_case = [\n ('placeholder', 'placeholder_data'),\n\n ('placeholder_data', 'interpolate_1', {'in': 0}),\n ('size_1', 'size_1_data'),\n ('scale_1', 'scale_1_data'),\n ('axes_1', 'axes_1_data'),\n ('size_1_data', 'interpolate_1', {'in': 1}),\n ('scale_1_data', 'interpolate_1', {'in': 2}),\n ('axes_1_data', 'interpolate_1', {'in': 3}),\n ('interpolate_1', 'interpolate_1_data'),\n\n ('interpolate_1_data', 'interpolate_3', {'in': 0}),\n ('size_3', 'size_3_data'),\n ('scale_3', 'scale_3_data'),\n ('axes_3', 'axes_3_data'),\n ('size_3_data', 
'interpolate_3', {'in': 1}),\n ('scale_3_data', 'interpolate_3', {'in': 2}),\n ('axes_3_data', 'interpolate_3', {'in': 3}),\n ('interpolate_3', 'interpolate_3_data'),\n\n ('interpolate_3_data', 'abs'),\n ('abs', 'abs_data'),\n ('abs_data', 'output'),\n]\n\n\ngraph_node_attrs_for_2d_case_1 = {\n 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},\n 'placeholder_data': {\n 'value': None,\n 'shape': int64_array([1, 4, 220, 350]),\n 'kind': 'data',\n 'data_type': None\n },\n 'scale_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([660])\n },\n 'scale_1_data': {'value': int64_array([660]), 'shape': [1], 'kind': 'data'},\n 'interpolate_1': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'axes': int64_array([2]),\n 'mode': 'nearest',\n 'version': 'opset1'\n },\n 'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 660, 350]), 'kind': 'data'},\n 'scale_2': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([700])\n },\n 'scale_2_data': {'value': int64_array([700]), 'shape': [1], 'kind': 'data'},\n 'interpolate_2': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'axes': int64_array([3]),\n 'mode': 'nearest',\n 'version': 'opset1'\n },\n 'interpolate_2_data': {'value': None, 'shape': int64_array([1, 4, 660, 700]), 'kind': 'data'},\n 'scale_3': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([1320])\n },\n 'scale_3_data': {'value': int64_array([1320]), 'shape': [1], 'kind': 'data'},\n 'interpolate_3': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'axes': int64_array([2]),\n 'mode': 'nearest',\n 'version': 'opset1'\n },\n 'interpolate_3_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'},\n 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},\n 'abs_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'},\n 'output': {'kind': 'op', 'op': 'Result'},\n}\n\nedges_for_2d_case_1 = [\n ('placeholder', 'placeholder_data'),\n\n ('placeholder_data', 'interpolate_1', {'in': 0}),\n ('scale_1', 'scale_1_data'),\n ('scale_1_data', 'interpolate_1', {'in': 1}),\n ('interpolate_1', 'interpolate_1_data'),\n\n ('interpolate_1_data', 'interpolate_2', {'in': 0}),\n ('scale_2', 'scale_2_data'),\n ('scale_2_data', 'interpolate_2', {'in': 1}),\n ('interpolate_2', 'interpolate_2_data'),\n\n ('interpolate_2_data', 'interpolate_3', {'in': 0}),\n ('scale_3', 'scale_3_data'),\n ('scale_3_data', 'interpolate_3', {'in': 1}),\n ('interpolate_3', 'interpolate_3_data'),\n\n ('interpolate_3_data', 'abs'),\n ('abs', 'abs_data'),\n ('abs_data', 'output'),\n]\n\n\ngraph_node_attrs_for_2d_case_2 = {\n 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},\n 'placeholder_data': {\n 'value': None,\n 'shape': int64_array([1, 4, 220, 350]),\n 'kind': 'data',\n 'data_type': None\n },\n 'scale_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([660])\n },\n 'scale_1_data': {'value': None, 'shape': [1], 'kind': 'data'},\n 'interpolate_1': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'axes': int64_array([2]),\n 'mode': 'nearest',\n 'version': 'opset1'\n },\n 'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 660, 350]), 'kind': 'data'},\n 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},\n 'abs_data': {'value': None, 'shape': int64_array([1, 4, 660, 350]), 'kind': 'data'},\n 'output': {'kind': 'op', 'op': 'Result'},\n}\n\nedges_for_2d_case_2 = [\n 
('placeholder', 'placeholder_data'),\n\n ('placeholder_data', 'interpolate_1', {'in': 0}),\n ('scale_1', 'scale_1_data'),\n ('scale_1_data', 'interpolate_1', {'in': 1}),\n ('interpolate_1', 'interpolate_1_data'),\n\n ('interpolate_1_data', 'abs'),\n ('abs', 'abs_data'),\n ('abs_data', 'output'),\n]\n\n\ngraph_node_attrs_for_2d_case_3 = {\n 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},\n 'placeholder_data': {\n 'value': None,\n 'shape': int64_array([1, 4, 220, 350]),\n 'kind': 'data',\n 'data_type': None\n },\n 'scale_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([660])\n },\n 'scale_1_data': {'value': None, 'shape': [1], 'kind': 'data'},\n 'interpolate_1': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'axes': int64_array([2]),\n 'mode': 'nearest',\n 'version': 'opset1'\n },\n 'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 660, 350]), 'kind': 'data'},\n 'scale_2': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([700])\n },\n 'scale_2_data': {'value': None, 'shape': [1], 'kind': 'data'},\n 'interpolate_2': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'axes': int64_array([3]),\n 'mode': 'linear',\n 'version': 'opset1'\n },\n 'interpolate_2_data': {'value': None, 'shape': int64_array([1, 4, 660, 700]), 'kind': 'data'},\n 'scale_3': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([1320])\n },\n 'scale_3_data': {'value': None, 'shape': [1], 'kind': 'data'},\n 'interpolate_3': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'axes': int64_array([2]),\n 'mode': 'cubic',\n 'version': 'opset1'\n },\n 'interpolate_3_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'},\n 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},\n 'abs_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'},\n 'output': {'kind': 'op', 'op': 'Result'},\n}\n\nedges_for_2d_case_3 = edges_for_2d_case_1\n\n\nnew_graph_node_attrs_for_2d_case_4_opset4_case = {\n 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},\n 'placeholder_data': {\n 'value': None,\n 'shape': int64_array([1, 4, 220, 350]),\n 'kind': 'data',\n 'data_type': None\n },\n 'size_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2200])\n },\n 'size_1_data': {'value': int64_array([2200]), 'shape': [1], 'kind': 'data'},\n 'scale_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([10.0])\n },\n 'scale_1_data': {'value': np.array([10.0]), 'shape': [1], 'kind': 'data'},\n 'axes_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2])\n },\n 'axes_1_data': {'value': int64_array([2]), 'shape': [1], 'kind': 'data'},\n 'interpolate_1': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'mode': 'linear',\n 'coordinate_transformation_mode': 'asymmetric',\n 'nearest_mode': 'simple',\n 'cube_coeff': -0.4,\n 'antialias': 1,\n 'shape_calculation_mode': 'scales',\n 'version': 'opset4'\n },\n 'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 2200, 350]), 'kind': 'data'},\n 'size_2': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([700])\n },\n 'size_2_data': {'value': int64_array([700]), 'shape': [1], 'kind': 'data'},\n 'scale_2': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([2.0])\n },\n 'scale_2_data': {'value': np.array([2.0]), 'shape': [1], 'kind': 'data'},\n 'axes_2': {\n 'kind': 
'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([3])\n },\n 'axes_2_data': {'value': int64_array([3]), 'shape': [1], 'kind': 'data'},\n 'interpolate_2': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'mode': 'linear',\n 'coordinate_transformation_mode': 'asymmetric',\n 'nearest_mode': 'simple',\n 'cube_coeff': -0.4,\n 'antialias': 1,\n 'shape_calculation_mode': 'scales',\n 'version': 'opset4'\n },\n 'interpolate_2_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'},\n 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},\n 'abs_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'},\n 'output': {'kind': 'op', 'op': 'Result'},\n}\n\nnew_edges_for_2d_case_4_opset4_case = [\n ('placeholder', 'placeholder_data'),\n\n ('placeholder_data', 'interpolate_1', {'in': 0}),\n ('size_1', 'size_1_data'),\n ('size_1_data', 'interpolate_1', {'in': 1}),\n ('scale_1', 'scale_1_data'),\n ('scale_1_data', 'interpolate_1', {'in': 2}),\n ('axes_1', 'axes_1_data'),\n ('axes_1_data', 'interpolate_1', {'in': 3}),\n ('interpolate_1', 'interpolate_1_data'),\n\n ('interpolate_1_data', 'interpolate_2', {'in': 0}),\n ('size_2', 'size_2_data'),\n ('size_2_data', 'interpolate_2', {'in': 1}),\n ('scale_2', 'scale_2_data'),\n ('scale_2_data', 'interpolate_2', {'in': 2}),\n ('axes_2', 'axes_2_data'),\n ('axes_2_data', 'interpolate_2', {'in': 3}),\n ('interpolate_2', 'interpolate_2_data'),\n\n ('interpolate_2_data', 'abs'),\n ('abs', 'abs_data'),\n ('abs_data', 'output'),\n]\n\n\nnew_ref_graph_node_attrs_for_2d_case_4_opset4_case = {\n 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},\n 'placeholder_data': {\n 'value': None,\n 'shape': int64_array([1, 4, 220, 350]),\n 'kind': 'data',\n 'data_type': None\n },\n 'size_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2200, 700])\n },\n 'size_1_data': {'value': None, 'shape': None, 'kind': 'data'},\n 'scale_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([10.0, 2.0])\n },\n 'scale_1_data': {'value': None, 'shape': None, 'kind': 'data'},\n 'axes_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2, 3])\n },\n 'axes_1_data': {'value': None, 'shape': None, 'kind': 'data'},\n 'interpolate_1': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'mode': 'linear',\n 'coordinate_transformation_mode': 'asymmetric',\n 'nearest_mode': 'simple',\n 'cube_coeff': -0.4,\n 'antialias': 1,\n 'shape_calculation_mode': 'scales',\n 'version': 'opset4'\n },\n 'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'},\n 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},\n 'abs_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'},\n 'output': {'kind': 'op', 'op': 'Result'},\n}\n\nnew_ref_edges_for_2d_case_4_opset4_case = [\n ('placeholder', 'placeholder_data'),\n\n ('placeholder_data', 'interpolate_1', {'in': 0}),\n ('size_1', 'size_1_data'),\n ('size_1_data', 'interpolate_1', {'in': 1}),\n ('scale_1', 'scale_1_data'),\n ('scale_1_data', 'interpolate_1', {'in': 2}),\n ('axes_1', 'axes_1_data'),\n ('axes_1_data', 'interpolate_1', {'in': 3}),\n ('interpolate_1', 'interpolate_1_data'),\n\n ('interpolate_1_data', 'abs'),\n ('abs', 'abs_data'),\n ('abs_data', 'output'),\n]\n\n\ngraph_node_attrs_for_2d_case_4_opset4_case = {\n 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},\n 'placeholder_data': {\n 'value': None,\n 'shape': 
int64_array([1, 4, 220, 350]),\n 'kind': 'data',\n 'data_type': None\n },\n 'scale_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2200])\n },\n 'scale_1_data': {'value': None, 'shape': [1], 'kind': 'data'},\n 'axes_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2])\n },\n 'axes_1_data': {'value': int64_array([2]), 'shape': [1], 'kind': 'data'},\n 'interpolate_1': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'mode': 'linear',\n 'coordinate_transformation_mode': 'asymmetric',\n 'nearest_mode': 'simple',\n 'cube_coeff': -0.4,\n 'antialias': 1,\n 'version': 'opset4'\n },\n 'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 2200, 350]), 'kind': 'data'},\n 'scale_2': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([700])\n },\n 'scale_2_data': {'value': None, 'shape': [1], 'kind': 'data'},\n 'axes_2': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([3])\n },\n 'axes_2_data': {'value': int64_array([3]), 'shape': [1], 'kind': 'data'},\n 'interpolate_2': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'mode': 'linear',\n 'coordinate_transformation_mode': 'asymmetric',\n 'nearest_mode': 'simple',\n 'cube_coeff': -0.4,\n 'antialias': 1,\n 'version': 'opset4'\n },\n 'interpolate_2_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'},\n 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},\n 'abs_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'},\n 'output': {'kind': 'op', 'op': 'Result'},\n}\n\nedges_for_2d_case_4_opset4_case = [\n ('placeholder', 'placeholder_data'),\n\n ('placeholder_data', 'interpolate_1', {'in': 0}),\n ('scale_1', 'scale_1_data'),\n ('scale_1_data', 'interpolate_1', {'in': 1}),\n ('axes_1', 'axes_1_data'),\n ('axes_1_data', 'interpolate_1', {'in': 2}),\n ('interpolate_1', 'interpolate_1_data'),\n\n ('interpolate_1_data', 'interpolate_2', {'in': 0}),\n ('scale_2', 'scale_2_data'),\n ('scale_2_data', 'interpolate_2', {'in': 1}),\n ('axes_2', 'axes_2_data'),\n ('axes_2_data', 'interpolate_2', {'in': 2}),\n ('interpolate_2', 'interpolate_2_data'),\n\n ('interpolate_2_data', 'abs'),\n ('abs', 'abs_data'),\n ('abs_data', 'output'),\n]\n\n\ngraph_node_attrs_for_2d_case_4 = {\n 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},\n 'placeholder_data': {\n 'value': None,\n 'shape': int64_array([1, 4, 220, 350]),\n 'kind': 'data',\n 'data_type': None\n },\n 'scale_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2200])\n },\n 'scale_1_data': {'value': int64_array([2200]), 'shape': [1], 'kind': 'data'},\n 'interpolate_1': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'axes': int64_array([2]),\n 'mode': 'linear',\n 'align_corners': 0,\n 'antialias': 1,\n 'pads_begin': 5,\n 'pads_end': 3,\n 'version': 'opset1'\n },\n 'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 2200, 350]), 'kind': 'data'},\n 'scale_2': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([700])\n },\n 'scale_2_data': {'value': int64_array([700]), 'shape': [1], 'kind': 'data'},\n 'interpolate_2': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'axes': int64_array([3]),\n 'mode': 'linear',\n 'align_corners': 0,\n 'antialias': 1,\n 'pads_begin': 5,\n 'pads_end': 3,\n 'version': 'opset1'\n },\n 'interpolate_2_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'},\n 
'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},\n 'abs_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'},\n 'output': {'kind': 'op', 'op': 'Result'},\n}\n\nedges_for_2d_case_4 = [\n ('placeholder', 'placeholder_data'),\n\n ('placeholder_data', 'interpolate_1', {'in': 0}),\n ('scale_1', 'scale_1_data'),\n ('scale_1_data', 'interpolate_1', {'in': 1}),\n ('interpolate_1', 'interpolate_1_data'),\n\n ('interpolate_1_data', 'interpolate_2', {'in': 0}),\n ('scale_2', 'scale_2_data'),\n ('scale_2_data', 'interpolate_2', {'in': 1}),\n ('interpolate_2', 'interpolate_2_data'),\n\n ('interpolate_2_data', 'abs'),\n ('abs', 'abs_data'),\n ('abs_data', 'output'),\n]\n\n\ngraph_node_attrs_for_2d_case_6 = {\n 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},\n 'placeholder_data': {\n 'value': None,\n 'shape': int64_array([1, 4, 220, 350]),\n 'kind': 'data',\n 'data_type': None\n },\n 'scale_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([220, 350])\n },\n 'scale_1_data': {'value': None, 'shape': [2], 'kind': 'data'},\n 'interpolate_1': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'axes': int64_array([2, 3]),\n 'mode': 'linear',\n 'align_corners': 0,\n 'antialias': 1,\n 'pads_begin': 5,\n 'pads_end': 3,\n 'version': 'opset1'\n },\n 'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 220, 350]), 'kind': 'data'},\n 'scale_2': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([220])\n },\n 'scale_2_data': {'value': None, 'shape': [1], 'kind': 'data'},\n 'interpolate_2': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'axes': int64_array([2]),\n 'mode': 'linear',\n 'align_corners': 0,\n 'antialias': 1,\n 'pads_begin': 5,\n 'pads_end': 3,\n 'version': 'opset1'\n },\n 'interpolate_2_data': {'value': None, 'shape': int64_array([1, 4, 220, 350]), 'kind': 'data'},\n 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},\n 'abs_data': {'value': None, 'shape': int64_array([1, 4, 220, 350]), 'kind': 'data'},\n 'output': {'kind': 'op', 'op': 'Result'},\n}\n\nedges_for_2d_case_6 = edges_for_2d_case_4\n\n\nnew_ref_graph_node_attrs_for_3d_case_1_opset4_case = {\n 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},\n 'placeholder_data': {\n 'value': None,\n 'shape': int64_array([1, 5, 1024, 256, 800]),\n 'kind': 'data',\n 'data_type': None\n },\n 'size_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4096, 1280, 2400])\n },\n 'size_1_data': {'value': None, 'shape': None, 'kind': 'data'},\n 'scale_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([4.0, 5.0, 3.0])\n },\n 'scale_1_data': {'value': None, 'shape': None, 'kind': 'data'},\n 'axes_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2, 3, 4])\n },\n 'axes_1_data': {'value': None, 'shape': None, 'kind': 'data'},\n 'interpolate_1': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'mode': 'nearest',\n 'shape_calculation_mode': 'sizes',\n 'version': 'opset4'\n },\n 'interpolate_1_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 2400]), 'kind': 'data'},\n 'size_3': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([512])\n },\n 'size_3_data': {'value': None, 'shape': [1], 'kind': 'data'},\n 'scale_3': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([512.0 / 2400.0])\n },\n 'scale_3_data': {'value': None, 'shape': [1], 'kind': 'data'},\n 'axes_3': {\n 
'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4])\n },\n 'axes_3_data': {'value': int64_array([4]), 'shape': [1], 'kind': 'data'},\n 'interpolate_3': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'mode': 'nearest',\n 'shape_calculation_mode': 'sizes',\n 'version': 'opset4'\n },\n 'interpolate_3_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 512]), 'kind': 'data'},\n 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},\n 'abs_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 512]), 'kind': 'data'},\n 'output': {'kind': 'op', 'op': 'Result'},\n}\n\n\nnew_ref_edges_for_3d_case_1_opset4_case = ref_edges_for_2d_case_1_opset4_case\n\n\nnew_graph_node_attrs_for_3d_case_1_opset4_case = {\n 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},\n 'placeholder_data': {\n 'value': None,\n 'shape': int64_array([1, 5, 1024, 256, 800]),\n 'kind': 'data',\n 'data_type': None\n },\n 'size_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4096, 2400])\n },\n 'size_1_data': {'value': int64_array([4096, 2400]), 'shape': [2], 'kind': 'data'},\n 'scale_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([4.0, 3.0])\n },\n 'scale_1_data': {'value': np.array([4.0, 3.0]), 'shape': [2], 'kind': 'data'},\n 'axes_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2, 4])\n },\n 'axes_1_data': {'value': int64_array([2, 4]), 'shape': [2], 'kind': 'data'},\n 'interpolate_1': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'mode': 'nearest',\n 'shape_calculation_mode': 'sizes',\n 'version': 'opset4'\n },\n 'interpolate_1_data': {'value': None, 'shape': int64_array([1, 5, 4096, 256, 2400]), 'kind': 'data'},\n 'size_2': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([1280])\n },\n 'size_2_data': {'value': int64_array([1280]), 'shape': [1], 'kind': 'data'},\n 'scale_2': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([5.0])\n },\n 'scale_2_data': {'value': np.array([5.0]), 'shape': [1], 'kind': 'data'},\n 'axes_2': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([3])\n },\n 'axes_2_data': {'value': int64_array([3]), 'shape': [1], 'kind': 'data'},\n 'interpolate_2': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'mode': 'nearest',\n 'shape_calculation_mode': 'sizes',\n 'version': 'opset4'\n },\n 'interpolate_2_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 2400]), 'kind': 'data'},\n 'size_3': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([512])\n },\n 'size_3_data': {'value': int64_array([512]), 'shape': [1], 'kind': 'data'},\n 'scale_3': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([512.0 / 2400.0])\n },\n 'scale_3_data': {'value': np.array([512.0 / 2400.0]), 'shape': [1], 'kind': 'data'},\n 'axes_3': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4])\n },\n 'axes_3_data': {'value': int64_array([4]), 'shape': [1], 'kind': 'data'},\n 'interpolate_3': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'mode': 'nearest',\n 'shape_calculation_mode': 'sizes',\n 'version': 'opset4'\n },\n 'interpolate_3_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 512]), 'kind': 'data'},\n 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},\n 'abs_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 512]), 'kind': 'data'},\n 'output': {'kind': 'op', 'op': 
'Result'},\n}\n\nnew_edges_for_3d_case_1_opset4_case = edges_for_2d_case_1_opset4_case\n\n\ngraph_node_attrs_for_3d_case_1 = {\n 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},\n 'placeholder_data': {\n 'value': None,\n 'shape': int64_array([1, 5, 1024, 256, 800]),\n 'kind': 'data',\n 'data_type': None\n },\n 'scale_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4096, 2400])\n },\n 'scale_1_data': {'value': int64_array([4096, 2400]), 'shape': [2], 'kind': 'data'},\n 'interpolate_1': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'axes': int64_array([2, 4]),\n 'mode': 'nearest',\n 'version': 'opset1'\n },\n 'interpolate_1_data': {'value': None, 'shape': int64_array([1, 5, 4096, 256, 2400]), 'kind': 'data'},\n 'scale_2': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([1280])\n },\n 'scale_2_data': {'value': int64_array([1280]), 'shape': [1], 'kind': 'data'},\n 'interpolate_2': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'axes': int64_array([3]),\n 'mode': 'nearest',\n 'version': 'opset1'\n },\n 'interpolate_2_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 2400]), 'kind': 'data'},\n 'scale_3': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([512])\n },\n 'scale_3_data': {'value': int64_array([512]), 'shape': [1], 'kind': 'data'},\n 'interpolate_3': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'axes': int64_array([4]),\n 'mode': 'nearest',\n 'version': 'opset1'\n },\n 'interpolate_3_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 512]), 'kind': 'data'},\n 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},\n 'abs_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 512]), 'kind': 'data'},\n 'output': {'kind': 'op', 'op': 'Result'},\n}\n\nedges_for_3d_case_1 = edges_for_2d_case_1\n\n\ngraph_node_attrs_for_3d_case_2 = {\n 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},\n 'placeholder_data': {\n 'value': None,\n 'shape': int64_array([1, 5, 1024, 256, 800]),\n 'kind': 'data',\n 'data_type': None\n },\n 'scale_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4096, 1280])\n },\n 'scale_1_data': {'value': None, 'shape': [1], 'kind': 'data'},\n 'interpolate_1': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'axes': int64_array([2, 3]),\n 'mode': 'nearest',\n 'version': 'opset1'\n },\n 'interpolate_1_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 800]), 'kind': 'data'},\n 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},\n 'abs_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 800]), 'kind': 'data'},\n 'output': {'kind': 'op', 'op': 'Result'},\n}\n\nedges_for_3d_case_2 = edges_for_2d_case_2\n\n\ngraph_node_attrs_for_3d_case_3 = {\n 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},\n 'placeholder_data': {\n 'value': None,\n 'shape': int64_array([16, 44, 512, 87, 790]),\n 'kind': 'data',\n 'data_type': None\n },\n 'scale_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([256])\n },\n 'scale_1_data': {'value': None, 'shape': [1], 'kind': 'data'},\n 'interpolate_1': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'axes': int64_array([2]),\n 'mode': 'nearest',\n 'version': 'opset1'\n },\n 'interpolate_1_data': {'value': None, 'shape': int64_array([16, 44, 256, 87, 790]), 'kind': 'data'},\n 'scale_2': {\n 'kind': 'op', 'op': 'Const', 'type': 
'Const', 'value': int64_array([2370])\n },\n 'scale_2_data': {'value': None, 'shape': [1], 'kind': 'data'},\n 'interpolate_2': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'axes': int64_array([4]),\n 'mode': 'linear',\n 'version': 'opset1'\n },\n 'interpolate_2_data': {'value': None, 'shape': int64_array([16, 44, 256, 87, 2370]), 'kind': 'data'},\n 'scale_3': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([435])\n },\n 'scale_3_data': {'value': None, 'shape': [1], 'kind': 'data'},\n 'interpolate_3': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'axes': int64_array([3]),\n 'mode': 'cubic',\n 'version': 'opset1'\n },\n 'interpolate_3_data': {'value': None, 'shape': int64_array([16, 44, 256, 435, 2370]), 'kind': 'data'},\n 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},\n 'abs_data': {'value': None, 'shape': int64_array([16, 44, 256, 435, 2370]), 'kind': 'data'},\n 'output': {'kind': 'op', 'op': 'Result'},\n}\n\nedges_for_3d_case_3 = edges_for_2d_case_3\n\n\nnew_ref_graph_node_attrs_for_3d_case_4_opset4_case = {\n 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},\n 'placeholder_data': {\n 'value': None,\n 'shape': int64_array([10, 64, 511, 416, 10240]),\n 'kind': 'data',\n 'data_type': None\n },\n 'size_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4599, 912, 133120])\n },\n 'size_1_data': {'value': None, 'shape': None, 'kind': 'data'},\n 'scale_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const',\n 'value': np.array([4599.0 / 511.0, 912.0 / 416.0, 133120.0 / 10240.0])\n },\n 'scale_1_data': {'value': None, 'shape': None, 'kind': 'data'},\n 'axes_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2, 3, 4])\n },\n 'axes_1_data': {'value': None, 'shape': None, 'kind': 'data'},\n 'interpolate_1': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'mode': 'linear',\n 'antialias': 1,\n 'shape_calculation_mode': 'sizes',\n 'version': 'opset4'\n },\n 'interpolate_1_data': {'value': None, 'shape': int64_array([10, 64, 4599, 912, 133120]), 'kind': 'data'},\n 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},\n 'abs_data': {'value': None, 'shape': int64_array([10, 64, 4599, 912, 133120]), 'kind': 'data'},\n 'output': {'kind': 'op', 'op': 'Result'},\n}\n\nnew_ref_edges_for_3d_case_4_opset4_case = new_ref_edges_for_2d_case_4_opset4_case\n\n\nnew_graph_node_attrs_for_3d_case_4_opset4_case = {\n 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},\n 'placeholder_data': {\n 'value': None,\n 'shape': int64_array([10, 64, 511, 416, 10240]),\n 'kind': 'data',\n 'data_type': None\n },\n 'size_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4599, 133120])\n },\n 'size_1_data': {'value': int64_array([4599, 133120]), 'shape': [2], 'kind': 'data'},\n 'scale_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([4599.0 / 511.0, 133120.0 / 10240.0])\n },\n 'scale_1_data': {'value': np.array([4599.0 / 511.0, 133120.0 / 10240.0]), 'shape': [2], 'kind': 'data'},\n 'axes_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2, 4])\n },\n 'axes_1_data': {'value': int64_array([2, 4]), 'shape': [2], 'kind': 'data'},\n 'interpolate_1': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'mode': 'linear',\n 'antialias': 1,\n 'shape_calculation_mode': 'sizes',\n 'version': 'opset4'\n },\n 'interpolate_1_data': {'value': None, 'shape': int64_array([10, 64, 4599, 
416, 133120]), 'kind': 'data'},\n 'size_2': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([912])\n },\n 'size_2_data': {'value': int64_array([912]), 'shape': [1], 'kind': 'data'},\n 'scale_2': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([912.0 / 416.0])\n },\n 'scale_2_data': {'value': np.array([912.0 / 416.0]), 'shape': [1], 'kind': 'data'},\n 'axes_2': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([3])\n },\n 'axes_2_data': {'value': int64_array([3]), 'shape': [1], 'kind': 'data'},\n 'interpolate_2': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'mode': 'linear',\n 'antialias': 1,\n 'shape_calculation_mode': 'sizes',\n 'version': 'opset4'\n },\n 'interpolate_2_data': {'value': None, 'shape': int64_array([10, 64, 4599, 912, 133120]), 'kind': 'data'},\n 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},\n 'abs_data': {'value': None, 'shape': int64_array([10, 64, 4599, 912, 133120]), 'kind': 'data'},\n 'output': {'kind': 'op', 'op': 'Result'},\n}\n\nnew_edges_for_3d_case_4_opset4_case = new_edges_for_2d_case_4_opset4_case\n\n\ngraph_node_attrs_for_3d_case_4 = {\n 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},\n 'placeholder_data': {\n 'value': None,\n 'shape': int64_array([10, 64, 511, 416, 10240]),\n 'kind': 'data',\n 'data_type': None\n },\n 'scale_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4599, 133120])\n },\n 'scale_1_data': {'value': int64_array([4599, 133120]), 'shape': [2], 'kind': 'data'},\n 'interpolate_1': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'axes': int64_array([2, 4]),\n 'mode': 'linear',\n 'align_corners': 0,\n 'antialias': 1,\n 'pads_begin': 5,\n 'pads_end': 3,\n 'version': 'opset1'\n },\n 'interpolate_1_data': {'value': None, 'shape': int64_array([10, 64, 4599, 416, 133120]), 'kind': 'data'},\n 'scale_2': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([912])\n },\n 'scale_2_data': {'value': int64_array([912]), 'shape': [1], 'kind': 'data'},\n 'interpolate_2': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'axes': int64_array([3]),\n 'mode': 'linear',\n 'align_corners': 0,\n 'antialias': 1,\n 'pads_begin': 5,\n 'pads_end': 3,\n 'version': 'opset1'\n },\n 'interpolate_2_data': {'value': None, 'shape': int64_array([10, 64, 4599, 912, 133120]), 'kind': 'data'},\n 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},\n 'abs_data': {'value': None, 'shape': int64_array([10, 64, 4599, 912, 133120]), 'kind': 'data'},\n 'output': {'kind': 'op', 'op': 'Result'},\n}\n\nedges_for_3d_case_4 = edges_for_2d_case_4\n\n\nclass InterpolateSequenceToInterpolateTest(unittest.TestCase):\n def test_2d_interpolate_sequence_1(self):\n graph = build_graph(\n nodes_attrs=graph_node_attrs_for_2d_case_1,\n edges=edges_for_2d_case_1\n )\n\n ref_graph = build_graph(\n nodes_attrs={\n 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},\n 'placeholder_data': {\n 'value': None,\n 'shape': int64_array([1, 4, 220, 350]),\n 'kind': 'data',\n 'data_type': None\n },\n 'scale_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([660, 700])\n },\n 'scale_1_data': {'value': None, 'shape': None, 'kind': 'data'},\n 'interpolate_1': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'axes': int64_array([2, 3]),\n 'mode': 'nearest',\n 'version': 'opset1'\n },\n 'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 660, 700]), 
'kind': 'data'},\n 'scale_2': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([1320])\n },\n 'scale_2_data': {'value': None, 'shape': None, 'kind': 'data'},\n 'interpolate_2': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'axes': int64_array([2]),\n 'mode': 'nearest',\n 'version': 'opset1'\n },\n 'interpolate_2_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'},\n 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},\n 'abs_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'},\n 'output': {'kind': 'op', 'op': 'Result'},\n },\n edges=[\n ('placeholder', 'placeholder_data'),\n ('placeholder_data', 'interpolate_1', {'in': 0}),\n ('scale_1', 'scale_1_data'),\n ('scale_1_data', 'interpolate_1', {'in': 1}),\n ('interpolate_1', 'interpolate_1_data'),\n ('scale_2', 'scale_2_data'),\n ('interpolate_2', 'interpolate_2_data'),\n ('interpolate_1_data', 'interpolate_2', {'in': 0}),\n ('scale_2_data', 'interpolate_2', {'in': 1}),\n ('interpolate_2_data', 'abs'),\n ('abs', 'abs_data'),\n ('abs_data', 'output'),\n ]\n )\n InterpolateSequenceToInterpolate().find_and_replace_pattern(graph)\n (flag, resp) = compare_graphs(graph, ref_graph, 'output')\n self.assertTrue(flag, resp)\n\n def test_2d_interpolate_sequence_1_opset4_case(self):\n graph = build_graph(\n nodes_attrs=graph_node_attrs_for_2d_case_1_opset4_case,\n edges=edges_for_2d_case_1_opset4_case\n )\n\n ref_graph = build_graph(\n nodes_attrs=ref_graph_node_attrs_for_2d_case_1_opset4_case,\n edges=ref_edges_for_2d_case_1_opset4_case\n )\n InterpolateSequenceToInterpolate().find_and_replace_pattern(graph)\n (flag, resp) = compare_graphs(graph, ref_graph, 'output')\n self.assertTrue(flag, resp)\n\n def test_2d_interpolate_sequence_2(self):\n graph = build_graph(\n nodes_attrs=graph_node_attrs_for_2d_case_2,\n edges=edges_for_2d_case_2\n )\n ref_graph = build_graph(\n nodes_attrs=graph_node_attrs_for_2d_case_2,\n edges=edges_for_2d_case_2\n )\n InterpolateSequenceToInterpolate().find_and_replace_pattern(graph)\n (flag, resp) = compare_graphs(graph, ref_graph, 'output')\n self.assertTrue(flag, resp)\n\n def test_2d_interpolate_sequence_3(self):\n graph = build_graph(\n nodes_attrs=graph_node_attrs_for_2d_case_3,\n edges=edges_for_2d_case_3\n )\n\n ref_graph = build_graph(\n nodes_attrs=graph_node_attrs_for_2d_case_3,\n edges=edges_for_2d_case_3\n )\n\n InterpolateSequenceToInterpolate().find_and_replace_pattern(graph)\n (flag, resp) = compare_graphs(graph, ref_graph, 'output')\n self.assertTrue(flag, resp)\n\n def test_2d_interpolate_sequence_4(self):\n graph = build_graph(\n nodes_attrs=graph_node_attrs_for_2d_case_4,\n edges=edges_for_2d_case_4\n )\n\n ref_graph = build_graph(\n nodes_attrs={\n 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},\n 'placeholder_data': {\n 'value': None,\n 'shape': int64_array([1, 4, 220, 350]),\n 'kind': 'data',\n 'data_type': None\n },\n 'scale': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2200, 700])\n },\n 'scale_data': {'value': None, 'shape': None, 'kind': 'data'},\n 'interpolate': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'axes': int64_array([2, 3]),\n 'mode': 'linear',\n 'align_corners': 0,\n 'antialias': 1,\n 'pads_begin': 5,\n 'pads_end': 3,\n 'version': 'opset1'\n },\n 'interpolate_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'},\n 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},\n 'abs_data': {'value': None, 
'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'},\n 'output': {'kind': 'op', 'op': 'Result'},\n },\n edges=[\n ('placeholder', 'placeholder_data'),\n\n ('placeholder_data', 'interpolate', {'in': 0}),\n ('scale', 'scale_data'),\n ('scale_data', 'interpolate', {'in': 1}),\n ('interpolate', 'interpolate_data'),\n\n ('interpolate_data', 'abs'),\n ('abs', 'abs_data'),\n ('abs_data', 'output'),\n ]\n )\n\n InterpolateSequenceToInterpolate().find_and_replace_pattern(graph)\n (flag, resp) = compare_graphs(graph, ref_graph, 'output')\n self.assertTrue(flag, resp)\n\n def test_2d_interpolate_sequence_4_opset4_case(self):\n graph = build_graph(\n nodes_attrs=new_graph_node_attrs_for_2d_case_4_opset4_case,\n edges=new_edges_for_2d_case_4_opset4_case\n )\n\n ref_graph = build_graph(\n nodes_attrs=new_ref_graph_node_attrs_for_2d_case_4_opset4_case,\n edges=new_ref_edges_for_2d_case_4_opset4_case\n )\n\n InterpolateSequenceToInterpolate().find_and_replace_pattern(graph)\n (flag, resp) = compare_graphs(graph, ref_graph, 'output')\n self.assertTrue(flag, resp)\n\n def test_2d_interpolate_sequence_5(self):\n graph = build_graph(\n nodes_attrs=graph_node_attrs_for_2d_case_4,\n edges=edges_for_2d_case_4,\n update_attributes={\n 'interpolate_1': {\n 'align_corners': 1, 'antialias': 1, 'pads_begin': 3, 'pads_end': 0\n }\n }\n )\n\n ref_graph = build_graph(\n nodes_attrs=graph_node_attrs_for_2d_case_4,\n edges=edges_for_2d_case_4,\n update_attributes={\n 'interpolate_1': {\n 'align_corners': 1, 'antialias': 1, 'pads_begin': 3, 'pads_end': 0\n }\n }\n )\n\n InterpolateSequenceToInterpolate().find_and_replace_pattern(graph)\n (flag, resp) = compare_graphs(graph, ref_graph, 'output')\n self.assertTrue(flag, resp)\n\n def test_2d_interpolate_sequence_5_opset4_case(self):\n graph = build_graph(\n nodes_attrs=graph_node_attrs_for_2d_case_4_opset4_case,\n edges=edges_for_2d_case_4_opset4_case,\n update_attributes={\n 'interpolate_1': {\n 'antialias': 0, 'cube_coeff': -0.1\n }\n }\n )\n\n ref_graph = build_graph(\n nodes_attrs=graph_node_attrs_for_2d_case_4_opset4_case,\n edges=edges_for_2d_case_4_opset4_case,\n update_attributes={\n 'interpolate_1': {\n 'antialias': 0, 'cube_coeff': -0.1\n }\n }\n )\n\n InterpolateSequenceToInterpolate().find_and_replace_pattern(graph)\n (flag, resp) = compare_graphs(graph, ref_graph, 'output')\n self.assertTrue(flag, resp)\n\n def test_2d_interpolate_sequence_6(self):\n graph = build_graph(\n nodes_attrs=graph_node_attrs_for_2d_case_6,\n edges=edges_for_2d_case_6,\n )\n\n ref_graph = build_graph(\n nodes_attrs=graph_node_attrs_for_2d_case_6,\n edges=edges_for_2d_case_6\n )\n\n InterpolateSequenceToInterpolate().find_and_replace_pattern(graph)\n (flag, resp) = compare_graphs(graph, ref_graph, 'output')\n self.assertTrue(flag, resp)\n\n def test_3d_interpolate_sequence_1(self):\n graph = build_graph(\n nodes_attrs=graph_node_attrs_for_3d_case_1,\n edges=edges_for_3d_case_1\n )\n\n ref_graph = build_graph(\n nodes_attrs={\n 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},\n 'placeholder_data': {\n 'value': None,\n 'shape': int64_array([1, 5, 1024, 256, 800]),\n 'kind': 'data',\n 'data_type': None\n },\n 'scale_1': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4096, 1280, 2400])\n },\n 'scale_1_data': {'value': None, 'shape': None, 'kind': 'data'},\n 'interpolate_1': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'axes': int64_array([2, 3, 4]),\n 'mode': 'nearest',\n 'version': 'opset1'\n },\n 
'interpolate_1_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 2400]), 'kind': 'data'},\n 'scale_2': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([512])\n },\n 'scale_2_data': {'value': None, 'shape': [1], 'kind': 'data'},\n 'interpolate_2': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'axes': int64_array([4]),\n 'mode': 'nearest',\n 'version': 'opset1'\n },\n 'interpolate_2_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 512]), 'kind': 'data'},\n 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},\n 'abs_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 512]), 'kind': 'data'},\n 'output': {'kind': 'op', 'op': 'Result'},\n },\n edges=[\n ('placeholder', 'placeholder_data'),\n ('placeholder_data', 'interpolate_1', {'in': 0}),\n ('scale_1', 'scale_1_data'),\n ('scale_1_data', 'interpolate_1', {'in': 1}),\n ('interpolate_1', 'interpolate_1_data'),\n ('scale_2', 'scale_2_data'),\n ('interpolate_2', 'interpolate_2_data'),\n ('interpolate_1_data', 'interpolate_2', {'in': 0}),\n ('scale_2_data', 'interpolate_2', {'in': 1}),\n ('interpolate_2_data', 'abs'),\n ('abs', 'abs_data'),\n ('abs_data', 'output'),\n ]\n )\n InterpolateSequenceToInterpolate().find_and_replace_pattern(graph)\n (flag, resp) = compare_graphs(graph, ref_graph, 'output')\n self.assertTrue(flag, resp)\n\n def test_3d_interpolate_sequence_1_opset4_case(self):\n graph = build_graph(\n nodes_attrs=new_graph_node_attrs_for_3d_case_1_opset4_case,\n edges=new_edges_for_3d_case_1_opset4_case\n )\n\n ref_graph = build_graph(\n nodes_attrs=new_ref_graph_node_attrs_for_3d_case_1_opset4_case,\n edges=new_ref_edges_for_3d_case_1_opset4_case\n )\n InterpolateSequenceToInterpolate().find_and_replace_pattern(graph)\n (flag, resp) = compare_graphs(graph, ref_graph, 'output')\n self.assertTrue(flag, resp)\n\n def test_3d_interpolate_sequence_2(self):\n graph = build_graph(\n nodes_attrs=graph_node_attrs_for_3d_case_2,\n edges=edges_for_3d_case_2\n )\n ref_graph = build_graph(\n nodes_attrs=graph_node_attrs_for_3d_case_2,\n edges=edges_for_3d_case_2\n )\n InterpolateSequenceToInterpolate().find_and_replace_pattern(graph)\n (flag, resp) = compare_graphs(graph, ref_graph, 'output')\n self.assertTrue(flag, resp)\n\n def test_3d_interpolate_sequence_3(self):\n graph = build_graph(\n nodes_attrs=graph_node_attrs_for_3d_case_3,\n edges=edges_for_3d_case_3\n )\n ref_graph = build_graph(\n nodes_attrs=graph_node_attrs_for_3d_case_3,\n edges=edges_for_3d_case_3\n )\n InterpolateSequenceToInterpolate().find_and_replace_pattern(graph)\n (flag, resp) = compare_graphs(graph, ref_graph, 'output')\n self.assertTrue(flag, resp)\n\n def test_3d_interpolate_sequence_4(self):\n graph = build_graph(\n nodes_attrs=graph_node_attrs_for_3d_case_4,\n edges=edges_for_3d_case_4\n )\n\n ref_graph = build_graph(\n nodes_attrs={\n 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},\n 'placeholder_data': {\n 'value': None,\n 'shape': int64_array([10, 64, 511, 416, 10240]),\n 'kind': 'data',\n 'data_type': None\n },\n 'scale': {\n 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4599, 912, 133120])\n },\n 'scale_data': {'value': None, 'shape': None, 'kind': 'data'},\n 'interpolate': {\n 'type': 'Interpolate',\n 'kind': 'op',\n 'op': 'Interpolate',\n 'axes': int64_array([2, 3, 4]),\n 'mode': 'linear',\n 'align_corners': 0,\n 'antialias': 1,\n 'pads_begin': 5,\n 'pads_end': 3,\n 'version': 'opset1'\n },\n 'interpolate_data': {'value': None, 'shape': 
int64_array([10, 64, 4599, 912, 133120]), 'kind': 'data'},\n 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},\n 'abs_data': {'value': None, 'shape': int64_array([10, 64, 4599, 912, 133120]), 'kind': 'data'},\n 'output': {'kind': 'op', 'op': 'Result'},\n },\n edges=[\n ('placeholder', 'placeholder_data'),\n\n ('placeholder_data', 'interpolate', {'in': 0}),\n ('scale', 'scale_data'),\n ('scale_data', 'interpolate', {'in': 1}),\n ('interpolate', 'interpolate_data'),\n\n ('interpolate_data', 'abs'),\n ('abs', 'abs_data'),\n ('abs_data', 'output'),\n ]\n )\n\n InterpolateSequenceToInterpolate().find_and_replace_pattern(graph)\n (flag, resp) = compare_graphs(graph, ref_graph, 'output')\n self.assertTrue(flag, resp)\n\n def test_3d_interpolate_sequence_4_opset4_case(self):\n graph = build_graph(\n nodes_attrs=new_graph_node_attrs_for_3d_case_4_opset4_case,\n edges=new_edges_for_3d_case_4_opset4_case\n )\n\n ref_graph = build_graph(\n nodes_attrs=new_ref_graph_node_attrs_for_3d_case_4_opset4_case,\n edges=new_ref_edges_for_3d_case_4_opset4_case\n )\n\n InterpolateSequenceToInterpolate().find_and_replace_pattern(graph)\n (flag, resp) = compare_graphs(graph, ref_graph, 'output')\n self.assertTrue(flag, resp)\n\n def test_3d_interpolate_sequence_5(self):\n graph = build_graph(\n nodes_attrs=graph_node_attrs_for_3d_case_4,\n edges=edges_for_3d_case_4,\n update_attributes={\n 'interpolate_1': {\n 'align_corners': 1, 'antialias': 1, 'pads_begin': 3, 'pads_end': 7\n }\n }\n )\n\n ref_graph = build_graph(\n nodes_attrs=graph_node_attrs_for_3d_case_4,\n edges=edges_for_3d_case_4,\n update_attributes={\n 'interpolate_1': {\n 'align_corners': 1, 'antialias': 1, 'pads_begin': 3, 'pads_end': 7\n }\n }\n )\n\n InterpolateSequenceToInterpolate().find_and_replace_pattern(graph)\n (flag, resp) = compare_graphs(graph, ref_graph, 'output')\n self.assertTrue(flag, resp)\n",
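[annotation] The reference graphs in the cell above encode the fusion rule this test exercises: two back-to-back Interpolate ops merge only when every non-axis attribute (mode, antialias, pads, cube_coeff, ...) matches and their axes are disjoint, and the merged op carries the sorted union of the axes with the per-axis target sizes reordered to match (axes [2]/[3] with sizes [660]/[700] become axes [2, 3], sizes [660, 700]; the mixed-mode case 3 and the overlapping-axes case 6 stay unfused). A rough sketch of that merge decision; the function name and signature are illustrative, not the transformation's actual API, and the real pass also handles scales and both opset1/opset4 forms:

def merge_interpolates(axes_a, sizes_a, attrs_a, axes_b, sizes_b, attrs_b):
    # Fuse only when all other attributes match and the axis sets are disjoint.
    if attrs_a != attrs_b or set(axes_a) & set(axes_b):
        return None  # leave the sequence as-is (cases 3, 5, 6 above)
    pairs = sorted(zip(list(axes_a) + list(axes_b), list(sizes_a) + list(sizes_b)))
    axes, sizes = zip(*pairs)
    return list(axes), list(sizes)

# merge_interpolates([2, 4], [4096, 2400], a, [3], [1280], a)
#   -> ([2, 3, 4], [4096, 1280, 2400]), matching the 3D case-1 reference graph.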
"# Copyright (C) 2018-2022 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport numpy as np\n\nfrom openvino.tools.mo.front.common.partial_infer.utils import is_fully_defined\nfrom openvino.tools.mo.graph.graph import Node, Graph\nfrom openvino.tools.mo.ops.op import Op\n\n\nclass ConstantFill(Op):\n \"\"\" Constant blob generation by broadcasting specified value to a given shape.\n\n It is assumed that there is no equivalent of this op in IE,\n so it is usually relevant to constant folding.\n \"\"\"\n op = 'ConstantFill'\n enabled = False\n\n def __init__(self, graph: Graph, attrs: dict):\n mandatory_props = {\n 'type': None,\n 'op': self.op,\n 'input_as_shape': 1,\n 'in_ports_count': 1,\n 'out_ports_count': 1,\n 'infer': self.infer\n }\n super().__init__(graph, mandatory_props, attrs)\n\n def supported_attrs(self):\n return [\n 'input_as_shape',\n 'fill_value'\n ]\n\n @staticmethod\n def infer(node: Node):\n assert len(node.in_nodes()) == 1\n assert node.fill_value is not None\n assert node.input_as_shape\n\n shape = node.in_port(0).data.get_value()\n assert shape is not None\n\n if is_fully_defined(shape):\n node.out_port(0).data.set_value(np.full(shape, node.fill_value, np.float32))\n else:\n node.out_port(0).data.set_shape(shape)\n"
] | [
[
"numpy.random.randn"
],
[
"numpy.array_equal"
],
[
"numpy.array"
],
[
"numpy.full"
]
] |
hades208002/mdp-project | [
"c242a8d00412cc3772d298986977f6acc47002ee"
] | [
"client_server_test/NEWGUI.py"
] | [
"from tkinter import *\nfrom tkinter import ttk\nimport tkinter.filedialog as fd\nimport pandas as pd\nfrom LocalModelCommunication import LocalModelCommunication\nfrom APP import APP\n\nclass GUI(object):\n\tdef __init__(self):\n\t\t# overall\n\t\tself.tabControl = None\n\t\tself.tab_step1 = None\n\t\tself.tab_step2 = None\n\t\tself.tab_step3 = None\n\t\tself.tab_step4 = None\n\t\tself.dataframe = None\n\t\tself.img_wait = PhotoImage(file='test.GIF')\n\n\t\t# 1 step\n\t\tself.fname = None\n\t\tself.data = None\n\t\tself.features = None\n\t\tself.import_lable = None\n\t\tself.import_label_text = StringVar()\n\t\tself.import_label_text.set(' ')\n\n\t\t# 2 step\n\t\tself.required = ['RR', 'QTm_old', 'sbjBeatConsidered', 'numRRaveraged', 'QR', 'QTn', 'QRS', 'IPG',\n\t\t\t\t\t\t\t\t\t'PQ', 'PCpos', 'PCneg', 'patsex', 'AFclass', 'Age']\n\t\tself.required_ordered = []\n\t\ti = 0\n\t\tfor item in self.required:\n\t\t\tself.required_ordered.append(str(i) + ': ' + item)\n\t\t\ti = i + 1\n\t\tself.leftbox = StringVar()\n\t\tself.rightbox = StringVar()\n\t\tself.rrightbox = StringVar()\n\t\tself.list_left = None\n\t\tself.list_right = None\n\t\tself.list_rright = None\n\n\t\t# 3 step\n\t\tself.model_label = None\n\t\tself.model_label_text = StringVar()\n\t\tself.model_label_text.set('Waiting for model training...')\n\t\tself.img_gif = PhotoImage(file='img.GIF')\n\t\t\n\n\t\t# 4 step\n\t\tself.connect_label = None\n\t\tself.connect_label_text = StringVar()\n\t\tself.connect_label_text.set('Waiting for central server response...')\n\n\t\t# 5 step\n\n\t# help functions\n\tdef add_tab(self, tabControl, tab_name):\n\t\ttab = ttk.Frame(tabControl) # Create a tab\n\t\ttabControl.add(tab, text=tab_name)\n\t\treturn tab\n\n\t# Callback functions\n\t## step 1\n\tdef get_csv(self): # open file system\n\t\tself.fname = fd.askopenfilename(filetypes=[(\".csv file\", \".csv\")])\n\t\tself.data = pd.read_csv(self.fname, delimiter=',')\n\t\tself.features = self.data.columns\n\n\t\tself.import_label_text.set('Import data from: ' + self.fname + '\\n' + str(self.features))\n\t\tself.import_lable.pack(side=TOP)\n\tdef go_next_step2(self):\n\t\tself.tab_step2 = self.add_tab(self.tabControl, \"Step 2: Match Features\")\n\t\tself.tab_match(self.tab_step2)\n\t\tself.tabControl.select(self.tab_step2)\n\t\tself.tabControl.forget(self.tab_step1)\n\t## step 2\n\tdef move_to_right(self):\n\n\t\tself.list_right.insert(END,\n\t\t\t\t\t\t\t str(self.list_right.size()) + ': ' + self.list_left.get(self.list_left.curselection()))\n\t\tself.list_left.delete(self.list_left.curselection())\n\tdef move_to_left(self):\n\t\tcontent = self.list_right.get(self.list_right.curselection())\n\t\tcontents = content.split(': ')\n\t\tself.list_left.insert(END, contents[1])\n\t\tself.list_right.delete(self.list_right.curselection())\n\tdef add_nan(self):\n\t\tself.list_right.insert(END, str(self.list_right.size()) + ': ' + 'NAN')\n\tdef go_next_step3(self):\n\t\t# prepare dataframe for localmodel\n\t\tcolumns = []\n\t\tcontents = self.rightbox.get()\n\t\tcontents = contents.replace('(', '')\n\t\tcontents = contents.replace(')', '')\n\t\tcontents = contents.replace(\"'\", '')\n\t\titem_list = contents.split(', ')\n\t\tfor item in item_list:\n\t\t\tcontent = item.split(': ')[1]\n\t\t\tif content != 'NAN':\n\t\t\t\tcolumns.append(content)\n\n\t\tself.dataframe = self.data[columns]\n\t\tprint(self.dataframe.head(2))\n\t\tself.tab_step3 = self.add_tab(self.tabControl, \"Step 3: Train Model\")\n\t\t# render 
tab3\n\t\tself.tab_model(self.tab_step3)\n\t\tself.tabControl.select(self.tab_step3)\n\t\tself.tabControl.forget(self.tab_step2)\n\tdef go_back_step1(self):\n\t\tself.tab_step1 = self.add_tab(self.tabControl, \"Step 1: Import Data\")\n\t\t# render tab1\n\t\tself.tab_import(self.tab_step1, self.tabControl)\n\t\tself.tabControl.select(self.tab_step1)\n\t\tself.tabControl.forget(self.tab_step2)\n\t## step 3\n\tdef go_next_step4(self):\n\t\tself.tab_step4 = self.add_tab(self.tabControl, \"Step 4: Connect to Central Server\")\n\t\t# render tab4\n\t\tself.tab_connect(self.tab_step4)\n\t\tself.tabControl.select(self.tab_step4)\n\t\tself.tabControl.forget(self.tab_step3)\n\tdef go_back_step2(self):\n\t\tself.tab_step2 = self.add_tab(self.tabControl, \"Step 2: Match Features\")\n\t\t# render tab2\n\t\tself.tab_match(self.tab_step2)\n\t\tself.tabControl.select(self.tab_step2)\n\t\tself.tabControl.forget(self.tab_step3)\n\t## step 4\n\tdef go_next_step5(self):\n\t\tself.tab_step5 = self.add_tab(self.tabControl, \"Step 5: Wait for Prediction Call\")\n\t\t# render tab5\n\t\tself.tab_wait(self.tab_step5)\n\t\tself.tabControl.select(self.tab_step5)\n\t\tself.tabControl.forget(self.tab_step4)\n\tdef go_back_step3(self):\n\t\tself.tab_step3 = self.add_tab(self.tabControl, \"Step 3: Train Model\")\n\t\t# render tab3\n\t\tself.tab_model(self.tab_step3)\n\t\tself.tabControl.select(self.tab_step3)\n\t\tself.tabControl.forget(self.tab_step4)\n\t## step 5\n\n\t# frames\n\tdef tab_import(self, root, tabControl):\n\t\t\"\"\"\n\t\tLoad local data (csv file)\n\t\t\"\"\"\n\t\tself.tabControl = tabControl\n\t\tself.tab_step1 = root\n\n\t\tframe = Frame(root)\n\t\tframe.pack(side=TOP)\n\t\tButton(frame, text='Import Data', command=self.get_csv, width=16).pack(side=TOP)\n\t\tlabel_frame = ttk.LabelFrame(frame, text='Press Button to Import Data')\n\t\tlabel_frame.pack(side=TOP)\n\t\tself.import_lable = ttk.Label(label_frame, textvariable=self.import_label_text)\n\t\tself.import_lable.pack(side=TOP)\n\n\t\tframe = Frame(root)\n\t\tframe.pack(side=BOTTOM)\n\t\tButton(frame, text='Next>>', command=self.go_next_step2, width=16).pack(side=TOP)\n\n\tdef tab_match(self, root):\n\t\t\"\"\"\n\t\tFeature matching\n\t\t\"\"\"\n\t\tself.leftbox.set(sorted(self.features))\n\t\tself.rightbox.set('')\n\t\tself.rrightbox.set(self.required_ordered)\n\n\t\tframe = Frame(root)\n\t\tframe.pack(side=BOTTOM)\n\t\tButton(frame, text='Next>>', command=self.go_next_step3, width=16).pack(side=RIGHT)\n\t\tButton(frame, text='<<Back', command=self.go_back_step1, width=16).pack(side=LEFT)\n\n\t\tframe = Frame(root)\n\t\tframe.pack(side=LEFT)\n\t\tcolumn_head = ttk.Label(frame, text='Local Features')\n\t\tcolumn_head.pack(side=TOP)\n\t\tself.list_left = Listbox(frame, listvariable=self.leftbox, width=25, height=20)\n\t\tself.list_left.pack(side=LEFT)\n\n\t\tscrollbar = Scrollbar(frame, orient=\"vertical\")\n\t\tscrollbar.config(command=self.list_left.yview)\n\t\tscrollbar.pack(side=\"right\", fill=\"y\")\n\n\t\tframe = Frame(root)\n\t\tframe.pack(side=LEFT)\n\t\tButton(frame, text='->', command=self.move_to_right, width=7).pack(side=TOP)\n\t\tButton(frame, text='<-', command=self.move_to_left, width=7).pack(side=TOP)\n\t\tButton(frame, text='NAN', command=self.add_nan, width=7).pack(side=TOP)\n\n\t\tframe = Frame(root)\n\t\tframe.pack(side=LEFT)\n\t\tcolumn_head = ttk.Label(frame, text='Matched Features')\n\t\tcolumn_head.pack(side=TOP)\n\t\tself.list_right = Listbox(frame, listvariable=self.rightbox,height=20, 
width=25)\n\t\tself.list_right.pack(side=LEFT)\n\n\t\tscrollbar = Scrollbar(frame, orient=\"vertical\")\n\t\tscrollbar.config(command=self.list_right.yview)\n\t\tscrollbar.pack(side=\"right\", fill=\"y\")\n\n\t\tframe = Frame(root)\n\t\tframe.pack(side=RIGHT)\n\t\tcolumn_head = ttk.Label(frame, text='Required Features')\n\t\tcolumn_head.pack(side=TOP)\n\t\tself.list_rright = Listbox(frame, listvariable=self.rrightbox,height=20, width=25)\n\t\tself.list_rright.pack(side=LEFT)\n\n\t\tscrollbar = Scrollbar(frame, orient=\"vertical\")\n\t\tscrollbar.config(command=self.list_rright.yview)\n\t\tscrollbar.pack(side=\"right\", fill=\"y\")\n\n\tdef tab_model(self, root):\n\t\t\"\"\"\n\t\tCall localmodel.init() and localmodel.train()\n\t\tDisplay model accuracy\n\t\t\"\"\"\n\t\tframe = Frame(root)\n\t\tframe.pack(side=TOP)\n\t\tself.label_frame = ttk.LabelFrame(frame)\n\t\tself.label_frame.pack(side=TOP)\n\t\tself.model_label = ttk.Label(self.label_frame, textvariable=self.model_label_text)\n\t\tself.model_label.pack(side=TOP)\n\t\tself.label_img = ttk.Label(self.label_frame, image=self.img_wait)\n\t\tself.label_img.pack()\n\t\tframe = Frame(root)\n\t\tframe.pack(side=BOTTOM)\n\n\t\tButton(frame, text='Next>>', command=self.go_next_step4, width=16).pack(side=RIGHT)\n\t\tButton(frame, text='<<Back', command=self.go_back_step2, width=16).pack(side=LEFT)\n\n\t\tprint (\"MODEL TRAINED -> \")\n\n\t\tself.loca = LocalModelCommunication(data= self.dataframe)\n\t\ttraining_result = self.loca.chooseModel_with_crossValidation_and_train()\n\n\t\tprint (training_result)\n\n\t\tself.trainingdone()\n\n\n\n\tdef trainingdone(self):\n\t\tself.label_img.config(image=self.img_gif)\n\t\tself.label_img.pack()\n\n\tdef tab_connect(self, root):\n\t\t\"\"\"\n\t\tConnect to center server\n\t\t\"\"\"\n\t\tframe = Frame(root)\n\t\tframe.pack(side=TOP)\n\t\tlabel_frame = ttk.LabelFrame(frame)\n\t\tlabel_frame.pack(side=TOP)\n\t\tself.connect_label = ttk.Label(label_frame, textvariable=self.connect_label_text)\n\t\tself.connect_label.pack(side=TOP)\n\t\tlabel_img = ttk.Label(label_frame, image=self.img_wait)\n\t\tlabel_img.pack()\n\n\t\tframe = Frame(root)\n\t\tframe.pack(side=BOTTOM)\n\t\tButton(frame, text='Next>>', command=self.go_next_step5, width=16).pack(side=RIGHT)\n\t\tButton(frame, text='<<Back', command=self.go_back_step3, width=16).pack(side=LEFT)\n\n\t\t## cannot get fast responce! -> get false even if we are connected :]\n\t\tif self.loca.connectToCentral() == False :\n\t\t\tprint (\"not connected\")\n\t\telse :\n\t\t\tprint (\"connected\")\n\t\t'''\n\t\tself.root = Tk()\n\t\tself.root.geometry(\"700x500\")\n\t\tself.root.title(\"Doctor Application\")\n\t\tself.root.resizable(width=False, height=False)\n\n\t\tself.app = APP(root)\n\n\t\tself.root.mainloop()\n\t\t'''\n\n\tdef tab_wait(self, root):\n\t\t\"\"\"\n\t\tCall localmodel.predict()\n\t\t:return:\n\t\t\"\"\"\n\t\tframe = Frame(root)\n\t\tframe.pack(side=TOP)\n\t\tlabel_frame = ttk.LabelFrame(frame)\n\t\tlabel_frame.pack(side=TOP)\n\t\tlabel = ttk.Label(label_frame, text='TODO')\n\t\tlabel.pack(side=TOP)\n\nif __name__ == '__main__':\n root = Tk()\n root.geometry(\"700x500\")\n root.title(\"Modeling Tool GUI\")\n root.resizable(width=False, height=False)\n\n tabControl = ttk.Notebook(root)\n tab_step1 = ttk.Frame(tabControl)\n tabControl.add(tab_step1, text=\"Step 1: Import Data\")\n tabControl.pack(expand=1, fill=\"both\") # Pack to make visible\n\n gui = GUI()\n gui.tab_import(tab_step1, tabControl)\n\n root.mainloop()\n"
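[annotation] go_next_step3 in the GUI cell above recovers the matched columns by string-mangling str(self.rightbox.get()) with replace()/split(), which breaks if a feature name ever contains quotes or ", ". Reading the items straight off the Listbox avoids that round-trip. A sketch of the alternative, assuming the same 'index: name' item format; the helper name is hypothetical:

def matched_columns(listbox):
    # Listbox.get(0, 'end') returns the items as a tuple of 'i: name' strings.
    columns = []
    for item in listbox.get(0, 'end'):
        name = item.split(': ', 1)[1]
        if name != 'NAN':
            columns.append(name)
    return columns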
] | [
[
"pandas.read_csv"
]
] |
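The wizard flow in the GUI row above is driven by three ttk.Notebook calls: add the next step's tab, select it, then forget the current one. A minimal standalone sketch of that pattern, assuming nothing beyond the standard library (widget names and labels here are illustrative, not the row's actual class):

import tkinter as tk
from tkinter import ttk

root = tk.Tk()
notebook = ttk.Notebook(root)
notebook.pack(expand=1, fill="both")

step1 = ttk.Frame(notebook)
notebook.add(step1, text="Step 1")

def go_next_step2():
    step2 = ttk.Frame(notebook)
    notebook.add(step2, text="Step 2")   # render the new tab
    notebook.select(step2)               # jump to it
    notebook.forget(step1)               # hide the old one, as in the row's go_next_* methods

tk.Button(step1, text="Next>>", command=go_next_step2).pack()
root.mainloop()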
sidneyp/bidirectional | [
"d3d1dbb727e5a25b4980646f1eb500245072f079"
] | [
"cifar_cnn_three_conv.py"
] | [
"import tensorflow as tf\nimport keras\nfrom keras.datasets import cifar10\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport os\nimport sys\nimport csv\nimport utils_csv\nimport utils_tf as utils\nfrom cleverhans.utils_tf import model_train, model_eval\nfrom cleverhans.attacks import FastGradientMethod\nfrom cleverhans.model import Model\nprint(\"Tensorflow version \" + tf.__version__)\n\nconfig_num = int(sys.argv[1]) if len(sys.argv) > 1 else 1 # Choose type of learning technique according to config_dict\nconfig_dict = {0: \"backprop\", 1: \"biprop\", 2: \"halfbiprop\", 3: \"nobias_backprop\", 4: \"nobias_biprop\", 5: \"nobias_halfbiprop\"}\n\nnum_classes = 10\n\nmodel_name = sys.argv[0].replace(\".py\", \"\") + \"_\" + config_dict[config_num]\nprint(\"Model name: \" + model_name)\n\n# load data\n# https://github.com/BIGBALLON/cifar-10-cnn/blob/master/1_Lecun_Network/LeNet_keras.py\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\nx_train /= 255\nx_test /= 255\n\n# for reproducibility\nnp.random.seed(0)\ntf.set_random_seed(0)\n\nsess = tf.InteractiveSession()\n\n# three convolutional layers with their channel counts, and a\n# fully connected layer (tha last layer has 10 softmax neurons)\nK = 4 # first convolutional layer output depth\nL = 8 # second convolutional layer output depth\nM = 12 # third convolutional layer\nN = 200 # fully connected layer\n\nwith tf.name_scope(\"input\"):\n # input X & output GX_: 28x28 grayscale images, the first dimension (None) will index the images in the mini-batch\n X = tf.placeholder(tf.float32, [None, 32, 32, 3])\n X_noisy = tf.placeholder(tf.float32, [None, 32, 32, 3])\n X_adv = tf.placeholder(tf.float32, [None, 32, 32, 3])\n\n GX_ = tf.placeholder(tf.float32, [None, 32, 32, 3])\n\n # output Y_ & input GY: labels for classification and generation\n Y_ = tf.placeholder(tf.float32, [None, num_classes])\n GY = tf.placeholder(tf.float32, [None, num_classes])\n\n # variable learning rate\n lr = tf.placeholder(tf.float32)\n\n # variable batch size\n BS = tf.placeholder(tf.int32)\n\n input_test_sum = tf.summary.image(\"input\", X, num_classes)\n input_noisy_sum = tf.summary.image(\"input-noisy\", X_noisy, num_classes)\n input_adv_sum = tf.summary.image(\"input-adv\", X_adv, num_classes)\n\nwith tf.name_scope(\"classifier-generator\"):\n C_W1 = utils.weight_variable([5, 5, 3, K], stddev=0.1, name=\"C_W1\")\n C_W2 = utils.weight_variable([5, 5, K, L], stddev=0.1, name=\"C_W2\")\n C_W3 = utils.weight_variable([4, 4, L, M], stddev=0.1, name=\"C_W3\")\n\n C_W4 = utils.weight_variable([8 * 8 * M, N], stddev=0.1, name=\"C_W4\")\n C_W5 = utils.weight_variable([N, num_classes], stddev=0.1, name=\"C_W5\")\n\ndef classifier(x, reuse=None):\n with tf.variable_scope(\"classifier\", reuse=reuse) as scope_c:\n # Variables for classifier\n C_B1 = utils.bias_variable([K], name=\"C_B1\")\n C_B2 = utils.bias_variable([L], name=\"C_B2\")\n C_B3 = utils.bias_variable([M], name=\"C_B3\")\n C_B4 = utils.bias_variable([N], name=\"C_B4\")\n C_B5 = utils.bias_variable([num_classes], name=\"C_B5\")\n\n stride = 1 # output is 32x32\n H1 = tf.nn.relu(tf.nn.conv2d(x, C_W1, strides=[1, stride, stride, 1], padding='SAME') + C_B1)\n stride = 2 # output is 16x16\n H2 = tf.nn.relu(tf.nn.conv2d(H1, C_W2, strides=[1, stride, stride, 1], 
padding='SAME') + C_B2)\n stride = 2 # output is 8x8\n H3 = tf.nn.relu(tf.nn.conv2d(H2, C_W3, strides=[1, stride, stride, 1], padding='SAME') + C_B3)\n\n # reshape the output from the third convolution for the fully connected layer\n HH3 = tf.reshape(H3, shape=[-1, 8 * 8 * M])\n\n H4 = tf.nn.relu(tf.matmul(HH3, C_W4) + C_B4)\n Ylogits = tf.matmul(H4, C_W5) + C_B5\n\n Ysigmoid = tf.nn.sigmoid(Ylogits)\n Ysoftmax = tf.nn.softmax(Ylogits)\n\n return Ysoftmax, Ysigmoid, Ylogits\n\nclass ClassifierModel(Model):\n def get_logits(self, x):\n Ysoftmax, Ysigmoid, Ylogits = classifier(x, reuse=True)\n return Ylogits\n\n# Generator of random input reuses weights of classifier\ndef generator(y, bs, reuse=None):\n with tf.variable_scope(\"generator\", reuse=reuse) as scope_g:\n # Variables for classifier\n G_B1 = utils.bias_variable([3], name=\"G_B1\")\n G_B2 = utils.bias_variable([K], name=\"G_B2\")\n G_B3 = utils.bias_variable([L], name=\"G_B3\")\n G_B4 = utils.bias_variable([M*8*8], name=\"G_B4\")\n G_B5 = utils.bias_variable([N], name=\"G_B5\")\n\n GH4 = tf.nn.relu(tf.matmul(y, tf.transpose(C_W5)) + G_B5)\n GH3 = tf.nn.relu(tf.matmul(GH4, tf.transpose(C_W4)) + G_B4)\n GHH3 = tf.reshape(GH3, shape=[-1, 8, 8, M])\n stride = 2 # output is 14x14\n GH2 = tf.nn.relu(tf.nn.conv2d_transpose(GHH3, C_W3, output_shape=[bs, 16, 16, L], strides=[1, stride, stride, 1]) + G_B3) #deconv2 W3\n stride = 2 # output is 28x28\n GH1 = tf.nn.relu(tf.nn.conv2d_transpose(GH2, C_W2, output_shape=[bs, 32, 32, K], strides=[1, stride, stride, 1]) + G_B2)#deconv2 W2\n stride = 1 # output is 28x28\n GXlogits = tf.nn.conv2d_transpose(GH1, C_W1, output_shape=[bs, 32, 32, 3], strides=[1, stride, stride, 1]) + G_B1#deconv2 W1\n GXsigmoid = tf.nn.sigmoid(GXlogits)\n\n return GXsigmoid, GXlogits\n\ndef plot_generator(samples):\n if num_classes == 10:\n fig = plt.figure(figsize=(5, 2))\n gs = gridspec.GridSpec(2, 5)\n else:\n fig = plt.figure(figsize=(10, 10))\n gs = gridspec.GridSpec(10, 10)\n gs.update(wspace=0.05, hspace=0.05)\n for i, sample in enumerate(samples):\n ax = plt.subplot(gs[i])\n plt.axis('off')\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_aspect('equal')\n plt.imshow(sample.reshape((32,32,3)))\n\n return fig\n\nGXsigmoid, GXlogits = generator(GY, BS)\nGXsigmoid_test, GXlogits_test = generator(GY, BS, reuse=True)\n\nYsoftmax, Ysigmoid, Ylogits = classifier(X)\nmodel_classifier = ClassifierModel()\n\nYsoftmax_noisy, Ysigmoid_noisy, Ylogits_noisy = classifier(X_noisy, reuse=True)\nYsoftmax_adv, Ysigmoid_adv, Ylogits_adv = classifier(X_adv, reuse=True)\n\nwith tf.name_scope(\"loss\"):\n c_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_))\n\n g_loss = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(logits=GXlogits, labels=GX_))\n\n \"\"\" Summary \"\"\"\n g_loss_sum = tf.summary.scalar(\"g_loss\", g_loss)\n c_loss_sum = tf.summary.scalar(\"c_loss\", c_loss)\n\n# accuracy of the trained model, between 0 (worst) and 1 (best)\nwith tf.name_scope(\"accuracy\"):\n with tf.name_scope(\"correct_prediction\"):\n correct_prediction = tf.equal(tf.argmax(Ysoftmax, 1), tf.argmax(Y_, 1))\n with tf.name_scope(\"accuracy\"):\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n with tf.name_scope(\"correct_prediction_noisy\"):\n correct_prediction_noisy = tf.equal(tf.argmax(Ysoftmax_noisy, 1), tf.argmax(Y_, 1))\n with tf.name_scope(\"accuracy_noisy\"):\n accuracy_noisy = tf.reduce_mean(tf.cast(correct_prediction_noisy, tf.float32))\n with 
tf.name_scope(\"correct_prediction_adv\"):\n correct_prediction_adv = tf.equal(tf.argmax(Ysoftmax_adv, 1), tf.argmax(Y_, 1))\n with tf.name_scope(\"accuracy_adv\"):\n accuracy_adv = tf.reduce_mean(tf.cast(correct_prediction_adv, tf.float32))\n\n \"\"\" Summary \"\"\"\n accuracy_sum = tf.summary.scalar(\"accuracy\", accuracy)\n accuracy_noisy_sum = tf.summary.scalar(\"accuracy_noisy\", accuracy_noisy)\n accuracy_adv_sum = tf.summary.scalar(\"accuracy_adv\", accuracy_adv)\n\nwith tf.name_scope(\"max_output\"):\n with tf.name_scope(\"max_output_test\"):\n max_output_sigmoid_test = tf.reduce_max(Ysigmoid)\n max_output_softmax_test = tf.reduce_max(Ysoftmax)\n with tf.name_scope(\"max_output_noise\"):\n max_output_sigmoid_noise = tf.reduce_max(Ysigmoid_noisy)\n max_output_softmax_noise = tf.reduce_max(Ysoftmax_noisy)\n with tf.name_scope(\"max_output_adv\"):\n max_output_sigmoid_adv = tf.reduce_max(Ysigmoid_adv)\n max_output_softmax_adv = tf.reduce_max(Ysoftmax_adv)\n\n \"\"\" Summary \"\"\"\n max_output_sigmoid_test_sum = tf.summary.scalar(\"max_output_sigmoid_test\", max_output_sigmoid_test)\n max_output_softmax_test_sum = tf.summary.scalar(\"max_output_softmax_test\", max_output_softmax_test)\n max_output_sigmoid_noise_sum = tf.summary.scalar(\"max_output_sigmoid_noise\", max_output_sigmoid_noise)\n max_output_softmax_noise_sum = tf.summary.scalar(\"max_output_softmax_noise\", max_output_softmax_noise)\n max_output_sigmoid_adv_sum = tf.summary.scalar(\"max_output_sigmoid_adv\", max_output_sigmoid_adv)\n max_output_softmax_adv_sum = tf.summary.scalar(\"max_output_softmax_adv\", max_output_softmax_adv)\n\nutils.show_all_variables()\nt_vars = tf.trainable_variables()\nc_vars = [var for var in t_vars if 'C_' in var.name]\\\n if config_num < 3 else [var for var in t_vars if 'C_W' in var.name]\ng_vars = [var for var in t_vars if 'C_W' in var.name or 'G_' in var.name]\\\n if config_num < 3 else c_vars\n\n# training step\nlearning_rate_dis = lr\nlearning_rate_gen = lr\n\nwith tf.name_scope(\"train\"):\n c_train = tf.train.AdamOptimizer(learning_rate_dis).minimize(c_loss, var_list=c_vars)\n g_train = tf.train.AdamOptimizer(learning_rate_gen).minimize(g_loss, var_list=g_vars)\n\n# final summary operations\ng_sum = tf.summary.merge([g_loss_sum])\nc_sum = tf.summary.merge([input_test_sum, accuracy_sum, c_loss_sum, max_output_sigmoid_test_sum, max_output_softmax_test_sum])\nnoise_sum = tf.summary.merge([max_output_sigmoid_noise_sum, max_output_softmax_noise_sum])\nnoisy_sum = tf.summary.merge([input_noisy_sum, accuracy_noisy_sum])\nadv_sum = tf.summary.merge([input_adv_sum, accuracy_adv_sum, max_output_sigmoid_adv_sum, max_output_softmax_adv_sum])\n\nfolder_out = 'out/' + model_name + '/'\nif not os.path.exists(folder_out):\n os.makedirs(folder_out)\n\nfolder_csv = 'csv/' + model_name + '/'\nif not os.path.exists(folder_csv):\n os.makedirs(folder_csv)\n\nfolder_logs = 'logs/' + model_name\nif not os.path.exists(folder_csv):\n os.makedirs(folder_logs)\n\nwriter = tf.summary.FileWriter(folder_logs, sess.graph)\n\nbatch_size = 100\nnum_train_images = x_train.shape[0]\nnum_batches = num_train_images // batch_size\nall_classes = np.eye(num_classes)\n\ncounter = 0\n\nfgsm_params = {'eps': 0.03,\n 'clip_min': 0.,\n 'clip_max': 1.}\n\nrandom_noise = np.random.random_sample(x_test.shape)\ntest_image_with_noise = np.clip(x_test + 0.1*random_noise, 0., 1.)\n\naccuracy_list = []\nsigmoid_list = []\nsoftmax_list = []\n\n# initialize all variables\ntf.global_variables_initializer().run()\n\nfor i in range(50001):\n if 
i % num_batches == 0:\n idx_train = np.arange(x_train.shape[0])\n np.random.shuffle(idx_train)\n x_train, y_train = x_train[idx_train], y_train[idx_train]\n \n idx = i % num_batches\n batch_X = x_train[idx*batch_size:(idx+1)*batch_size]\n batch_Y = y_train[idx*batch_size:(idx+1)*batch_size]\n\n # learning rate decay\n max_learning_rate = 0.003\n min_learning_rate = 0.0001\n decay_speed = 2000.0\n learning_rate = min_learning_rate + (max_learning_rate - min_learning_rate) * np.exp(-i/decay_speed)\n\n if i % 500 == 0 or i == 50000:\n counter += 1\n # Saves generated images\n samples = sess.run(GXsigmoid_test, feed_dict={GY: all_classes, BS: num_classes})\n fig = plot_generator(samples)\n plt.savefig(folder_out+\"gen_\"+str(i).zfill(6)+'.png', bbox_inches='tight')\n plt.close(fig)\n\n attack_fgsm = FastGradientMethod(model_classifier, sess=sess)\n adv_x_np = attack_fgsm.generate_np(x_test, **fgsm_params)\n fig = plot_generator(adv_x_np[:num_classes])\n plt.savefig(folder_out+\"adv_\"+str(i).zfill(6)+'.png', bbox_inches='tight')\n plt.close(fig)\n\n accu_test, c_loss_test, sigmoid_test, softmax_test, sum_c = sess.run([accuracy, c_loss, max_output_sigmoid_test, max_output_softmax_test, c_sum], {X: x_test, Y_: y_test})\n writer.add_summary(sum_c, i)\n g_loss_test, sum_g = sess.run([g_loss, g_sum], {GY: batch_Y, GX_: batch_X, BS: batch_size})\n writer.add_summary(sum_g, i)\n\n print(str(i) + \": epoch \" + str(i*batch_size//x_train.shape[0]+1)\\\n + \" - test loss class: \" + str(c_loss_test) + \" test loss gen: \" + str(g_loss_test))\n print(\"Real test images - Sigmoid: \" + str(sigmoid_test) + \"\\tSoftmax: \" + str(softmax_test) + \"\\taccuracy: \"+ str(accu_test))\n\n sigmoid_random, softmax_random, sum_random = sess.run([max_output_sigmoid_noise, max_output_softmax_noise, noise_sum], {X_noisy: random_noise})\n writer.add_summary(sum_random, i)\n accu_random, sum_noisy = sess.run([accuracy_noisy, noisy_sum], {X_noisy: test_image_with_noise, Y_: y_test})\n writer.add_summary(sum_noisy, i)\n print(\"Random noise images - Sigmoid: \" + str(sigmoid_random) + \"\\tSoftmax: \" + str(softmax_random) + \"\\taccuracy: \"+ str(accu_random))\n\n accu_adv, sigmoid_adv, softmax_adv, sum_adv = sess.run([accuracy_adv, max_output_sigmoid_adv, max_output_softmax_adv, adv_sum], {X_adv: adv_x_np, Y_: y_test})\n writer.add_summary(sum_adv, i)\n print(\"Adversarial examples - Sigmoid: \" + str(sigmoid_adv) + \"\\tSoftmax: \" + str(softmax_adv) + \"\\taccuracy: \"+ str(accu_adv))\n print()\n accuracy_list.append([i, accu_test, accu_random, accu_adv, counter])\n sigmoid_list.append([i, sigmoid_test, sigmoid_random, sigmoid_adv, counter])\n softmax_list.append([i, softmax_test, softmax_random, softmax_adv, counter])\n\n sess.run(c_train, {X: batch_X, Y_: batch_Y, lr: learning_rate})\n if config_num == 1 or (config_num == 2 and i < 25000) or\\\n config_num == 4 or (config_num == 5 and i < 25000):\n sess.run(g_train, {GY: batch_Y, GX_: batch_X, lr: learning_rate, BS: batch_size})\n\nwriter.close()\n\n# Save data in csv\nwith open(folder_csv+\"accuracy.csv\", \"w\") as output:\n writer = csv.writer(output, lineterminator='\\n')\n writer.writerows(accuracy_list)\n\nwith open(folder_csv+\"sigmoid.csv\", \"w\") as output:\n writer = csv.writer(output, lineterminator='\\n')\n writer.writerows(sigmoid_list)\n\nwith open(folder_csv+\"softmax.csv\", \"w\") as output:\n writer = csv.writer(output, lineterminator='\\n')\n writer.writerows(softmax_list)\n\n# Load data in csv\naccu_data = 
utils_csv.get_data_csv_file(folder_csv+\"accuracy.csv\")\nsigmoid_data = utils_csv.get_data_csv_file(folder_csv+\"sigmoid.csv\")\nsoftmax_data = utils_csv.get_data_csv_file(folder_csv+\"softmax.csv\")\n\n# Print best values\nutils_csv.print_best(accu_data, sigmoid_data, softmax_data, folder_csv+\"summary.txt\")\n"
] | [
[
"tensorflow.summary.scalar",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.summary.image",
"tensorflow.reshape",
"tensorflow.reduce_max",
"numpy.random.seed",
"tensorflow.variable_scope",
"tensorflow.matmul",
"tensorflow.InteractiveSession",
"tensorflow.name_scope",
"tensorflow.summary.merge",
"tensorflow.summary.FileWriter",
"tensorflow.nn.softmax",
"tensorflow.global_variables_initializer",
"matplotlib.pyplot.figure",
"matplotlib.gridspec.GridSpec",
"tensorflow.transpose",
"tensorflow.nn.sigmoid_cross_entropy_with_logits",
"numpy.eye",
"tensorflow.nn.conv2d_transpose",
"matplotlib.pyplot.axis",
"numpy.arange",
"tensorflow.cast",
"tensorflow.set_random_seed",
"matplotlib.pyplot.close",
"numpy.random.random_sample",
"tensorflow.nn.sigmoid",
"tensorflow.placeholder",
"numpy.random.shuffle",
"tensorflow.train.AdamOptimizer",
"numpy.exp",
"tensorflow.trainable_variables",
"tensorflow.nn.conv2d",
"matplotlib.pyplot.subplot",
"numpy.clip",
"tensorflow.argmax"
]
] |
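The training loop in this row anneals the learning rate with the exponential schedule lr(i) = min + (max - min) * exp(-i / decay). A plain NumPy check of the constants used above (0.003, 0.0001, 2000.0 are copied from the row; no TensorFlow needed):

import numpy as np

max_learning_rate = 0.003
min_learning_rate = 0.0001
decay_speed = 2000.0

def lr_at(i):
    # same formula as the row's training loop
    return min_learning_rate + (max_learning_rate - min_learning_rate) * np.exp(-i / decay_speed)

print(lr_at(0))      # 0.003 at the first step
print(lr_at(2000))   # ~0.00117 after one decay constant
print(lr_at(50000))  # ~0.0001 near the end of training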
kathryn-garside/PyDMD-fork | [
"0158c4144019f0899ce34ec44286b0f700c56b38"
] | [
"pydmd/hankeldmd.py"
] | [
"\"\"\"\nDerived module from dmdbase.py for hankel dmd.\n\nReference:\n- H. Arbabi, I. Mezic, Ergodic theory, dynamic mode decomposition, and\ncomputation of spectral properties of the Koopman operator. SIAM Journal on\nApplied Dynamical Systems, 2017, 16.4: 2096-2126.\n\"\"\"\nfrom copy import copy\n\nimport numpy as np\n\nfrom .dmdbase import DMDBase\nfrom .dmd import DMD\n\n\nclass HankelDMD(DMDBase):\n \"\"\"\n Hankel Dynamic Mode Decomposition\n\n :param svd_rank: the rank for the truncation; If 0, the method computes the\n optimal rank and uses it for truncation; if positive interger, the\n method uses the argument for the truncation; if float between 0 and 1,\n the rank is the number of the biggest singular values that are needed\n to reach the 'energy' specified by `svd_rank`; if -1, the method does\n not compute truncation.\n :type svd_rank: int or float\n :param int tlsq_rank: rank truncation computing Total Least Square. Default\n is 0, that means no truncation.\n :param bool exact: flag to compute either exact DMD or projected DMD.\n Default is False.\n :param opt: argument to control the computation of DMD modes amplitudes.\n See :class:`DMDBase`. Default is False.\n :type opt: bool or int\n :param rescale_mode: Scale Atilde as shown in\n 10.1016/j.jneumeth.2015.10.010 (section 2.4) before computing its\n eigendecomposition. None means no rescaling, 'auto' means automatic\n rescaling using singular values, otherwise the scaling factors.\n :type rescale_mode: {'auto'} or None or numpy.ndarray\n :param bool forward_backward: If True, the low-rank operator is computed\n like in fbDMD (reference: https://arxiv.org/abs/1507.02264). Default is\n False.\n :param int d: the new order for spatial dimension of the input snapshots.\n Default is 1.\n :param sorted_eigs: Sort eigenvalues (and modes/dynamics accordingly) by\n magnitude if `sorted_eigs='abs'`, by real part (and then by imaginary\n part to break ties) if `sorted_eigs='real'`. Default: False.\n :type sorted_eigs: {'real', 'abs'} or False\n :param reconstruction_method: Method used to reconstruct the snapshots of\n the dynamical system from the multiple versions available due to how\n HankelDMD is conceived. If `'first'` (default) the first version\n available is selected (i.e. the nearest to the 0-th row in the\n augmented matrix). If `'mean'` we compute the element-wise mean. 
If\n `reconstruction_method` is an array of float values we compute the\n weighted average (for each snapshots) using the given values as weights\n (the number of weights must be equal to `d`).\n :type reconstruction_method: {'first', 'mean'} or array-like\n \"\"\"\n\n def __init__(\n self,\n svd_rank=0,\n tlsq_rank=0,\n exact=False,\n opt=False,\n rescale_mode=None,\n forward_backward=False,\n d=1,\n sorted_eigs=False,\n reconstruction_method=\"first\",\n ):\n super().__init__(\n svd_rank=svd_rank,\n tlsq_rank=tlsq_rank,\n exact=exact,\n opt=opt,\n rescale_mode=rescale_mode,\n sorted_eigs=sorted_eigs,\n )\n self._d = d\n\n if isinstance(reconstruction_method, list):\n if len(reconstruction_method) != d:\n raise ValueError(\n \"The length of the array of weights must be equal to d\"\n )\n elif isinstance(reconstruction_method, np.ndarray):\n if (\n reconstruction_method.ndim > 1\n or reconstruction_method.shape[0] != d\n ):\n raise ValueError(\n \"The length of the array of weights must be equal to d\"\n )\n self._reconstruction_method = reconstruction_method\n\n self._sub_dmd = DMD(\n svd_rank=svd_rank,\n tlsq_rank=tlsq_rank,\n exact=exact,\n opt=opt,\n rescale_mode=rescale_mode,\n forward_backward=forward_backward,\n sorted_eigs=sorted_eigs,\n )\n\n @property\n def d(self):\n \"\"\"The new order for spatial dimension of the input snapshots.\"\"\"\n return self._d\n\n def _hankel_first_occurrence(self, time):\n r\"\"\"\n For a given `t` such that there is :math:`k \\in \\mathbb{N}` such that\n :math:`t = t_0 + k dt`, return the index of the first column in Hankel\n pseudo matrix (see also :func:`_pseudo_hankel_matrix`) which contains\n the snapshot corresponding to `t`.\n\n :param time: The time corresponding to the requested snapshot.\n :return: The index of the first appeareance of `time` in the columns of\n Hankel pseudo matrix.\n :rtype: int\n \"\"\"\n return max(\n 0,\n (time - self.original_time[\"t0\"]) // self.dmd_time[\"dt\"]\n - (self.original_time[\"t0\"] + self.d - 1),\n )\n\n def _update_sub_dmd_time(self):\n \"\"\"\n Update the time dictionaries (`dmd_time` and `original_time`) of\n the auxiliary DMD instance `HankelDMD._sub_dmd` after an update of the\n time dictionaries of the time dictionaries of this instance of the\n higher level instance of `HankelDMD`.\n \"\"\"\n self._sub_dmd.dmd_time[\"t0\"] = self._hankel_first_occurrence(\n self.dmd_time[\"t0\"]\n )\n self._sub_dmd.dmd_time[\"tend\"] = self._hankel_first_occurrence(\n self.dmd_time[\"tend\"]\n )\n\n def reconstructions_of_timeindex(self, timeindex=None):\n \"\"\"\n Build a collection of all the available versions of the given\n `timeindex`. The indexing of time instants is the same used for\n :func:`reconstructed_data`. For each time instant there are at least\n one and at most `d` versions. If `timeindex` is `None` the function\n returns the whole collection, for all the time instants.\n\n :param int timeindex: The index of the time snapshot.\n :return: a collection of all the available versions for the given\n time snapshot, or for all the time snapshots if `timeindex` is\n `None` (in the second case, time varies along the first dimension\n of the array returned).\n :rtype: numpy.ndarray or list\n \"\"\"\n self._update_sub_dmd_time()\n\n rec = self._sub_dmd.reconstructed_data\n space_dim = rec.shape[0] // self.d\n time_instants = rec.shape[1] + self.d - 1\n\n # for each time instance, we collect all its appearences. 
each\n # snapshot appears at most d times (for instance, the first appears\n # only once).\n reconstructed_snapshots = np.full(\n (time_instants, self.d, space_dim), np.nan, dtype=rec.dtype\n )\n\n c_idxes = (\n np.array(range(self.d))[:, None]\n .repeat(2, axis=1)[None, :]\n .repeat(rec.shape[1], axis=0)\n )\n c_idxes[:, :, 0] += np.array(range(rec.shape[1]))[:, None]\n\n reconstructed_snapshots[c_idxes[:, :, 0], c_idxes[:, :, 1]] = np.array(\n np.swapaxes(np.split(rec.T, self.d, axis=1), 0, 1)\n )\n\n if timeindex is None:\n return reconstructed_snapshots\n\n return reconstructed_snapshots[timeindex]\n\n def _first_reconstructions(self, reconstructions):\n \"\"\"Return the first occurrence of each snapshot available in the given\n matrix (which must be the result of `self._sub_dmd.reconstructed_data`,\n or have the same shape).\n\n :param reconstructions: A matrix of (higher-order) snapshots having\n shape `(space*self.d, time_instants)`\n :type reconstructions: np.ndarray\n :return: The first snapshot that occurs in `reconstructions` for each\n available time instant.\n :rtype: np.ndarray\n \"\"\"\n first_nonmasked_idx = np.repeat(\n np.array(range(reconstructions.shape[0]))[:, None], 2, axis=1\n )\n first_nonmasked_idx[self.d - 1 :, 1] = self.d - 1\n\n return reconstructions[\n first_nonmasked_idx[:, 0], first_nonmasked_idx[:, 1]\n ].T\n\n @property\n def reconstructed_data(self):\n self._update_sub_dmd_time()\n\n rec = self.reconstructions_of_timeindex()\n rec = np.ma.array(rec, mask=np.isnan(rec))\n\n if self._reconstruction_method == \"first\":\n result = self._first_reconstructions(rec)\n elif self._reconstruction_method == \"mean\":\n result = np.mean(rec, axis=1).T\n elif isinstance(self._reconstruction_method, (np.ndarray, list)):\n result = np.average(\n rec, axis=1, weights=self._reconstruction_method\n ).T\n else:\n raise ValueError(\n \"The reconstruction method wasn't recognized: {}\".format(\n self._reconstruction_method\n )\n )\n\n # we want to return only the requested timesteps\n time_index = min(\n self.d - 1,\n int(\n (self.dmd_time[\"t0\"] - self.original_time[\"t0\"])\n // self.dmd_time[\"dt\"]\n ),\n )\n result = result[:, time_index : time_index + len(self.dmd_timesteps)]\n\n return result.filled(fill_value=0)\n\n def _pseudo_hankel_matrix(self, X):\n \"\"\"\n Method for arranging the input snapshots `X` into the (pseudo) Hankel\n matrix. 
The attribute `d` controls the shape of the output matrix.\n :Example:\n\n >>> from pydmd import HankelDMD\n >>> dmd = HankelDMD(d=2)\n >>> a = np.array([[1, 2, 3, 4, 5]])\n >>> dmd._pseudo_hankel_matrix(a)\n array([[1, 2, 3, 4],\n [2, 3, 4, 5]])\n >>> dmd = pydmd.hankeldmd.HankelDMD(d=4)\n >>> dmd._pseudo_hankel_matrix(a)\n array([[1, 2],\n [2, 3],\n [3, 4],\n [4, 5]])\n\n \"\"\"\n return np.concatenate(\n [X[:, i : X.shape[1] - self.d + i + 1] for i in range(self.d)],\n axis=0,\n )\n\n @property\n def modes(self):\n return self._sub_dmd.modes\n\n @property\n def eigs(self):\n return self._sub_dmd.eigs\n\n @property\n def amplitudes(self):\n return self._sub_dmd.amplitudes\n\n @property\n def operator(self):\n return self._sub_dmd.operator\n\n @property\n def svd_rank(self):\n return self._sub_dmd.svd_rank\n\n @property\n def modes_activation_bitmask(self):\n return self._sub_dmd.modes_activation_bitmask\n\n @modes_activation_bitmask.setter\n def modes_activation_bitmask(self, value):\n self._sub_dmd.modes_activation_bitmask = value\n\n # due to how we implemented HankelDMD we need an alternative implementation\n # of __getitem__\n def __getitem__(self, key):\n \"\"\"\n Restrict the DMD modes used by this instance to a subset of indexes\n specified by keys. The value returned is a shallow copy of this DMD\n instance, with a different value in :func:`modes_activation_bitmask`.\n Therefore assignments to attributes are not reflected into the original\n instance.\n\n However the DMD instance returned should not be used for low-level\n manipulations on DMD modes, since the underlying DMD operator is shared\n with the original instance. For this reasons modifications to NumPy\n arrays may result in unwanted and unspecified situations which should\n be avoided in principle.\n\n :param key: An index (integer), slice or list of indexes.\n :type key: int or slice or list or np.ndarray\n :return: A shallow copy of this DMD instance having only a subset of\n DMD modes which are those indexed by `key`.\n :rtype: HankelDMD\n \"\"\"\n\n sub_dmd_copy = copy(self._sub_dmd)\n sub_dmd_copy.allocate_proxy()\n\n shallow_copy = copy(self)\n shallow_copy._sub_dmd = sub_dmd_copy\n return DMDBase.__getitem__(shallow_copy, key)\n\n def fit(self, X):\n \"\"\"\n Compute the Dynamic Modes Decomposition to the input data.\n\n :param X: the input snapshots.\n :type X: numpy.ndarray or iterable\n \"\"\"\n snp, self._snapshots_shape = self._col_major_2darray(X)\n self._snapshots = self._pseudo_hankel_matrix(snp)\n self._sub_dmd.fit(self._snapshots)\n\n # Default timesteps\n n_samples = snp.shape[1]\n self._set_initial_time_dictionary(\n {\"t0\": 0, \"tend\": n_samples - 1, \"dt\": 1}\n )\n\n return self\n"
] | [
[
"numpy.isnan",
"numpy.full",
"numpy.average",
"numpy.mean",
"numpy.split"
]
] |
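The core data arrangement in this row is `_pseudo_hankel_matrix`: d time-shifted copies of the snapshot matrix stacked row-wise. A standalone NumPy version that reproduces the docstring's own example:

import numpy as np

def pseudo_hankel(X, d):
    # same slicing as the row's _pseudo_hankel_matrix
    return np.concatenate(
        [X[:, i: X.shape[1] - d + i + 1] for i in range(d)], axis=0
    )

a = np.array([[1, 2, 3, 4, 5]])
print(pseudo_hankel(a, 2))
# [[1 2 3 4]
#  [2 3 4 5]]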
tkelestemur/pfrl | [
"388855fb30313185d43ae0d0f4b694be647a5c43"
] | [
"pfrl/policies/softmax_policy.py"
] | [
"import torch\nfrom torch import nn\nfrom torch.distributions import Categorical\n\n\nclass SoftmaxCategoricalHead(nn.Module):\n def forward(self, logits):\n return torch.distributions.Categorical(logits=logits)\n\n\n# class MultiSoftmaxCategoricalHead(nn.Module):\n# def forward(self, logits):\n# return Independent(Categorical(logits=logits), reinterpreted_batch_ndims=1)\n\n\nclass MultiCategorical():\n def __init__(self, dims=None, logits=None):\n self.dims = dims\n logits = torch.split(logits, tuple(dims), dim=1)\n self.dists = [Categorical(logits=logits_dim) for logits_dim in logits]\n\n def log_prob(self, actions):\n actions = torch.unbind(actions, dim=1)\n logprobs = torch.stack([\n dist.log_prob(action) for dist, action in zip(self.dists, actions)\n ], dim=1)\n return logprobs.sum(dim=1)\n\n def entropy(self):\n return torch.stack([dist.entropy() for dist in self.dists], dim=1).sum(dim=1)\n\n def sample(self):\n return torch.stack([dist.sample() for dist in self.dists], dim=1)\n\n def mode(self):\n return torch.stack([\n torch.argmax(dist.probs, dim=1) for dist in self.dists\n ], dim=1)\n\n\nclass MultiSoftmaxCategoricalHead(nn.Module):\n def __init__(self, dims=None):\n self.dims = dims\n super().__init__()\n\n def forward(self, logits):\n return MultiCategorical(dims=self.dims, logits=logits)\n"
] | [
[
"torch.unbind",
"torch.argmax",
"torch.distributions.Categorical"
]
] |
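The MultiCategorical class above factors one logit vector into independent per-dimension Categorical distributions; the joint log-probability is the sum of the per-head log-probabilities. A usage sketch with made-up head sizes (3, 2), mirroring its split/stack logic:

import torch
from torch.distributions import Categorical

logits = torch.randn(4, 5)             # batch of 4, dims = (3, 2)
parts = torch.split(logits, (3, 2), dim=1)
dists = [Categorical(logits=p) for p in parts]
actions = torch.stack([d.sample() for d in dists], dim=1)  # shape (4, 2)
log_prob = torch.stack(
    [d.log_prob(a) for d, a in zip(dists, torch.unbind(actions, dim=1))],
    dim=1,
).sum(dim=1)                            # joint log-probability, shape (4,)
print(actions.shape, log_prob.shape)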
LaudateCorpus1/sunpy | [
"f7bdf22e5229a577c5851c1e05502f0d68b4b369"
] | [
"sunpy/coordinates/wcs_utils.py"
] | [
"import numpy as np\n\nimport astropy.units as u\nimport astropy.wcs.utils\nfrom astropy.coordinates import (\n ITRS,\n BaseCoordinateFrame,\n CartesianRepresentation,\n SkyCoord,\n SphericalRepresentation,\n)\nfrom astropy.wcs import WCS\n\nfrom sunpy import log\nfrom .frames import (\n BaseCoordinateFrame,\n Heliocentric,\n HeliographicCarrington,\n HeliographicStonyhurst,\n Helioprojective,\n SunPyBaseCoordinateFrame,\n)\n\n__all__ = ['solar_wcs_frame_mapping', 'solar_frame_to_wcs_mapping']\n\ntry:\n # TODO: Remove vendored version after Astropy 5.0\n from astropy.wcs.utils import obsgeo_to_frame\nexcept ImportError:\n def obsgeo_to_frame(obsgeo, obstime):\n \"\"\"\n Convert a WCS obsgeo property into an `~builtin_frames.ITRS` coordinate frame.\n\n Parameters\n ----------\n obsgeo : array-like\n A shape ``(6, )`` array representing ``OBSGEO-[XYZ], OBSGEO-[BLH]`` as\n returned by ``WCS.wcs.obsgeo``.\n\n obstime : time-like\n The time assiociated with the coordinate, will be passed to\n `~.builtin_frames.ITRS` as the obstime keyword.\n\n Returns\n -------\n `~.builtin_frames.ITRS`\n An `~.builtin_frames.ITRS` coordinate frame\n representing the coordinates.\n\n Notes\n -----\n\n The obsgeo array as accessed on a `.WCS` object is a length 6 numpy array\n where the first three elements are the coordinate in a cartesian\n representation and the second 3 are the coordinate in a spherical\n representation.\n\n This function priorities reading the cartesian coordinates, and will only\n read the spherical coordinates if the cartesian coordinates are either all\n zero or any of the cartesian coordinates are non-finite.\n\n In the case where both the spherical and cartesian coordinates have some\n non-finite values the spherical coordinates will be returned with the\n non-finite values included.\n\n \"\"\"\n if (obsgeo is None\n or len(obsgeo) != 6\n or np.all(np.array(obsgeo) == 0)\n or np.all(~np.isfinite(obsgeo))\n ): # NOQA\n raise ValueError(f\"Can not parse the 'obsgeo' location ({obsgeo}). 
\"\n \"obsgeo should be a length 6 non-zero, finite numpy array\")\n\n # If the cartesian coords are zero or have NaNs in them use the spherical ones\n if np.all(obsgeo[:3] == 0) or np.any(~np.isfinite(obsgeo[:3])):\n data = SphericalRepresentation(*(obsgeo[3:] * (u.deg, u.deg, u.m)))\n\n # Otherwise we assume the cartesian ones are valid\n else:\n data = CartesianRepresentation(*obsgeo[:3] * u.m)\n\n return ITRS(data, obstime=obstime)\n\n\ndef solar_wcs_frame_mapping(wcs):\n \"\"\"\n This function registers the coordinates frames to their FITS-WCS coordinate\n type values in the `astropy.wcs.utils.wcs_to_celestial_frame` registry.\n\n Parameters\n ----------\n wcs : astropy.wcs.WCS\n\n Returns\n -------\n astropy.coordinates.BaseCoordinateFrame\n \"\"\"\n\n if hasattr(wcs, \"coordinate_frame\"):\n return wcs.coordinate_frame\n\n dateobs = wcs.wcs.dateobs or None\n\n # Get observer coordinate from the WCS auxillary information\n required_attrs = {HeliographicStonyhurst: ['hgln_obs', 'hglt_obs', 'dsun_obs'],\n HeliographicCarrington: ['crln_obs', 'hglt_obs', 'dsun_obs']}\n\n # Get rsun from the WCS auxillary information\n rsun = wcs.wcs.aux.rsun_ref\n if rsun is not None:\n rsun *= u.m\n\n # TODO: remove these errors in sunpy 4.1\n bad_attrs = [f'.{attr}' for attr in ['rsun', 'heliographic_observer']\n if hasattr(wcs, attr)]\n if len(bad_attrs):\n raise ValueError(f\"The {' and '.join(bad_attrs)} attribute(s) on a WCS \"\n \"are no longer supported.\")\n\n observer = None\n for frame, attr_names in required_attrs.items():\n attrs = [getattr(wcs.wcs.aux, attr_name) for attr_name in attr_names]\n if all([attr is not None for attr in attrs]):\n kwargs = {'obstime': dateobs}\n if rsun is not None:\n kwargs['rsun'] = rsun\n if issubclass(frame, HeliographicCarrington):\n kwargs['observer'] = 'self'\n\n observer = frame(attrs[0] * u.deg,\n attrs[1] * u.deg,\n attrs[2] * u.m,\n **kwargs)\n\n # Read the observer out of obsgeo for ground based observers\n if observer is None:\n try:\n observer = obsgeo_to_frame(wcs.wcs.obsgeo, dateobs)\n observer = SkyCoord(observer, rsun=rsun)\n except ValueError as e:\n # The helper function assumes you know the obsgeo coords you are\n # parsing are good, we are not sure, so catch the error.\n\n # This approach could lead to an invalid observer (i.e. 
one of the\n # coords being NaN), but only if the WCS has been constructed like that.\n log.debug(f\"Could not parse obsgeo coordinates from WCS:\\n{e}\")\n\n # Collect all of the possible frame attributes, although some may be removed later\n frame_args = {'obstime': dateobs}\n if observer is not None:\n frame_args['observer'] = observer\n if rsun is not None:\n frame_args['rsun'] = rsun\n\n frame_class = _sunpy_frame_class_from_ctypes(wcs.wcs.ctype)\n\n if frame_class:\n if frame_class == HeliographicStonyhurst:\n frame_args.pop('observer', None)\n if frame_class == Heliocentric:\n frame_args.pop('rsun', None)\n\n return frame_class(**frame_args)\n\n\ndef _sunpy_frame_class_from_ctypes(ctypes):\n # Truncate the ctype to the first four letters\n ctypes = {c[:4] for c in ctypes}\n\n mapping = {\n Helioprojective: {'HPLN', 'HPLT'},\n HeliographicStonyhurst: {'HGLN', 'HGLT'},\n HeliographicCarrington: {'CRLN', 'CRLT'},\n Heliocentric: {'SOLX', 'SOLY'},\n }\n\n for frame_class, ctype_pair in mapping.items():\n if ctype_pair <= ctypes:\n return frame_class\n\n\ndef _set_wcs_aux_obs_coord(wcs, obs_frame):\n \"\"\"\n Set (in-place) observer coordinate information on a WCS.\n\n Parameters\n ----------\n wcs : astropy.wcs.WCS\n obs_frame : astropy.coordinates.SkyCoord, astropy.coordinates.CoordinateFrame\n \"\"\"\n # Sometimes obs_coord can be a SkyCoord, so convert down to a frame\n if hasattr(obs_frame, 'frame'):\n obs_frame = obs_frame.frame\n\n if isinstance(obs_frame, HeliographicStonyhurst):\n wcs.wcs.aux.hgln_obs = obs_frame.lon.to_value(u.deg)\n elif isinstance(obs_frame, HeliographicCarrington):\n wcs.wcs.aux.crln_obs = obs_frame.lon.to_value(u.deg)\n else:\n raise ValueError('obs_coord must be in a Stonyhurst or Carrington frame')\n # These two keywords are the same for Carrington and Stonyhurst\n wcs.wcs.aux.hglt_obs = obs_frame.lat.to_value(u.deg)\n wcs.wcs.aux.dsun_obs = obs_frame.radius.to_value(u.m)\n\n\ndef solar_frame_to_wcs_mapping(frame, projection='TAN'):\n \"\"\"\n For a given frame, this function returns the corresponding WCS object.\n It registers the WCS coordinates types from their associated frame in the\n `astropy.wcs.utils.celestial_frame_to_wcs` registry.\n\n Parameters\n ----------\n frame : astropy.coordinates.BaseCoordinateFrame\n projection : str, optional\n\n Returns\n -------\n astropy.wcs.WCS\n \"\"\"\n wcs = WCS(naxis=2)\n\n if hasattr(frame, 'rsun'):\n wcs.wcs.aux.rsun_ref = frame.rsun.to_value(u.m)\n\n if hasattr(frame, 'observer') and frame.observer is not None:\n if isinstance(frame.observer, BaseCoordinateFrame):\n observer = frame.observer\n elif frame.observer == 'self':\n observer = frame\n _set_wcs_aux_obs_coord(wcs, observer)\n\n if isinstance(frame, SunPyBaseCoordinateFrame):\n\n if frame.obstime:\n wcs.wcs.dateobs = frame.obstime.utc.isot\n\n if isinstance(frame, Helioprojective):\n xcoord = 'HPLN' + '-' + projection\n ycoord = 'HPLT' + '-' + projection\n wcs.wcs.cunit = ['arcsec', 'arcsec']\n elif isinstance(frame, Heliocentric):\n xcoord = 'SOLX'\n ycoord = 'SOLY'\n wcs.wcs.cunit = ['deg', 'deg']\n elif isinstance(frame, HeliographicCarrington):\n xcoord = 'CRLN' + '-' + projection\n ycoord = 'CRLT' + '-' + projection\n wcs.wcs.cunit = ['deg', 'deg']\n elif isinstance(frame, HeliographicStonyhurst):\n xcoord = 'HGLN' + '-' + projection\n ycoord = 'HGLT' + '-' + projection\n wcs.wcs.cunit = ['deg', 'deg']\n\n else:\n return None\n\n wcs.wcs.ctype = [xcoord, ycoord]\n\n return 
wcs\n\n\nastropy.wcs.utils.WCS_FRAME_MAPPINGS.append([solar_wcs_frame_mapping])\nastropy.wcs.utils.FRAME_WCS_MAPPINGS.append([solar_frame_to_wcs_mapping])\n"
] | [
[
"numpy.array",
"numpy.all",
"numpy.isfinite"
]
] |
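`_sunpy_frame_class_from_ctypes` above matches on the first four letters of each CTYPEi value and looks for a known pair. A self-contained sketch of that lookup, with frame names as strings instead of the sunpy classes it actually returns:

mapping = {
    "Helioprojective": {"HPLN", "HPLT"},
    "HeliographicStonyhurst": {"HGLN", "HGLT"},
    "HeliographicCarrington": {"CRLN", "CRLT"},
    "Heliocentric": {"SOLX", "SOLY"},
}

def frame_from_ctypes(ctypes):
    prefixes = {c[:4] for c in ctypes}       # truncate e.g. "HPLN-TAN" to "HPLN"
    for name, pair in mapping.items():
        if pair <= prefixes:                  # both axis codes present
            return name

print(frame_from_ctypes(["HPLN-TAN", "HPLT-TAN"]))  # Helioprojective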
icane/demographic-indicators | [
"b1c394a4497e8e4c0189bf4c0518ce38fb873d4c"
] | [
"etl/deaths.py"
] | [
"\"\"\"Deaths indicators.\"\"\"\n\nfrom etl.common import to_json_stat, write_to_file\n\nfrom etl.config_deaths import deaths_cfg as cfg\n\nfrom etlstat.extractor.extractor import xlsx\n\nimport json\n\nimport pandas as pd\n\n\ndef transform(df, periods, prefix=''):\n \"\"\"Slice dataframe. Generate time period column.\n \n df (dataframe): dataset\n periods (int): number of time periods\n prefix (str): prefix for time periods\n \"\"\"\n for i in range(0, len(df)):\n period1 = str(df.loc[i, 'Año'])\n period2 = '{:0>2}'.format(df.loc[i, 'Mes'])\n df.loc[i, 'period'] = prefix + period1 + '-' + period2\n\n df.drop(columns={'Año', 'Mes'}, axis=1, inplace=True)\n df.rename(columns={'period': 'Mes'}, inplace=True)\n df = df.tail(periods)\n df = df.round(2)\n return df\n\ndef replace_month(json_str):\n \"\"\"Replace month number by its name.\"\"\"\n json_str = json_str.replace('-01\"', '-Ene\"')\n json_str = json_str.replace('-02\"', '-Feb\"')\n json_str = json_str.replace('-03\"', '-Mar\"')\n json_str = json_str.replace('-04\"', '-Abr\"')\n json_str = json_str.replace('-05\"', '-May\"')\n json_str = json_str.replace('-06\"', '-Jun\"')\n json_str = json_str.replace('-07\"', '-Jul\"')\n json_str = json_str.replace('-08\"', '-Ago\"')\n json_str = json_str.replace('-09\"', '-Sep\"')\n json_str = json_str.replace('-10\"', '-Oct\"')\n json_str = json_str.replace('-11\"', '-Nov\"')\n json_str = json_str.replace('-12\"', '-Dic\"')\n return json_str\n\n# Read input files\ndata = xlsx(cfg.path.input)\n\n# Datasets\ndf_global = pd.DataFrame()\nindicators = []\nfor key in cfg.series:\n print(key)\n variables = [\n 'Año', 'Mes',\n cfg.series[key].variables[0],\n cfg.series[key].moving_avg[0]]\n if (len(cfg.series[key].variables) == 2):\n variables.append(cfg.series[key].variables[1])\n variables.append(cfg.series[key].moving_avg[1])\n df = data[cfg.file]\\\n [cfg.series[key].sheet][variables].copy()\n\n # Drop NA rows, if any\n df.dropna(axis=0, how='all', inplace=True)\n\n # Rename variables\n df.rename(\n columns={\n cfg.series[key].variables[0]: 'Cantabria',\n cfg.series[key].moving_avg[0]: 'Cantabria_MM'},\n inplace=True)\n if (len(cfg.series[key].variables) == 2):\n df.rename(\n columns={\n cfg.series[key].variables[1]: 'España',\n cfg.series[key].moving_avg[1]: 'España_MM'}, \n inplace=True)\n\n # Remove .0 from Año and Mes\n df['Año'] = df['Año'].astype(str).replace('\\.0', '', regex=True)\n df['Mes'] = df['Mes'].astype(str).replace('\\.0', '', regex=True)\n\n # Merge global dataset\n df_cant = df[['Año', 'Mes', 'Cantabria']].copy()\n df_cant = transform(df_cant, cfg.periods.global_deaths, 'Cantabria - ')\n df_cant.set_index('Mes', inplace=True)\n df_cant = df_cant.transpose()\n df_cant.insert(0, 'Categoria', cfg.series[key].category)\n df_cant[' - Indicadores'] = cfg.series[key].label\n if (len(cfg.series[key].variables) == 2):\n df_esp = df[['Año', 'Mes', 'España']].copy()\n df_esp = transform(df_esp, cfg.periods.global_deaths, 'España - ')\n df_esp.set_index('Mes', inplace=True)\n df_esp = df_esp.transpose()\n df_esp[' - Indicadores'] = cfg.series[key].label\n df_cant = df_cant.merge(df_esp, on=' - Indicadores')\n\n indicators.append(df_cant)\n\n # Generate JSON-Stat dataset\n df = transform(df, cfg.periods.deaths)\n vars = ['Cantabria', 'Cantabria_MM']\n if (len(cfg.series[key].variables) == 2):\n vars.append('España')\n vars.append('España_MM')\n json_file = to_json_stat(\n df,\n ['Mes'],\n vars,\n cfg.series[key].source)\n json_obj = json.loads(json_file)\n 
json_obj['dimension']['Variables']['category']['unit'] = \\\n cfg.series[key].unit\n json_obj['note'] = cfg.series[key].note\n json_file = json.dumps(json_obj)\n json_file = replace_month(json_file)\n write_to_file(json_file, cfg.path.output + cfg.series[key].json)\n\n# Generate CSV global dataset\ndf_global = pd.concat(indicators, axis=0, verify_integrity=False)\ndf_global.to_csv(cfg.path.output + cfg.globals.csv, index=False)\n\nprint('\\nEnd of process. Files generated successfully.')\n"
] | [
[
"pandas.DataFrame",
"pandas.concat"
]
] |
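The `transform()` helper in this row builds a zero-padded "YYYY-MM" period column from Año/Mes and keeps the tail of the series. An equivalent vectorized pandas sketch on a toy frame (the original loops row by row; column values here are made up):

import pandas as pd

df = pd.DataFrame({"Año": [2020, 2020, 2021], "Mes": [11, 12, 1], "valor": [1.0, 2.0, 3.0]})
df["periodo"] = df["Año"].astype(str) + "-" + df["Mes"].astype(str).str.zfill(2)
df = df.drop(columns=["Año", "Mes"]).tail(2).round(2)
print(df)  # keeps the last two periods: 2020-12 and 2021-01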
codecakes/random_games | [
"1e670021ec97a196726e937e658878dc63ba9d34"
] | [
"probability_combinatorics/linear_regression.py"
] | [
"from math import sqrt\nfrom itertools import izip\nfrom numpy import mean\n\nfrom py_variance_std import t_percentile\n\ndef calc_slope(r, sdy, sdx): return r * (float(sdy)/sdx)\n\ndef line_fitting(x_arr, y_arr):\n \"\"\"\n using straight line y = mx + c;\n m(of a sample data points) = Covariance(X,Y)/Covariance(X,X) =\n E[(X - E(X))(Y - E(Y))]/E[(X - E(X))^2]\n Another way: Look at calc_slope given STD Y and STD X and r\n \"\"\"\n xbar = mean(x_arr)\n ybar = mean(y_arr)\n xsqr_bar = mean([i**2 for i in x_arr])\n xybar = mean([i*j for i,j in izip(x_arr, y_arr)])\n #calcuate the slope m\n m = (xbar*ybar - xybar)/(xbar**2 - xsqr_bar)\n #calculate the y intercept\n c = ybar - m*xbar\n return ybar,m,xbar,c\n\ndef trace_line(x_arr, y_arr, x_start = 0):\n y, m, x, c = line_fitting(x_arr, y_arr)\n return [(i, (m*i)+c) for i in [x_start]+list(x_arr)]\n\ndef line_error(**params):\n \"\"\"\n The least squares estimates represent the minimum value;\n http://www.pmean.com/10/LeastSquares.html\n params: x_arr, y_arr, m,c\n \"\"\"\n if 'x_arr' in params and 'y_arr' in params:\n if ('m' in params and 'c' in params):\n m,c = params['m'], params['c']\n else:\n y, m, x, c = line_fitting(params['x_arr'], params['y_arr'])\n #return difference magnitude between y,actual - y,calculated/predicted\n return [(yi - ((m*xi)+c))**2 for yi,xi in izip(params['y_arr'], params['x_arr'])]\n\n\ndef std_error_y_estimate(n, y_line_error_var):\n \"\"\"\n To construct a confidence interval for the slope of the regression line, we need to know the standard error of the sampling distribution of the slope;\n\n n: total samples in x or y;\n y_line_error_var: sum(line_error(**params))\n\n df = n-2 since two variables while calculating linear regression.\n #calculate \\summ(yi - y_cap)^2 variance\n line_error_var = line_error(**params)\n \"\"\"\n return sqrt(float(y_line_error_var)/(n-2))\n\ndef x_line_std(x_arr):\n xbar = mean(x_arr)\n return sqrt(sum([(xi - xbar)**2 for xi in x_arr]))\n\ndef std_error_linear(se_y, x_line_std):\n \"\"\"\n se_y: from std_error_y_estimate(n, y_line_error_var)\n #calculate x - xbar variance and then STD\n xbar = mean(x_arr)\n x_line_std: x_line_std(x_arr, xbar)\n \"\"\"\n return se_y/x_line_std\n\ndef find_std_err_linear(x_arr, y_arr, n_sample):\n #Find SE of SEy/SEx\n #find descriptive params\n ybar,m,xbar,c = line_fitting(x_arr, y_arr)\n #find error in x\n se_x = x_line_std(x_arr)\n #find error in y\n y_line_error = sum(line_error(**dict(x_arr=x_arr, y_arr=y_arr, m=m, c=c)))\n se_y = std_error_y_estimate(n_sample, y_line_error)\n #return standard error\n return std_error_linear(se_y, se_x)\n\ndef r_squared(x_arr, y_arr):\n \"\"\"\n Literally Trying to do sqrt() of scipy.stats import pearsonr val\n using functions in this module: linear_regression.py.\n\n Also called Coefficient of Determination.\n It simply means total_variation_line: How much the best fit line is\n \"fit\" Or Away from the scattered points. 
High value means good fit.\n How much % is explained by the Fitted Line.\n High R^2 = good model, probably profitable,\n Low R^2 = bad model, probably dangerous\n \"\"\"\n y, m, x, c = line_fitting(x_arr, y_arr)\n total_var_y = ([(i-y)**2 for i in y_arr]) #(y-ybar)^2\n #print sum(total_var_y)\n #\\summ(yi - mxi * c)^2/\\summ(yi - ybar)^2\n variation_not_by_line = float(sum(line_error(x_arr=x_arr, y_arr=y_arr, m=m, c=c)))/sum(total_var_y)\n #R sqaured\n return 1 - variation_not_by_line #total variation in x, variation in line\n\ndef calc_tscore_from_r(r2,n):\n \"\"\"\n Hypothesis Testing if relationship is due to sampling error.\n r: coefficient of determination\n n: number of elements in a sample\n Returns: t score\n For looking at critical t val and comparing the t score,\n df = n-2 since there are 2 variables for correlation under test.\n \"\"\"\n return sqrt(r2*float(n-2)/(1 - r2))\n\ndef calc_p_from_tval_from_r(r,n, one_tailed= 0 ):\n return t_percentile(calc_tscore_from_r(r,n), n-2, one_tailed= one_tailed)\n\ndef margin_error_linear(tscore, se): return tscore * se\n\ndef ci_linear(slope, tscore, se):\n margin_error = margin_error_linear(tscore, se)\n return (slope - margin_error, slope + margin_error)\n"
] | [
[
"numpy.mean"
]
] |
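The closed form in `line_fitting` above is m = (x̄·ȳ − mean(xy)) / (x̄² − mean(x²)) and c = ȳ − m·x̄. A quick Python 3 check of that formula against numpy.polyfit (the row itself is Python 2, hence izip):

import numpy as np

x = np.array([0.0, 1.0, 2.0, 3.0])
y = np.array([1.0, 3.0, 5.0, 7.0])   # exactly y = 2x + 1

xbar, ybar = x.mean(), y.mean()
m = (xbar * ybar - (x * y).mean()) / (xbar**2 - (x**2).mean())
c = ybar - m * xbar
print(m, c)                  # 2.0 1.0
print(np.polyfit(x, y, 1))   # [2. 1.]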
LeoRoweBrown/ckvpy | [
"fff27847f5577750ae5860e3fdff81877fa4455a"
] | [
"tools/photon_yield.py"
] | [
"import numpy as np\nfrom scipy.integrate import simps\nimport scipy.constants as const\n\ndef compute(theta_in, f, beta, L, n=None):\n \"\"\"compute number of photons due to Frank-Tamm and Fresen equations\n theta (ndarray/list[float]): Angles in chosen wavelength range\n f (ndarray/list[float]): Frequencies in chosen wavelength range\n n (ndarray/list[float]): Refractive index in chosen wavelength range\n beta (float): Ratio of electron speed to speed of light\n\n TODO: replace n = 1/(beta*np.cos(theta_in)) with actual n_eff\n \"\"\"\n if n is None:\n print(\"Using Cherenkov angle to derive n instead of d(omega)/dk\")\n n = 1/(beta*np.cos(theta_in))\n r_s = np.absolute(\n (n*np.cos(theta_in) - np.sqrt(1-(n*np.sin(theta_in)**2.)))/ \\\n (n*np.cos(theta_in) + np.sqrt(1-(n*np.sin(theta_in)**2.)))\n )\n r_p = np.absolute(\n (n*np.sqrt(1-(n*np.sin(theta_in)**2.)) - np.cos(theta_in))/ \\\n (n*np.sqrt(1-(n*np.sin(theta_in)**2.)) + np.cos(theta_in))\n )\n r_eff =(r_p + r_s)/2.\n # print(r_eff)\n t_eff = 1-r_eff\n print(\"Transmission coeff:\", t_eff)\n # derive angles inside medium with snell's law for Fresnel equation\n # theta_in = np.arcsin(n*np.sin(theta))\n # n_photons = \\\n # (const*fine_structure/(const.hbar*const.c**2.))*\\\n # simps((1-1./(beta**2.*n**2.))*t_eff, x=const.h*f)\n # need even spaced intervals -> interpolate\n # integral is over f\n f_interp = np.linspace(np.min(f), np.max(f), num=30)\n theta_interp = np.interp(f_interp, f, theta_in)\n t_eff_interp = np.interp(f_interp, f, t_eff)\n n_photons = \\\n L*(const.fine_structure/(const.hbar*const.c))* \\\n simps(np.sin(theta_interp)**2.*t_eff_interp*const.h, x=f_interp)\n print(n_photons, \"photons\")\n return n_photons"
] | [
[
"numpy.interp",
"numpy.cos",
"numpy.max",
"numpy.min",
"numpy.sin"
]
] |
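`compute()` above interpolates onto an even frequency grid and then Simpson-integrates sin²θ · T over frequency. A toy run of just that step, with made-up angles and transmission values (scipy.integrate.simpson is the current name for the deprecated simps used in the row):

import numpy as np
from scipy.integrate import simpson

f = np.array([3.0e14, 4.5e14, 7.0e14])   # uneven frequency samples (assumed)
theta = np.array([0.70, 0.72, 0.75])      # Cherenkov angles in radians (assumed)
t_eff = np.array([0.95, 0.94, 0.92])      # Fresnel transmission (assumed)

f_interp = np.linspace(f.min(), f.max(), num=30)   # even grid, as in the row
theta_i = np.interp(f_interp, f, theta)
t_i = np.interp(f_interp, f, t_eff)
integral = simpson(np.sin(theta_i) ** 2 * t_i, x=f_interp)
print(integral)   # the row scales this by L * alpha / (hbar * c) and Planck's h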
andrewschreiber/numpy-saliency | [
"2e788a1150f6e160f2271cbb4f20747559f243c0"
] | [
"model/network.py"
] | [
"import numpy as np\nimport pickle\nfrom model.loss import cross_entropy\nfrom model.layers import Conv2D, Maxpool2D, Dense, Flatten, ReLu, Softmax\n\n\nclass LeNet5:\n \"\"\"Implementation of LeNet 5 for MNIST\n http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf\n \"\"\"\n\n def __init__(self, weights_path=None):\n lr = 0.01\n layers = []\n layers.append(Conv2D(n_filter=6, n_channel=1,\n kernel_size=5, padding=2, stride=1,\n learning_rate=lr, name='conv1'))\n layers.append(ReLu())\n layers.append(Maxpool2D(\n pool_size=2, stride=2, name='maxpool2'))\n layers.append(Conv2D(n_filter=16, n_channel=6,\n kernel_size=5, padding=0, stride=1,\n learning_rate=lr, name='conv3'))\n layers.append(ReLu())\n layers.append(Maxpool2D(\n pool_size=2, stride=2, name='maxpool4'))\n layers.append(Conv2D(n_filter=120, n_channel=16,\n kernel_size=5, padding=0, stride=1,\n learning_rate=lr, name='conv5'))\n layers.append(ReLu())\n layers.append(Flatten())\n layers.append(Dense(\n num_inputs=120, num_outputs=84, learning_rate=lr, name='dense6'))\n layers.append(ReLu())\n layers.append(Dense(\n num_inputs=84, num_outputs=10, learning_rate=lr, name='dense7'))\n layers.append(Softmax())\n self.layers = layers\n if weights_path is not None:\n self._load(weights_path)\n\n def _load(self, weights_path):\n with open(weights_path, 'rb') as handle:\n b = pickle.load(handle)\n self.layers[0].load(b[0]['conv1.weights'], b[0]['conv1.bias'])\n self.layers[3].load(b[3]['conv3.weights'], b[3]['conv3.bias'])\n self.layers[6].load(b[6]['conv5.weights'], b[6]['conv5.bias'])\n self.layers[9].load(b[9]['dense6.weights'], b[9]['dense6.bias'])\n self.layers[11].load(b[11]['dense7.weights'], b[11]['dense7.bias'])\n\n def train(self, training_data, training_labels, batch_size, epochs,\n weights_path):\n print(\"Training LeNet...\")\n total_acc = 0\n for epoch in range(epochs):\n # batch training data\n for batch_index in range(0, training_data.shape[0], batch_size):\n loss = 0\n acc = 0\n\n data = training_data[batch_index:batch_index+batch_size]\n labels = training_labels[batch_index:batch_index+batch_size]\n\n # iterate over batch\n for b in range(len(data)):\n x = data[b]\n y = labels[b]\n\n # forward pass\n output = self.forward(x)\n if np.argmax(output) == np.argmax(y):\n acc += 1\n total_acc += 1\n loss += cross_entropy(output, y)\n\n # backward pass\n # update network on each datapoint for simplicity\n dy = y\n for l in range(len(self.layers)-1, -1, -1):\n dout = self.layers[l].backward(dy)\n dy = dout\n\n # print performance\n loss /= len(data)\n batch_acc = float(acc)/float(len(data))\n train_acc = float(total_acc) / \\\n float((batch_index+len(data)+epoch*len(training_data)))\n\n print(('| Epoch: {0:d}/{1:d} | Iter:{2:d} | Loss: {3:.2f} | ' +\n 'BatchAcc: {4:.2f} | TrainAcc: {5:.2f} |')\n .format(epoch+1, epochs, batch_index+len(data),\n loss, batch_acc, train_acc))\n\n # save parameters after each epoch\n print(\"Saving model to\", weights_path)\n layers = [layer.parameters() for layer in self.layers]\n with open(weights_path, 'wb') as handle:\n pickle.dump(layers, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n def forward(self, x):\n for l in range(len(self.layers)):\n output = self.layers[l].forward(x)\n x = output\n return output\n\n def predict(self, x):\n output = self.forward(x)\n digit = np.argmax(output)\n probability = output[0, digit]\n return digit, probability\n\n def test(self, data, labels):\n print(\"Testing LeNet...\")\n total_acc = 0\n test_size = len(data)\n for i in range(test_size):\n x = data[i]\n y = 
labels[i]\n if np.argmax(self.forward(x)) == np.argmax(y):\n total_acc += 1\n\n print(\"== Correct: {}/{}. Accuracy: {} ==\"\n .format(total_acc, test_size, total_acc/test_size))\n"
] | [
[
"numpy.argmax"
]
] |
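`LeNet5.forward` above threads one activation through the layer list, each layer exposing `forward(x)`. The same chaining pattern with two stand-in layers instead of the real Conv2D/Dense classes:

import numpy as np

class Scale:
    def __init__(self, s): self.s = s
    def forward(self, x): return x * self.s

class Shift:
    def __init__(self, b): self.b = b
    def forward(self, x): return x + self.b

layers = [Scale(2.0), Shift(1.0)]
x = np.array([1.0, 2.0])
for layer in layers:          # same loop body as LeNet5.forward
    x = layer.forward(x)
print(x)                      # [3. 5.]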
Seb-Good/deepecg | [
"c99fbe80718ee9969936154ae2c1a04d81c9b246"
] | [
"deepecg/training/model/disc/model.py"
] | [
"\"\"\"\nmodel.py\n--------\nThis module provides a class and methods for building and managing a model with tensorflow.\nBy: Sebastian D. Goodfellow, Ph.D., 2018\n\"\"\"\n\n# Compatibility imports\nfrom __future__ import absolute_import, division, print_function\n\n# 3rd party imports\nimport os\nimport sys\nimport json\nimport pickle\nimport tensorflow as tf\n\n# Local imports\nfrom deepecg.training.model.disc.graph import Graph\nfrom deepecg.training.networks.deep_ecg_v1 import DeepECGV1\nfrom deepecg.training.networks.deep_ecg_v2 import DeepECGV2\nfrom deepecg.training.networks.deep_ecg_v3 import DeepECGV3\nfrom deepecg.training.networks.deep_ecg_v4 import DeepECGV4\nfrom deepecg.training.networks.deep_ecg_v5 import DeepECGV5\nfrom deepecg.training.networks.deep_ecg_v6 import DeepECGV6\nfrom deepecg.training.networks.deep_ecg_v7 import DeepECGV7\n\n\nclass Model(object):\n\n \"\"\"A class for managing a model through training.\"\"\"\n\n def __init__(self, model_name, network_name, network_parameters, save_path, data_path, max_to_keep):\n\n # Set input parameters\n self.model_name = model_name\n self.network_name = network_name\n self.network_parameters = network_parameters\n self.save_path = os.path.join(save_path, self.model_name)\n self.data_path = data_path\n self.max_to_keep = max_to_keep\n\n # Set attributes\n self.sess = None\n self.graph = None\n self.network = None\n\n # Create project file structure\n self._create_folder_structure()\n\n # Save parameters\n self._save_parameters()\n\n # Initialize graph\n self.initialize_graph()\n\n def initialize_graph(self):\n\n # Get neural network\n self.network = self._get_neural_network()\n\n # Save network object\n self._pickle_network()\n\n # Build computational graph\n self.graph = Graph(network=self.network, save_path=self.save_path, data_path=self.data_path,\n max_to_keep=self.max_to_keep)\n\n # Start session\n self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n\n # Initialize global variables\n self.sess.run(self.graph.init_global)\n\n @classmethod\n def build_training_graph(cls, save_path):\n \"\"\"Build training graph.\"\"\"\n # Import model parameters\n model_parameters = cls._import_model_parameters(save_path=save_path)\n\n # Import network parameters\n network_parameters = cls._import_network_parameters(save_path=save_path)\n\n # Initialize Model\n return cls(model_name=model_parameters['model_name'], network_name=model_parameters['network_name'],\n network_parameters=network_parameters, save_path=os.path.dirname(save_path),\n data_path=model_parameters['data_path'], max_to_keep=model_parameters['max_to_keep'])\n\n def restore(self, global_step):\n \"\"\"Restore model from checkpoint.\"\"\"\n # Initialize graph\n if self.sess._closed:\n self.initialize_graph()\n\n # Restore checkpoint\n self.graph.saver.restore(sess=self.sess, save_path=os.path.join(self.save_path, 'checkpoints', global_step))\n\n def close_session(self):\n \"\"\"Close any active sessions.\"\"\"\n try:\n self.sess.close()\n except AttributeError:\n print('No active Tensorflow session.')\n\n def _save_parameters(self):\n \"\"\"Save model and network parameters to JSON.\"\"\"\n # Save model parameters\n self._save_model_parameters()\n\n # Save network parameters\n self._save_network_parameters()\n\n def _save_model_parameters(self):\n \"\"\"Save model parameters to JSON.\"\"\"\n # Get model parameters\n model_parameters = dict(model_name=self.model_name, network_name=self.network_name, save_path=self.save_path,\n data_path=self.data_path, 
max_to_keep=self.max_to_keep)\n\n # Save model parameters to JSON\n if not os.path.exists(os.path.join(self.save_path, 'parameters', 'model_parameters.json')):\n with open(os.path.join(self.save_path, 'parameters', 'model_parameters.json'), 'w') as file:\n json.dump(model_parameters, file)\n\n def _save_network_parameters(self):\n \"\"\"Save network parameters to JSON.\"\"\"\n if not os.path.exists(os.path.join(self.save_path, 'parameters', 'network_parameters.json')):\n with open(os.path.join(self.save_path, 'parameters', 'network_parameters.json'), 'w') as file:\n json.dump(self.network_parameters, file)\n\n def _get_neural_network(self):\n \"\"\"Instantiate neural network.\"\"\"\n # Convert string to class\n network = getattr(sys.modules[__name__], self.network_name)\n\n # Instantiate network class with network parameters\n network = network(**self.network_parameters)\n\n return network\n\n def _create_folder_structure(self):\n\n # Set list of folders\n folders = ['train', 'val', 'checkpoints', 'network', 'graph', 'logs', 'parameters']\n\n # Main project directory\n if not os.path.exists(self.save_path):\n os.makedirs(self.save_path)\n\n # Loop through and create project folders\n for folder in folders:\n self._create_folder(folder=folder)\n\n def _create_folder(self, folder):\n \"\"\"Create folder.\"\"\"\n if not os.path.exists(os.path.join(self.save_path, folder)):\n os.makedirs(os.path.join(self.save_path, folder))\n\n def _pickle_network(self):\n \"\"\"Pickle graph.\"\"\"\n with open(os.path.join(self.save_path, 'network', 'network.obj'), 'wb') as file:\n pickle.dump(obj=self.network, file=file)\n\n @staticmethod\n def _import_model_parameters(save_path):\n \"\"\"Import model parameters.\"\"\"\n with open(os.path.join(save_path, 'parameters', 'model_parameters.json')) as file:\n return json.load(file)\n\n @staticmethod\n def _import_network_parameters(save_path):\n \"\"\"Import network parameters.\"\"\"\n with open(os.path.join(save_path, 'parameters', 'network_parameters.json')) as file:\n return json.load(file)\n"
] | [
[
"tensorflow.ConfigProto"
]
] |
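The Model class above persists model_parameters.json once and rebuilds constructor kwargs from it in `build_training_graph`. A minimal round-trip sketch of that save/load pattern (the temp directory and parameter values here are assumptions, not the row's defaults):

import json, os, tempfile

save_path = tempfile.mkdtemp()
params = {"model_name": "m1", "network_name": "DeepECGV1", "max_to_keep": 5}

os.makedirs(os.path.join(save_path, "parameters"), exist_ok=True)
path = os.path.join(save_path, "parameters", "model_parameters.json")
if not os.path.exists(path):        # write-once, as in _save_model_parameters
    with open(path, "w") as f:
        json.dump(params, f)

with open(path) as f:               # read back, as in _import_model_parameters
    print(json.load(f)["network_name"])   # DeepECGV1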
garibaldu/boundary-seekers | [
"441fea01e93de882bf22e0deb411f0b10602fa37"
] | [
"Testing/ND-Testing.py"
] | [
"import numpy as np\nimport tensorflow as tf\n\ndef __perms(n):\n if not n:\n return\n\n p = []\n\n for i in range(0, 2**n):\n s = bin(i)[2:]\n s = \"0\" * (n-len(s)) + s\n\n s_prime = np.array(list(map(lambda x: int(x), list(s))))\n p.append(s_prime)\n\n return p\n\ndef care(normal, bias, example):\n z = np.dot(normal, example) + bias\n return 1.0/(1.0 + np.exp(-z))\n\ndef deci(normal, bias, example):\n z = np.dot(normal, example) + bias\n return 1.0/(1.0 + np.exp(-z))\n\ndef sigmoid(phi):\n return 1.0/(1.0 + tf.exp(-phi))\n\ndef compute_penalty(weights, size):\n mask = np.concatenate((np.array([0], dtype=np.float32), np.ones(size, dtype=np.float32)))\n return tf.reduce_sum(tf.abs(tf.multiply(mask, weights)))\n\ndef train_boundary_hunter(points, out, iterations):\n in_size = len(points[0])\n out_size = 1\n\n inputs = tf.placeholder('float32', [in_size])\n targets = tf.placeholder('float32', [out_size])\n\n hidden_weights = tf.Variable(np.random.uniform(low=-0.5, high=0.5, size=(1, in_size+1)), dtype='float32')\n gate_weights = tf.Variable(np.random.uniform(low=-0.5, high=0.5, size=(1, in_size+1)), dtype='float32')\n byas = tf.Variable(np.random.uniform(low=-0.5, high=0.5, size=(1)), dtype='float32')\n #output_weights = tf.Variable(np.random.uniform(low=-0.5, high=0.5, size=(out_size, num_centroids + 1)), dtype='float32')\n\n inputs_prime = tf.concat([[1.0], inputs], axis=0)\n\n # Peform Computation\n # Peform Computation\n prob = tf.reduce_sum(tf.multiply(inputs_prime, hidden_weights), 1)\n\n g = sigmoid(tf.reduce_sum(tf.multiply(inputs_prime, gate_weights), 1))\n #hidden_out = tf.add(byas, tf.multiply(g, tf.subtract(prob, byas)))\n hidden_out = sigmoid(tf.add(g * prob, (1-g) * byas))\n\n reward = tf.log(compute_penalty(hidden_weights, in_size) + compute_penalty(gate_weights, in_size))\n\n targets_prime = tf.expand_dims(targets, 1)\n output = hidden_out\n errors = -(targets_prime * tf.log(output) + (1 -targets_prime) * tf.log(1 - output))#tf.pow(tf.subtract(tf.expand_dims(targets, 1), output), 2.0)\n error = tf.reduce_sum(errors)\n minimize = error - 0.02 * reward\n\n train_op = tf.train.GradientDescentOptimizer(0.01).minimize(minimize)\n #clip_byas = tf.assign(byas, tf.clip_by_value(byas, 0, 1))\n\n model = tf.global_variables_initializer()\n\n with tf.Session() as session:\n session.run(model)\n \n for e in range(iterations):\n for d in range(len(points)):\n session.run(train_op, feed_dict={inputs: points[d], targets: [out[d]]})\n #session.run(clip_byas)\n \n\n if e % 10 == 0:\n print(session.run(byas))\n err = 0\n for d in range(len(points)):\n err += session.run(error, feed_dict={inputs: points[d], targets: [out[d]]})\n print(err)\n print(session.run(reward))\n print()\n\n\n gates = session.run(gate_weights)[0]\n byas = session.run(byas)[0]\n boundarys = session.run(hidden_weights)[0]\n\n return (boundarys, gates, byas)\n\ndef get_final_class(predictions):\n tally_0 = 0\n tally_1 = 0\n\n for p in predictions:\n if (not p == None) and p >= 0.5:\n tally_1 += 1\n elif (not p == None) and p < 0.5:\n tally_0 += 1\n\n if tally_0 == 0 and tally_1 == 0:\n return None\n \n return 0 if tally_0 > tally_1 else 1\n\ndef run_boundary_hunters(boundarys, gates, points, out):\n in_size = len(points[0])\n out_size = 1\n \n inputs = tf.placeholder('float32', [in_size])\n targets = tf.placeholder('float32', [out_size])\n hidden_weights = tf.placeholder('float32', [None])\n gate_weights = tf.placeholder('float32', [None])\n\n inputs_prime = tf.concat([[1.0], inputs], axis=0)\n\n g = 
sigmoid(tf.reduce_sum(tf.multiply(inputs_prime, gate_weights)))\n prob = sigmoid(tf.reduce_sum(tf.multiply(inputs_prime, hidden_weights)))\n\n model = tf.global_variables_initializer()\n\n unsure = 0\n guessed = 0\n correct = 0\n with tf.Session() as session:\n session.run(model)\n\n for d in range(len(points)):\n predictions = []\n for b in range(len(boundarys)):\n prediction = None\n care = session.run(g, feed_dict={inputs: points[d], hidden_weights: boundarys[b], gate_weights: gates[b]})\n\n if care > 0.5:\n prediction = session.run(prob, feed_dict={inputs: points[d], hidden_weights: boundarys[b], gate_weights: gates[b]})\n predictions.append(prediction)\n\n p = get_final_class(predictions)\n #print(predictions, \": \", p)\n if not p == None:\n guessed += 1\n \n if p == out[d]:\n correct += 1\n elif p == None:\n unsure += 1\n\n return float(correct)/float(guessed), float(unsure)/float(len(points))\n\nN = 7\n# Generate All Points On Hypercube\nexamples = __perms(N)\ntargets = []\n\n# Generate Boundary Hunter\nbias = np.random.uniform(0, 1, 1)\ndecision = np.random.uniform(-1, 1, N)\ndecision_b = np.random.uniform(-1, 1, 1)\ncaring = np.random.uniform(-1, 1, N)\ncaring_b = np.random.uniform(-1, 1, 1)\n\nuncertian = 0\nclass1 = 0\nclass0 = 0\n\nfor example in examples:\n clas = None\n c = care(caring, caring_b, example)\n\n if c < 0.5:\n uncertian += 1\n r = np.random.rand(1)\n if r > bias:\n clas = 1\n else:\n clas = 0\n else:\n d = deci(decision, decision_b, example)\n if d >= 0.5:\n clas = 1\n class1 += 1\n else:\n clas=0\n class0 += 1\n targets.append(clas)\n\nif class0 == 0 or class1 == 0:\n print(\"Class 0: \", class0)\n print(\"Class 1: \", class1)\n print(\"Err\")\n raise \"GSFE\"\n\n\nbh = train_boundary_hunter(examples, targets, 20000)\n\nprint(\"Uncertian: \", uncertian)\nprint(\"Class 0: \", class0)\nprint(\"Class 1: \", class1)\n\nprint(\"Bias: \", bias)\nprint(\"{}, {}\".format(decision_b, decision))\nprint(\"{}, {}\".format(caring_b, caring))\nprint(run_boundary_hunters([np.concatenate((decision_b, decision))], [np.concatenate((caring_b, caring))], examples, targets))\n\nprint()\nprint(bh)\nprint(run_boundary_hunters([bh[0]], [bh[1]], examples, targets))\n\n"
] | [
[
"numpy.random.uniform",
"numpy.ones",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"numpy.concatenate",
"tensorflow.multiply",
"tensorflow.expand_dims",
"tensorflow.add",
"numpy.exp",
"tensorflow.exp",
"numpy.random.rand",
"tensorflow.concat",
"tensorflow.Session",
"numpy.array",
"tensorflow.train.GradientDescentOptimizer",
"numpy.dot",
"tensorflow.log",
"tensorflow.reduce_sum"
]
] |
okkhoy/rlpy | [
"af25d2011fff1d61cb7c5cc8992549808f0c6103"
] | [
"examples/pacman/independent.py"
] | [
"\"\"\"\nCart-pole balancing with independent discretization\n\"\"\"\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\nfrom future import standard_library\nstandard_library.install_aliases()\nfrom rlpy.Domains import Pacman\nfrom rlpy.Agents import Q_Learning\nfrom rlpy.Representations import *\nfrom rlpy.Policies import eGreedy\nfrom rlpy.Experiments import Experiment\nimport numpy as np\nfrom hyperopt import hp\n\nparam_space = {'discretization': hp.quniform(\"discretization\", 3, 50, 1),\n 'lambda_': hp.uniform(\"lambda_\", 0., 1.),\n 'boyan_N0': hp.loguniform(\"boyan_N0\", np.log(1e1), np.log(1e5)),\n 'initial_learn_rate': hp.loguniform(\"initial_learn_rate\", np.log(5e-2), np.log(1))}\n\n\ndef make_experiment(\n exp_id=1, path=\"./Results/Temp/{domain}/{agent}/{representation}/\",\n lambda_=0.9,\n boyan_N0=22.36,\n initial_learn_rate=.068,\n discretization=9):\n opt = {}\n opt[\"path\"] = path\n opt[\"exp_id\"] = exp_id\n opt[\"max_steps\"] = 150000\n opt[\"num_policy_checks\"] = 30\n opt[\"checks_per_policy\"] = 1\n\n domain = Pacman()\n opt[\"domain\"] = domain\n representation = IncrementalTabular(\n domain,\n discretization=discretization)\n policy = eGreedy(representation, epsilon=0.1)\n opt[\"agent\"] = Q_Learning(\n policy, representation, discount_factor=domain.discount_factor,\n lambda_=0.9, initial_learn_rate=initial_learn_rate,\n learn_rate_decay_mode=\"boyan\", boyan_N0=boyan_N0)\n experiment = Experiment(**opt)\n return experiment\n\nif __name__ == '__main__':\n #from Tools.run import run_profiled\n # run_profiled(make_experiment)\n experiment = make_experiment(1)\n experiment.run(visualize_steps=True)\n experiment.plot()\n # experiment.save()\n"
] | [
[
"numpy.log"
]
] |
rsampaths16/ReRes | [
"51089c806c57087eb94d9a659036ebed88e96f13"
] | [
"processing/gray-scale-processing.py"
] | [
"import numpy\nimport scipy\nimport glob\nfrom matplotlib import pyplot\nfrom scipy import misc\nfrom numpy import random\n\nrandom.seed(0)\nSIZE = 128\nORIGINAL = '../data/offline-data/black-and-white-images/original'\nHIGH = '../data/offline-data/black-and-white-images/train/high'\nLOW = '../data/offline-data/black-and-white-images/train/low'\n\ndef sample_patch(image):\n x = random.randint(0, image.shape[0] - SIZE, dtype=numpy.int)\n y = random.randint(0, image.shape[1] - SIZE, dtype=numpy.int)\n high = numpy.copy(image[x:x+SIZE, y:y+SIZE])\n low = numpy.copy(high)\n low = misc.imresize(low, (SIZE // 4, SIZE // 4))\n low = misc.imresize(low, (SIZE, SIZE))\n return low, high\n\nunique_id = 1\nfor image_path in glob.glob(ORIGINAL + '/*.jpg'):\n print(image_path)\n sample = 1\n image = misc.imread(image_path)\n while sample > 0:\n low, high = sample_patch(image)\n misc.imsave(HIGH + '/' + str(unique_id) + '.jpg', high)\n misc.imsave(LOW + '/' + str(unique_id) + '.jpg', low)\n sample -= 1\n unique_id += 1\n"
] | [
[
"scipy.misc.imresize",
"numpy.random.seed",
"numpy.copy",
"numpy.random.randint",
"scipy.misc.imread"
]
] |
franyancr/lenstronomy | [
"3a7b33512a474bf1796d23276d9028b580580cf1"
] | [
"lenstronomy/PointSource/point_source_types.py"
] | [
"import numpy as np\nfrom lenstronomy.LensModel.Solver.lens_equation_solver import LensEquationSolver\n\n\nclass Unlensed(object):\n \"\"\"\n class of a single point source in the image plane, aka star\n parameters: ra_image, dec_image, point_amp\n\n \"\"\"\n def __init__(self):\n pass\n\n def image_position(self, kwargs_ps, kwargs_lens=None, **kwargs): # kwargs_lens=None, min_distance=0.01, search_window=5, precision_limit=10**(-10), num_iter_max=100, x_center=0, y_center=0, magnification_limit=None):\n \"\"\"\n\n :param ra_image:\n :param dec_image:\n :param point_amp:\n :return:\n \"\"\"\n ra_image = kwargs_ps['ra_image']\n dec_image = kwargs_ps['dec_image']\n return np.array(ra_image), np.array(dec_image)\n\n def source_position(self, kwargs_ps, kwargs_lens=None):\n ra_image = kwargs_ps['ra_image']\n dec_image = kwargs_ps['dec_image']\n return np.array(ra_image), np.array(dec_image)\n\n def image_amplitude(self, kwargs_ps, kwargs_lens=None, **kwargs): # , x_pos=None, y_pos=None, min_distance=0.01, search_window=5, precision_limit=10**(-10), num_iter_max=100, x_center=0, y_center=0, magnification_limit=None):\n point_amp = kwargs_ps['point_amp']\n return np.array(point_amp)\n\n def source_amplitude(self, kwargs_ps, kwargs_lens=None):\n point_amp = kwargs_ps['point_amp']\n return np.array(point_amp)\n\n def update_lens_model(self, lens_model_class):\n pass\n\n\nclass LensedPositions(object):\n \"\"\"\n class of a single point source in the image plane, aka star\n parameters: ra_image, dec_image, point_amp\n\n \"\"\"\n def __init__(self, lensModel, fixed_magnification=False, additional_image=False):\n self._lensModel = lensModel\n self._solver = LensEquationSolver(lensModel)\n self._fixed_magnification = fixed_magnification\n self._additional_image = additional_image\n if fixed_magnification is True and additional_image is True:\n Warning('The combination of fixed_magnification=True and additional_image=True is not optimal for the current computation.'\n 'If you see this warning, please approach the developers.')\n\n def image_position(self, kwargs_ps, kwargs_lens, min_distance=0.01, search_window=5, precision_limit=10**(-10),\n num_iter_max=100, x_center=0, y_center=0, magnification_limit=None):\n \"\"\"\n\n :param ra_image:\n :param dec_image:\n :param point_amp:\n :return:\n \"\"\"\n if self._additional_image is True:\n ra_source, dec_source = self.source_position(kwargs_ps, kwargs_lens)\n ra_image, dec_image = self._solver.image_position_from_source(ra_source, dec_source, kwargs_lens,\n min_distance=min_distance,\n search_window=search_window,\n precision_limit=precision_limit,\n num_iter_max=num_iter_max, x_center=x_center,\n y_center=y_center, magnification_limit=magnification_limit)\n else:\n ra_image = kwargs_ps['ra_image']\n dec_image = kwargs_ps['dec_image']\n return np.array(ra_image), np.array(dec_image)\n\n def source_position(self, kwargs_ps, kwargs_lens):\n ra_image = kwargs_ps['ra_image']\n dec_image = kwargs_ps['dec_image']\n x_source, y_source = self._lensModel.ray_shooting(ra_image, dec_image, kwargs_lens)\n x_source = np.mean(x_source)\n y_source = np.mean(y_source)\n return np.array(x_source), np.array(y_source)\n\n def image_amplitude(self, kwargs_ps, kwargs_lens=None, x_pos=None, y_pos=None, **kwargs): # min_distance=0.01, search_window=5, precision_limit=10**(-10),num_iter_max=100, x_center=0, y_center=0):\n if self._fixed_magnification:\n if x_pos is not None and y_pos is not None:\n ra_image, dec_image = x_pos, y_pos\n else:\n ra_image, dec_image = 
self.image_position(kwargs_ps, kwargs_lens)\n mag = self._lensModel.magnification(ra_image, dec_image, kwargs_lens)\n point_amp = kwargs_ps['source_amp'] * np.abs(mag)\n else:\n point_amp = kwargs_ps['point_amp']\n if x_pos is not None:\n point_amp = _expand_to_array(point_amp, len(x_pos))\n #if np.atleast_1d(point_amp):\n # pass\n return np.array(point_amp)\n\n def source_amplitude(self, kwargs_ps, kwargs_lens=None):\n if self._fixed_magnification:\n source_amp = kwargs_ps['source_amp']\n else:\n ra_image, dec_image = kwargs_ps['ra_image'], kwargs_ps['dec_image']\n mag = self._lensModel.magnification(ra_image, dec_image, kwargs_lens)\n point_amp = kwargs_ps['point_amp']\n source_amp = np.mean(np.array(point_amp) / np.array(np.abs(mag)))\n return np.array(source_amp)\n\n def update_lens_model(self, lens_model_class):\n self._lensModel = lens_model_class\n self._solver = LensEquationSolver(lens_model_class)\n\n\nclass SourcePositions(object):\n \"\"\"\n class of a single point source in the image plane, aka star\n parameters: ra_image, dec_image, point_amp\n\n \"\"\"\n def __init__(self, lensModel, fixed_magnification=True):\n self._lensModel = lensModel\n self._solver = LensEquationSolver(lensModel)\n self._fixed_magnification = fixed_magnification\n\n def image_position(self, kwargs_ps, kwargs_lens, min_distance=0.01, search_window=5, precision_limit=10**(-10),\n num_iter_max=100, x_center=0, y_center=0, magnification_limit=None):\n \"\"\"\n\n :param ra_image:\n :param dec_image:\n :param point_amp:\n :return:\n \"\"\"\n ra_source, dec_source = self.source_position(kwargs_ps, kwargs_lens)\n ra_image, dec_image = self._solver.image_position_from_source(ra_source, dec_source, kwargs_lens,\n min_distance=min_distance,\n search_window=search_window,\n precision_limit=precision_limit,\n num_iter_max=num_iter_max, x_center=x_center,\n y_center=y_center, magnification_limit=magnification_limit)\n return ra_image, dec_image\n\n def source_position(self, kwargs_ps, kwargs_lens=None):\n ra_source = kwargs_ps['ra_source']\n dec_source = kwargs_ps['dec_source']\n return np.array(ra_source), np.array(dec_source)\n\n def image_amplitude(self, kwargs_ps, kwargs_lens=None, x_pos=None, y_pos=None, min_distance=0.01, search_window=5,\n precision_limit=10**(-10), num_iter_max=100, x_center=0, y_center=0, magnification_limit=None):\n if self._fixed_magnification:\n if x_pos is not None and y_pos is not None:\n ra_image, dec_image = x_pos, y_pos\n else:\n ra_image, dec_image = self.image_position(kwargs_ps, kwargs_lens, min_distance=min_distance,\n search_window=search_window,\n precision_limit=precision_limit,\n num_iter_max=num_iter_max, x_center=x_center,\n y_center=y_center, magnification_limit=magnification_limit)\n mag = self._lensModel.magnification(ra_image, dec_image, kwargs_lens)\n point_amp = kwargs_ps['source_amp'] * np.abs(mag)\n else:\n point_amp = kwargs_ps['point_amp']\n if x_pos is not None:\n point_amp = _expand_to_array(point_amp, len(x_pos))\n return np.array(point_amp)\n\n def source_amplitude(self, kwargs_ps, kwargs_lens=None):\n if self._fixed_magnification:\n source_amp = kwargs_ps['source_amp']\n else:\n ra_image, dec_image = self.image_position(kwargs_ps, kwargs_lens)\n mag = self._lensModel.magnification(ra_image, dec_image, kwargs_lens)\n point_amp = kwargs_ps['point_amp']\n source_amp = np.mean(np.array(point_amp) / np.array(mag))\n return np.array(source_amp)\n\n def update_lens_model(self, lens_model_class):\n self._lensModel = lens_model_class\n self._solver = 
LensEquationSolver(lens_model_class)\n\n\nclass PointSourceCached(object):\n \"\"\"\n\n \"\"\"\n def __init__(self, point_source_model, save_cache=False):\n self._model = point_source_model\n self._save_cache = save_cache\n\n def delete_lens_model_cache(self):\n if hasattr(self, '_x_image'):\n del self._x_image\n if hasattr(self, '_y_image'):\n del self._y_image\n if hasattr(self, '_x_source'):\n del self._x_source\n if hasattr(self, '_y_source'):\n del self._y_source\n\n def set_save_cache(self, bool):\n self._save_cache = bool\n\n def update_lens_model(self, lens_model_class):\n self._model.update_lens_model(lens_model_class)\n\n def image_position(self, kwargs_ps, kwargs_lens=None, min_distance=0.05, search_window=10,\n precision_limit=10**(-10), num_iter_max=100, x_center=0, y_center=0, magnification_limit=None):\n \"\"\"\n\n :param ra_image:\n :param dec_image:\n :param point_amp:\n :return:\n \"\"\"\n\n if not self._save_cache or not hasattr(self, '_x_image') or not hasattr(self, '_y_image'):\n self._x_image, self._y_image = self._model.image_position(kwargs_ps, kwargs_lens, min_distance=min_distance,\n search_window=search_window,\n precision_limit=precision_limit,\n num_iter_max=num_iter_max, x_center=x_center,\n y_center=y_center, magnification_limit=magnification_limit)\n return self._x_image, self._y_image\n\n def source_position(self, kwargs_ps, kwargs_lens=None):\n if not self._save_cache or not hasattr(self, '_x_source') or not hasattr(self, '_y_source'):\n self._x_source, self._y_source = self._model.source_position(kwargs_ps, kwargs_lens)\n return self._x_source, self._y_source\n\n def image_amplitude(self, kwargs_ps, kwargs_lens=None, min_distance=0.01, search_window=5, precision_limit=10**(-10),\n num_iter_max=100, x_center=0, y_center=0, magnification_limit=None):\n x_pos, y_pos = self.image_position(kwargs_ps, kwargs_lens, min_distance=min_distance,\n search_window=search_window,\n precision_limit=precision_limit,\n num_iter_max=num_iter_max, x_center=x_center,\n y_center=y_center, magnification_limit=magnification_limit)\n return self._model.image_amplitude(kwargs_ps, kwargs_lens, x_pos=x_pos, y_pos=y_pos)\n\n def source_amplitude(self, kwargs_ps, kwargs_lens=None):\n return self._model.source_amplitude(kwargs_ps, kwargs_lens)\n\n\ndef _expand_to_array(array, num):\n \"\"\"\n\n :param array: float/int or numpy array\n :param num: number of array entries expected in array\n :return: array of size num\n \"\"\"\n if np.isscalar(array):\n return np.ones(num) * array\n elif len(array) < num:\n out = np.zeros(num)\n out[0:len(array)] = array\n return out\n else:\n return array"
] | [
[
"numpy.ones",
"numpy.zeros",
"numpy.abs",
"numpy.array",
"numpy.isscalar",
"numpy.mean"
]
] |
dirty-cat/categorical-encoding | [
"fb0a1c4216533034e7516efc0698c7e4477b0243"
] | [
"benchmarks/supervectorizer_tuning.py"
] | [
"\"\"\"\nPerforms a GridSearch to find the best parameters for the SuperVectorizer\namong a selection.\n\"\"\"\n\nimport logging\nimport pandas as pd\n\nfrom sklearn.ensemble import RandomForestRegressor, RandomForestClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.pipeline import Pipeline\n\nfrom dirty_cat import SuperVectorizer\nfrom dirty_cat.datasets import fetch_open_payments, fetch_drug_directory, \\\n fetch_road_safety, fetch_midwest_survey, fetch_medical_charge, \\\n fetch_employee_salaries, fetch_traffic_violations\n\nfrom pathlib import Path\nfrom functools import wraps\nfrom datetime import datetime\nfrom typing import List, Tuple\n\n\ndef get_classification_datasets() -> List[Tuple[dict, str]]:\n return [\n (fetch_open_payments(), 'open_payments'),\n # (fetch_drug_directory(), 'drug_directory),\n (fetch_road_safety(), 'road_safety'),\n (fetch_midwest_survey(), 'midwest_survey'),\n (fetch_traffic_violations(), 'traffic_violations'),\n ]\n\n\ndef get_regression_datasets() -> List[Tuple[dict, str]]:\n return [\n (fetch_medical_charge(), 'medical_charge'),\n (fetch_employee_salaries(), 'employee_salaries'),\n ]\n\n\ndef get_dataset(info) -> Tuple[pd.DataFrame, pd.Series]:\n df = pd.read_csv(info['path'], **info['read_csv_kwargs'])\n y = df[info['y']]\n X = df.drop(info['y'], axis=1).astype(str)\n return X, y\n\n\ndef set_logging(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n logging_level = logging.DEBUG\n\n logger = logging.getLogger()\n logger.setLevel(logging_level)\n\n formatter = logging.Formatter('%(asctime)s - [%(levelname)s] %(message)s')\n formatter.datefmt = '%m/%d/%Y %H:%M:%S'\n\n path = Path(__file__).parent / f'tuning_{str(datetime.now())[:10]}.log'\n\n fh = logging.FileHandler(filename=path, mode='w')\n fh.setLevel(logging_level)\n fh.setFormatter(formatter)\n\n # sh = logging.StreamHandler(sys.stdout)\n # sh.setLevel(logging_level)\n # sh.setFormatter(formatter)\n\n logger.addHandler(fh)\n # logger.addHandler(sh)\n\n return func(*args, **kwargs)\n return wrapper\n\n\n@set_logging\ndef main():\n logging.info('Launching !')\n\n card_possibilities = [20, 30, 40, 50]\n n_comp_possibilities = [10, 30, 50]\n\n logging.debug('Creating pipelines')\n regression_pipeline = Pipeline([\n ('sv', SuperVectorizer()),\n ('estimator', RandomForestRegressor()),\n ])\n classification_pipeline = Pipeline([\n ('sv', SuperVectorizer()),\n ('estimator', RandomForestClassifier()),\n ])\n\n logging.debug(f'With cardinality possibilities: {card_possibilities} '\n f'and n_components possibilities: {n_comp_possibilities}')\n for pipeline, datasets in zip(\n [\n regression_pipeline,\n classification_pipeline,\n ],\n [\n get_regression_datasets(),\n get_classification_datasets(),\n ]\n ):\n for info, name in datasets:\n X, y = get_dataset(info)\n if name != 'traffic_violations':\n continue\n\n csv_path = Path('.').resolve() / f'{name}_results.csv'\n if csv_path.exists():\n # If the results already exist, we'll skip to the next\n logging.debug(f'Skipping {name} as {csv_path!s} was found')\n continue\n\n logging.debug(f'Running search on {name}')\n grid = GridSearchCV(\n estimator=pipeline,\n param_grid={\n 'sv__cardinality_threshold': card_possibilities,\n 'sv__high_card_str_transformer__n_components': n_comp_possibilities,\n },\n n_jobs=30,\n )\n grid.fit(X, y)\n\n df = pd.DataFrame(grid.cv_results_)\n df.to_csv(csv_path)\n logging.info(f'Saved search results in {csv_path!s}')\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame",
"sklearn.ensemble.RandomForestRegressor",
"sklearn.model_selection.GridSearchCV",
"sklearn.ensemble.RandomForestClassifier"
]
] |
viettriit2110/face_recognition | [
"0e1821af6538c573ed4a87acc361c44900f849eb"
] | [
"examples/face_recognition_svm.py"
] | [
"# Train multiple images per person\r\n# Find and recognize faces in an image using a SVC with scikit-learn\r\n\r\n\"\"\"\r\nStructure:\r\n <test_image>.jpg\r\n <train_dir>/\r\n <person_1>/\r\n <person_1_face-1>.jpg\r\n <person_1_face-2>.jpg\r\n .\r\n .\r\n <person_1_face-n>.jpg\r\n <person_2>/\r\n <person_2_face-1>.jpg\r\n <person_2_face-2>.jpg\r\n .\r\n .\r\n <person_2_face-n>.jpg\r\n .\r\n .\r\n <person_n>/\r\n <person_n_face-1>.jpg\r\n <person_n_face-2>.jpg\r\n .\r\n .\r\n <person_n_face-n>.jpg\r\n\"\"\"\r\n\r\nimport face_recognition\r\nfrom sklearn import svm\r\nimport os\r\n\r\n# Training the SVC classifier\r\n\r\n# The training data would be all the face encodings from all the known images and the labels are their names\r\nencodings = []\r\nnames = []\r\n\r\n# Training directory\r\ntrain_dir = os.listdir('/train_dir/')\r\n\r\n# Loop through each person in the training directory\r\nfor person in train_dir:\r\n pix = os.listdir(\"/train_dir/\" + person)\r\n\r\n # Loop through each training image for the current person\r\n for person_img in pix:\r\n # Get the face encodings for the face in each image file\r\n face = face_recognition.load_image_file(\"/train_dir/\" + person + \"/\" + person_img)\r\n face_bounding_boxes = face_recognition.face_locations(face)\r\n\r\n #If training image contains none or more than faces, print an error message and exit\r\n if len(face_bounding_boxes) != 1:\r\n print(person + \"/\" + person_img + \" contains none or more than one faces and can't be used for training.\")\r\n exit()\r\n else:\r\n face_enc = face_recognition.face_encodings(face)[0]\r\n # Add face encoding for current image with corresponding label (name) to the training data\r\n encodings.append(face_enc)\r\n names.append(person)\r\n\r\n# Create and train the SVC classifier\r\nclf = svm.SVC(gamma='scale')\r\nclf.fit(encodings,names)\r\n\r\n# Load the test image with unknown faces into a numpy array\r\ntest_image = face_recognition.load_image_file('test_image.jpg')\r\n\r\n# Find all the faces in the test image using the default HOG-based model\r\nface_locations = face_recognition.face_locations(test_image)\r\nno = len(face_locations)\r\nprint(\"Number of faces detected: \", no)\r\n\r\n# Predict all the faces in the test image using the trained classifier\r\nprint(\"Found:\")\r\nfor i in range(no):\r\n test_image_enc = face_recognition.face_encodings(test_image)[i]\r\n name = clf.predict([test_image_enc])\r\n print(*name)\r\n"
] | [
[
"sklearn.svm.SVC"
]
] |
oeg-upm/ttla | [
"ab1cc5a2777b3d4fb905f4452379f469153c904b"
] | [
"commons/__init__.py"
] | [
"import os\nimport pandas as pd\nfrom easysparql import *\n\nENDPOINT = \"https://dbpedia.org/sparql\"\nMIN_NUM_OF_ENT_PER_PROP = 30 # the minimum number of entities per property (get_properties)\nQUERY_LIMIT = \"\" # At the moment, we do not put any limit on the number of results\nMIN_NUM_NUMS = 30 # The minimum number of values that will be annotated, this is to ignore small size\n\nproj_path = (os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))\n\ndata_dir = os.path.join(proj_path, 'data')\nmeta_dir = os.path.join(proj_path, 'meta')\nmodels_dir = os.path.join(proj_path, 'local_models')\nlog_dir = os.path.join(proj_path, 'local_logs')\n\n\n# kinds\nNOMINAL = \"nominal\"\nORDINAL = \"ordinal\"\nRATIO_INTERVAL = \"ratio-interval\"\n\n# sub kinds\nCATEGORICAL = \"categorical\"\nSEQUENTIAL = \"sequential\"\nHIERARCHICAL = \"hierarchical\"\nRANDOM = \"random\"\nCOUNTS = \"count\"\nOTHER = \"other\"\n\nYEAR = \"year\"\n\n\n# I am not sure of the below is useful\n# kinds and subkinds\nKINDS = {\n ORDINAL: [],\n NOMINAL: [CATEGORICAL, SEQUENTIAL, HIERARCHICAL, RANDOM],\n RATIO_INTERVAL: [COUNTS, OTHER],\n YEAR: []\n}\n\n\ndef get_column_from_meta(fname, column_id):\n \"\"\"\n :param fname:\n :param column_id:\n :return:\n \"\"\"\n fdir = os.path.join(data_dir, 'T2Dv2', fname+\".csv\")\n df = pd.read_csv(fdir)\n col_name = df.columns.values[column_id]\n return list(df[col_name])\n\n\ndef t2dv2_columns_of_kind(num_kind, sub_kind=None):\n \"\"\"\n :param num_kind: nominal, ordinal, ratio-interval\n :return: a dataframe of the specified kind\n \"\"\"\n meta_file_dir = os.path.join(meta_dir, 'T2Dv2_typology.csv')\n df = pd.read_csv(meta_file_dir)\n if sub_kind is None:\n dfkind = df[df.kind == num_kind]\n else:\n dfkind = df[df.kind == num_kind and df.sub_kind == sub_kind]\n print(dfkind)\n return dfkind\n\n\ndef get_numerics_from_list(nums_str_list):\n \"\"\"\n :param nums_str_list: list of string or numbers or a mix\n :return: list of numbers or None if less than 50% are numbers\n \"\"\"\n nums = []\n for c in nums_str_list:\n n = get_num(c)\n if n is not None:\n nums.append(n)\n if len(nums) < len(nums_str_list)/2:\n return None\n return nums\n\n\ndef get_num(num_or_str):\n \"\"\"\n :param num_or_str:\n :return: number or None if it is not a number\n \"\"\"\n if pd.isna(num_or_str):\n return None\n elif isinstance(num_or_str, (int, float)):\n return num_or_str\n elif isinstance(num_or_str, basestring):\n if '.' in num_or_str or ',' in num_or_str or num_or_str.isdigit():\n try:\n return float(num_or_str.replace(',', ''))\n except Exception as e:\n return None\n return None\n\n\ndef class_uri_to_fname(class_uri):\n \"\"\"\n :param class_uri:\n :return:\n \"\"\"\n if class_uri[:7] == \"http://\":\n class_dname = class_uri[7:]\n elif class_uri[:8] == \"https://\":\n class_dname = class_uri[8:]\n class_fname = class_dname.replace('/', '__').replace(',', '').replace('#', '_')#.replace('-', '_')\n return class_fname\n"
] | [
[
"pandas.read_csv",
"pandas.isna"
]
] |
dyabel/wsod-mmdet | [
"60fc1993ea298f992b160b5599a6134702ac0d4f"
] | [
"mmdet/models/losses/my_cross_entropy_loss.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\nfrom ..builder import LOSSES\nfrom .utils import weight_reduce_loss\neps = 0.000001\n\n\ndef cross_entropy_without_softmax(pred,\n label,\n weight=None,\n reduction='mean',\n avg_factor=None,\n class_weight=None):\n \"\"\"Calculate the CrossEntropy loss.\n\n Args:\n pred (torch.Tensor): The prediction with shape (N, C), C is the number\n of classes.\n label (torch.Tensor): The learning label of the prediction.\n weight (torch.Tensor, optional): Sample-wise loss weight.\n reduction (str, optional): The method used to reduce the loss.\n avg_factor (int, optional): Average factor that is used to average\n the loss. Defaults to None.\n class_weight (list[float], optional): The weight for each class.\n\n Returns:\n torch.Tensor: The calculated loss\n \"\"\"\n # element-wise losses\n #loss = F.cross_entropy(pred, label, weight=class_weight, reduction='none')\n\n loss = F.nll_loss(torch.log(pred), label, reduction = 'none')\n\n # apply weights and do the reduction\n if weight is not None:\n weight = weight.float()\n loss = weight_reduce_loss(\n loss, weight=weight, reduction=reduction, avg_factor=avg_factor)\n\n return loss\n\ndef cross_entropy(pred,\n label,\n weight=None,\n reduction='mean',\n avg_factor=None,\n class_weight=None):\n \"\"\"Calculate the CrossEntropy loss.\n\n Args:\n pred (torch.Tensor): The prediction with shape (N, C), C is the number\n of classes.\n label (torch.Tensor): The learning label of the prediction.\n weight (torch.Tensor, optional): Sample-wise loss weight.\n reduction (str, optional): The method used to reduce the loss.\n avg_factor (int, optional): Average factor that is used to average\n the loss. Defaults to None.\n class_weight (list[float], optional): The weight for each class.\n\n Returns:\n torch.Tensor: The calculated loss\n \"\"\"\n # element-wise losses\n loss = F.cross_entropy(pred, label, weight=class_weight, reduction='none')\n\n # apply weights and do the reduction\n if weight is not None:\n weight = weight.float()\n loss = weight_reduce_loss(\n loss, weight=weight, reduction=reduction, avg_factor=avg_factor)\n\n return loss\n\n\ndef _expand_onehot_labels(labels, label_weights, label_channels):\n bin_labels = labels.new_full((labels.size(0), label_channels), 0)\n inds = torch.nonzero(\n (labels >= 0) & (labels < label_channels), as_tuple=False).squeeze()\n if inds.numel() > 0:\n bin_labels[inds, labels[inds]] = 1\n\n if label_weights is None:\n bin_label_weights = None\n else:\n bin_label_weights = label_weights.view(-1, 1).expand(\n label_weights.size(0), label_channels)\n\n return bin_labels, bin_label_weights\n\n\ndef binary_cross_entropy(pred,\n label,\n weight=None,\n reduction='mean',\n avg_factor=None,\n class_weight=None):\n \"\"\"Calculate the binary CrossEntropy loss.\n\n Args:\n pred (torch.Tensor): The prediction with shape (N, 1).\n label (torch.Tensor): The learning label of the prediction.\n weight (torch.Tensor, optional): Sample-wise loss weight.\n reduction (str, optional): The method used to reduce the loss.\n Options are \"none\", \"mean\" and \"sum\".\n avg_factor (int, optional): Average factor that is used to average\n the loss. 
Defaults to None.\n class_weight (list[float], optional): The weight for each class.\n\n Returns:\n torch.Tensor: The calculated loss\n \"\"\"\n if pred.dim() != label.dim():\n label, weight = _expand_onehot_labels(label, weight, pred.size(-1))\n\n # weighted element-wise losses\n if weight is not None:\n weight = weight.float()\n\n\n pred = pred.clamp(1e-6,1-1e-6)\n label = label.clamp(0,1)\n loss = F.binary_cross_entropy(pred,label)\n\n return loss\n\n\ndef mask_cross_entropy(pred,\n target,\n label,\n reduction='mean',\n avg_factor=None,\n class_weight=None):\n \"\"\"Calculate the CrossEntropy loss for masks.\n\n Args:\n pred (torch.Tensor): The prediction with shape (N, C), C is the number\n of classes.\n target (torch.Tensor): The learning label of the prediction.\n label (torch.Tensor): ``label`` indicates the class label of the mask'\n corresponding object. This will be used to select the mask in the\n of the class which the object belongs to when the mask prediction\n if not class-agnostic.\n reduction (str, optional): The method used to reduce the loss.\n Options are \"none\", \"mean\" and \"sum\".\n avg_factor (int, optional): Average factor that is used to average\n the loss. Defaults to None.\n class_weight (list[float], optional): The weight for each class.\n\n Returns:\n torch.Tensor: The calculated loss\n \"\"\"\n # TODO: handle these two reserved arguments\n assert reduction == 'mean' and avg_factor is None\n num_rois = pred.size()[0]\n inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)\n pred_slice = pred[inds, label].squeeze(1)\n return F.binary_cross_entropy_with_logits(\n pred_slice, target, weight=class_weight, reduction='mean')[None]\n\n\[email protected]_module()\nclass MyCrossEntropyLoss(nn.Module):\n\n def __init__(self,\n use_sigmoid=False,\n use_mask=False,\n reduction='mean',\n class_weight=None,\n loss_weight=1.0):\n \"\"\"CrossEntropyLoss.\n\n Args:\n use_sigmoid (bool, optional): Whether the prediction uses sigmoid\n of softmax. Defaults to False.\n use_mask (bool, optional): Whether to use mask cross entropy loss.\n Defaults to False.\n reduction (str, optional): . Defaults to 'mean'.\n Options are \"none\", \"mean\" and \"sum\".\n class_weight (list[float], optional): Weight of each class.\n Defaults to None.\n loss_weight (float, optional): Weight of the loss. Defaults to 1.0.\n \"\"\"\n super(MyCrossEntropyLoss, self).__init__()\n assert (use_sigmoid is False) or (use_mask is False)\n self.use_sigmoid = use_sigmoid\n self.use_mask = use_mask\n self.reduction = reduction\n self.loss_weight = loss_weight\n self.class_weight = class_weight\n\n self.cls_criterion = binary_cross_entropy\n\n def forward(self,\n cls_score,\n label,\n weight=None,\n avg_factor=None,\n reduction_override=None,\n **kwargs):\n \"\"\"Forward function.\n\n Args:\n cls_score (torch.Tensor): The prediction.\n label (torch.Tensor): The learning label of the prediction.\n weight (torch.Tensor, optional): Sample-wise loss weight.\n avg_factor (int, optional): Average factor that is used to average\n the loss. 
Defaults to None.\n reduction (str, optional): The method used to reduce the loss.\n Options are \"none\", \"mean\" and \"sum\".\n Returns:\n torch.Tensor: The calculated loss\n \"\"\"\n assert reduction_override in (None, 'none', 'mean', 'sum')\n reduction = (\n reduction_override if reduction_override else self.reduction)\n if self.class_weight is not None:\n class_weight = cls_score.new_tensor(\n self.class_weight, device=cls_score.device)\n else:\n class_weight = None\n loss_cls = self.loss_weight * self.cls_criterion(\n cls_score,\n label,\n weight,\n class_weight=class_weight,\n reduction=reduction,\n avg_factor=avg_factor,\n **kwargs)\n return loss_cls\n"
] | [
[
"torch.nonzero",
"torch.log",
"torch.arange",
"torch.nn.functional.cross_entropy",
"torch.nn.functional.binary_cross_entropy",
"torch.nn.functional.binary_cross_entropy_with_logits"
]
] |
ForrestPi/ObjectDetection | [
"54e0821e73f67be5360c36f01229a123c34ab3b3"
] | [
"nms/benchmark/nms_numba_cpu.py"
] | [
"from __future__ import absolute_import\nimport numba\nimport numpy as np\[email protected](nopython=True)\ndef nms_cpu(dets, thresh):\n x1 = dets[:, 0]\n y1 = dets[:, 1]\n x2 = dets[:, 2]\n y2 = dets[:, 3]\n scores = dets[:, 4]\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n order = scores.argsort()[::-1]\n\n keep = []\n while order.size > 0:\n i = order[0]\n keep.append(i)\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n\n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n ovr = inter / (areas[i] + areas[order[1:]] - inter)\n\n inds = np.where(ovr <= thresh)[0]\n order = order[inds + 1]\n\n return keep\n\nif __name__ == \"__main__\":\n bbox=np.load(\"bbox.npy\")\n print(bbox.shape)\n keep=nms_cpu(bbox,0.7)\n print(len(keep))\n"
] | [
[
"numpy.maximum",
"numpy.load",
"numpy.where",
"numpy.minimum"
]
] |
delemottelab/demystifying | [
"e8527b52d5fbe0570cd391921ecda5aefceb797a"
] | [
"demystifying/feature_extraction/mlp_feature_extractor.py"
] | [
"from __future__ import absolute_import, division, print_function\n\nimport logging\nimport sys\n\nlogging.basicConfig(\n stream=sys.stdout,\n format='%(asctime)s %(name)s-%(levelname)s: %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\nimport numpy as np\nfrom sklearn.neural_network import MLPClassifier, MLPRegressor\n\nfrom .. import relevance_propagation as relprop\nfrom .feature_extractor import FeatureExtractor\nfrom ..postprocessing import PerFrameImportancePostProcessor\n\nlogger = logging.getLogger(\"mlp\")\n\n\nclass MlpFeatureExtractor(FeatureExtractor):\n\n def __init__(self,\n name=\"MLP\",\n activation=relprop.relu,\n randomize=True,\n supervised=True,\n one_vs_rest=False,\n per_frame_importance_outfile=None,\n per_frame_importance_samples=None,\n per_frame_importance_labels=None,\n classifier_kwargs={},\n **kwargs):\n FeatureExtractor.__init__(self,\n name=name,\n supervised=supervised,\n **kwargs)\n self.backend = \"scikit-learn\" # Only available option for now, more to come probably\n if activation not in [relprop.relu, relprop.logistic_sigmoid]:\n Exception(\"Relevance propagation currently only supported for relu or logistic\")\n self.activation = activation\n self.randomize = randomize\n self.classifier_kwargs = classifier_kwargs.copy()\n if classifier_kwargs.get('activation', None) is not None and \\\n classifier_kwargs.get('activation') != self.activation:\n logger.warn(\"Conflicting activation properiies. '%s' will be overwritten with '%s'\",\n classifier_kwargs.get('activation'),\n self.activation)\n self.classifier_kwargs['activation'] = self.activation\n if not self.randomize:\n self.classifier_kwargs['random_state'] = 89274\n self.frame_importances = None\n self.per_frame_importance_outfile = per_frame_importance_outfile\n self.per_frame_importance_samples = per_frame_importance_samples\n self.per_frame_importance_labels = per_frame_importance_labels\n if self.use_regression:\n self.one_vs_rest = False\n else:\n self.one_vs_rest = one_vs_rest\n\n logger.debug(\"Initializing MLP with the following parameters:\"\n \" activation function %s, randomize %s, classifier_kwargs %s,\"\n \" per_frame_importance_outfile %s, backend %s, per_frame_importance_samples %s, one_vs_rest %s\",\n activation, randomize, classifier_kwargs, per_frame_importance_outfile, self.backend,\n None if per_frame_importance_samples is None else per_frame_importance_samples.shape,\n self.one_vs_rest)\n\n def _train_one_vs_rest(self, data, labels):\n n_clusters = labels.shape[1]\n n_points = data.shape[0]\n\n classifiers = []\n\n for i_cluster in range(n_clusters):\n classifiers.append(self._create_classifier())\n binary_labels = np.zeros((n_points, 2))\n binary_labels[labels[:, i_cluster] == 1, 0] = 1\n binary_labels[labels[:, i_cluster] != 1, 1] = 1\n classifiers[i_cluster].fit(data, binary_labels)\n\n return classifiers\n\n def train(self, train_set, train_labels):\n \"\"\"\n TODO code duplication below for on_vs_the_rest logic, refactor with KL and RF into common superclass\n :param train_set:\n :param train_labels:\n :return:\n \"\"\"\n # Construct and train classifier\n logger.debug(\"Training %s with %s samples and %s features ...\", self.name, train_set.shape[0],\n train_set.shape[1])\n if self.one_vs_rest:\n return self._train_one_vs_rest(train_set, train_labels)\n else:\n classifier = self._create_classifier()\n classifier.fit(train_set, train_labels)\n return classifier\n\n def _normalize_relevance_per_frame(self, relevance_per_frame):\n for i in range(relevance_per_frame.shape[0]):\n # Not 
removing negative relevance in per frame analysis\n # ind_negative = np.where(relevance_per_frame[i, :] < 0)[0]\n # relevance_per_frame[i, ind_negative] = 0\n relevance_per_frame[i, :] = (relevance_per_frame[i, :] - np.min(relevance_per_frame[i, :])) / \\\n (np.max(relevance_per_frame[i, :]) - np.min(relevance_per_frame[i, :]) + 1e-9)\n return relevance_per_frame\n\n def _perform_lrp(self, classifier, data, labels):\n nclusters = labels.shape[1] if self.supervised else 1\n nfeatures = data.shape[1]\n relevance_per_cluster = np.zeros((nfeatures, nclusters))\n per_frame_relevance = np.zeros(data.shape)\n for c_idx in range(nclusters):\n # Get all frames belonging to a cluster\n if self.supervised:\n frame_indices = labels[:, c_idx] == 1\n cluster_data = data[frame_indices]\n cluster_labels = np.zeros((len(cluster_data), nclusters))\n cluster_labels[:, c_idx] = 1 # Only look at one class at the time\n else:\n # TODO refactor to break unsupervised code out of here. Unsupervised method have no concept of clusters/labels\n cluster_labels = labels\n frame_indices = [i for i in range(len(data))]\n cluster_data = data\n if len(cluster_data) == 0:\n continue\n # Now see what makes these frames belong to that class\n # Time for LRP\n layers = self._create_layers(classifier)\n propagator = relprop.RelevancePropagator(layers)\n cluster_frame_relevance = propagator.propagate(cluster_data, cluster_labels)\n # Rescale relevance according to min and max relevance in each frame\n cluster_frame_relevance = self._normalize_relevance_per_frame(cluster_frame_relevance)\n relevance_per_cluster[:, c_idx] = cluster_frame_relevance.mean(axis=0)\n per_frame_relevance[frame_indices] += cluster_frame_relevance\n per_frame_relevance = self._normalize_relevance_per_frame(per_frame_relevance)\n return per_frame_relevance, relevance_per_cluster\n\n def get_feature_importance(self, classifier, data, labels):\n logger.debug(\"Extracting feature importance using MLP ...\")\n if self.one_vs_rest:\n return self._get_feature_importance_binaryclass(classifier, data, labels)\n else:\n return self._get_feature_importance_multiclass(classifier, data, labels)\n\n def _get_feature_importance_binaryclass(self, classifiers, data, labels):\n n_features = data.shape[1]\n n_frames = data.shape[0]\n n_states = labels.shape[1] if len(labels.shape) > 1 else 1\n feature_importances = np.zeros((n_features, self.n_clusters))\n for i_cluster in range(n_states):\n # TODO a bit inefficent approach below where we consistenly compute LRP for all other clusters and don't use those results.\n cluster_frames = labels[:, i_cluster] == 1\n binary_labels = np.zeros((n_frames, 2))\n binary_labels[cluster_frames, 0] = 1\n binary_labels[~cluster_frames, 1] = 1\n relevance_per_frame, relevance_per_cluster = self._perform_lrp(classifiers[i_cluster], data, binary_labels)\n feature_importances[:, i_cluster] = relevance_per_cluster[:, 0]\n if self.per_frame_importance_outfile is not None:\n cluster_frame_importances, other_labels = self._compute_frame_relevance(classifiers[i_cluster],\n relevance_per_frame,\n data,\n labels)\n if self.frame_importances is None:\n self.frame_importances = np.zeros((len(other_labels), cluster_frame_importances.shape[1]))\n other_cluster_frames = other_labels[:, 0] == 1\n if len(other_labels[other_cluster_frames]) == 0:\n # No frames in this state, just move on\n continue\n nclusters_per_frame = other_labels[other_cluster_frames].sum(axis=1)[:, np.newaxis]\n self.frame_importances[other_cluster_frames, :] += 
cluster_frame_importances[\n other_cluster_frames] / nclusters_per_frame\n return feature_importances\n\n def _get_feature_importance_multiclass(self, classifier, data, labels):\n relevance_per_frame, relevance_per_cluster = self._perform_lrp(classifier, data, labels)\n\n if self.per_frame_importance_outfile is not None:\n frame_importances, _ = self._compute_frame_relevance(classifier, relevance_per_frame, data, labels)\n self.frame_importances = frame_importances if self.frame_importances is None else self.frame_importances + frame_importances\n return relevance_per_cluster\n\n def _compute_frame_relevance(self, classifier, relevance_per_frame, data, labels):\n if self.per_frame_importance_samples is not None:\n if self.indices_for_filtering is None:\n other_samples = self.per_frame_importance_samples\n else:\n other_samples = self.per_frame_importance_samples[:, self.indices_for_filtering]\n if self.per_frame_importance_labels is None:\n other_labels = classifier.predict(other_samples)\n else:\n other_labels = self.per_frame_importance_labels\n other_samples = self.scaler.transform(other_samples)\n frame_relevance, _ = self._perform_lrp(classifier, other_samples, other_labels)\n else:\n logger.info(\"Using same trajectory for per frame importance as was used for training.\")\n if self.n_splits != 1:\n logger.error(\n \"Cannot average frame importance to outfile if n_splits != 1. n_splits is now set to %s\",\n self.n_splits)\n if self.shuffle_datasets:\n logger.error(\"Data set has been shuffled, per frame importance will not be properly mapped\")\n frame_relevance = relevance_per_frame\n other_labels = labels\n # for every feature in every frame...\n frame_importances = np.zeros(\n (data if self.per_frame_importance_samples is None else self.per_frame_importance_samples).shape) - 1\n if self.indices_for_filtering is not None:\n frame_importances[:, self.indices_for_filtering] = 0\n niters = self.n_iterations * self.n_splits\n for frame_idx, rel in enumerate(frame_relevance):\n if self.indices_for_filtering is None:\n frame_importances[frame_idx] += rel / niters\n else:\n frame_importances[frame_idx, self.indices_for_filtering] += rel / niters\n return frame_importances, other_labels\n\n def _create_layers(self, classifier):\n weights = classifier.coefs_\n biases = classifier.intercepts_\n layers = []\n for idx, weight in enumerate(weights):\n\n if idx == 0:\n l = relprop.FirstLinear(min_val=0, max_val=1, weight=weight, bias=biases[idx])\n else:\n l = relprop.layer_for_string(self.activation, weight=weight, bias=biases[idx])\n if l is None:\n raise Exception(\n \"Cannot create layer at index {} for activation function {}\".format(idx, self.activation))\n layers.append(l)\n if idx < len(weights) - 1:\n # Add activation to all except output layer\n activation = relprop.layer_activation_for_string(self.activation)\n if activation is None:\n raise Exception(\"Unknown activation function {}\".format(self.activation))\n layers.append(activation)\n else:\n if self.backend == 'scikit-learn':\n # For scikit implementation see # https://stats.stackexchange.com/questions/243588/how-to-apply-softmax-as-activation-function-in-multi-layer-perceptron-in-scikit\n # or https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/multilayer_perceptron.py\n out_activation = relprop.layer_activation_for_string(classifier.out_activation_)\n if out_activation is None:\n raise Exception(\"Unknown activation function {}\".format(self.activation))\n layers.append(out_activation)\n else:\n 
raise Exception(\"Unsupported MLP backend {}\".format(self.backend))\n\n return layers\n\n def _create_classifier(self):\n return MLPRegressor(**self.classifier_kwargs) if self.use_regression \\\n else MLPClassifier(**self.classifier_kwargs)\n\n def postprocessing(self, **kwargs):\n return PerFrameImportancePostProcessor(extractor=self,\n per_frame_importance_outfile=self.per_frame_importance_outfile,\n frame_importances=self.frame_importances,\n **kwargs)\n"
] | [
[
"numpy.zeros",
"sklearn.neural_network.MLPClassifier",
"numpy.max",
"numpy.min",
"sklearn.neural_network.MLPRegressor"
]
] |
JayWu7/Machine-Learning-Courses-Study-Record | [
"7586c3429514bc21c7cfe42f85ca8c0fcf8f072b"
] | [
"Algorithmic Methods of Data Mining/Final_project/graph_partitioning1.py"
] | [
"import numpy as np\nfrom sklearn.cluster import KMeans\nimport time\nfrom scipy.sparse.linalg import eigs\nfrom scipy.sparse import csr_matrix\n\n\nclass Graph:\n\n def __init__(self, data_name):\n self.filename = data_name\n self.n = None\n self.k = None\n self.edges = self.form_graph()\n # self.e = None # number of edges\n self.adj = None # adjacency list\n self.lap = None\n self.U = None\n self.labels = None\n\n def form_graph(self):\n '''\n form a graph from the .txt file\n :param file: data file\n :return: graph, in the shape used latter\n n, k\n '''\n with open('./data/{}'.format(self.filename), 'r') as f:\n first_line = f.readline()[:-1] # remove '\\n' at the end\n meta = first_line.split(' ')\n yield int(meta[2]), int(meta[-1])\n\n for i, edge in enumerate(f.readlines()):\n s, t = edge[:-1].split(' ')\n yield int(s), int(t)\n\n def generate_adj(self):\n '''\n generate the adjacency matrix of a graph\n :param graph: the edges of a graph\n :param n: the number of vertices in this graph\n :return: adjacency matrix\n '''\n a = time.time()\n self.n, self.k = next(self.edges)\n adj = [set() for _ in range(self.n)]\n for s, t in self.edges:\n adj[s].add(t)\n adj[t].add(s)\n b = time.time()\n print('Generate adjacency matrix cost: {}s'.format(b-a))\n return adj\n\n def generate_lap(self):\n '''\n From adjacency matrix and diagonal matrix build Laplacian matrix\n :param dia: diagonal matrix\n :param adj: adjacency matrix\n :return: Laplacian matrix\n '''\n a = time.time()\n self.lap = np.ndarray((self.n, self.n))\n for i, row in enumerate(self.adj):\n row_dia = np.zeros(self.n)\n row_dia[i] = len(row)\n row_adj = [1 if j in row else 0 for j in range(self.n)]\n self.lap[i] = row_dia - row_adj\n x = np.linalg.norm(self.lap)\n self.lap = self.lap / x\n b = time.time()\n print('Genearte Laplacian matrix cost: {}s'.format(b-a))\n\n def get_U(self):\n '''\n Using scipy.sparse.linalg.eigs to calculate matrix U that we need for kmeans algorithm\n :param lap: laplacian matrix\n :param k: a number\n :return: matrix U\n '''\n s = time.time()\n self.lap = csr_matrix(self.lap)\n _, first_k = eigs(self.lap, self.k, sigma=0)\n U = first_k.real\n # normalize U\n x = np.linalg.norm(U)\n U = U / x\n t = time.time()\n print('Generate U cost: {}s'.format(t - s))\n return U\n\n def k_means(self):\n '''\n Using K-means algorithm to cluster the data\n :param data: n points\n :param k: number of clusters\n :return: clusters\n '''\n s = time.time()\n kmeans = KMeans(n_clusters=self.k, algorithm='auto')\n kmeans.fit(self.U)\n t = time.time()\n print('Run k-means algorithm cost: {}s'.format(t - s))\n return kmeans.labels_\n\n def write_clusters(self):\n '''\n return the clusters of vertices\n :param labels: labels generated from kmeans method\n :return: clusters\n '''\n with open('./result/{}_res.txt'.format(self.filename[:-4]), 'w') as f:\n for i, l in enumerate(self.labels):\n f.write('{} {}\\n'.format(i, l))\n\n def main(self):\n self.adj = self.generate_adj()\n self.generate_lap()\n self.U = self.get_U()\n self.labels = self.k_means()\n self.write_clusters()\n\n\nif __name__ == '__main__':\n graph = Graph('soc-Epinions1.txt')\n graph.main()\n\n"
] | [
[
"numpy.zeros",
"scipy.sparse.csr_matrix",
"scipy.sparse.linalg.eigs",
"numpy.ndarray",
"sklearn.cluster.KMeans",
"numpy.linalg.norm"
]
] |
webclinic017/time-series-pipeline | [
"5ac418b91e395a48cba397f95d25d221adfff9bd"
] | [
"EOD_api/test_EOD_api.py"
] | [
"import os\nimport re\nimport datetime\nimport unittest\nfrom io import StringIO\nfrom unittest.mock import patch\n\nimport pandas as pd\n\nimport EOD_api as eod\n\nTOKEN = os.environ[\"EOD_TOKEN\"]\n\n\ndef date_parser(string):\n date_pattern = re.compile(\"([0-9]{4}-[0-9]{2}-[0-9]{2})[ ]\", re.VERBOSE)\n return date_pattern.sub(r\"\\1T\", string)\n\n\nclass TestGetEod(unittest.TestCase):\n # @classmethod\n # def setUp(cls):\n # pass\n # def tearDown(cls):\n # pass\n\n def test_idempotent__addtickers(self):\n d1 = eod.OhlcvIntraday(\n [\"AAPL.US\"], TOKEN, \"2020-10-13\", \"2020-10-17\", intraday_frec=\"5m\"\n ).add_tickers([\"MSFT.US\"])\n d2 = (\n eod.OhlcvIntraday(\n [\"AAPL.US\"], TOKEN, \"2020-10-13\", \"2020-10-17\", intraday_frec=\"5m\"\n )\n .add_tickers([\"MSFT.US\"])\n .add_tickers([\"MSFT.US\"])\n )\n self.assertEqual(d1, d2)\n\n def test_idempotent_truncate_dates(self):\n d1 = eod.Fundamental(\n [\"AAPL.US\"], TOKEN, \"2020-10-13\", \"2020-10-17\"\n ).truncate_dates(\"2020-10-14\", \"2020-10-16\")\n d2 = (\n eod.Fundamental([\"AAPL.US\"], TOKEN, \"2020-10-13\", \"2020-10-17\")\n .truncate_dates(\"2020-10-14\", \"2020-10-16\")\n .truncate_dates(\"2020-10-14\", \"2020-10-16\")\n )\n self.assertEqual(d1, d2)\n\n def test_idempotent_remove_tickers(self):\n d1 = eod.Fundamental(\n [\"AAPL.US\", \"MSFT.US\"], TOKEN, \"2020-10-13\", \"2020-10-17\"\n ).remove_tickers([\"MSFT.US\"])\n d2 = (\n eod.Fundamental([\"AAPL.US\", \"MSFT.US\"], TOKEN, \"2020-10-13\", \"2020-10-17\")\n .remove_tickers([\"MSFT.US\"])\n .remove_tickers([\"MSFT.US\"])\n )\n self.assertEqual(d1, d2)\n\n def test_add_remove(self):\n d1 = eod.OhlcvIntraday([\"AAPL.US\"], TOKEN, \"2020-10-13\", \"2020-10-17\", \"1m\")\n d2 = (\n eod.OhlcvIntraday([\"AAPL.US\"], TOKEN, \"2020-10-13\", \"2020-10-17\", \"1m\")\n .add_tickers([\"MSFT.US\"])\n .remove_tickers([\"MSFT.US\"])\n )\n self.assertEqual(d1, d2)\n\n def test_remove_all_tickers(self):\n with self.assertRaises(Exception):\n eod.Ohlcv([\"AAPL.US\"], TOKEN, \"2020-10-13\", \"2020-10-17\").remove_tickers(\n [\"AAPL.US\"]\n ).retrieve_data()\n\n def test_misspelled_input(self):\n with self.assertRaises(Exception):\n eod.OhlcvIntraday(\n [\"AAPL.US\"], TOKEN, \"2020-10-13\", \"2020-10-17\", intraday_frec=\"Daoly\"\n )\n\n def test_ohlcv_data_format_hasnt_changed(\n self,\n ): # Cambiar de antes de formatting a después de formatting\n expected_aapl = pd.read_csv(\n StringIO(\n \"\"\"\n Date Open High Low Close Adjusted_close Volume\n 2020-10-13 125.27 125.390 119.65 121.10 120.7110 262330500.0\n 2020-10-14 121.00 123.030 119.62 121.19 120.8008 151062297.0\n 2020-10-15 118.72 121.200 118.15 120.71 120.3223 112559203.0\n 2020-10-16 121.28 121.548 118.81 119.02 118.6377 115393797.0\n 275 NaN NaN NaN NaN NaN NaN\n \"\"\"\n ),\n sep=\"\\\\s+\",\n )\n\n url = \"https://eodhistoricaldata.com/api/eod/AAPL.US?api_token={}&from=2020-10-13&to=2020-10-17&period=d\".format(\n TOKEN\n )\n actual = pd.read_csv(\n url,\n usecols=[\n \"Date\",\n \"Volume\",\n \"Open\",\n \"Close\",\n \"High\",\n \"Low\",\n \"Adjusted_close\",\n ],\n )\n with patch.object(pd, \"read_csv\") as mock_read:\n mock_read.autospec = True\n mock_read.return_value = expected_aapl\n expected = pd.read_csv(\n url,\n usecols=[\n \"Date\",\n \"Volume\",\n \"Open\",\n \"Close\",\n \"High\",\n \"Low\",\n \"Adjusted_close\",\n ],\n )\n pd.testing.assert_frame_equal(actual, expected, rtol=5e-3)\n\n def test_index_formatting(self):\n expected_aapl = pd.read_csv(\n StringIO(\n \"\"\"\n Date Open High Low Close Adjusted_close 
Volume\n 2020-10-13 125.27 125.390 119.65 121.10 120.7110 262330500.0\n 2020-10-14 121.00 123.030 119.62 121.19 120.8008 151062297.0\n 2020-10-15 118.72 121.200 118.15 120.71 120.3223 112559203.0\n 2020-10-16 121.28 121.548 118.81 119.02 118.6377 115393797.0\n 275 NaN NaN NaN NaN NaN NaN\n \"\"\"\n ),\n sep=\"\\\\s+\",\n )\n expected_aapl_formatted = pd.read_csv(\n StringIO(\n date_parser(\n \"\"\"\n Stock Date Open High Low Close Adjusted_close Volume \n AAPL.US 2020-10-13 00:00:00+00:00 125.27 125.390 119.65 121.10 120.7110 262330500.0\n AAPL.US 2020-10-14 00:00:00+00:00 121.00 123.030 119.62 121.19 120.8008 151062297.0\n AAPL.US 2020-10-15 00:00:00+00:00 118.72 121.200 118.15 120.71 120.3223 112559203.0\n AAPL.US 2020-10-16 00:00:00+00:00 121.28 121.548 118.81 119.02 118.6377 115393797.0\n \"\"\"\n )\n ),\n sep=\"\\\\s+\",\n index_col=[0, 1],\n converters={\"Date\": lambda col: datetime.datetime.fromisoformat(col)},\n )\n\n with patch.object(pd, \"read_csv\") as mock_read:\n mock_read.autospec = True\n mock_read.return_value = expected_aapl\n formatted_mock = eod.Ohlcv(\n [\"AAPL.US\"], TOKEN, \"2020-10-13\", \"2020-10-17\"\n ).retrieve_data()\n pd.testing.assert_frame_equal(\n formatted_mock, expected_aapl_formatted, rtol=5e-3\n )\n\n\n# TODO? Write more tests:\n# Check that the data is concated/merged/joined properly, particularly when the indexes come with Nans\n# Check except clauses\n# Check duplicate df values\n# Assert errors with wrong args\n# etc\n\n# expected_ohlcv_concatted = pd.read_csv( StringIO( date_parser( \"\"\"\n# Stock Date Gmtoffset Datetime Open High Low Close Volume Returns\n# BP.LSE 2020-10-13 00:00:00+00:00 NaN NaN NaN NaN NaN NaN NaN NaN\n# BP.LSE 2020-10-14 00:00:00+00:00 0.0 2020-10-13 15:25:00 213.649993 214.000000 213.550003 213.856994 1210380.0 -0.001601\n# BP.LSE 2020-10-15 00:00:00+00:00 0.0 2020-10-14 15:25:00 213.000000 213.149993 212.600006 212.649993 1182246.0 0.019660\n# BP.LSE 2020-10-16 00:00:00+00:00 0.0 2020-10-15 15:25:00 207.149993 207.199996 206.500000 206.850006 1626720.0 -0.013826\n# AAPL.US 2020-10-13 00:00:00+00:00 NaN NaN NaN NaN NaN NaN NaN NaN\n# AAPL.US 2020-10-14 00:00:00+00:00 0.0 2020-10-13 19:55:00 121.139999 121.279998 121.029998 121.050003 4585723.0 0.003648\n# AAPL.US 2020-10-15 00:00:00+00:00 0.0 2020-10-14 19:55:00 121.580001 121.709999 121.139999 121.180000 3420583.0 0.015419\n# AAPL.US 2020-10-16 00:00:00+00:00 0.0 2020-10-15 19:55:00 120.790000 120.849998 120.580001 120.699996 3436603.0 -0.003550\n# MSFT.US 2020-10-13 00:00:00+00:00 NaN NaN NaN NaN NaN NaN NaN NaN\n# MSFT.US 2020-10-14 00:00:00+00:00 0.0 2020-10-13 19:55:00 223.320007 223.389999 222.750000 222.830001 1457493.0 0.000651\n# MSFT.US 2020-10-15 00:00:00+00:00 0.0 2020-10-14 19:55:00 221.199996 221.414993 220.600006 220.759994 1122912.0 0.012377\n# MSFT.US 2020-10-16 00:00:00+00:00 0.0 2020-10-15 19:55:00 219.639999 219.880004 219.490005 219.660003 1201342.0 -0.003900\n# \"\"\" ) ), sep=\"\\\\s+\", index_col=[0,1,2], converters = {'Date' : lambda col: datetime.datetime.fromisoformat( col ) \\\n# , 'Datetime' : lambda col: pd.to_datetime(col, format='%Y-%m-%dT%H:%M:%S', utc=True) } )\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"pandas.read_csv",
"pandas.testing.assert_frame_equal"
]
] |
Leylasaadi/MACT20.21_Digital_tools_Big_Data_part_2 | [
"94cafa0581ec36a305867ebfdcb91c787aa77a16"
] | [
"session4/e_animations_2axis.py"
] | [
"# encoding: utf-8\n\n##################################################\n# This script shows how to create animated plots using matplotlib and a basic dataset\n# Multiple tutorials inspired the current design but they mostly came from:\n# hhttps://towardsdatascience.com/how-to-create-animated-graphs-in-python-bb619cc2dec1\n# Note: the project keeps updating every course almost yearly\n##################################################\n#\n##################################################\n# Author: Diego Pajarito\n# Credits: [Institute for Advanced Architecture of Catalonia - IAAC, Advanced Architecture group]\n# License: Apache License Version 2.0\n# Version: 1.0.0\n# Maintainer: Diego Pajarito\n# Email: [email protected]\n# Status: development\n##################################################\n\nimport matplotlib\nimport matplotlib.animation as animation\nimport matplotlib.pyplot as plt\nimport numpy as np\n# We need to import numpy and matplotlib library\n# importing libraries\nimport pandas as pd\nimport seaborn as sns\n\n# Read files and prepare data\ndata = pd.read_csv('../data/2021_seguiment-covid19-bcn.csv')\n#data = pd.read_csv('https://opendata-ajuntament.barcelona.cat/data/dataset/4f3ffbda-d5be-4f2a-a836-26a77be6df1a/resource/f627ac0a-d05f-416d-9773-eeb464a3fc44/download')\ndata.columns = ['date_indicator', 'frequency_indicator', 'place', 'name_indicator',\n 'name_variable', 'value', 'unit', 'source']\n# We will use two datasets to generate plots\ndata_daily = data[data['name_indicator'] == 'Casos de COVID-19 a Barcelona (diari)']\ndata_accumulated = data[data['name_indicator'] == 'Casos de COVID-19 a Barcelona (acumulat)']\n\n# We need the data to be in time format to calculate values in days after day zero\ndata_daily.loc[:, 'date_indicator'] = pd.to_datetime(data_daily['date_indicator'])\ninitial_day = data_daily['date_indicator'].min()\ndata_daily.loc[:, 'day_after_zero'] = data_daily['date_indicator'] - initial_day\ndata_daily.loc[:, 'day_after_zero'] = data_daily['day_after_zero']/np.timedelta64(1, 'D')\n# We need the data to be in time format to calculate values in days after day zero\ndata_accumulated.loc[:, 'date_indicator'] = pd.to_datetime(data_accumulated['date_indicator'])\ndata_accumulated.loc[:, 'day_after_zero'] = data_accumulated['date_indicator'] - initial_day\ndata_accumulated.loc[:, 'day_after_zero'] = data_accumulated['day_after_zero']/np.timedelta64(1, 'D')\n\n# we also extract some values to set the plot limits\nmax_day = data_daily['day_after_zero'].max().astype(int)\nmax_cases_daily = data_daily['value'].max()\nmax_cases_accumulated = data_accumulated['value'].max()\ntitle = 'Barcelona: '\n\n# We then prepare the writer and animation file options\nWriter = animation.writers['ffmpeg']\nwriter = Writer(fps=20, metadata=dict(artist='MaCTResearcher'), bitrate=1800)\n# If error using anaconda try to install ffmpeg\n# conda install -c conda-forge ffmpeg\n\n# We create an initial plot with basic configuration a single line\nfig, ax1 = plt.subplots()\nfig.set_size_inches(10, 6)\nplt.title(title + 'Covid-19 cases', fontsize=18)\nplt.xlabel('Day after case 1', fontsize=14)\nplt.ylim(0, max_cases_accumulated)\nplt.ylabel('Accumulated', fontsize=18)\n\n# # now we configure the secondary axis\nax2 = ax1.twinx()\nplt.ylim(0, max_cases_daily*2)\ncases_ticks = np.arange(0, max_day, 50)\n\n\n# We need to set an animation function to handle individual behaviour per frame\n# variable \"i\" is the frame id that can be used to handle queries or filters for your data\ndef 
animate(i):\n frame_data_daily = data_daily[data_daily['day_after_zero'] <= i]\n frame_data_accumulated = data_accumulated[data_accumulated['day_after_zero'] <= i]\n sns.lineplot(x='day_after_zero', y='value', data=frame_data_accumulated, color=\"r\", ax=ax1)\n sns.barplot(x='day_after_zero', y='value', data=frame_data_daily, color='b', ax=ax2)\n plt.ylabel('Daily', fontsize=18)\n plt.xlim(0, max_day)\n plt.xticks(cases_ticks)\n plt.xlabel('Day after case 1', fontsize=18)\n # Handling secondary axis implies different management in the animate function\n\n\nani = matplotlib.animation.FuncAnimation(fig, animate, frames=max_day, repeat=True)\nani.save('covid_cases_bcn_2axis.mp4', writer=writer)\nprint('end')\n"
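The script above rebuilds full seaborn plots on every frame, which is easy to read but slow for long series. A leaner FuncAnimation pattern updates a single artist per frame instead. The sketch below is a minimal, hypothetical illustration of that idea, not part of the original script: `xs`/`ys` are synthetic stand-ins for the day and case series, and `PillowWriter` is used so the example runs even without ffmpeg.

```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation

xs = np.arange(100)
ys = np.cumsum(np.random.rand(100))  # synthetic stand-in for an accumulated series

fig, ax = plt.subplots()
ax.set_xlim(0, xs.max())
ax.set_ylim(0, ys.max())
(line,) = ax.plot([], [], color='r')

def animate(i):
    # frame i draws the series up to index i, mirroring the
    # data_daily['day_after_zero'] <= i filter used in the script above
    line.set_data(xs[:i], ys[:i])
    return (line,)

ani = animation.FuncAnimation(fig, animate, frames=len(xs), repeat=False)
ani.save('demo.gif', writer=animation.PillowWriter(fps=20))  # no ffmpeg required
```

Because only one Line2D object is mutated per frame, rendering time stays roughly constant instead of growing with the number of frames already drawn.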
] | [
[
"numpy.timedelta64",
"matplotlib.pyplot.xticks",
"pandas.read_csv",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.title",
"numpy.arange",
"pandas.to_datetime",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.ylim",
"matplotlib.animation.FuncAnimation",
"matplotlib.pyplot.xlabel"
]
] |
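The dual-axis layout in the entry above hinges on `ax1.twinx()`, which creates a second y-axis sharing the same x-axis. Here is a minimal, self-contained sketch of that pattern under the same conventions; the data is synthetic and the labels are placeholders, not taken from the original script.

```python
import numpy as np
import matplotlib.pyplot as plt

days = np.arange(50)
daily = np.random.randint(0, 100, size=days.size)  # synthetic daily counts
accumulated = np.cumsum(daily)

fig, ax1 = plt.subplots()
ax1.plot(days, accumulated, color='r')
ax1.set_ylabel('Accumulated')

ax2 = ax1.twinx()                  # second y-axis, shared x-axis
ax2.bar(days, daily, color='b')
ax2.set_ylim(0, daily.max() * 2)   # same trick as the script: keep bars at half height
ax2.set_ylabel('Daily')

fig.savefig('twinx_demo.png', dpi=150)
```

Doubling the secondary y-limit is what keeps the daily bars in the lower half of the figure so they do not visually collide with the accumulated line.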
genepattern/genepattern-utils | [
"950d748301b3c4d07ad8d24c9b037bbb9b4c80e2"
] | [
"genepattern/utils/clustering.py"
] | [
"\"\"\"\nCopied and modified from the dev branch of:\nhttps://github.com/genepattern/HierarchicalClustering\non 2018-01-31\n\"\"\"\nimport sys\nimport numpy as np\nfrom statistics import mode\nfrom sklearn.metrics import pairwise\nfrom sklearn import metrics\n\nfrom scipy.cluster.hierarchy import dendrogram\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nimport itertools\nfrom sklearn.cluster import AgglomerativeClustering\nimport scipy\nimport itertools\nfrom collections import defaultdict\nfrom .elemental import *\nfrom .information import *\n\n# check if these are repeated:\nimport os\nimport sys\n\ntasklib_path = os.path.dirname(os.path.realpath(sys.argv[0]))\n# sys.path.append(tasklib_path + \"/ccalnoir\")\n\n# 2018-02-06 Maybe uncomment these next two\n# import matplotlib as mpl\n# mpl.use('Agg')\n\n# This is forprinting the hyperlink\nfrom IPython.core.display import display, HTML\n\n# import pandas as pd\n# import numpy as np\nimport scipy\nimport seaborn as sns\nfrom matplotlib import pyplot as plt\nfrom matplotlib import gridspec\nfrom sklearn.cluster import AgglomerativeClustering\n\n# from time import time\n# import cuzcatlan as cusca\nsns.set_style(\"white\")\nimport matplotlib as mpl\n\nmpl.rcParams['ytick.labelsize'] = 16\nmpl.rcParams['xtick.labelsize'] = 16\nmpl.rcParams['axes.titlesize'] = 24\nmpl.rcParams['axes.labelsize'] = 20\n\nSIGNIFICANT_DIGITS = 7\n\ninput_col_distance_dict = {\n # These are the values I expect\n \"No column clustering\": \"No_column_clustering\",\n \"Uncentered correlation\": \"uncentered_pearson\",\n \"Pearson correlation\": \"pearson\",\n \"Uncentered correlation, absolute value\": \"absolute_uncentered_pearson\",\n \"Pearson correlation, absolute value\": \"absolute_pearson\",\n \"Spearman's rank correlation\": \"spearman\",\n \"Kendall's tau\": \"kendall\",\n \"Euclidean distance\": \"euclidean\",\n \"City-block distance\": \"manhattan\",\n \"No_column_clustering\": \"No_column_clustering\",\n # These are the values the GpUnit tests give\n \"0\": \"No_column_clustering\",\n \"1\": \"uncentered_pearson\",\n \"2\": \"pearson\",\n \"3\": \"absolute_uncentered_pearson\",\n \"4\": \"absolute_pearson\",\n \"5\": \"spearman\",\n \"6\": \"kendall\",\n \"7\": \"euclidean\",\n \"8\": \"manhattan\",\n \"9\": \"information_coefficient\",\n # These are the values I expect from the comand line\n \"no_col\": \"No_column_clustering\",\n \"uncentered_pearson\": \"uncentered_pearson\",\n \"pearson\": \"pearson\",\n \"absolute_uncentered_pearson\": \"absolute_uncentered_pearson\",\n \"absolute_pearson\": \"absolute_pearson\",\n \"spearman\": \"spearman\",\n \"kendall\": \"kendall\",\n \"euclidean\": \"euclidean\",\n \"manhattan\": \"manhattan\",\n \"Cosine\": \"cosine\",\n \"cosine\": \"cosine\",\n \"ic\": \"information_coefficient\",\n \"information_coefficient\": \"information_coefficient\",\n \"Information Coefficient\": \"information_coefficient\",\n}\n\ninput_row_distance_dict = {\n # These are the values I expect\n \"No row clustering\": \"No_row_clustering\",\n \"Uncentered correlation\": \"uncentered_pearson\",\n \"Pearson correlation\": \"pearson\",\n \"Uncentered correlation, absolute value\": \"absolute_uncentered_pearson\",\n \"Pearson correlation, absolute value\": \"absolute_pearson\",\n \"Spearman's rank correlation\": \"spearman\",\n \"Kendall's tau\": \"kendall\",\n \"Euclidean distance\": \"euclidean\",\n \"City-block distance\": \"manhattan\",\n \"No_row_clustering\": \"No_row_clustering\",\n # These are the 
values the GpUnit tests give\n \"0\": \"No_row_clustering\",\n \"1\": \"uncentered_pearson\",\n \"2\": \"pearson\",\n \"3\": \"absolute_uncentered_pearson\",\n \"4\": \"absolute_pearson\",\n \"5\": \"spearman\",\n \"6\": \"kendall\",\n \"7\": \"euclidean\",\n \"8\": \"manhattan\",\n \"9\": \"information_coefficient\",\n # These are the values I expect from the comand line\n \"no_row\": \"No_row_clustering\",\n \"uncentered_pearson\": \"uncentered_pearson\",\n \"pearson\": \"pearson\",\n \"absolute_uncentered_pearson\": \"absolute_uncentered_pearson\",\n \"absolute_pearson\": \"absolute_pearson\",\n \"spearman\": \"spearman\",\n \"kendall\": \"kendall\",\n \"euclidean\": \"euclidean\",\n \"manhattan\": \"manhattan\",\n \"Cosine\": \"cosine\",\n \"cosine\": \"cosine\",\n \"ic\": \"information_coefficient\",\n \"information_coefficient\": \"information_coefficient\",\n \"Information Coefficient\": \"information_coefficient\",\n}\n\ninput_clustering_method = {\n # These are the values I expect\n 'Pairwise complete-linkage': 'complete',\n 'Pairwise average-linkage': 'average',\n 'Pairwise ward-linkage': 'ward',\n # These are the values the GpUnit test give\n 'm': 'complete',\n 'a': 'average', # I think this is the default\n}\n\ninput_row_centering = {\n # These are the values I expect\n 'No': None,\n 'Subtract the mean from each row': 'Mean',\n 'Subtract the median from each row': 'Median',\n # These are the values the GpUnit test give\n 'None': None,\n 'Median': 'Median',\n 'Mean': 'Mean',\n}\n\ninput_row_normalize = {\n # These are the values I expect\n 'No': False,\n 'Yes': True,\n # These are the values the GpUnit test give\n 'False': False,\n 'True': True,\n}\n\ninput_col_centering = {\n # These are the values I expect\n 'No': None,\n 'Subtract the mean from each column': 'Mean',\n 'Subtract the median from each column': 'Median',\n # These are the values the GpUnit test give\n 'None': None,\n 'Median': 'Median',\n 'Mean': 'Mean',\n}\n\ninput_col_normalize = {\n # These are the values I expect\n 'No': False,\n 'Yes': True,\n # These are the values the GpUnit test give\n 'False': False,\n 'True': True,\n}\n\n\ndef parse_inputs(args=sys.argv):\n # inp = []\n # inp = args\n # Error handling:\n arg_n = len(args)\n if arg_n == 1:\n sys.exit(\"Not enough parameters files were provided. 
This module needs a GCT file to work.\")\n elif arg_n == 2:\n gct_name = args[1]\n col_distance_metric = 'euclidean'\n output_distances = False\n row_distance_metric = 'No_row_clustering'\n clustering_method = 'Pairwise average-linkage'\n output_base_name = 'HC_out'\n row_normalization = False\n col_normalization = False\n row_centering = None\n col_centering = None\n print(\"Using:\")\n print(\"\\tgct_name =\", gct_name)\n print(\"\\tcol_distance_metric = euclidean (default value)\")\n print(\"\\toutput_distances =\", output_distances, \"(default: not computing it and creating a file)\")\n print(\"\\trow_distance_metric =\", row_distance_metric, \"(default: No row clustering)\")\n print(\"\\tclustering_method =\", clustering_method, \"(default: Pairwise average-linkage)\")\n print(\"\\toutput_base_name =\", output_base_name, \"(default: HC_out)\")\n print(\"\\trow_normalization =\", row_normalization, \"(default: False)\")\n print(\"\\tcol_normalization =\", col_normalization, \"(default: False)\")\n print(\"\\trow_centering =\", row_centering, \"(default: None)\")\n print(\"\\tcol_centering =\", col_centering, \"(default: None)\")\n elif arg_n == 3:\n gct_name = args[1]\n col_distance_metric = args[2]\n output_distances = False\n row_distance_metric = 'No_row_clustering'\n clustering_method = 'Pairwise average-linkage'\n output_base_name = 'HC_out'\n row_normalization = False\n col_normalization = False\n row_centering = None\n col_centering = None\n print(\"Using:\")\n print(\"\\tgct_name =\", gct_name)\n print(\"\\tcol_distance_metric =\", input_col_distance_dict[col_distance_metric])\n print(\"\\toutput_distances =\", output_distances, \"(default: not computing it and creating a file)\")\n print(\"\\trow_distance_metric =\", row_distance_metric, \"(default: No row clustering)\")\n print(\"\\tclustering_method =\", clustering_method, \"(default: Pairwise average-linkage)\")\n print(\"\\toutput_base_name =\", output_base_name, \"(default: HC_out)\")\n print(\"\\trow_normalization =\", row_normalization, \"(default: False)\")\n print(\"\\tcol_normalization =\", col_normalization, \"(default: False)\")\n print(\"\\trow_centering =\", row_centering, \"(default: None)\")\n print(\"\\tcol_centering =\", col_centering, \"(default: None)\")\n elif arg_n == 4:\n gct_name = args[1]\n col_distance_metric = args[2]\n output_distances = args[3]\n row_distance_metric = 'No_row_clustering'\n clustering_method = 'Pairwise average-linkage'\n output_base_name = 'HC_out'\n row_normalization = False\n col_normalization = False\n row_centering = None\n col_centering = None\n\n col_distance_metric = input_col_distance_dict[col_distance_metric]\n if (output_distances == 'False') or (output_distances == 'F') \\\n or (output_distances == 'false') or (output_distances == 'f'):\n output_distances = False\n else:\n output_distances = True\n print(\"Using:\")\n print(\"\\tgct_name =\", gct_name)\n print(\"\\tcol_distance_metric =\", col_distance_metric)\n print(\"\\toutput_distances =\", output_distances)\n print(\"\\trow_distance_metric =\", row_distance_metric, \"(default: No row clustering)\")\n print(\"\\tclustering_method =\", clustering_method, \"(default: Pairwise average-linkage)\")\n print(\"\\toutput_base_name =\", output_base_name, \"(default: HC_out)\")\n print(\"\\trow_normalization =\", row_normalization, \"(default: False)\")\n print(\"\\tcol_normalization =\", col_normalization, \"(default: False)\")\n print(\"\\trow_centering =\", row_centering, \"(default: None)\")\n print(\"\\tcol_centering =\", 
col_centering, \"(default: None)\")\n elif arg_n == 5:\n gct_name = args[1]\n col_distance_metric = args[2]\n output_distances = args[3]\n row_distance_metric = args[4]\n clustering_method = 'Pairwise average-linkage'\n # clustering_method = 'Pairwise complete-linkage'\n output_base_name = 'HC_out'\n row_normalization = False\n col_normalization = False\n row_centering = None\n col_centering = None\n\n col_distance_metric = input_col_distance_dict[col_distance_metric]\n row_distance_metric = input_row_distance_dict[row_distance_metric]\n if (output_distances == 'False') or (output_distances == 'F') \\\n or (output_distances == 'false') or (output_distances == 'f'):\n output_distances = False\n else:\n output_distances = True\n\n print(\"Using:\")\n print(\"\\tgct_name =\", gct_name)\n print(\"\\tcol_distance_metric =\", col_distance_metric)\n print(\"\\toutput_distances =\", output_distances)\n print(\"\\trow_distance_metric =\", row_distance_metric)\n print(\"\\tclustering_method =\", clustering_method, \"(default: Pairwise average-linkage)\")\n print(\"\\toutput_base_name =\", output_base_name, \"(default: HC_out)\")\n print(\"\\trow_normalization =\", row_normalization, \"(default: False)\")\n print(\"\\tcol_normalization =\", col_normalization, \"(default: False)\")\n print(\"\\trow_centering =\", row_centering, \"(default: None)\")\n print(\"\\tcol_centering =\", col_centering, \"(default: None)\")\n elif arg_n == 6:\n gct_name = args[1]\n col_distance_metric = args[2]\n output_distances = args[3]\n row_distance_metric = args[4]\n clustering_method = args[5]\n\n col_distance_metric = input_col_distance_dict[col_distance_metric]\n row_distance_metric = input_row_distance_dict[row_distance_metric]\n clustering_method = input_clustering_method[clustering_method]\n if clustering_method not in linkage_dic.keys():\n exit(\"Clustering method chosen not supported. 
This should not have happened.\")\n\n if (linkage_dic[clustering_method] == 'ward') and (col_distance_metric != 'average'):\n exit(\"When choosing 'Pairwise ward-linkage' the distance metric *must* be 'average' \")\n\n output_base_name = 'HC_out'\n row_normalization = False\n col_normalization = False\n row_centering = None\n col_centering = None\n if (output_distances == 'False') or (output_distances == 'F') \\\n or (output_distances == 'false') or (output_distances == 'f'):\n output_distances = False\n else:\n output_distances = True\n\n print(\"Using:\")\n print(\"\\tgct_name =\", gct_name)\n print(\"\\tcol_distance_metric =\", col_distance_metric)\n print(\"\\toutput_distances =\", output_distances)\n print(\"\\trow_distance_metric =\", row_distance_metric)\n print(\"\\tclustering_method =\", clustering_method)\n print(\"\\toutput_base_name =\", output_base_name, \"(default: HC_out)\")\n print(\"\\trow_normalization =\", row_normalization, \"(default: False)\")\n print(\"\\tcol_normalization =\", col_normalization, \"(default: False)\")\n print(\"\\trow_centering =\", row_centering, \"(default: None)\")\n print(\"\\tcol_centering =\", col_centering, \"(default: None)\")\n elif arg_n == 7:\n gct_name = args[1]\n col_distance_metric = args[2]\n output_distances = args[3]\n row_distance_metric = args[4]\n clustering_method = args[5]\n output_base_name = args[6]\n row_normalization = False\n col_normalization = False\n row_centering = None\n col_centering = None\n\n col_distance_metric = input_col_distance_dict[col_distance_metric]\n row_distance_metric = input_row_distance_dict[row_distance_metric]\n clustering_method = input_clustering_method[clustering_method]\n if (output_distances == 'False') or (output_distances == 'F') \\\n or (output_distances == 'false') or (output_distances == 'f'):\n output_distances = False\n else:\n output_distances = True\n\n print(\"Using:\")\n print(\"\\tgct_name =\", gct_name)\n print(\"\\tcol_distance_metric =\", col_distance_metric)\n print(\"\\toutput_distances =\", output_distances)\n print(\"\\trow_distance_metric =\", row_distance_metric)\n print(\"\\tclustering_method =\", clustering_method)\n print(\"\\toutput_base_name =\", output_base_name)\n print(\"\\trow_normalization =\", row_normalization, \"(default: False)\")\n print(\"\\tcol_normalization =\", col_normalization, \"(default: False)\")\n print(\"\\trow_centering =\", row_centering, \"(default: None)\")\n print(\"\\tcol_centering =\", col_centering, \"(default: None)\")\n elif arg_n == 8:\n gct_name = args[1]\n col_distance_metric = args[2]\n output_distances = args[3]\n row_distance_metric = args[4]\n clustering_method = args[5]\n output_base_name = args[6]\n row_normalization = args[7]\n col_normalization = False\n row_centering = None\n col_centering = None\n\n col_distance_metric = input_col_distance_dict[col_distance_metric]\n row_distance_metric = input_row_distance_dict[row_distance_metric]\n clustering_method = input_clustering_method[clustering_method]\n if (output_distances == 'False') or (output_distances == 'F') \\\n or (output_distances == 'false') or (output_distances == 'f'):\n output_distances = False\n else:\n output_distances = True\n\n row_normalization = input_row_normalize[row_normalization]\n # if (row_normalization == 'False') or (row_normalization == 'F') \\\n # or (row_normalization == 'false') or (row_normalization == 'f'):\n # row_normalization = False\n # else:\n # row_normalization = True\n\n print(\"Using:\")\n print(\"\\tgct_name =\", gct_name)\n 
print(\"\\tcol_distance_metric =\", col_distance_metric)\n print(\"\\toutput_distances =\", output_distances)\n print(\"\\trow_distance_metric =\", row_distance_metric)\n print(\"\\tclustering_method =\", clustering_method)\n print(\"\\toutput_base_name =\", output_base_name)\n print(\"\\trow_normalization =\", row_normalization)\n print(\"\\tcol_normalization =\", col_normalization, \"(default: False)\")\n print(\"\\trow_centering =\", row_centering, \"(default: None)\")\n print(\"\\tcol_centering =\", col_centering, \"(default: None)\")\n elif arg_n == 9:\n gct_name = args[1]\n col_distance_metric = args[2]\n output_distances = args[3]\n row_distance_metric = args[4]\n clustering_method = args[5]\n output_base_name = args[6]\n row_normalization = args[7]\n col_normalization = args[8]\n row_centering = None\n col_centering = None\n\n col_distance_metric = input_col_distance_dict[col_distance_metric]\n row_distance_metric = input_row_distance_dict[row_distance_metric]\n clustering_method = input_clustering_method[clustering_method]\n if (output_distances == 'False') or (output_distances == 'F') \\\n or (output_distances == 'false') or (output_distances == 'f'):\n output_distances = False\n else:\n output_distances = True\n\n # Row normalization\n row_normalization = input_row_normalize[row_normalization]\n # if (row_normalization == 'False') or (row_normalization == 'F') \\\n # or (row_normalization == 'false') or (row_normalization == 'f'):\n # row_normalization = False\n # else:\n # row_normalization = True\n\n # Column normalization\n col_normalization = input_col_normalize[col_normalization]\n # if (col_normalization == 'False') or (col_normalization == 'F') \\\n # or (col_normalization == 'false') or (col_normalization == 'f'):\n # col_normalization = False\n # else:\n # col_normalization = True\n\n print(\"Using:\")\n print(\"\\tgct_name =\", gct_name)\n print(\"\\tcol_distance_metric =\", col_distance_metric)\n print(\"\\toutput_distances =\", output_distances)\n print(\"\\trow_distance_metric =\", row_distance_metric)\n print(\"\\tclustering_method =\", clustering_method)\n print(\"\\toutput_base_name =\", output_base_name)\n print(\"\\trow_normalization =\", row_normalization)\n print(\"\\tcol_normalization =\", col_normalization)\n print(\"\\trow_centering =\", row_centering, \"(default: None)\")\n print(\"\\tcol_centering =\", col_centering, \"(default: None)\")\n elif arg_n == 10:\n gct_name = args[1]\n col_distance_metric = args[2]\n output_distances = args[3]\n row_distance_metric = args[4]\n clustering_method = args[5]\n output_base_name = args[6]\n row_normalization = args[7]\n col_normalization = args[8]\n row_centering = args[9]\n col_centering = None\n\n col_distance_metric = input_col_distance_dict[col_distance_metric]\n row_distance_metric = input_row_distance_dict[row_distance_metric]\n clustering_method = input_clustering_method[clustering_method]\n\n if (output_distances == 'False') or (output_distances == 'F') \\\n or (output_distances == 'false') or (output_distances == 'f'):\n output_distances = False\n else:\n output_distances = True\n\n # Row normalization\n row_normalization = input_row_normalize[row_normalization]\n # if (row_normalization == 'False') or (row_normalization == 'F') \\\n # or (row_normalization == 'false') or (row_normalization == 'f'):\n # row_normalization = False\n # else:\n # row_normalization = True\n\n # Column normalization\n col_normalization = input_col_normalize[col_normalization]\n # if (col_normalization == 'False') or 
(col_normalization == 'F') \\\n # or (col_normalization == 'false') or (col_normalization == 'f'):\n # col_normalization = False\n # else:\n # col_normalization = True\n\n # row_centering\n row_centering = input_row_centering[row_centering]\n if (row_centering == 'None') or (row_centering == 'N') \\\n or (row_centering == 'none') or (row_centering == 'n'):\n row_centering = None\n\n print(\"Using:\")\n print(\"\\tgct_name =\", gct_name)\n print(\"\\tcol_distance_metric =\", col_distance_metric)\n print(\"\\toutput_distances =\", output_distances)\n print(\"\\trow_distance_metric =\", row_distance_metric)\n print(\"\\tclustering_method =\", clustering_method)\n print(\"\\toutput_base_name =\", output_base_name)\n print(\"\\trow_normalization =\", row_normalization)\n print(\"\\tcol_normalization =\", col_normalization)\n print(\"\\trow_centering =\", row_centering)\n print(\"\\tcol_centering =\", col_centering, \"(default: None)\")\n elif arg_n == 11:\n gct_name = args[1]\n col_distance_metric = args[2]\n output_distances = args[3]\n row_distance_metric = args[4]\n clustering_method = args[5]\n output_base_name = args[6]\n row_normalization = args[7]\n col_normalization = args[8]\n row_centering = args[9]\n col_centering = args[10]\n\n col_distance_metric = input_col_distance_dict[col_distance_metric]\n row_distance_metric = input_row_distance_dict[row_distance_metric]\n clustering_method = input_clustering_method[clustering_method]\n\n if (output_distances == 'False') or (output_distances == 'F') \\\n or (output_distances == 'false') or (output_distances == 'f'):\n output_distances = False\n else:\n output_distances = True\n\n # Row normalization\n row_normalization = input_row_normalize[row_normalization]\n # if (row_normalization == 'False') or (row_normalization == 'F') \\\n # or (row_normalization == 'false') or (row_normalization == 'f'):\n # row_normalization = False\n # else:\n # row_normalization = True\n\n # Column normalization\n col_normalization = input_col_normalize[col_normalization]\n # if (col_normalization == 'False') or (col_normalization == 'F') \\\n # or (col_normalization == 'false') or (col_normalization == 'f'):\n # col_normalization = False\n # else:\n # col_normalization = True\n\n # row_centering\n row_centering = input_row_centering[row_centering]\n if (row_centering == 'None') or (row_centering == 'N') \\\n or (row_centering == 'none') or (row_centering == 'n'):\n row_centering = None\n\n # col_centering\n col_centering = input_col_centering[col_centering]\n if (col_centering == 'None') or (col_centering == 'N') \\\n or (col_centering == 'none') or (col_centering == 'n'):\n col_centering = None\n\n print(\"Using:\")\n print(\"\\tgct_name =\", gct_name)\n print(\"\\tcol_distance_metric =\", col_distance_metric)\n print(\"\\toutput_distances =\", output_distances)\n print(\"\\trow_distance_metric =\", row_distance_metric)\n print(\"\\tclustering_method =\", clustering_method)\n print(\"\\toutput_base_name =\", output_base_name)\n print(\"\\trow_normalization =\", row_normalization)\n print(\"\\tcol_normalization =\", col_normalization)\n print(\"\\trow_centering =\", row_centering)\n print(\"\\tcol_centering =\", col_centering)\n else:\n sys.exit(\"Too many inputs. 
This module needs only a GCT file to work, \"\n \"plus an optional input choosing between Pearson Correlation or Information Coefficient.\")\n\n print(args)\n return gct_name, col_distance_metric, output_distances, row_distance_metric, clustering_method, output_base_name, \\\n row_normalization, col_normalization, row_centering, col_centering\n\n\ndef plot_dendrogram(model, data, tree, axis, dist=mydist, clustering_method='average',\n title='no_title.png', color_threshold=None, orientation='top', **kwargs):\n # plt.clf()\n\n # modified from https://github.com/scikit-learn/scikit-learn/pull/3464/files\n # Children of hierarchical clustering\n children = model.children_\n # Distances between each pair of children\n # TODO: Fix this mydist\n # distance = dendodist(children, euclidian_similarity)\n # distance = dendodist(children, dist)\n\n og_distances = better_dendodist(children, dist, tree, data, axis=axis, clustering_method=clustering_method)\n # print(og_distances)\n # og_distances = [abs(temp) for temp in og_distances]\n\n # Turn similarity into non-negative value Scipy's dendrogram needs this\n if dist in [custom_euclidean_sim, absolute_uncentered_pearson_corr, absolute_pearson_corr]:\n # These similarities are already nonnegative [0,inf) or [0,1]\n # og_distances = og_distances\n pass\n else: # all the correlation similarities [-1,-1]\n og_distances = [temp + 1 for temp in og_distances]\n\n # Now that all similarities are nonnegative, we turn them into a distance for plotting purposes\n og_distances = [1 / temp for temp in og_distances]\n\n # print(og_distances)\n distance = np.cumsum(og_distances)\n # distance = og_distances\n # distance = better_dendodist(children, dist, tree, data, axis=axis)\n\n # norm_distances = []\n # for value in distance:\n # norm_distances.append(1/value)\n # norm_distances = distance\n\n list_of_children = list(get_children(tree, leaves_are_self_children=False).values())\n no_of_observations = [len(i) for i in list_of_children if i]\n no_of_observations.append(len(no_of_observations) + 1)\n # print(len(no_of_observations))\n\n # print(children)\n\n # print(list(tree.values()))\n\n # print(norm_distances)\n\n # print(distance)\n if all(value == 0 for value in distance):\n # If all distances are zero, then use uniform distance\n distance = np.arange(len(distance))\n\n # print(distance)\n # print(np.cumsum(distance))\n\n # The number of observations contained in each cluster level\n # no_of_observations = np.arange(2, children.shape[0]+2)\n # print(no_of_observations)\n\n\n # Create linkage matrix and then plot the dendrogram\n # linkage_matrix = np.column_stack([children, distance, no_of_observations]).astype(float)\n # linkage_matrix = np.column_stack([children, np.cumsum(distance), no_of_observations]).astype(float)\n linkage_matrix = np.column_stack([children, distance, no_of_observations]).astype(float)\n # linkage_matrix = np.column_stack([children, norm_distances, no_of_observations]).astype(float)\n # print(linkage_matrix)\n # Plot the corresponding dendrogram\n\n # print(scipy.cluster.hierarchy.cut_tree(linkage_matrix, n_clusters=5))\n # print(color_threshold)\n\n # find what the height at which to cut the dendrogram\n if color_threshold is not None:\n if color_threshold == 1:\n color_threshold = 2\n if color_threshold > (len(linkage_matrix) + 1):\n color_threshold = (len(linkage_matrix) + 1)\n # print('Finding the right cut')\n color_threshold = linkage_matrix[-(color_threshold - 1)][2] - np.finfo(float).eps\n # color_threshold = 
linkage_matrix[-(color_threshold - 1)][2] + 10*np.finfo(float).eps # Adding more wiggle room\n # print(color_threshold)\n\n R = dendrogram(linkage_matrix, color_threshold=color_threshold, orientation=orientation, **kwargs)\n # R = dendrogram(linkage_matrix, **kwargs)\n # [label.set_rotation(90) for label in plt.gca().get_xticklabels()]\n order_of_columns = R['ivl']\n # # print(order_of_columns)\n # plt.gca().get_yaxis().set_visible(False)\n # plt.savefig(title, dpi=300)\n # plt.show()\n\n # n = len(linkage_matrix) + 1\n # cache = dict()\n # for k in range(len(linkage_matrix)):\n # c1, c2 = int(linkage_matrix[k][0]), int(linkage_matrix[k][1])\n # c1 = [c1] if c1 < n else cache.pop(c1)\n # c2 = [c2] if c2 < n else cache.pop(c2)\n # cache[n + k] = c1 + c2\n # order_of_columns = cache[2 * len(linkage_matrix)]\n\n # print(order_of_columns)\n # print(linkage_matrix)\n # print(\"---\")\n # print(no_of_observations)\n # print(\"---\")\n # print(list_of_children)\n # print(\"---\")\n #\n # print(len(order_of_columns))\n # print(color_threshold)\n # clusters2idxs, idxs2clusters = get_cluster_classes(R)\n #\n # print(clusters2idxs)\n # print(idxs2clusters)\n # print(\"---\")\n # print(get_children(tree, leaves_are_self_children=False))\n # print(\"---\")\n # print(get_children(tree, leaves_are_self_children=False, only_leaves_are_children=False))\n\n\n return order_of_columns, linkage_matrix\n\n\ndef get_clusters(tree):\n return\n\n\n\ndef get_cluster_classes(den, label='ivl'):\n # from http://www.nxn.se/valent/extract-cluster-elements-by-color-in-python\n clusters2idxs = defaultdict(list)\n idxs2clusters = {}\n # for c, pi in zip(den['color_list'], den['icoord']):\n # for leg in pi[1:3]:\n # i = (leg - 5.0) / 10.0\n # if abs(i - int(i)) < 1e-5:\n # clusters2idxs[c].append(int(i))\n # idxs2clusters[int(i)] = c\n # # print(c, i)\n\n # cluster_classes = Clusters()\n # for c, l in cluster_idxs.items():\n # i_l = [den[label][i] for i in l]\n # cluster_classes[c] = i_l\n\n # Trying something new:\n print(den.keys())\n print(len(den['icoord']))\n print(len(den['dcoord']))\n print(len(den['ivl']))\n print(len(den['leaves']))\n print(den['leaves'])\n print(len(den['color_list']))\n print(den['color_list'])\n\n return clusters2idxs, idxs2clusters\n\n\ndef order_leaves(model, data, tree, labels, axis=0, dist=mydist, reverse=False):\n # Adapted from here: https://stackoverflow.com/questions/12572436/calculate-ordering-of-dendrogram-leaves\n\n children = model.children_\n # distance = better_dendodist(children, dist, tree, data, axis=axis)\n # if all(value == 0 for value in distance):\n # distance = np.arange(len(distance))\n\n # list_of_children = list(get_children(tree, leaves_are_self_children=False).values())\n # no_of_observations = [len(i) for i in list_of_children if i]\n # no_of_observations.append(len(no_of_observations)+1)\n\n # Create linkage matrix and then plot the dendrogram\n # linkage_matrix = np.column_stack([children, distance, no_of_observations]).astype(float)\n pseudo_linkage_matrix = np.column_stack([children]).astype(float)\n\n n = len(pseudo_linkage_matrix) + 1\n\n # This orders leaves by number of clusters\n cache = dict()\n for k in range(len(pseudo_linkage_matrix)):\n c1, c2 = int(pseudo_linkage_matrix[k][0]), int(pseudo_linkage_matrix[k][1])\n c1 = [c1] if c1 < n else cache.pop(c1)\n c2 = [c2] if c2 < n else cache.pop(c2)\n cache[n + k] = c1 + c2\n numeric_order_of_leaves = cache[2 * len(pseudo_linkage_matrix)]\n\n if reverse:\n numeric_order_of_leaves = 
list(reversed(numeric_order_of_leaves))\n\n return [labels[i] for i in numeric_order_of_leaves]\n\n\ndef two_plot_two_dendrogram(model, dist=mydist, **kwargs):\n # modified from https://github.com/scikit-learn/scikit-learn/pull/3464/files\n # Children of hierarchical clustering\n children = model.children_\n # Distances between each pair of children\n distance = dendodist(children, dist)\n if all(value == 0 for value in distance):\n # If all distances are zero, then use uniform distance\n distance = np.arange(len(distance))\n\n # The number of observations contained in each cluster level\n no_of_observations = np.arange(2, children.shape[0] + 2)\n # Create linkage matrix and then plot the dendrogram\n linkage_matrix = np.column_stack([children, distance, no_of_observations]).astype(float)\n # Plot the corresponding dendrogram\n R = dendrogram(linkage_matrix, color_threshold=0, orientation='left', **kwargs)\n # [label.set_rotation(90) for label in plt.gca().get_xticklabels()]\n order_of_rows = R['ivl']\n # print(order_of_columns)\n plt.gca().get_xaxis().set_visible(False)\n\n return list(reversed(order_of_rows))\n\n\ndef my_affinity_generic(M, metric):\n return np.array([np.array([metric(a, b) for a in M]) for b in M])\n\n\ndef my_affinity_i(M):\n return np.array([[information_coefficient_dist(a, b) for a in M] for b in M])\n\n\ndef my_affinity_ai(M):\n return np.array([[absolute_information_coefficient_dist(a, b) for a in M] for b in M])\n\n\ndef my_affinity_p(M):\n return np.array([[custom_pearson_dist(a, b) for a in M] for b in M])\n\n\ndef my_affinity_s(M):\n return np.array([[custom_spearman_dist(a, b) for a in M] for b in M])\n\n\ndef my_affinity_k(M):\n return np.array([[custom_kendall_tau_dist(a, b) for a in M] for b in M])\n\n\ndef my_affinity_ap(M):\n return np.array([[absolute_pearson_dist(a, b) for a in M] for b in M])\n\n\ndef my_affinity_u(M):\n return np.array([[uncentered_pearson_dist(a, b) for a in M] for b in M])\n\n\ndef my_affinity_au(M):\n return np.array([[absolute_uncentered_pearson_dist(a, b) for a in M] for b in M])\n\n\ndef my_affinity_l1(M):\n return np.array([[custom_manhattan_dist(a, b) for a in M] for b in M])\n\n\ndef my_affinity_l2(M):\n return np.array([[custom_euclidean_dist(a, b) for a in M] for b in M])\n\n\ndef my_affinity_m(M):\n return np.array([[custom_manhattan_dist(a, b) for a in M] for b in M])\n\n\ndef my_affinity_c(M):\n return np.array([[custom_cosine_dist(a, b) for a in M] for b in M])\n\n\ndef my_affinity_e(M):\n # global dist_matrix\n # dist_matrix = np.array([[mydist(a, b) for a in M]for b in M])\n # return dist_matrix\n return np.array([[custom_euclidean_dist(a, b) for a in M] for b in M])\n\n\ndef count_diff(x):\n count = 0\n compare = x[0]\n for i in x:\n if i != compare:\n count += 1\n return count\n\n\ndef count_mislabels(labels, true_labels):\n # 2017-08-17: I will make the assumption that clusters have only 2 values.\n # clusters = np.unique(true_labels)\n # mislabels = 0\n # for curr_clust in clusters:\n # print(\"for label\", curr_clust)\n # print(\"\\t\", labels[(true_labels == curr_clust)])\n # compare_to = mode(labels[(true_labels == curr_clust)])\n # print(\"\\tcompare to:\", compare_to, \"mislables: \", np.count_nonzero(labels[(true_labels == curr_clust)] != compare_to))\n # mislabels += np.count_nonzero(labels[(true_labels == curr_clust)] != compare_to)\n\n set_a = labels[true_labels == 0]\n set_b = labels[true_labels == 1]\n\n if len(set_a) <= len(set_b):\n shorter = set_a\n longer = set_b\n else:\n shorter = set_b\n longer = 
set_a\n\n long_mode = mode(longer) # this is what the label of the longer cluster should be.\n short_mode = 1 if long_mode == 0 else 0 # Choose the other value for the label of the shorter cluster\n\n # start with the longer vector:\n # print(\"The long set is\", longer, \"it has\", np.count_nonzero(longer != long_mode), 'mislabels.')\n # print(\"The short set is\", shorter, \"it has\", np.count_nonzero(shorter != short_mode), 'mislabels.')\n\n # np.count_nonzero(longer != long_mode) + np.count_nonzero(shorter != short_mode)\n\n return np.count_nonzero(longer != long_mode) + np.count_nonzero(shorter != short_mode)\n\n\ndef plot_heatmap(df, col_order, row_order, top=5, title_text='differentially expressed genes per phenotype'):\n if len(col_order) != len(list(df)):\n exit(\"Number of columns in the dataframe does not match the columns provided for ordering.\")\n if len(row_order) != len(df):\n exit(\"Number of rows in the dataframe does not match the rows provided for ordering.\")\n # print(list(df), col_order)\n df = df[col_order]\n df = df.reindex(row_order)\n\n plt.clf()\n sns.heatmap(df.iloc[np.r_[0:top, -top:0], :], cmap='viridis')\n plt.yticks(rotation=0)\n plt.xticks(rotation=90)\n plt.title('Top {} {}'.format(top, title_text))\n plt.ylabel('Genes')\n plt.xlabel('Sample')\n plt.savefig('heatmap.png', dpi=300, bbox_inches=\"tight\")\n\n\ndef parse_data(gct_name, row_normalization=False, col_normalization=False, row_centering=None, col_centering=None):\n # if validators.url(gct_name):\n # urlfile, __ = urllib.request.urlretrieve(gct_name)\n # else:\n # urlfile = gct_name\n # f = open(urlfile)\n # f.readline()\n # size = f.readline().strip('\\n').split('\\t')\n\n try:\n data_df = pd.read_csv(gct_name, sep='\\t', skiprows=2)\n except ValueError:\n data_df = gct_name\n # print(size)\n # print(list(data_df))\n # exit(data_df.shape)\n\n if data_df.index.name == 'Name':\n data_df['Name'] = data_df.index\n else:\n if 'Name' not in list(data_df):\n data_df['Name'] = data_df.iloc[:, 0]\n data_df.drop(data_df.columns[0], axis=1, inplace=True)\n\n if 'Description' not in list(data_df):\n data_df['Description'] = data_df['Name']\n\n data_df.set_index(data_df['Name'], inplace=True)\n og_full_gct = data_df.copy()\n og_full_gct.drop(['Name'], axis=1, inplace=True)\n data_df.drop(['Name', 'Description'], axis=1, inplace=True)\n plot_labels = list(og_full_gct.drop(['Description'], axis=1, inplace=False))\n data = data_df.values\n row_labels = data_df.index.values\n\n og_data = data.copy()\n\n # if row_centering is not None:\n # if row_centering == 'Mean':\n # row_means = np.mean(data, axis=1)\n # row_means_col_vec = row_means.reshape((data.shape[0], 1))\n # data = data - row_means_col_vec\n # if row_centering == 'Median':\n # row_medians = np.median(data, axis=1)\n # row_medians_col_vec = row_medians.reshape((data.shape[0], 1))\n # data = data - row_medians_col_vec\n #\n # if row_normalization:\n # row_norm = np.sum(data * data, axis=1)\n # row_norm_col_vec = row_norm.reshape((data.shape[0], 1))\n # data = data / np.sqrt(row_norm_col_vec)\n #\n # if col_centering is not None:\n # if col_centering == 'Mean':\n # col_means = np.mean(data, axis=0)\n # data = data - col_means\n # if col_centering == 'Median':\n # col_medians = np.median(data, axis=0)\n # data = data - col_medians\n #\n # if col_normalization:\n # col_norm = np.sum(data*data, axis=0)\n # data = data/np.sqrt(col_norm)\n\n data = normalize_dataframe(data_df, log_normalize=None,\n row_centering=row_centering, 
row_normalization=row_normalization,\n col_centering=col_centering, col_normalization=col_normalization).values\n\n # print(data_df)\n # print(data)\n new_data_df = pd.DataFrame(data=data, index=data_df.index, columns=list(data_df))\n # print(new_data_df)\n # print(og_full_gct)\n new_full_gct = new_data_df.copy()\n new_full_gct.insert(0, column='Description', value=og_full_gct['Description'])\n # print(new_full_gct)\n # exit()\n\n return og_data, data_df, data, new_data_df, plot_labels, row_labels, og_full_gct, new_full_gct\n\n\nstr2func = {\n 'custom_euclidean': my_affinity_e,\n 'uncentered_pearson': my_affinity_u,\n 'absolute_uncentered_pearson': my_affinity_au,\n 'information_coefficient': my_affinity_i,\n 'pearson': my_affinity_p,\n 'spearman': my_affinity_s,\n 'kendall': my_affinity_k,\n 'absolute_pearson': my_affinity_ap,\n 'l1': 'l1',\n 'l2': 'l2',\n 'manhattan': 'manhattan',\n 'cosine': 'cosine',\n 'euclidean': 'euclidean',\n}\n\nstr2affinity_func = {\n 'custom_euclidean': my_affinity_e,\n 'uncentered_pearson': my_affinity_u,\n 'absolute_uncentered_pearson': my_affinity_au,\n 'information_coefficient': my_affinity_i,\n 'pearson': my_affinity_p,\n 'spearman': my_affinity_s,\n 'kendall': my_affinity_k,\n 'absolute_pearson': my_affinity_ap,\n 'l1': my_affinity_l1,\n 'l2': my_affinity_l2,\n 'manhattan': my_affinity_m,\n 'cosine': my_affinity_c,\n 'euclidean': my_affinity_e,\n}\n\nstr2dist = {\n 'custom_euclidean': custom_euclidean_dist,\n 'uncentered_pearson': uncentered_pearson_dist,\n 'absolute_uncentered_pearson': absolute_uncentered_pearson_dist,\n 'information_coefficient': information_coefficient_dist,\n 'pearson': custom_pearson_dist,\n 'spearman': custom_spearman_dist,\n 'kendall': custom_kendall_tau_dist,\n 'absolute_pearson': absolute_pearson_dist,\n 'l1': custom_manhattan_dist,\n 'l2': custom_euclidean_dist,\n 'manhattan': custom_manhattan_dist,\n 'cosine': custom_cosine_dist,\n 'euclidean': custom_euclidean_dist,\n}\n\nstr2similarity = {\n 'custom_euclidean': custom_euclidean_sim,\n 'uncentered_pearson': uncentered_pearson_corr,\n 'absolute_uncentered_pearson': absolute_uncentered_pearson_corr,\n 'information_coefficient': information_coefficient,\n 'pearson': custom_pearson_corr,\n 'spearman': custom_spearman_corr,\n 'kendall': custom_kendall_tau_corr,\n 'absolute_pearson': absolute_pearson_corr,\n 'l1': custom_manhattan_sim,\n 'l2': custom_euclidean_sim,\n 'manhattan': custom_manhattan_sim,\n 'cosine': custom_cosine_sim,\n # 'euclidean': pairwise.paired_euclidean_distances,\n 'euclidean': custom_euclidean_sim,\n # 'euclidean': custom_euclidean_dist,\n}\n\nlinkage_dic = {\n 'Pairwise average-linkage': 'average',\n 'Pairwise complete-linkage': 'complete',\n 'Pairwise ward-linkage': 'ward',\n 'average': 'average',\n 'complete': 'complete',\n 'ward': 'ward',\n}\n\n\ndef make_tree(model, data=None):\n \"\"\"\n Modified from:\n https://stackoverflow.com/questions/27386641/how-to-traverse-a-tree-from-sklearn-agglomerativeclustering\n import numpy as np\n from sklearn.cluster import AgglomerativeClustering\n import itertools\n\n X = np.concatenate([np.random.randn(3, 10), np.random.randn(2, 10) + 100])\n model = AgglomerativeClustering(linkage=\"average\", affinity=\"cosine\")\n model.fit(X)\n\n ii = itertools.count(X.shape[0])\n [{'node_id': next(ii), 'left': x[0], 'right':x[1]} for x in model.children_]\n\n ---\n\n You can also do dict(enumerate(model.children_, model.n_leaves_))\n which will give you a dictionary where each key is the ID of a node\n and the value is 
the pair of IDs of its children. – user76284\n\n :param model:\n :return: a dictionary where the each key is the ID of a node and the value is the pair of IDs of its children.\n \"\"\"\n # ii = itertools.count(data.shape[0]) # Setting the counter at the number of leaves.\n # tree = [{'node_id': next(ii), 'left': x[0], 'right':x[1]} for x in model.children_]\n # print(tree)\n # return tree\n\n return dict(enumerate(model.children_, model.n_leaves_))\n # return dict(enumerate(model.children_, 1))\n\n\ndef make_cdt(data, order_of_columns, order_of_rows, name='test.cdt', atr_companion=True, gtr_companion=False):\n # TODO: if order_of_columns == None, then do arange(len(list(data)))\n # TODO: if order_of_rows == None, then do arange(len(list(data)))\n # exit(data.to_csv())\n data.index.name = \"ID\"\n data.rename(columns={'Description': 'Name'}, inplace=True)\n\n temp = np.ones(len(data))\n data.insert(loc=1, column='GWEIGHT', value=temp) # adding an extra column\n\n # These three lines add a row\n data.loc['EWEIGHT'] = list(np.ones(len(list(data))))\n newIndex = ['EWEIGHT'] + [ind for ind in data.index if ind != 'EWEIGHT']\n data = data.reindex(index=newIndex)\n\n if atr_companion:\n new_AID = ['', '']\n for element in range(len(order_of_columns)):\n temp = 'ARRY' + str(element) + 'X'\n new_AID.append(temp)\n\n data.loc['AID'] = new_AID\n newIndex = ['AID'] + [ind for ind in data.index if ind != 'AID']\n data = data.reindex(index=newIndex)\n data = data[['Name', 'GWEIGHT'] + order_of_columns]\n if gtr_companion:\n new_GID = ['']\n if atr_companion:\n new_GID = ['AID', 'EWEIGHT'] # This is to make sure we fit the CDT format\n # for element in np.sort(np.unique(GID)):\n # if 'NODE' in element:\n # # print(element, 'GTR delete')\n # pass\n # else:\n # new_GID.append(element)\n for element in range(len(order_of_rows)):\n temp = 'GENE' + str(element) + 'X'\n new_GID.append(temp)\n\n data.insert(loc=0, column='GID', value=new_GID) # adding an extra column\n data.insert(loc=0, column=data.index.name, value=data.index) # Making the index a column\n\n # reorder to match dendogram\n temp = ['AID', 'EWEIGHT'] + order_of_rows\n # data = data.loc[temp]\n # print(data['GID'])\n data = data.reindex(temp)\n # print(data['GID'])\n\n # print(list(data.index))\n # print(data['GID'])\n # print(data['Name'])\n\n # Making the 'GID' the index -- for printing purposes\n data.index = data['GID']\n data.index.name = 'GID'\n data.drop(['GID'], axis=1, inplace=True)\n # print(list(data.index))\n\n # The first three lines need to be written separately due to a quirk in the CDT file format:\n\n # print(data.to_csv(sep='\\t', index=True, header=True))\n f = open(name, 'w')\n f.write(data.to_csv(sep='\\t', index=True, header=True))\n # f.write(data.to_csv(sep='\\t', index=True, header=True))\n f.close()\n # pd.options.display.float_format = '{:3.3f}'.format\n data = data.round(2)\n # print(data.to_csv())\n # exit()\n # exit(data.to_csv(sep=' ', index=True, header=True, float_format='2',))\n return\n\n\ndef make_atr(col_tree_dic, data, dist, clustering_method='average', file_name='test.atr'):\n max_val = len(col_tree_dic)\n # AID = []\n\n # compute distances\n distance_dic = {}\n for node, children in col_tree_dic.items():\n val = centroid_distances(children[0], children[1], tree=col_tree_dic, data=data, axis=1,\n distance=dist, clustering_method=clustering_method)\n # print(dist, children, val)\n # print(\"Value is\", val)\n distance_dic[node] = val\n\n # if dist == custom_euclidean_sim:\n # print(\"Euclidean distance is 
especial, normalizing using this scheme:\")\n # low_norm = min(distance_dic.values())\n # high_norm = max(distance_dic.values())\n # for key in distance_dic.keys():\n # # distance -= norm\n # # distance_dic[key] = distance_dic[key]/high_norm\n # # distance_dic[key] = (distance_dic[key]-low_norm)/high_norm\n # # distance_dic[key] = distance_dic[key]/high_norm\n # # distance_dic[key] = ((1/distance_dic[key])-high_norm)/low_norm\n # print(distance_dic[key])\n\n f = open(file_name, 'w')\n for node, children in col_tree_dic.items():\n elements = [translate_tree(node, max_val, 'atr'), translate_tree(children[0], max_val, 'atr'),\n translate_tree(children[1], max_val, 'atr'),\n \"{num:.{width}f}\".format(num=distance_dic[node], width=SIGNIFICANT_DIGITS)]\n # print('\\t', '\\t'.join(elements))\n # AID.append(translate_tree(children[0], max_val, 'atr'))\n # AID.append(translate_tree(children[1], max_val, 'atr'))\n f.write('\\t'.join(elements) + '\\n')\n # print('\\t'.join(elements) + '\\n')\n f.close()\n\n return\n\n\ndef make_gtr(row_tree_dic, data, dist, clustering_method='average', file_name='test.gtr'):\n max_val = len(row_tree_dic)\n # GID = []\n\n # compute distances\n distance_dic = {}\n for node, children in row_tree_dic.items():\n val = centroid_distances(children[0], children[1], tree=row_tree_dic, data=data, axis=0,\n distance=dist, clustering_method=clustering_method)\n distance_dic[node] = val\n\n f = open(file_name, 'w')\n for node, children in row_tree_dic.items():\n elements = [translate_tree(node, max_val, 'gtr'), translate_tree(children[0], max_val, 'gtr'),\n translate_tree(children[1], max_val, 'gtr'),\n \"{num:.{width}f}\".format(num=distance_dic[node], width=SIGNIFICANT_DIGITS)]\n # GID.append(translate_tree(children[0], max_val, 'gtr'))\n # GID.append(translate_tree(children[1], max_val, 'gtr'))\n f.write('\\t'.join(elements) + '\\n')\n # val -= 1\n f.close()\n\n return\n\n\ndef translate_tree(what, length, g_or_a):\n if 'a' in g_or_a:\n if what <= length:\n translation = 'ARRY' + str(what) + 'X'\n else:\n translation = 'NODE' + str(what - length) + 'X'\n elif 'g' in g_or_a:\n if what <= length:\n translation = 'GENE' + str(what) + 'X'\n else:\n translation = 'NODE' + str(what - length) + 'X'\n else:\n translation = []\n print('This function does not support g_or_a=', g_or_a)\n return translation\n\n\n# def get_children_recursively(k, model, node_dict, leaf_count, n_samples, data, verbose=False, left=None, right=None):\n# # print(k)\n# i, j = model.children_[k]\n#\n# if k in node_dict:\n# return node_dict[k]['children']\n#\n# if i < leaf_count:\n# # print(\"i if\")\n# left = [i]\n# else:\n# # print(\"i else\")\n# # read the AgglomerativeClustering doc. 
to see why I select i-n_samples\n# left, node_dict = get_children_recursively(i - n_samples, model, node_dict,\n# leaf_count, n_samples, data, verbose, left, right)\n#\n# if j < leaf_count:\n# # print(\"j if\")\n# right = [j]\n# else:\n# # print(\"j else\")\n# right, node_dict = get_children_recursively(j - n_samples, model, node_dict,\n# leaf_count, n_samples, data, verbose, left, right)\n#\n# if verbose:\n# print(k, i, j, left, right)\n# temp = map(lambda ii: data[ii], left)\n# left_pos = np.mean(list(temp), axis=0)\n# temp = map(lambda ii: data[ii], right)\n# right_pos = np.mean(list(temp), axis=0)\n#\n# # this assumes that agg_cluster used euclidean distances\n# dist = metrics.pairwise_distances([left_pos, right_pos], metric='euclidean')[0, 1]\n#\n# all_children = [x for y in [left, right] for x in y]\n# pos = np.mean(list(map(lambda ii: data[ii], all_children)), axis=0)\n#\n# # store the results to speed up any additional or recursive evaluations\n# node_dict[k] = {'top_child': [i, j], 'children': all_children, 'pos': pos, 'dist': dist,\n# 'node_i': k + n_samples}\n# return all_children, node_dict\n\n# def recursive_atr\n\n\ndef get_children(tree, leaves_are_self_children=False):\n # this expands the tree by way of a recursive helper\n expanded_tree = {}\n for node in range(max(tree.keys())):\n if node <= len(tree):\n if leaves_are_self_children:\n expanded_tree[node] = [node]\n else:\n expanded_tree[node] = []\n\n else:\n # expanded_tree[node] = list_children_single_node(node, tree)\n expanded_tree[node] = list_children_single_node(node, tree, leaves_are_self_children)\n\n return expanded_tree\n\n\ndef list_children_single_node(node, tree, leaves_are_self_children=False, only_leaves_are_children=True):\n # children = []\n if node <= len(tree):\n if leaves_are_self_children:\n children = [node]\n else:\n children = []\n\n else:\n children = list(tree[node])\n\n # Check each child, and add their children to the list\n for child in children:\n if child <= len(tree):\n pass\n else:\n children += list_children_single_node(child, tree, only_leaves_are_children=True)\n if only_leaves_are_children:\n # print(sorted(np.unique(i for i in children if i <= len(tree))))\n # print()\n return [i for i in sorted(np.unique(children)) if i <= len(tree)]\n else:\n return sorted(np.unique(children))\n\n\ndef centroid_distances(node_a, node_b, tree, data, axis=0, distance=mydist, clustering_method='average'):\n if axis == 0:\n pass\n elif axis == 1:\n data = np.transpose(data)\n else:\n exit(\"Variable 'data' does not have that many axes (╯°□°)╯︵ ┻━┻\")\n\n children_of_a = list_children_single_node(node_a, tree=tree, leaves_are_self_children=True)\n children_of_b = list_children_single_node(node_b, tree=tree, leaves_are_self_children=True)\n\n # if distance == custom_euclidean_sim:\n # print(\"Euclidean distance is special, normalizing using this scheme:\")\n # distance = custom_euclidean_dist\n\n distances_list = []\n if clustering_method == 'average':\n for pair in itertools.product(data[children_of_a], data[children_of_b]):\n distances_list.append(distance(pair[0], pair[1]))\n return np.average(distances_list)\n elif clustering_method == 'complete':\n # note: 'distance' receives similarity functions here, so np.min picks\n # the least similar pair, which is what complete linkage requires\n for pair in itertools.product(data[children_of_a], data[children_of_b]):\n distances_list.append(distance(pair[0], pair[1]))\n return np.min(distances_list)\n else:\n exit(\"Only 'average' and 'complete' clustering methods are accepted at the moment (>_<)\")\n\n\ndef euclidian_similarity(x, y):\n dist = mydist(x, y)\n # return 1/(1+dist)\n return 1 / (np.exp(dist))\n\n\ndef 
better_dendodist(children, distance, tree, data, axis, clustering_method='average'):\n distances_list = []\n for pair in children:\n distances_list.append(centroid_distances(pair[0], pair[1], tree, data, axis, distance=distance,\n clustering_method=clustering_method))\n # print(distance, pair, distances_list[-1])\n return distances_list\n\n\ndef HierarchicalClustering(pwd: \"The current directory\",\n gct_name: \"Gene expression data filename (.gct file) or Pandas DataFrame \"\n \"where rows are genes and columns are samples\",\n col_distance_metric: \"The function to be used when comparing the distance/similarity of \"\n \"the columns in the gct_name dataset\",\n row_distance_metric: \"The function to be used when comparing the distance/similarity of \"\n \"the rows in the gct_name dataset\",\n clustering_method: \"Type of linkage to use\" = 'average',\n output_base_name: \"Base name for output file\" = 'HC_output',\n row_normalization: \"Whether to normalize each row (gene) in the data\" = False,\n col_normalization: \"Whether to normalize each column (sample) in the data\" = False,\n row_centering: \"How to center each row (gene) in the data\" = 'Mean',\n col_centering: \"How to center each column (sample) in the data\" = 'Mean',\n output_distances: \"Whether or not output the pair-wise distance matrix. \"\n \"If true, the distance between each column will be called, \"\n \"which can be very computationally intensive. \"\n \"If unsure, leave as False.\" = False,\n custom_plot: \"Plot the dendrograms by Genes, Samples, or Both\" = 'Both',\n clusters_to_highlight: \"How many clusters to highlight in the dendrogram\" = 2,\n show: \"Whether to show the plot at the end\" = False):\n \"\"\"\n This function performs hierarchical clustering to group samples (columns) with similar phenotypes\n and/or genes (rows) with similar expression profiles.\n :param pwd: The current directory\n :param gct_name: Gene expression data filename (.gct file) or Pandas DataFrame where rows are genes and\n columns are samples\n :param col_distance_metric: The function to be used when comparing the distance/similarity of\n the columns in the gct_name dataset\n :param row_distance_metric: The function to be used when comparing the distance/similarity of\n the rows in the gct_name dataset\n :param clustering_method: Type of linkage to use\n :param output_base_name: Base name for output file\n :param row_normalization: Whether to normalize each row (gene) in the data\n :param col_normalization: Whether to normalize each column (sample) in the data\n :param row_centering: How to center each row (gene) in the data\n :param col_centering: How to center each column (sample) in the data\n :param output_distances: Whether or not output the pair-wise distance matrix.\n If true, the distance between each column will be called,\n which can be very computationally intensive.\n If unsure, leave as False\n :param custom_plot: Plot the dendrograms by Genes, Samples, or Both\n :param clusters_to_highlight: How many clusters to highlight in the dendrogram\n :param show: Whether to show the plot at the end\n :return:\n \"\"\"\n\n # gct_name, col_distance_metric, output_distances, row_distance_metric, clustering_method, output_base_name, \\\n # row_normalization, col_normalization, row_centering, col_centering = parse_inputs(sys.argv)\n\n if col_distance_metric == \"No_column_clustering\":\n custom_plot = 'Genes'\n if row_distance_metric == \"No_row_clustering\":\n custom_plot = 'Samples'\n\n og_data, og_data_df, data, data_df, 
col_labels, row_labels, og_full_gct, new_full_gct = \\\n parse_data(gct_name, row_normalization, col_normalization, row_centering, col_centering)\n order_of_columns = list(data_df)\n order_of_rows = list(data_df.index)\n\n data_transpose = np.transpose(data)\n\n # print(data)\n # print(data_df)\n\n atr_companion = False\n col_model = None\n col_tree = None\n\n gtr_companion = False\n row_model = None\n row_tree = None\n\n AID = None\n GID = None\n\n if col_distance_metric != 'No_column_clustering':\n atr_companion = True\n col_model = AgglomerativeClustering(linkage=linkage_dic[clustering_method], n_clusters=clusters_to_highlight,\n affinity=str2func[col_distance_metric])\n\n col_model.fit(data_transpose)\n col_tree = make_tree(col_model)\n order_of_columns = order_leaves(col_model, tree=col_tree, data=data_transpose,\n dist=str2similarity[col_distance_metric], labels=col_labels, reverse=True)\n\n path_to_atr = output_base_name + '.atr'\n make_atr(col_tree, file_name=path_to_atr, data=data,\n dist=str2similarity[col_distance_metric], clustering_method=linkage_dic[clustering_method])\n\n if row_distance_metric != 'No_row_clustering':\n gtr_companion = True\n row_model = AgglomerativeClustering(linkage=linkage_dic[clustering_method], n_clusters=clusters_to_highlight,\n affinity=str2func[row_distance_metric])\n # y_col = row_model.fit_predict(np.transpose(data))\n # print(y_col)\n row_model.fit(data)\n row_tree = make_tree(row_model)\n order_of_rows = order_leaves(row_model, tree=row_tree, data=data,\n dist=str2similarity[row_distance_metric], labels=row_labels)\n path_to_gtr = output_base_name + '.gtr'\n make_gtr(row_tree, data=data, file_name=output_base_name + '.gtr', dist=str2similarity[row_distance_metric])\n\n if output_distances:\n # TODO: check which col or row was selected, or both\n row_distance_matrix = str2affinity_func[row_distance_metric](data)\n # col_distance_matrix = str2affinity_func[col_distance_metric](np.transpose(data))\n dist_file = open(output_base_name + '_pairwise_distances.csv', 'w')\n dist_file.write('labels,')\n dist_file.write(\",\".join(col_model.labels_.astype(str)) + \"\\n\")\n dist_file.write('samples,')\n dist_file.write(\",\".join(list(data_df)) + \"\\n\")\n i = 0\n for row in row_distance_matrix:\n dist_file.write('distances row=' + str(i) + \",\" + \",\".join(row.astype(str)) + \"\\n\")\n i += 1\n\n path_to_cdt = output_base_name + '.cdt'\n make_cdt(data=new_full_gct, name=path_to_cdt, atr_companion=atr_companion,\n gtr_companion=gtr_companion,\n order_of_columns=order_of_columns, order_of_rows=order_of_rows)\n\n if custom_plot == 'Samples':\n # Plotting the heatmap with dendrogram\n plt.clf()\n # fig = plt.figure(figsize=(16, 9), dpi=300)\n fig = plt.figure(figsize=(16, 9))\n gs = gridspec.GridSpec(2, 1, height_ratios=[1, 5])\n gs.update(wspace=0.0, hspace=0.0)\n ax0 = plt.subplot(gs[0]) # Doing dendrogram first\n ax0.axis('off')\n\n col_order, link = plot_dendrogram(col_model, data, col_tree, axis=1,\n dist=str2similarity[col_distance_metric],\n clustering_method=clustering_method,\n color_threshold=clusters_to_highlight,\n title='no_title.png', orientation='top')\n col_order = [int(i) for i in col_order]\n\n # print(col_order)\n named_col_order = [col_labels[i] for i in col_order]\n # print(named_col_order)\n # print(col_order)\n # print(col_model.labels_)\n\n ax1 = plt.subplot(gs[1])\n\n # Row-normalizing for display purposes only:\n data_df = data_df.subtract(data_df.min(axis=1), axis=0)\n data_df = data_df.div(data_df.max(axis=1), axis=0)\n\n 
sns.heatmap(data_df[named_col_order], ax=ax1, cbar=False, cmap='bwr')\n # ax1.xaxis.tick_top()\n [label.set_rotation(90) for label in ax1.get_xticklabels()]\n file_path_plot = output_base_name + '.pdf'\n plt.savefig(file_path_plot, bbox_inches='tight')\n\n print(\"----------------------------------------------------------------------\")\n print(\"The PDF of this heatmap can be downloaded here:\")\n display(HTML('<a href=\"' + file_path_plot + '\" target=\"_blank\">PDF of the heatmap</a>'))\n print(\"----------------------------------------------------------------------\")\n print(\"The CDF which is compatible with HierarchicalClusteringViewer is here:\")\n display(HTML('<a href=\"' + path_to_cdt + '\" target=\"_blank\">TXT containing the output data</a>'))\n print(\"----------------------------------------------------------------------\")\n print(\"The ATR which is compatible with HierarchicalClusteringViewer is here:\")\n display(HTML('<a href=\"' + path_to_atr + '\" target=\"_blank\">TXT containing the output data</a>'))\n print(\"----------------------------------------------------------------------\")\n\n if show:\n # plt.show()\n pass\n\n # col_order = [int(i) for i in col_order]\n # print(col_order)\n # named_col_order = [col_labels[i] for i in col_order]\n # print(named_col_order)\n # print(col_order)\n # print(idxs2clusters)\n cls_list = col_model.labels_\n # for i in range(len(col_order)):\n # cls_list.append(idxs2clusters[i])\n # print(cls_list)\n # order_by = [col_order.index(i) for i in range(len(col_order))]\n # list2intlist(cls_list, custom_order=order_by)\n # in_list = np.array(cls_list)\n # print(cls_list)\n # print(np.array(list2intlist(cls_list, custom_order=order_by)))\n\n list2cls(np.array(list2intlist(cls_list)), name_of_out=output_base_name+'.cls', sep=' ')\n\n\n if custom_plot == 'Genes':\n # Plotting the heatmap with dendrogram\n plt.clf()\n # fig = plt.figure(figsize=(16, 9), dpi=300)\n fig = plt.figure(figsize=(16, 9))\n gs = gridspec.GridSpec(1, 2, width_ratios=[5, 1])\n gs.update(wspace=0.0, hspace=0.0)\n ax0 = plt.subplot(gs[1]) # Doing dendrogram first\n ax0.axis('off')\n\n row_order, link = plot_dendrogram(row_model, data_transpose, row_tree, axis=1,\n dist=str2similarity[row_distance_metric],\n clustering_method=clustering_method,\n color_threshold=clusters_to_highlight,\n orientation='right', title='no_title.png')\n # row_order = [int(i) for i in row_order]\n\n # named_row_order = [row_labels[i] for i in row_order]\n\n ax1 = plt.subplot(gs[0])\n\n # Row-normalizing for display purposes only:\n data_df = data_df.subtract(data_df.min(axis=1), axis=0)\n data_df = data_df.div(data_df.max(axis=1), axis=0)\n\n sns.heatmap(data_df.iloc[row_order], ax=ax1, cbar=False, cmap='bwr')\n # ax1.xaxis.tick_top()\n [label.set_rotation(90) for label in ax1.get_xticklabels()]\n file_path_plot = output_base_name + '.pdf'\n plt.savefig(file_path_plot, bbox_inches='tight')\n\n print(\"----------------------------------------------------------------------\")\n print(\"The PDF of this heatmap can be downloaded here:\")\n display(HTML('<a href=\"' + file_path_plot + '\" target=\"_blank\">PDF of the heatmap</a>'))\n print(\"----------------------------------------------------------------------\")\n print(\"The CDF which is compatible with HierarchicalClusteringViewer is here:\")\n display(HTML('<a href=\"' + path_to_cdt + '\" target=\"_blank\">TXT containing the output data</a>'))\n print(\"----------------------------------------------------------------------\")\n print(\"The GTR 
which is compatible with HierarchicalClusteringViewer is here:\")\n display(HTML('<a href=\"' + path_to_gtr + '\" target=\"_blank\">TXT containing the output data</a>'))\n print(\"----------------------------------------------------------------------\")\n\n if show:\n plt.show()\n\n if custom_plot == 'Both':\n # Plotting the heatmap with dendrogram\n plt.clf()\n # fig = plt.figure(figsize=(16, 9), dpi=300)\n fig = plt.figure(figsize=(16, 9))\n gs = gridspec.GridSpec(2, 2, width_ratios=[5, 1], height_ratios=[1, 5])\n gs.update(wspace=0.0, hspace=0.0)\n\n # Doing TOP dendrogram first\n ax0 = plt.subplot(gs[0])\n ax0.axis('off')\n\n col_order, link = plot_dendrogram(col_model, data, col_tree, axis=1,\n dist=str2similarity[col_distance_metric],\n clustering_method=clustering_method,\n color_threshold=clusters_to_highlight,\n title='no_title.png', orientation='top')\n col_order = [int(i) for i in col_order]\n named_col_order = [col_labels[i] for i in col_order]\n\n # Doing RIGHT dendrogram\n ax3 = plt.subplot(gs[3])\n ax3.axis('off')\n\n row_order, link = plot_dendrogram(row_model, data_transpose, row_tree, axis=1,\n dist=str2similarity[row_distance_metric],\n clustering_method=clustering_method,\n color_threshold=clusters_to_highlight,\n orientation='right', title='no_title.png')\n\n # Plotting the heatmap now\n ax1 = plt.subplot(gs[2])\n\n # Row-normalizing for display purposes only:\n data_df = data_df.subtract(data_df.min(axis=1), axis=0)\n data_df = data_df.div(data_df.max(axis=1), axis=0)\n\n sns.heatmap(data_df[named_col_order].iloc[row_order], ax=ax1, cbar=False, cmap='bwr')\n # ax1.xaxis.tick_top()\n [label.set_rotation(90) for label in ax1.get_xticklabels()]\n file_path_plot = output_base_name + '.pdf'\n plt.savefig(file_path_plot, bbox_inches='tight')\n\n print(\"----------------------------------------------------------------------\")\n print(\"The PDF of this heatmap can be downloaded here:\")\n display(HTML('<a href=\"' + file_path_plot + '\" target=\"_blank\">PDF of the heatmap</a>'))\n print(\"----------------------------------------------------------------------\")\n print(\"The CDF which is compatible with HierarchicalClusteringViewer is here:\")\n display(HTML('<a href=\"' + path_to_cdt + '\" target=\"_blank\">TXT containing the output data</a>'))\n print(\"----------------------------------------------------------------------\")\n print(\"The GTR which is compatible with HierarchicalClusteringViewer is here:\")\n display(HTML('<a href=\"' + path_to_gtr + '\" target=\"_blank\">TXT containing the output data</a>'))\n print(\"----------------------------------------------------------------------\")\n\n if show:\n plt.show()\n\n\n return col_model, row_model\n\n\ndef hc_samples(\n input_gene_expression: \"gene expression data filename (.gct file) where rows are genes and columns are samples\",\n clustering_type: \"single or consensus -- Only single is suported at the moment\",\n distance_metric: \"the function to be used when comparing the distance/similarity of the columns in the \"\n \"input_gene_expression dataset\",\n file_basename: \"the name to use when naming output files\" = 'HC_out',\n clusters_to_highlight: \"how many clusters to highlight in the dendrogram\" = None):\n \"\"\"\n Perform hierarchical clustering to group samples with similar phenotypes.\n :param input_gene_expression: str; gene expression data filename (.gct file)\n where rows are genes and columns are samples\n :param clustering_type: str; single or consensus\n :param distance_metric: str; the function 
to be used when comparing the distance/similarity of the columns\n in the input_gene_expression dataset\n :param file_basename: str; the name to use when naming output files\n :param clusters_to_highlight: int; how many clusters to highlight in the dendrogram\n :return: object; Sklearn's AgglomerativeClustering fitted model\n \"\"\"\n\n print(\"Currenty clustering_type is being ignored, only 'single' is supported.\")\n pwd = '.'\n gct_name = input_gene_expression\n col_distance_metric = distance_metric\n output_distances = False\n row_distance_metric = 'No_row_clustering'\n clustering_method = 'average'\n output_base_name = file_basename\n row_normalization = False\n col_normalization = False\n row_centering = 'Mean'\n col_centering = 'Mean'\n custom_plot = 'Samples'\n show = True\n\n # print(\"This are the parameters to be used (for debugging purposes)\")\n # print(\"\"\"\n # pwd = '.'\n # gct_name = {gct_name}\n # col_distance_metric = {col_distance_metric}\n # output_distances = {output_distances}\n # row_distance_metric = {row_distance_metric}\n # clustering_method = {clustering_method}\n # output_base_name = {output_base_name}\n # row_normalization = {row_normalization}\n # col_normalization = {col_normalization}\n # row_centering = {row_centering}\n # col_centering = {col_centering}\n # \"\"\".format(\n # gct_name=gct_name, col_distance_metric=col_distance_metric,\n # output_distances=str(output_distances),\n # row_distance_metric=row_distance_metric, clustering_method=clustering_method,\n # output_base_name=output_base_name,\n # row_normalization=str(row_normalization), col_normalization=str(col_normalization),\n # row_centering=row_centering, col_centering=col_centering\n # )\n # )\n print(\"Now we will start performing hierarchical clustering, this may take a little while.\")\n\n col_model, row_model = HierarchicalClustering(pwd,\n gct_name,\n col_distance_metric,\n row_distance_metric,\n clustering_method,\n output_base_name,\n row_normalization,\n col_normalization,\n row_centering,\n col_centering,\n output_distances,\n custom_plot,\n clusters_to_highlight,\n show)\n print(\"Done with Hierarchical Clustering!\")\n\n return col_model\n\n\ndef hc_genes(\n input_gene_expression: \"gene expression data filename (.gct file) where rows are genes and columns are samples\",\n clustering_type: \"single or consensus -- Only single is suported at the moment\",\n distance_metric: \"the function to be used when comparing the distance/similarity of the rows in the \"\n \"input_gene_expression dataset\",\n file_basename: \"the name to use when naming output files\" = 'HC_out',\n clusters_to_highlight: \"how many clusters to highlight in the dendrogram\" = None):\n \"\"\"\n Perform hierarchical clustering to group genes with similar expression profile.\n :param input_gene_expression: str; gene expression data filename (.gct file)\n where rows are genes and columns are samples\n :param clustering_type: str; single or consensus\n :param distance_metric: str; the function to be used when comparing the distance/similarity of the rows\n in the input_gene_expression dataset\n :param file_basename: str; the name to use when naming output files\n :param clusters_to_highlight: int; how many clusters to highlight in the dendrogram\n :return: object; Sklearn's AgglomerativeClustering fitted model\n \"\"\"\n\n print(\"Currenty clustering_type is being ignored, only 'single' is supported.\")\n pwd = '.'\n gct_name = input_gene_expression\n col_distance_metric = 'No_column_clustering'\n output_distances = 
False\n row_distance_metric = distance_metric\n clustering_method = 'average'\n output_base_name = file_basename\n row_normalization = False\n col_normalization = False\n row_centering = 'Mean'\n col_centering = 'Mean'\n custom_plot = 'Genes'\n show = True\n\n # print(\"This are the parameters to be used (for debugging purposes)\")\n # print(\"\"\"\n # pwd = '.'\n # gct_name = {gct_name}\n # col_distance_metric = {col_distance_metric}\n # output_distances = {output_distances}\n # row_distance_metric = {row_distance_metric}\n # clustering_method = {clustering_method}\n # output_base_name = {output_base_name}\n # row_normalization = {row_normalization}\n # col_normalization = {col_normalization}\n # row_centering = {row_centering}\n # col_centering = {col_centering}\n # \"\"\".format(\n # gct_name=gct_name, col_distance_metric=col_distance_metric,\n # output_distances=str(output_distances),\n # row_distance_metric=row_distance_metric, clustering_method=clustering_method,\n # output_base_name=output_base_name,\n # row_normalization=str(row_normalization), col_normalization=str(col_normalization),\n # row_centering=row_centering, col_centering=col_centering\n # )\n # )\n print(\"Now we will start performing hierarchical clustering, this may take a little while.\")\n\n col_model, row_model = HierarchicalClustering(pwd,\n gct_name,\n col_distance_metric,\n row_distance_metric,\n clustering_method,\n output_base_name,\n row_normalization,\n col_normalization,\n row_centering,\n col_centering,\n output_distances,\n custom_plot,\n clusters_to_highlight,\n show)\n print(\"Done with Hierarchical Clustering!\")\n\n return row_model\n\n\ndef normalize_dataframe(df, log_normalize=None,\n row_centering='Mean', row_normalization=True,\n col_centering='Mean', col_normalization=True):\n \"\"\"\n This function Takes in a DataFrame and some flags and normalizes the data it contains. Order of operations is:\n 1- Log-normalize\n 2- Row (gene) center\n 3- Row (gene) normalize\n 4- Column (sample) center\n 5- Column (sample) normalize\n\n :param df: (Pandas DataFrame) A DataFrame to be normalized\n :param log_normalize:(float, None) Whether to log-normalize the data. 
Value is the base of the logarithm to use\n :param row_centering: Whether or not to subtract the mean or median from every element of each row\n :param row_normalization: Whether or not to set the maximum value of a row to 1 and the minimum value to 0\n :param col_centering: Whether or not to subtract the mean or median from every element of each column\n :param col_normalization: Whether or not to set the maximum value of a column to 1 and the minimum value to 0\n :return:\n \"\"\"\n\n if (log_normalize is None) \\\n and (row_centering == 'No') and (col_centering == 'No') \\\n and (row_normalization is False) and (col_normalization is False):\n print(\"No normalization has been requested ಠ_ಠ¯\")\n return df\n\n data = df.as_matrix()\n\n # Log Normalizing\n if log_normalize is not None:\n print(\"I'm sorry, log-normalization is not supported at the moment (u_u)\")\n\n # Row Centering\n if row_centering != 'No':\n if row_centering == 'Mean':\n row_means = np.mean(data, axis=1)\n row_means_col_vec = row_means.reshape((data.shape[0], 1))\n data = data - row_means_col_vec\n elif row_centering == 'Median':\n row_medians = np.median(data, axis=1)\n row_medians_col_vec = row_medians.reshape((data.shape[0], 1))\n data = data - row_medians_col_vec\n else:\n print(\"row_centering has an unexpected value:\", row_centering)\n\n # Row Normalizing\n if row_normalization:\n row_norm = np.sum(data * data, axis=1)\n row_norm_col_vec = row_norm.reshape((data.shape[0], 1))\n data = data / np.sqrt(row_norm_col_vec)\n\n # Column Centering\n if col_centering != 'No':\n if col_centering == 'Mean':\n col_means = np.mean(data, axis=0)\n data = data - col_means\n elif col_centering == 'Median':\n col_medians = np.median(data, axis=0)\n data = data - col_medians\n else:\n print(\"col_centering has an unexpected value: \", col_centering)\n\n # Column Normalizing\n if col_normalization:\n col_norm = np.sum(data * data, axis=0)\n data = data / np.sqrt(col_norm)\n\n normalized_df = pd.DataFrame(data=data, index=df.index, columns=list(df))\n\n return normalized_df\n\n\ndef display_heatmap(data,\n name='heatmap',\n log_normalize=None,\n row_centering: \"How to center each row (gene) in the data\" = 'No',\n row_normalization: \"Whether to normalize each row (gene) in the data\" = True,\n col_centering: \"How to center each column (sample) in the data\" = 'No',\n col_normalization: \"Whether to normalize each column (sample) in the data\" = False,\n mostrar=False):\n\n if isinstance(data, pd.DataFrame):\n data_to_plot = data.copy()\n elif os.path.isfile(data):\n data_to_plot = pd.read_table(data, skiprows=2, sep='\\t')\n data_to_plot.set_index('Name', inplace=True)\n data_to_plot.drop('Description', axis=1, inplace=True)\n else:\n try:\n data_to_plot = pd.read_table(data, skiprows=2, sep='\\t')\n except urllib.error.HTTPError:\n print(\"I don't know what the variable 'data' contains.\")\n print('data=')\n print(data)\n exit(\"If this is a url it may not be accessible.\\n\"\n \"(╯°□°)╯︵ ┻━┻\")\n data_to_plot.set_index('Name', inplace=True)\n data_to_plot.drop('Description', axis=1, inplace=True)\n\n data_to_plot = normalize_dataframe(data_to_plot, log_normalize=log_normalize,\n row_centering=row_centering, row_normalization=row_normalization,\n col_centering=col_centering, col_normalization=col_normalization)\n\n plt.clf()\n\n # # figure reshape from:\n # # https://stackoverflow.com/questions/35127920/overlapping-yticklabels-is-it-possible-to-control-cell-size-of-heatmap-in-seabo\n # # and from:\n # # 
https://matplotlib.org/users/customizing.html\n\n # get the tick label font size\n fontsize_pt = plt.rcParams['ytick.labelsize']\n dpi = 72.27\n\n # compute the matrix height in points and inches\n matrix_height_pt = fontsize_pt * data_to_plot.as_matrix().shape[0]\n matrix_height_in = (matrix_height_pt / dpi) * 1.2\n\n # compute the required figure height\n top_margin = 0.01 # in percentage of the figure height\n bottom_margin = 0.01 # in percentage of the figure height\n figure_height = matrix_height_in / (1 - top_margin - bottom_margin)\n\n # build the figure instance with the desired height\n fig, ax = plt.subplots(\n figsize=(6, figure_height),\n gridspec_kw=dict(top=1 - top_margin, bottom=bottom_margin))\n\n sns.heatmap(data_to_plot, cmap='bwr', yticklabels=True, square=True,\n cbar_kws={'use_gridspec': False,\n 'location': \"right\",\n 'shrink': 0.5,\n 'label': ''}\n\n )\n\n if not name.endswith('.pdf'):\n name = name + '.pdf'\n\n plt.savefig(name, dpi=dpi, bbox_inches='tight')\n # plt.savefig(name, dpi=dpi)\n print(name, \"has been created!\")\n\n if mostrar:\n # print(data_to_plot.head())\n plt.show()\n\n print(\"The PDF of this heatmap can be downloaded here:\")\n display(HTML('<a href=\"' + name + '\" target=\"_blank\">PDF of the heatmap</a>'))\n return\n"
] | [
[
"pandas.read_table",
"numpy.sum",
"matplotlib.pyplot.ylabel",
"numpy.transpose",
"scipy.cluster.hierarchy.dendrogram",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.gca",
"matplotlib.gridspec.GridSpec",
"numpy.average",
"numpy.unique",
"numpy.mean",
"pandas.read_csv",
"numpy.column_stack",
"matplotlib.pyplot.clf",
"numpy.count_nonzero",
"numpy.arange",
"numpy.median",
"numpy.min",
"numpy.finfo",
"numpy.cumsum",
"numpy.exp",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"numpy.sqrt",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.xlabel",
"sklearn.cluster.AgglomerativeClustering"
]
] |
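The file above ultimately delegates the clustering itself to scikit-learn. As a minimal, self-contained sketch of that core call (the toy matrix, its shape, and the n_clusters value are invented for illustration, not taken from the file):

import numpy as np
from sklearn.cluster import AgglomerativeClustering

data = np.random.rand(10, 4)  # 10 samples x 4 features, placeholder data
model = AgglomerativeClustering(n_clusters=2, linkage='average')
model.fit(data)
print(model.labels_)    # one cluster id per sample
print(model.children_)  # the merge tree later turned into dendrograms

HierarchicalClustering builds on exactly these two fitted attributes: labels_ feeds the .cls output and children_ drives make_tree and the dendrogram plots.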
jsevo/taxumap | [
"1a02518dca822a65847994910177c74607243dae"
] | [
"taxumap-manuscript-notebooks/embeddings.py"
] | [
"from sklearn.manifold import TSNE \nfrom sklearn.decomposition import PCA, KernelPCA\nfrom umap import UMAP\nfrom sklearn.preprocessing import MinMaxScaler\n\nRUNEMBEDDINGS = False\nif RUNEMBEDDINGS:\n #simple PCA\n pcaembedding = PCA(n_components=2).fit_transform(XASV.fillna(0))\n \n #base embedding (kernel pca)\n kernelpcaembedding = KernelPCA(n_components=2).fit_transform(XASV.fillna(0))\n \n # non-phylo umap\n embedding_non_phylo_unscaled = UMAP(n_neighbors=120,min_dist=0.2, metric=\"manhattan\").fit_transform(XASV)\n \n \n # embedding_non_phylo_scaled = UMAP(n_neighbors=120,min_dist=0.2, metric=\"manhattan\").fit_transform(MinMaxScaler().fit_transform(XASV))\n\n\nRUNTAXUMAPS = False\nif RUNTAXUMAPS: \n from taxumap.taxumap import taxumap\n agg_levels = [\"Phylum\", \"Family\"]\n withscaling = False # do not scale the columns of X\n distanceperlevel = False # do not calculate a separate distance matrix at each phylogenetic level because we are using the manhattan distance \n distancemetric = \"manhattan\"\n printfigure=False\n printwithdiversity=False #dont plot the average diversity in the background of the scatter plot\n X_in = XASV\n tax = taxonomy\n withusercolors=taxonomy_meta[[\"HexColor\"]]\n\n\n# TAXUMAP, X_embedded, taxumap_Xscaled, taxumap_X = taxumap(agg_levels,\n# withscaling,\n# distanceperlevel,\n# distancemetric,\n# printfigure,\n# printwithdiversity,\n# X_in,\n# tax,\n# withusercolors,\n# debug=True, #return tables\n# save_embedding=False #save xy coordinates\n# );\n \n TAXUMAP_alllevels, X_embedded_alllevels, taxumap_Xscaled_alllevels, taxumap_X_alllevels = taxumap([\"Phylum\", \"Class\", \"Order\", \"Family\", \"Genus\"],\n withscaling,\n distanceperlevel,\n distancemetric,\n printfigure,\n printwithdiversity,\n X_in,\n tax,\n withusercolors,\n debug=True, #return tables\n save_embedding=False #save xy coordinates\n );\n\n# TAXUMAPSCALED, X_embedded_scaled, taxumap_Xscaled_scaled, taxumap_X_scaled = taxumap(\n# agg_levels,\n# True,\n# False,\n# \"euclidean\",\n# printfigure,\n# printwithdiversity,\n# X_in,\n# tax,\n# withusercolors,\n# debug=True, #return tables\n# save_embedding=True#save xy coordinates\n# );\n\n# TAXUMAPSCALEDeuclidean, X_embedded_scaledeuclidean, taxumap_Xscaled_scaledeuclidean, taxumap_X_scaledeuclidean = taxumap(\n# agg_levels,\n# True,\n# False,\n# \"euclidean\",\n# printfigure,\n# printwithdiversity,\n# X_in,\n# tax,\n# withusercolors,\n# debug=True, #return tables\n# save_embedding=True#save xy coordinates\n# );\nLOADPCoAS = False\nif LOADPCoAS:\n pcoa_embedding_unweighted_unifrac = PCA(n_components=2).fit_transform(unweighted_unifrac.set_index(\"SampleID\"))\n #Weighted Unifrac\n pcoa_embedding_weighted_unifrac = PCA(n_components=2).fit_transform(weighted_unifrac.set_index(\"SampleID\"))\n\n \ndel unweighted_unifrac\ndel weighted_unifrac\n#del TAXUMAPSCALED, taxumap_Xscaled_scaled, taxumap_X_scaled\n#del TAXUMAPSCALEDeuclidean, taxumap_Xscaled_scaledeuclidean, taxumap_X_scaledeuclidean\ndel TAXUMAP_alllevels, taxumap_Xscaled_alllevels, taxumap_X_alllevels\n\nwrite_now=False\nif write_now:\n for (em,n) in zip(\n [pcaembedding,\n pcoa_embedding_unweighted_unifract[:,0:2], \n pcoa_embedding_weighted_unifract, \n embedding_non_phylo_unscaled,\n X_embedded_alllevels.values,\n X_embedded.values],\n [\"pcaembedding\",\n \"pcoa_unweighted_unifrac_embedding\", \n \"pcoa_weighted_unifrac_embedding\",\n \"embedding_nontax_umap_unscaled\",\n \"taxumap_alllevels\",\n \"current_taxumap_embedding\"]):\n pd.DataFrame(em, 
index=XASV.index).to_csv(\"results/%s.csv\"%n)"
] | [
[
"sklearn.decomposition.KernelPCA",
"sklearn.decomposition.PCA"
]
] |
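A hedged sketch of the two decompositions the notebook above compares (the random matrix is a stand-in for the abundance table XASV, which is defined elsewhere in that notebook):

import numpy as np
from sklearn.decomposition import PCA, KernelPCA

X = np.random.rand(50, 20)  # placeholder for XASV.fillna(0)
pca_xy = PCA(n_components=2).fit_transform(X)
kpca_xy = KernelPCA(n_components=2).fit_transform(X)
print(pca_xy.shape, kpca_xy.shape)  # (50, 2) (50, 2)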
catalys1/dnnutil | [
"a55a73ae59c5ac0117f58d8d8136bdd32902141f"
] | [
"dnnutil/training.py"
] | [
"import torch\nimport numpy as np\nimport dnnutil.network as network\nimport time\n\n\n__all__ = ['calculate_accuracy', 'Trainer', 'ClassifierTrainer', 'AutoencoderTrainer']\n\n\ndef calculate_accuracy(prediction, label, axis=1):\n '''calculate_accuracy(prediction, label)\n \n Computes the mean accuracy over a batch of predictions and corresponding\n ground-truth labels.\n\n Args:\n prediction (Tensor): A batch of predictions. Assumed to have shape\n [batch-size, nclasses, [d0, d1, ...]].\n label (LongTensor): A batch of labels. Assumed to have shape\n [batch-size, [d0, d1, ...]]). The number of dimensions should be\n one less than prediction.\n\n Returns:\n accuracy (Tensor): A single-element Tensor containing the percent of\n correct predictions in the batch as a value between 0 and 1.\n '''\n return torch.eq(prediction.argmax(axis), label).float().mean().item()\n\n\nclass Trainer(object):\n '''Trainer(net, optim, loss_fn, accuracy_metric=None)\n \n Base class for all network trainers. Network trainer classes provide \n methods to facilitate training and testing deep network models. The goal\n is to encapsulate the common functionality, to reduce the boilerplate\n code that needs to be repeated across projects.\n\n Args:\n net (torch.nn.Module): An instance of a network that inherits from\n torch.nn.Module.\n optim (torch.optim.Optimizer): An instance of an optimizer that\n inherits from torch.optim.Optimizer.\n loss_fn (callable): A callable that calculates and returns a loss\n value. The loss value should be a single-element Tensor.\n accuracy_metric (callable): A callabel that calculates and returns\n an accuracy value. Usually this will be a floating point number\n in [0, 1].\n '''\n def __init__(self, net, optim, loss_fn, accuracy_metric=None):\n self.net = net\n self.loss_fn = loss_fn\n self.optim = optim\n if accuracy_metric is not None:\n self.measure_accuracy = accuracy_metric\n else:\n self.measure_accuracy = calculate_accuracy\n\n self.train_loss = 0.\n self.train_acc = 0.\n self.test_loss = 0.\n self.test_acc = 0.\n \n def _set_train_stats(self, stats):\n '''TODO:docs\n '''\n self.train_loss = stats[0]\n self.train_acc = stats[1]\n\n def _set_test_stats(self, stats):\n '''TODO:docs\n '''\n self.test_loss = stats[0]\n self.test_acc = stats[1]\n\n def get_stats(self):\n '''TODO:docs\n '''\n return (self.train_loss, self.train_acc,\n self.test_loss, self.test_acc)\n\n def train(self, dataloader, epoch):\n '''Train the Trainer's network.\n\n Args:\n dataloader (torch.utils.data.DataLoader): An instance of a\n DataLoader, which will provide access to the training data.\n epoch (int): The current epoch.\n\n Returns:\n loss (float): The mean loss over the epoch.\n accuracy (float): The mean accuracy over the epoch (in [0, 1]).\n '''\n self.net.train()\n stats = self._run_epoch(dataloader, epoch)\n self._set_train_stats(stats)\n return stats\n\n def eval(self, dataloader, epoch):\n '''Evaluate the Trainer's network.\n\n Args:\n dataloader (torch.utils.data.DataLoader): An instance of a\n DataLoader, which will provide access to the testing data.\n epoch (int): The current epoch.\n Returns:\n loss (float): The mean loss over the epoch.\n accuracy (float): The mean accuracy over the epoch (in [0, 1]).\n '''\n self.net.eval()\n stats = self._run_epoch(dataloader, epoch)\n self._set_test_stats(stats)\n return stats\n \n def _run_epoch(self, dataloader, epoch):\n '''Perform a single epoch of either training or evaluation.\n\n Args:\n dataloader (torch.utils.data.DataLoader): An instance 
of a\n DataLoader, which will provide access to the testing data.\n epoch (int): The current epoch.\n Returns:\n loss (float): The mean loss over the epoch.\n accuracy (float): The mean accuracy over the epoch (in [0, 1]).\n '''\n N = len(dataloader.batch_sampler)\n msg = 'train' if self.net.training else 'test'\n func = self.train_batch if self.net.training else self.test_batch\n loss = []\n acc = []\n at = 0\n for i, batch in enumerate(dataloader):\n t = time.time()\n if self.net.training:\n self.update_lr(epoch * N + i + 1)\n batch_loss, batch_acc = func(batch)\n t = time.time() - t\n if i == 0:\n at = t\n else:\n at = at * i / (i + 1) + t / (i + 1)\n\n loss.append(batch_loss)\n acc.append(batch_acc)\n\n print(f'\\rEPOCH {epoch}: {msg} '\n f'batch {i + 1:04d}/{N} '\n f'lr[ {self.optim.param_groups[0][\"lr\"]:1.3e} ] '\n f'[ {t:.3f} ({at:.3f}) secs ]'\n f'{\" \"*10}',\n end='', flush=True)\n\n loss = np.mean(loss)\n acc = np.mean(acc)\n\n return loss, acc\n\n def update_lr(self, i=None):\n '''Update the optimizer's learning rate. Used for batch-level\n learning rate scheduling. If using an epoch-level scheduler, \n define and use it in the epoch loop. If the iteration number is\n not provided (None) or the Trainer has no lr_schedule attribute,\n this function does nothing and returns.\n\n Args:\n i (int): iteration number (starts at 1 for the first batch).\n '''\n if i is None or not hasattr(self, 'lr_schedule'):\n return\n self.lr_schedule.step(i)\n \n def train_batch(self, batch):\n '''Train the Trainer's network on a single training batch.\n '''\n raise NotImplementedError()\n\n def test_batch(self, batch):\n '''Test the Trainer's network on a single testing batch.\n '''\n raise NotImplementedError()\n\n\nclass ClassifierTrainer(Trainer):\n '''ClassifierTrainer(net, optim, loss_fn, accuracy_metric=None)\n \n Trainer for training a network to do image classification.\n\n Args:\n net (torch.nn.Module): An instance of a network that inherits from\n torch.nn.Module.\n optim (torch.optim.Optimizer): An instance of an optimizer that\n inherits from torch.optim.Optimizer.\n loss_fn (callable): A callable that calculates and returns a loss\n value. The loss value should be a single-element Tensor.\n accuracy_metric (callable): A callabel that calculates and returns\n an accuracy value. Usually this will be a floating point number\n in [0, 1].\n '''\n def train_batch(self, batch):\n '''Train the Trainer's network on a single training batch.\n\n Args:\n batch (iterable): A 2-tuple of (images, labels). Images is a 4-d\n Tensor of shape (BxCxHxW), and labels is a Tensor of 2 or more\n dimensions (BxLx*) which matches images in the first (batch)\n dimension. The exact dimensionality of labels will depend on\n the application and loss function chosen, but often consists\n of integer class-indexes.\n Returns:\n loss (float): The mean loss over the batch.\n accuracy (float): The mean accuracy over the batch (in [0, 1]).\n '''\n self.optim.zero_grad()\n\n imgs, labels = network.tocuda(batch)\n\n predictions = self.net(imgs)\n loss = self.loss_fn(predictions, labels)\n\n loss.backward()\n self.optim.step()\n\n loss = loss.item()\n with torch.no_grad():\n accuracy = self.measure_accuracy(predictions, labels)\n return loss, accuracy\n\n @torch.no_grad()\n def test_batch(self, batch):\n '''Evaluate the Trainer's network on a single testing batch.\n\n Args:\n batch (iterable): A 2-tuple of (images, labels). 
Images is a 4-d\n Tensor of shape (BxCxHxW), and labels is a Tensor of 2 or more\n dimensions (BxLx*) which matches images in the first (batch)\n dimension. The exact dimensionality of labels will depend on\n the application and loss function chosen, but often consists\n of integer class-indexes.\n Returns:\n loss (float): The mean loss over the batch.\n accuracy (float): The mean accuracy over the batch (in [0, 1]).\n '''\n imgs, labels = network.tocuda(batch)\n predictions = self.net(imgs)\n loss = self.loss_fn(predictions, labels).item()\n accuracy = self.measure_accuracy(predictions, labels)\n return loss, accuracy\n\n\nclass AutoencoderTrainer(Trainer):\n '''AutoencoderTrainer(net, optim, loss_fn)\n\n Trainer for training an autoencoder network.\n\n Args:\n net (torch.nn.Module): An instance of a network that inherits from\n torch.nn.Module.\n optim (torch.optim.Optimizer): An instance of an optimizer that\n inherits from torch.optim.Optimizer.\n loss_fn (callable): A callable that calculates and returns a loss\n value. The loss value should be a single-element Tensor.\n '''\n def __init__(self, net, optim, loss_fn):\n super(AutoencoderTrainer, self).__init__(\n net, optim, loss_fn, None)\n delattr(self, 'measure_accuracy')\n\n def train_batch(self, batch):\n '''Train the Trainer's network on a single training batch.\n\n Args:\n batch (iterable): A 2-tuple of (images, labels). Images is a 4-d\n Tensor of shape (BxCxHxW), and labels is a Tensor of 2 or more\n dimensions (BxLx*) which matches images in the first (batch)\n dimension. The exact dimensionality of labels will depend on\n the application and loss function chosen, but often consists\n of integer class-indexes.\n Returns:\n loss (float): The mean loss over the batch.\n '''\n self.optim.zero_grad()\n\n imgs = network.tocuda(batch)\n\n predictions = self.net(imgs)\n loss = self.loss_fn(predictions, imgs)\n\n loss.backward()\n self.optim.step()\n\n loss = loss.item()\n\n return loss\n\n @torch.no_grad()\n def test_batch(self, batch):\n '''Evaluate the Trainer's network on a single testing batch.\n\n Args:\n batch (iterable): A 2-tuple of (images, labels). Images is a 4-d\n Tensor of shape (BxCxHxW), and labels is a Tensor of 2 or more\n dimensions (BxLx*) which matches images in the first (batch)\n dimension. The exact dimensionality of labels will depend on\n the application and loss function chosen, but often consists\n of integer class-indexes.\n Returns:\n loss (float): The mean loss over the batch.\n '''\n imgs = network.tocuda(batch)\n predictions = self.net(imgs)\n loss = self.loss_fn(predictions, imgs).item()\n return loss\n\n def _run_epoch(self, dataloader, epoch):\n '''Perform a single epoch of either training or evaluation.\n\n Args:\n dataloader (torch.utils.data.DataLoader): An instance of a\n DataLoader, which will provide access to the testing data.\n epoch (int): The current epoch.\n Returns:\n loss (float): The mean loss over the epoch.\n '''\n N = int(np.ceil(len(dataloader.dataset) / dataloader.batch_size))\n msg = 'train' if self.net.training else 'test'\n func = self.train_batch if self.net.training else self.test_batch\n loss = []\n for i, batch in enumerate(dataloader):\n batch_loss = func(batch)\n loss.append(batch_loss)\n\n print(f'\\rEPOCH {epoch}: {msg} batch {i:04d}/{N}{\" \"*10}',\n end='', flush=True)\n\n loss = np.mean(loss)\n\n return loss\n\n"
] | [
[
"torch.no_grad",
"numpy.mean"
]
] |
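The evaluation path in this trainer rests on two small idioms: a forward pass wrapped in torch.no_grad() and epoch statistics reduced with numpy.mean. A minimal sketch with a stand-in model and random data (none of these names come from dnnutil):

import numpy as np
import torch

net = torch.nn.Linear(4, 2)  # stand-in for the Trainer's network
losses = []
with torch.no_grad():        # no autograd bookkeeping during eval
    for _ in range(3):
        x = torch.randn(8, 4)
        y = torch.randint(0, 2, (8,))
        losses.append(torch.nn.functional.cross_entropy(net(x), y).item())
print(np.mean(losses))       # epoch mean, as computed in _run_epoch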
vinayphadnis/NeMo | [
"9dc7773c48e164b8a82051bb558a728c6eeb85ec"
] | [
"nemo/collections/asr/models/classification_models.py"
] | [
"# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport copy\nfrom typing import Dict, List, Optional, Union\n\nimport torch\nfrom omegaconf import DictConfig, ListConfig, OmegaConf\nfrom pytorch_lightning import Trainer\n\nfrom nemo.collections.asr.data.audio_to_text import AudioLabelDataset\nfrom nemo.collections.asr.models.asr_model import ASRModel\nfrom nemo.collections.asr.parts.features import WaveformFeaturizer\nfrom nemo.collections.asr.parts.perturb import process_augmentations\nfrom nemo.collections.common.losses import CrossEntropyLoss\nfrom nemo.collections.common.metrics import TopKClassificationAccuracy, compute_topk_accuracy\nfrom nemo.core.classes.common import PretrainedModelInfo, typecheck\nfrom nemo.core.neural_types import *\nfrom nemo.utils import logging\n\n__all__ = ['EncDecClassificationModel', 'MatchboxNet']\n\n\nclass EncDecClassificationModel(ASRModel):\n \"\"\"Encoder decoder CTC-based models.\"\"\"\n\n def __init__(self, cfg: DictConfig, trainer: Trainer = None):\n super().__init__(cfg=cfg, trainer=trainer)\n self._update_decoder_config(self.cfg.decoder)\n\n self.preprocessor = EncDecClassificationModel.from_config_dict(self._cfg.preprocessor)\n self.encoder = EncDecClassificationModel.from_config_dict(self._cfg.encoder)\n self.decoder = EncDecClassificationModel.from_config_dict(self._cfg.decoder)\n self.loss = CrossEntropyLoss()\n if hasattr(self._cfg, 'spec_augment') and self._cfg.spec_augment is not None:\n self.spec_augmentation = EncDecClassificationModel.from_config_dict(self._cfg.spec_augment)\n else:\n self.spec_augmentation = None\n if hasattr(self._cfg, 'crop_or_pad_augment') and self._cfg.crop_or_pad_augment is not None:\n self.crop_or_pad = EncDecClassificationModel.from_config_dict(self._cfg.crop_or_pad_augment)\n else:\n self.crop_or_pad = None\n\n # Setup metric objects\n self._accuracy = TopKClassificationAccuracy()\n\n def transcribe(self, paths2audio_files: str) -> str:\n raise NotImplementedError(\"Classification models do not transcribe audio.\")\n\n def _setup_dataloader_from_config(self, config: Optional[Dict]):\n if config.get('manifest_filepath') is None:\n return\n\n if 'augmentor' in config:\n augmentor = process_augmentations(config['augmentor'])\n else:\n augmentor = None\n\n featurizer = WaveformFeaturizer(\n sample_rate=config['sample_rate'], int_values=config.get('int_values', False), augmentor=augmentor\n )\n dataset = AudioLabelDataset(\n manifest_filepath=config['manifest_filepath'],\n labels=config['labels'],\n featurizer=featurizer,\n max_duration=config.get('max_duration', None),\n min_duration=config.get('min_duration', None),\n trim=config.get('trim_silence', True),\n load_audio=config.get('load_audio', True),\n )\n\n return torch.utils.data.DataLoader(\n dataset=dataset,\n batch_size=config['batch_size'],\n collate_fn=dataset.collate_fn,\n drop_last=config.get('drop_last', False),\n shuffle=config['shuffle'],\n num_workers=config.get('num_workers', 0),\n 
pin_memory=config.get('pin_memory', False),\n )\n\n def setup_training_data(self, train_data_config: Optional[Union[DictConfig, Dict]]):\n if 'shuffle' not in train_data_config:\n train_data_config['shuffle'] = True\n self._train_dl = self._setup_dataloader_from_config(config=train_data_config)\n\n def setup_validation_data(self, val_data_config: Optional[Union[DictConfig, Dict]]):\n if 'shuffle' not in val_data_config:\n val_data_config['shuffle'] = False\n self._validation_dl = self._setup_dataloader_from_config(config=val_data_config)\n\n def setup_test_data(self, test_data_config: Optional[Union[DictConfig, Dict]]):\n if 'shuffle' not in test_data_config:\n test_data_config['shuffle'] = False\n self._test_dl = self._setup_dataloader_from_config(config=test_data_config)\n\n def test_dataloader(self):\n if self._test_dl is not None:\n return self._test_dl\n\n @classmethod\n def list_available_models(cls) -> Optional[List[PretrainedModelInfo]]:\n \"\"\"\n This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.\n\n Returns:\n List of available pre-trained models.\n \"\"\"\n result = []\n model = PretrainedModelInfo(\n pretrained_model_name=\"MatchboxNet-3x1x64-v1\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/MatchboxNet-3x1x64-v1.nemo\",\n description=\"MatchboxNet model trained on Google Speech Commands dataset (v1, 30 classes) which obtains 97.32% accuracy on test set.\",\n )\n result.append(model)\n\n model = PretrainedModelInfo(\n pretrained_model_name=\"MatchboxNet-3x2x64-v1\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/MatchboxNet-3x2x64-v1.nemo\",\n description=\"MatchboxNet model trained on Google Speech Commands dataset (v1, 30 classes) which obtains 97.68% accuracy on test set.\",\n )\n result.append(model)\n\n model = PretrainedModelInfo(\n pretrained_model_name=\"MatchboxNet-3x1x64-v2\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/MatchboxNet-3x1x64-v2.nemo\",\n description=\"MatchboxNet model trained on Google Speech Commands dataset (v2, 35 classes) which obtains 97.12% accuracy on test set.\",\n )\n result.append(model)\n\n model = PretrainedModelInfo(\n pretrained_model_name=\"MatchboxNet-3x1x64-v2\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/MatchboxNet-3x1x64-v2.nemo\",\n description=\"MatchboxNet model trained on Google Speech Commands dataset (v2, 30 classes) which obtains 97.29% accuracy on test set.\",\n )\n result.append(model)\n\n model = PretrainedModelInfo(\n pretrained_model_name=\"MatchboxNet-3x1x64-v2-subset-task\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/MatchboxNet-3x1x64-v2-subset-task.nemo\",\n description=\"MatchboxNet model trained on Google Speech Commands dataset (v2, 10+2 classes) which obtains 98.2% accuracy on test set.\",\n )\n result.append(model)\n\n model = PretrainedModelInfo(\n pretrained_model_name=\"MatchboxNet-3x2x64-v2-subset-task\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/MatchboxNet-3x2x64-v2-subset-task.nemo\",\n description=\"MatchboxNet model trained on Google Speech Commands dataset (v2, 10+2 classes) which obtains 98.4% accuracy on test set.\",\n )\n result.append(model)\n\n model = PretrainedModelInfo(\n pretrained_model_name=\"MatchboxNet-VAD-3x2\",\n 
location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/MatchboxNet_VAD_3x2.nemo\",\n description=\"Voice Activity Detection MatchboxNet model trained on google speech command (v2) and freesound background data, which obtains 0.992 accuracy on testset from same source and 0.852 TPR for FPR=0.315 on testset (ALL) of AVA movie data\",\n )\n result.append(model)\n return result\n\n @property\n def input_types(self) -> Optional[Dict[str, NeuralType]]:\n if hasattr(self.preprocessor, '_sample_rate'):\n audio_eltype = AudioSignal(freq=self.preprocessor._sample_rate)\n else:\n audio_eltype = AudioSignal()\n return {\n \"input_signal\": NeuralType(('B', 'T'), audio_eltype),\n \"input_signal_length\": NeuralType(tuple('B'), LengthsType()),\n }\n\n @property\n def output_types(self) -> Optional[Dict[str, NeuralType]]:\n return {\"outputs\": NeuralType(('B', 'D'), LogitsType())}\n\n @typecheck()\n def forward(self, input_signal, input_signal_length):\n processed_signal, processed_signal_len = self.preprocessor(\n input_signal=input_signal, length=input_signal_length,\n )\n # Crop or pad is always applied\n if self.crop_or_pad is not None:\n processed_signal, processed_signal_len = self.crop_or_pad(\n input_signal=processed_signal, length=processed_signal_len\n )\n # Spec augment is not applied during evaluation/testing\n if self.spec_augmentation is not None and self.training:\n processed_signal = self.spec_augmentation(input_spec=processed_signal)\n encoded, encoded_len = self.encoder(audio_signal=processed_signal, length=processed_signal_len)\n logits = self.decoder(encoder_output=encoded)\n return logits\n\n # PTL-specific methods\n def training_step(self, batch, batch_nb):\n self.training_step_end()\n audio_signal, audio_signal_len, labels, labels_len = batch\n logits = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)\n loss_value = self.loss(logits=logits, labels=labels)\n\n tensorboard_logs = {\n 'train_loss': loss_value,\n 'learning_rate': self._optimizer.param_groups[0]['lr'],\n }\n\n correct_counts, total_counts = self._accuracy(logits=logits, labels=labels)\n\n for ki in range(correct_counts.shape[-1]):\n correct_count = correct_counts[ki]\n total_count = total_counts[ki]\n top_k = self._accuracy.top_k[ki]\n\n tensorboard_logs['training_batch_accuracy_top@{}'.format(top_k)] = correct_count / float(total_count)\n\n return {'loss': loss_value, 'log': tensorboard_logs}\n\n def validation_step(self, batch, batch_idx, dataloader_idx=0):\n audio_signal, audio_signal_len, labels, labels_len = batch\n logits = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)\n loss_value = self.loss(logits=logits, labels=labels)\n correct_counts, total_counts = self._accuracy(logits=logits, labels=labels)\n return {'val_loss': loss_value, 'val_correct_counts': correct_counts, 'val_total_counts': total_counts}\n\n def test_step(self, batch, batch_idx, dataloader_idx=0):\n audio_signal, audio_signal_len, labels, labels_len = batch\n logits = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)\n loss_value = self.loss(logits=logits, labels=labels)\n correct_counts, total_counts = self._accuracy(logits=logits, labels=labels)\n return {'test_loss': loss_value, 'test_correct_counts': correct_counts, 'test_total_counts': total_counts}\n\n def multi_validation_epoch_end(self, outputs, dataloader_idx: int = 0):\n val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean()\n correct_counts = 
torch.stack([x['val_correct_counts'] for x in outputs])\n total_counts = torch.stack([x['val_total_counts'] for x in outputs])\n\n topk_scores = compute_topk_accuracy(correct_counts, total_counts)\n\n tensorboard_log = {'val_loss': val_loss_mean}\n for top_k, score in zip(self._accuracy.top_k, topk_scores):\n tensorboard_log['val_epoch_top@{}'.format(top_k)] = score\n\n return {'log': tensorboard_log}\n\n def multi_test_epoch_end(self, outputs, dataloader_idx: int = 0):\n test_loss_mean = torch.stack([x['test_loss'] for x in outputs]).mean()\n correct_counts = torch.stack([x['test_correct_counts'].unsqueeze(0) for x in outputs])\n total_counts = torch.stack([x['test_total_counts'].unsqueeze(0) for x in outputs])\n\n topk_scores = compute_topk_accuracy(correct_counts, total_counts)\n\n tensorboard_log = {'test_loss': test_loss_mean}\n for top_k, score in zip(self._accuracy.top_k, topk_scores):\n tensorboard_log['test_epoch_top@{}'.format(top_k)] = score\n\n return {'log': tensorboard_log}\n\n def change_labels(self, new_labels: List[str]):\n \"\"\"\n Changes labels used by the decoder model. Use this method when fine-tuning on from pre-trained model.\n This method changes only decoder and leaves encoder and pre-processing modules unchanged. For example, you would\n use it if you want to use pretrained encoder when fine-tuning on a data in another dataset.\n\n If new_labels == self.decoder.vocabulary then nothing will be changed.\n\n Args:\n\n new_labels: list with new labels. Must contain at least 2 elements. Typically, \\\n this is set of labels for the dataset.\n\n Returns: None\n\n \"\"\"\n if new_labels is not None and not isinstance(new_labels, ListConfig):\n new_labels = ListConfig(new_labels)\n\n if self._cfg.labels == new_labels:\n logging.warning(\n f\"Old labels ({self._cfg.labels}) and new labels ({new_labels}) match. Not changing anything\"\n )\n else:\n if new_labels is None or len(new_labels) == 0:\n raise ValueError(f'New labels must be non-empty list of labels. But I got: {new_labels}')\n\n # Update config\n self._cfg.labels = new_labels\n\n decoder_config = self.decoder.to_config_dict()\n new_decoder_config = copy.deepcopy(decoder_config)\n self._update_decoder_config(new_decoder_config)\n del self.decoder\n self.decoder = EncDecClassificationModel.from_config_dict(new_decoder_config)\n\n OmegaConf.set_struct(self._cfg.decoder, False)\n self._cfg.decoder = new_decoder_config\n OmegaConf.set_struct(self._cfg.decoder, True)\n\n if 'train_ds' in self._cfg and self._cfg.train_ds is not None:\n self._cfg.train_ds.labels = new_labels\n\n if 'validation_ds' in self._cfg and self._cfg.validation_ds is not None:\n self._cfg.validation_ds.labels = new_labels\n\n if 'test_ds' in self._cfg and self._cfg.test_ds is not None:\n self._cfg.test_ds.labels = new_labels\n\n logging.info(f\"Changed decoder output to {self.decoder.num_classes} labels.\")\n\n def _update_decoder_config(self, cfg):\n \"\"\"\n Update the number of classes in the decoder based on labels provided.\n\n Args:\n cfg: The config of the decoder which will be updated.\n \"\"\"\n OmegaConf.set_struct(cfg, False)\n\n labels = self.cfg.labels\n\n if 'params' in cfg:\n cfg.params.num_classes = len(labels)\n else:\n cfg.num_classes = len(labels)\n\n OmegaConf.set_struct(cfg, True)\n\n\nclass MatchboxNet(EncDecClassificationModel):\n pass\n"
] | [
[
"torch.stack"
]
] |
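The epoch-end hooks above aggregate per-batch results with torch.stack. A tiny sketch of that pattern (the fake outputs list mirrors the dicts returned by validation_step; the values are invented):

import torch

outputs = [{'val_loss': torch.tensor(0.9)},
           {'val_loss': torch.tensor(0.7)},
           {'val_loss': torch.tensor(0.8)}]
val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean()
print(val_loss_mean)  # tensor(0.8000)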
nlindqv/pytorch_RVAE | [
"d9e58134965f69aad557fb3bd2478500a51210f8"
] | [
"human_eval.py"
] | [
"import argparse\r\nimport os\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport torch as t\r\nfrom torch.optim import Adam\r\nimport pickle5 as pickle\r\nimport json\r\nimport random\r\n\r\nfrom sample import sample_with_input, sample_with_beam\r\nfrom utils.batch_loader import BatchLoader, clean_str\r\nfrom model.paraphraser import Paraphraser\r\nfrom model.generator import Generator\r\nfrom synonym_paraphraser import SynonymParaphraser\r\n\r\ndef main():\r\n parser = argparse.ArgumentParser(description='Paraphraser')\r\n parser.add_argument('--use-cuda', type=bool, default=False, metavar='CUDA', help='use cuda (default: False)')\r\n parser.add_argument('--seq-len', default=30, metavar='SL', help='max length of sequence (default: 30)')\r\n parser.add_argument('--ml', type=bool, default=True, metavar='ML', help='sample by maximum likelihood')\r\n\r\n args = parser.parse_args()\r\n\r\n # Read data\r\n if not os.path.exists('datasets/human_test.csv'):\r\n source_file = 'datasets/test.csv'\r\n source_data = pd.read_csv(source_file)[['question1', 'question2']]\r\n sentence_categories = [[] for _ in range(5)]\r\n for i in range(len(source_data)):\r\n\r\n sent = clean_str(source_data['question1'][i])\r\n sent_len = len(sent.split())\r\n if sent_len < 6:\r\n j = 0\r\n elif sent_len < 11:\r\n j = 1\r\n elif sent_len < 16:\r\n j = 2\r\n elif sent_len < 21:\r\n j = 3\r\n else:\r\n j = 4\r\n sentence_categories[j].append([source_data['question1'][i], source_data['question2'][i]])\r\n\r\n sample_data = []\r\n for category in sentence_categories:\r\n sample_data += random.sample(category, 20)\r\n source_data = pd.DataFrame(sample_data, columns=['question1', 'question2'])\r\n source_data.to_csv('datasets/human_test.csv')\r\n else:\r\n source_data = pd.read_csv('datasets/human_test_1.csv')[['question1', 'question2']]\r\n\r\n\r\n # Sample from Guptas original model\r\n batch_loader = BatchLoader()\r\n from model.parameters import Parameters\r\n parameters = Parameters(batch_loader.max_seq_len, batch_loader.vocab_size)\r\n paraphraser = Paraphraser(parameters)\r\n paraphraser.load_state_dict(t.load('saved_models/trained_paraphraser_ori_32', map_location=t.device('cpu')))\r\n\r\n samples_ori, target, source_ori = sample_with_input(batch_loader, paraphraser, args,\r\n decoder_only=True,\r\n file_name='datasets/human_test.csv')\r\n\r\n ref_items = generate_items(source_ori, target, 'ref')\r\n ori_items = generate_items(source_ori, samples_ori[0], 'ori')\r\n\r\n # Sample from Guptas model with two-path-loss\r\n batch_loader = BatchLoader()\r\n parameters = Parameters(batch_loader.max_seq_len, batch_loader.vocab_size, use_two_path_loss=True)\r\n paraphraser = Paraphraser(parameters)\r\n paraphraser.load_state_dict(t.load('saved_models/trained_paraphraser_tpl_16_32', map_location=t.device('cpu')))\r\n\r\n samples_tpl, target, source_tpl = sample_with_input(batch_loader, paraphraser, args,\r\n decoder_only=False,\r\n file_name='datasets/human_test.csv')\r\n tpl_items = generate_items(source_tpl, samples_tpl[0], 'tpl')\r\n\r\n # Sample from GAN model\r\n batch_loader = BatchLoader()\r\n from model.parametersGAN import Parameters\r\n parameters = Parameters(batch_loader.max_seq_len, batch_loader.vocab_size)\r\n paraphraser = Generator(parameters)\r\n paraphraser.load_state_dict(t.load('saved_models/trained_generator_gan_140k', map_location=t.device('cpu')))\r\n samples_gan, target, source_gan = sample_with_input(batch_loader, paraphraser, args,\r\n decoder_only=False,\r\n 
file_name='datasets/human_test.csv')\r\n gan_items = generate_items(source_gan, samples_gan[0], 'gan')\r\n\r\n # Sample from synonym model\r\n paraphraser = SynonymParaphraser()\r\n samples_synonym = paraphraser.generate_paraphrases('datasets/human_test.csv')\r\n base_items = generate_items(source_data['question1'], samples_synonym, 'base')\r\n\r\n all_items = ref_items + ori_items + tpl_items + gan_items + base_items\r\n\r\n eval_results = {'name' : 'Paraphrase Survey Full Ordered', 'items' : all_items}\r\n res = json.dumps(eval_results, ensure_ascii=False)\r\n with open('datasets/human_test_ordered.json', 'w') as f:\r\n f.write(res)\r\n\r\n random.shuffle(all_items)\r\n\r\n eval_results = {'name' : 'Paraphrase Survey Full Shuffled', 'items' : all_items}\r\n res = json.dumps(eval_results, ensure_ascii=False)\r\n with open('datasets/human_test_shuffled.json', 'w') as f:\r\n f.write(res)\r\n\r\n for i in range(10):\r\n eval_results = {'name' : f'Paraphrase Survey Part {i+1}/{10}', 'items' : all_items[i*50:((i+1)*50)-1]}\r\n res = json.dumps(eval_results, ensure_ascii=False)\r\n with open(f'datasets/human_test_p_{i}_{10}.json', 'w') as f:\r\n f.write(res)\r\n\r\ndef generate_items(original, paraphrase, model):\r\n items = []\r\n for i in range(len(original)):\r\n\r\n questions = 'Fråga 1: ' + original[i] + '?<br>Fråga 2: ' + paraphrase[i] + '?'\r\n item = {\r\n 'question' : questions,\r\n 'required' : True,\r\n 'extra' : {'model' : model},\r\n 'order': -1,\r\n 'answer_sets' : [\r\n {\r\n \"type\": \"radio\",\r\n \"name\": \"Fråga 1 är grammatiskt korrekt: \",\r\n \"choices\": [ \"0\", \"1\", \"2\", \"3\"]\r\n },\r\n {\r\n \"type\": \"radio\",\r\n \"name\": \"Fråga 2 är grammatiskt korrekt: \",\r\n \"choices\": [ \"0\", \"1\", \"2\", \"3\"]\r\n },\r\n {\r\n \"type\": \"radio\",\r\n \"name\": \"Fråga 2 är betyder samma sak som Fråga 1: \",\r\n \"choices\": [ \"0\", \"1\", \"2\", \"3\"]\r\n }]\r\n }\r\n items.append(item)\r\n return items\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame",
"torch.device"
]
] |
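main() above repeatedly loads GPU-trained checkpoints onto CPU via map_location=torch.device('cpu'). A minimal sketch of that idiom (the path and module here are placeholders, not files from this repo):

import torch

net = torch.nn.Linear(3, 3)
torch.save(net.state_dict(), '/tmp/demo_weights.pt')       # placeholder path
state = torch.load('/tmp/demo_weights.pt', map_location=torch.device('cpu'))
net.load_state_dict(state)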
denis19973/Keras-RFCN | [
"5e1fdaf197b3a93c22a82d9476a3f9a1c804e398"
] | [
"Fashion_Test.py"
] | [
"\"\"\"\nKeras RFCN\nCopyright (c) 2018\nLicensed under the MIT License (see LICENSE for details)\nWritten by [email protected]\n\"\"\"\n\n'''\nThis is a demo to Eval a RFCN model with DeepFashion Dataset\nhttp://mmlab.ie.cuhk.edu.hk/projects/DeepFashion.html\n'''\n\nfrom KerasRFCN.Model.Model import RFCN_Model\nfrom KerasRFCN.Config import Config\nimport KerasRFCN.Utils \nimport os\nfrom keras.preprocessing import image\nimport pickle\nimport numpy as np\nimport argparse\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\n\nclass RFCNNConfig(Config):\n \"\"\"Configuration for training on the toy shapes dataset.\n Derives from the base Config class and overrides values specific\n to the toy shapes dataset.\n \"\"\"\n # Give the configuration a recognizable name\n NAME = \"Fashion\"\n\n # Backbone model\n # choose one from ['resnet50', 'resnet101', 'resnet50_dilated', 'resnet101_dilated']\n BACKBONE = \"resnet101\"\n \n # Train on 1 GPU and 8 images per GPU. We can put multiple images on each\n # GPU because the images are small. Batch size is 8 (GPUs * images/GPU).\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n\n # Number of classes (including background)\n C = 1 + 46 # background + 2 tags\n NUM_CLASSES = C\n # Use small images for faster training. Set the limits of the small side\n # the large side, and that determines the image shape.\n IMAGE_MIN_DIM = 640\n IMAGE_MAX_DIM = 768\n\n # Use smaller anchors because our image and objects are small\n RPN_ANCHOR_SCALES = (32, 64, 128, 256, 512) # anchor side in pixels\n # Use same strides on stage 4-6 if use dilated resnet of DetNet\n # Like BACKBONE_STRIDES = [4, 8, 16, 16, 16]\n BACKBONE_STRIDES = [4, 8, 16, 32, 64]\n # Reduce training ROIs per image because the images are small and have\n # few objects. 
Aim to allow ROI sampling to pick 33% positive ROIs.\n TRAIN_ROIS_PER_IMAGE = 200\n\n # Use a small epoch since the data is simple\n STEPS_PER_EPOCH = 100\n\n # use small validation steps since the epoch is small\n VALIDATION_STEPS = 5\n\n RPN_NMS_THRESHOLD = 0.7\n\n DETECTION_MIN_CONFIDENCE = 0.4\n POOL_SIZE = 7\n\n\ndef Test(model, loadpath, savepath):\n assert not loadpath == savepath, \"loadpath should'n same with savepath\"\n\n model_path = model.find_last()[1]\n # Load trained weights (fill in path to trained weights here)\n \n model.load_weights(model_path, by_name=True)\n print(\"Loading weights from \", model_path)\n\n if os.path.isdir(loadpath):\n for idx, imgname in enumerate(os.listdir(loadpath)):\n if not imgname.lower().endswith(('.bmp', '.jpeg', '.jpg', '.png', '.tif', '.tiff')):\n continue\n print(imgname)\n imageoriChannel = np.array(plt.imread( os.path.join(loadpath, imgname) )) / 255.0\n img = image.img_to_array( image.load_img(os.path.join(loadpath, imgname)) )\n TestSinglePic(img, imageoriChannel, model, savepath=savepath, imgname=imgname)\n \n elif os.path.isfile(loadpath):\n if not loadpath.lower().endswith(('.bmp', '.jpeg', '.jpg', '.png', '.tif', '.tiff')):\n print(\"not image file!\")\n return\n print(loadpath)\n imageoriChannel = np.array(plt.imread( loadpath )) / 255.0\n img = image.img_to_array( image.load_img(loadpath) )\n (filename,extension) = os.path.splitext(loadpath)\n TestSinglePic(img, imageoriChannel, model, savepath=savepath, imgname=filename)\n \ndef TestSinglePic(image, image_ori, model, savepath, imgname):\n r = model.detect([image], verbose=1)[0]\n print(r)\n def get_ax(rows=1, cols=1, size=8):\n _, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))\n return ax\n\n ax = get_ax(1)\n\n assert not savepath == \"\", \"empty save path\"\n assert not imgname == \"\", \"empty image file name\"\n\n for box in r['rois']:\n y1, x1, y2, x2 = box\n p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,\n alpha=0.7, linestyle=\"dashed\",\n edgecolor=\"red\", facecolor='none')\n ax.add_patch(p)\n ax.imshow(image_ori)\n\n plt.savefig(os.path.join(savepath, imgname),bbox_inches='tight')\n plt.clf()\n\nif __name__ == '__main__':\n ROOT_DIR = os.getcwd()\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--loadpath', required=False,\n default=\"images/\",\n metavar=\"evaluate images loadpath\",\n help=\"evaluate images loadpath\")\n parser.add_argument('--savepath', required=False,\n default=\"result/\",\n metavar=\"evaluate images savepath\",\n help=\"evaluate images savepath\")\n\n config = RFCNNConfig()\n args = parser.parse_args()\n\n model = RFCN_Model(mode=\"inference\", config=config,\n model_dir=os.path.join(ROOT_DIR, \"logs\") )\n\n Test(model, args.loadpath, args.savepath)"
] | [
[
"matplotlib.pyplot.imread",
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.clf"
]
] |
KuKuXia/DeepLearningMugenKnock | [
"979cf05e65e352da36453337380a418a2a2fdccb"
] | [
"Question_prepare/answers/answer_rotation.py"
] | [
"import cv2\nimport numpy as np\nfrom glob import glob\nimport matplotlib.pyplot as plt\n\nnp.random.seed(0)\n\nnum_classes = 2\nimg_height, img_width = 64, 64\n\nCLS = ['akahara', 'madara']\n\n# get train data\ndef data_load(path, hf=False, vf=False, rot=None):\n xs = []\n ts = []\n paths = []\n \n for dir_path in glob(path + '/*'):\n for path in glob(dir_path + '/*'):\n x = cv2.imread(path)\n x = cv2.resize(x, (img_width, img_height)).astype(np.float32)\n x /= 255.\n x = x[..., ::-1]\n xs.append(x)\n\n for i, cls in enumerate(CLS):\n if cls in path:\n t = i\n \n ts.append(t)\n\n paths.append(path)\n\n if hf:\n xs.append(x[:, ::-1])\n ts.append(t)\n paths.append(path)\n\n if vf:\n xs.append(x[::-1])\n ts.append(t)\n paths.append(path)\n\n if hf and vf:\n xs.append(x[::-1, ::-1])\n ts.append(t)\n paths.append(path)\n\n if rot is not None:\n angle = rot\n scale = 1\n\n # show\n a_num = 360 // rot\n w_num = np.ceil(np.sqrt(a_num))\n h_num = np.ceil(a_num / w_num)\n count = 1\n plt.subplot(h_num, w_num, count)\n plt.axis('off')\n plt.imshow(x)\n plt.title(\"angle=0\")\n \n while angle < 360:\n _h, _w, _c = x.shape\n max_side = max(_h, _w)\n tmp = np.zeros((max_side, max_side, _c))\n tx = int((max_side - _w) / 2)\n ty = int((max_side - _h) / 2)\n tmp[ty: ty+_h, tx: tx+_w] = x.copy()\n M = cv2.getRotationMatrix2D((max_side/2, max_side/2), angle, scale)\n _x = cv2.warpAffine(tmp, M, (max_side, max_side))\n _x = _x[tx:tx+_w, ty:ty+_h]\n xs.append(x)\n ts.append(t)\n paths.append(path)\n\n # show\n count += 1\n plt.subplot(h_num, w_num, count)\n plt.imshow(_x)\n plt.axis('off')\n plt.title(\"angle={}\".format(angle))\n\n angle += rot\n plt.show()\n\n\n xs = np.array(xs, dtype=np.float32)\n ts = np.array(ts, dtype=np.int)\n \n xs = xs.transpose(0,3,1,2)\n\n return xs, ts, paths\n\n\nxs, ts, paths = data_load(\"../Dataset/train/images/\", hf=True, vf=True, rot=1)\n\nmb = 3\nmbi = 0\ntrain_ind = np.arange(len(xs))\nnp.random.seed(0)\nnp.random.shuffle(train_ind)\n\nfor i in range(10):\n if mbi + mb > len(xs):\n mb_ind = train_ind[mbi:]\n np.random.shuffle(train_ind)\n mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))\n mbi = mb - (len(xs) - mbi)\n else:\n mb_ind = train_ind[mbi: mbi+mb]\n mbi += mb\n\n print(mb_ind)\n"
] | [
[
"numpy.sqrt",
"numpy.random.shuffle",
"numpy.ceil",
"numpy.zeros",
"matplotlib.pyplot.axis",
"numpy.random.seed",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show",
"numpy.array"
]
] |
mariuslindegaard/6.867_MARL_project | [
"572b88b4d491db8a1673535868f4bf9aff58f73d"
] | [
"src/modules/agents/noisy_agents.py"
] | [
"import torch.nn as nn\nimport torch.nn.functional as F\nfrom utils.noisy_liner import NoisyLinear\nfrom torch.nn import LayerNorm\n\nclass NoisyRNNAgent(nn.Module):\n def __init__(self, input_shape, args):\n super(NoisyRNNAgent, self).__init__()\n self.args = args\n\n self.fc1 = nn.Linear(input_shape, args.rnn_hidden_dim)\n self.rnn = nn.GRUCell(args.rnn_hidden_dim, args.rnn_hidden_dim)\n self.fc2 = NoisyLinear(args.rnn_hidden_dim, args.n_actions, True, args.device)\n\n if getattr(args, \"use_layer_norm\", False):\n self.layer_norm = LayerNorm(args.rnn_hidden_dim)\n\n def init_hidden(self):\n # make hidden states on same device as model\n return self.fc1.weight.new(1, self.args.rnn_hidden_dim).zero_()\n\n def forward(self, inputs, hidden_state):\n b, a, e = inputs.size()\n \n inputs = inputs.view(-1, e)\n x = F.relu(self.fc1(inputs), inplace=True)\n h_in = hidden_state.reshape(-1, self.args.rnn_hidden_dim)\n hh = self.rnn(x, h_in)\n\n if getattr(self.args, \"use_layer_norm\", False):\n q = self.fc2(self.layer_norm(hh))\n else:\n q = self.fc2(hh)\n\n return q.view(b, a, -1), hh.view(b, a, -1)"
] | [
[
"torch.nn.GRUCell",
"torch.nn.Linear",
"torch.nn.LayerNorm"
]
] |
aasensio/bayesDI | [
"4ddad57d89c3512b4c4ee5684ddc5608060ebdec"
] | [
"modules/flow.py"
] | [
"import numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom nflows import transforms, distributions, flows, utils\nimport nflows.nn.nets as nn_\nimport matplotlib.pyplot as pl\nfrom modules import resnet\n\n# https://github.com/stephengreen/lfi-gw/blob/master/lfigw/nde_flows.py\n\ndef create_linear_transform(input_dim):\n \"\"\"Create the composite linear transform PLU.\n Arguments:\n input_dim {int} -- dimension of the space\n Returns:\n Transform -- nde.Transform object\n \"\"\"\n \n permutation = transforms.RandomPermutation(features = input_dim)\n linear = transforms.LULinear(input_dim, identity_init=True)\n\n return transforms.CompositeTransform([permutation, linear])\n\ndef create_base_transform(i, \n input_dim, \n context_dim,\n hidden_dim=512,\n num_transform_blocks=2,\n activation='relu',\n dropout_probability=0.0,\n batch_norm=False,\n num_bins=8,\n tail_bound=1.,\n apply_unconditional_transform=False,\n base_transform_type='rq-coupling',\n transform_net='conv'):\n\n \"\"\"Build a base NSF transform of x, conditioned on y.\n This uses the PiecewiseRationalQuadraticCoupling transform or\n the MaskedPiecewiseRationalQuadraticAutoregressiveTransform, as described\n in the Neural Spline Flow paper (https://arxiv.org/abs/1906.04032).\n Code is adapted from the uci.py example from\n https://github.com/bayesiains/nsf.\n A coupling flow fixes half the components of x, and applies a transform\n to the remaining components, conditioned on the fixed components. This is\n a restricted form of an autoregressive transform, with a single split into\n fixed/transformed components.\n The transform here is a neural spline flow, where the flow is parametrized\n by a residual neural network that depends on x_fixed and y. The residual\n network consists of a sequence of two-layer fully-connected blocks.\n Arguments:\n i {int} -- index of transform in sequence\n param_dim {int} -- dimensionality of x\n Keyword Arguments:\n context_dim {int} -- dimensionality of y (default: {None})\n hidden_dim {int} -- number of hidden units per layer (default: {512})\n num_transform_blocks {int} -- number of transform blocks comprising the\n transform (default: {2})\n activation {str} -- activation function (default: {'relu'})\n dropout_probability {float} -- probability of dropping out a unit\n (default: {0.0})\n batch_norm {bool} -- whether to use batch normalization\n (default: {False})\n num_bins {int} -- number of bins for the spline (default: {8})\n tail_bound {[type]} -- [description] (default: {1.})\n apply_unconditional_transform {bool} -- whether to apply an\n unconditional transform to\n fixed components\n (default: {False})\n base_transform_type {str} -- type of base transform\n ([rq-coupling], rq-autoregressive)\n Returns:\n Transform -- the NSF transform\n \"\"\"\n\n if activation == 'elu':\n activation_fn = F.elu\n elif activation == 'relu':\n activation_fn = F.relu\n elif activation == 'leaky_relu':\n activation_fn = F.leaky_relu\n else:\n activation_fn = F.relu # Default\n print('Invalid activation function specified. 
Using ReLU.')\n\n if base_transform_type == 'rq-coupling':\n\n mask = utils.create_alternating_binary_mask(input_dim, even=(i % 2 == 0))\n\n if (transform_net == 'fc'):\n transform_net = lambda in_features, out_features: nn_.ResidualNet(\n in_features = in_features,\n out_features = out_features,\n hidden_features = hidden_dim,\n context_features = context_dim,\n num_blocks = num_transform_blocks,\n activation = activation_fn,\n dropout_probability = dropout_probability,\n use_batch_norm = batch_norm)\n\n if (transform_net == 'conv'):\n transform_net = lambda in_features, out_features: resnet.ConvResidualNet1d(\n in_channels = 1,\n out_channels = out_features // in_features,\n hidden_channels = hidden_dim,\n context_channels = context_dim,\n num_blocks = num_transform_blocks,\n activation = activation_fn,\n dropout_probability = dropout_probability,\n use_batch_norm = batch_norm)\n\n transform = transforms.PiecewiseRationalQuadraticCouplingTransform(\n mask = mask,\n transform_net_create_fn = transform_net,\n num_bins = num_bins,\n tails = 'linear',\n tail_bound = tail_bound,\n apply_unconditional_transform = apply_unconditional_transform\n )\n\n elif base_transform_type == 'rq-autoregressive':\n transform = transforms.MaskedPiecewiseRationalQuadraticAutoregressiveTransform(\n features=input_dim,\n hidden_features=hidden_dim,\n context_features=context_dim,\n num_bins=num_bins,\n tails='linear',\n tail_bound=tail_bound,\n num_blocks=num_transform_blocks,\n use_residual_blocks=True,\n random_mask=False,\n activation=activation_fn,\n dropout_probability=dropout_probability,\n use_batch_norm=batch_norm\n )\n else:\n raise ValueError\n\n return transform\n\ndef create_transform(input_dim, context_dim, num_flow_steps, base_transform_kwargs):\n \"\"\"Build a sequence of NSF transforms, which maps parameters x into the\n base distribution u (noise). Transforms are conditioned on strain data y.\n Note that the forward map is f^{-1}(x, y).\n Each step in the sequence consists of\n * A linear transform of x, which in particular permutes components\n * A NSF transform of x, conditioned on y.\n There is one final linear transform at the end.\n This function was adapted from the uci.py example in\n https://github.com/bayesiains/nsf\n Arguments:\n num_flow_steps {int} -- number of transforms in sequence\n param_dim {int} -- dimensionality of x\n context_dim {int} -- dimensionality of y\n base_transform_kwargs {dict} -- hyperparameters for NSF step\n Returns:\n Transform -- the constructed transform\n \"\"\"\n\n transform = transforms.CompositeTransform([\n transforms.CompositeTransform([\n create_linear_transform(input_dim),\n create_base_transform(i, input_dim, context_dim=context_dim, **base_transform_kwargs)\n ]) for i in range(num_flow_steps)] + [create_linear_transform(input_dim)])\n\n return transform\n\ndef fun(input_dim):\n \n return fun\n\ndef create_nsf_model(input_dim, context_dim, num_flow_steps, base_transform_kwargs, learn_normal=False):\n\n \"\"\"Build NSF (neural spline flow) model. 
This uses the nsf module\n available at https://github.com/bayesiains/nsf.\n This models the posterior distribution p(x|y).\n The model consists of\n * a base distribution (StandardNormal, dim(x))\n * a sequence of transforms, each conditioned on y\n Arguments:\n input_dim {int} -- dimensionality of x\n context_dim {int} -- dimensionality of y\n num_flow_steps {int} -- number of sequential transforms\n base_transform_kwargs {dict} -- hyperparameters for transform steps\n Returns:\n Flow -- the model\n \"\"\"\n \n # Define a base distribution.\n if (learn_normal):\n base_distribution = distributions.DiagonalNormal(shape=(input_dim,))\n else:\n base_distribution = distributions.StandardNormal(shape=(input_dim,))\n # if (sigma_base != 1):\n # def fun2(x): \n # n_batch, n = x.shape\n # return torch.cat([torch.zeros((n_batch, input_dim), device=x.device), sigma_base * torch.ones((n_batch, input_dim), device=x.device)], dim=1)\n # base_distribution = distributions.ConditionalDiagonalNormal(shape=(input_dim,), context_encoder=fun2)\n \n # Define the neural spline transform\n transform = create_transform(input_dim, context_dim, num_flow_steps, base_transform_kwargs)\n\n # Create the flow\n flow = flows.Flow(transform=transform, distribution=base_distribution)\n\n # Add the hyperparameters for reconstructing the model after loading\n flow.model_hyperparams = {\n 'input_dim': input_dim,\n 'num_flow_steps': num_flow_steps,\n 'context_dim': context_dim,\n 'base_transform_kwargs': base_transform_kwargs\n }\n \n return flow\n\ndef obtain_samples(flow, y, nsamples, device=None, batch_size=512):\n \"\"\"Draw samples from the posterior.\n Arguments:\n flow {Flow} -- NSF model\n y {array} -- strain data\n nsamples {int} -- number of samples desired\n Keyword Arguments:\n device {torch.device} -- model device (CPU or GPU) (default: {None})\n batch_size {int} -- batch size for sampling (default: {512})\n Returns:\n Tensor -- samples\n \"\"\"\n\n with torch.no_grad():\n flow.eval()\n\n y = torch.from_numpy(y).unsqueeze(0).to(device)\n\n num_batches = nsamples // batch_size\n num_leftover = nsamples % batch_size\n\n samples = [flow.sample(batch_size, y) for _ in range(num_batches)]\n if num_leftover > 0:\n samples.append(flow.sample(num_leftover, y))\n\n # The batching in the nsf package seems screwed up, so we had to do it\n # ourselves, as above. They are concatenating on the wrong axis.\n\n # samples = flow.sample(nsamples, context=y, batch_size=batch_size)\n\n return torch.cat(samples, dim=1)[0]\n \n\nif (__name__ == '__main__'):\n \n base_transform_kwargs = {\n 'hidden_dim': 50,\n 'num_transform_blocks': 2,\n 'activation': 'relu',\n 'dropout_probability': 0.0,\n 'batch_norm': False,\n 'num_bins': 10,\n 'tail_bound': 3.0,\n 'apply_unconditional_transform': False\n }\n model = create_nsf_model(20, 1, 3, base_transform_kwargs)\n\n # context = np.array([[2.]])\n # context = torch.tensor(context.astype('float32'))\n\n # samples = model.sample(5000, context).detach().cpu().numpy()\n # pl.plot(samples[0,:,0], samples[0,:,1], '.')\n # pl.show()"
] | [
[
"torch.no_grad",
"torch.cat",
"torch.from_numpy"
]
] |
zhaipro/MySceneDetect | [
"fbbe085b05e916d52253ffddd91848c3e85b2fe9"
] | [
"scenedetect/main.py"
] | [
"import sys\nimport time\n\nimport cv2\nimport numpy as np\n\n\ndef scenedetect(cap, threshold=30, min_scene_len=15):\n w = cap.get(cv2.CAP_PROP_FRAME_WIDTH)\n downscale_factor = int(w / 200)\n last_hsv = None\n first = 0\n curr = 0\n\n while True:\n ret, im = cap.read()\n if not ret:\n break\n\n curr_hsv = im[::downscale_factor, ::downscale_factor]\n curr_hsv = cv2.cvtColor(curr_hsv, cv2.COLOR_BGR2HSV)\n curr_hsv = curr_hsv.astype('int32')\n if last_hsv is not None:\n delta_hsv = np.mean(np.abs(curr_hsv - last_hsv))\n if delta_hsv >= threshold and curr - first >= min_scene_len:\n yield first, curr, delta_hsv\n first = curr\n\n last_hsv = curr_hsv\n curr += 1\n yield first, curr, 0\n\n\nfn = 'video.rmvb'\ncap = cv2.VideoCapture(fn)\nstart = time.time()\nfor first, last, delta_hsv in scenedetect(cap):\n print(first, last, delta_hsv)\nprint(time.time() - start)\ncap.release()\n"
] | [
[
"numpy.abs"
]
] |
lightyang/tensorflow | [
"14c58e1d380b2001ffdf7ef782d44ad1a21f763c"
] | [
"tensorflow/python/keras/layers/preprocessing/categorical.py"
] | [
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Keras categorical preprocessing layers.\"\"\"\n# pylint: disable=g-classes-have-attributes\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.keras.engine.base_layer import Layer\nfrom tensorflow.python.ops import lookup_ops\nfrom tensorflow.python.ops import sparse_ops\nfrom tensorflow.python.ops import string_ops\n\n\nclass CategoryLookup(Layer):\n \"\"\"Category lookup layer.\n\n This layer looks up tokens (int or string) in a vocabulary table,\n and return their indices (int). It converts a sequence of int or string to a\n sequence of int.\n\n Attributes:\n max_tokens: The maximum size of the vocabulary for this layer. If None,\n there is no cap on the size of the vocabulary. This is used when `adapt`\n is called.\n num_oov_tokens: Non-negative integer. The number of out-of-vocab tokens. All\n out-of-vocab inputs will be assigned IDs in the range of [0,\n num_oov_tokens) based on a hash.\n vocabulary: The vocabulary to lookup the input. If it is a file, it\n represents the source vocab file; If it is a list/tuple, it represents the\n source vocab list. If it is None, the vocabulary can later be set.\n name: Name to give to the layer.\n **kwargs: Keyword arguments to construct a layer.\n Input shape: A string or int tensor of shape `[batch_size, d1, ..., dm]`\n Output shape: An int tensor of shape `[batch_size, d1, .., dm]`\n Example: Consider a batch of a single input sample, `[[\"a\", \"c\", \"d\", \"a\",\n \"x\"]]`. Let's say the vocabulary is `[\"a\", \"b\", \"c\", \"d\"]` and a single OOV\n token is used (`num_oov_tokens=1`). Then the corresponding output is `[[1,\n 3, 4, 1, 0]]`. 
0 stands for an OOV token.\n \"\"\"\n\n def __init__(self,\n max_tokens=None,\n num_oov_tokens=1,\n vocabulary=None,\n name=None,\n **kwargs):\n if max_tokens is not None:\n raise ValueError('`max_tokens` and `adapt` is not supported yet.')\n if vocabulary is None:\n raise ValueError('for now, you must pass a `vocabulary` argument')\n self.max_tokens = max_tokens\n self.num_oov_tokens = num_oov_tokens\n self.vocabulary = vocabulary\n super(CategoryLookup, self).__init__(name=name, **kwargs)\n\n def __call__(self, inputs, *args, **kwargs):\n if isinstance(inputs, (np.ndarray, float, int)):\n inputs = ops.convert_to_tensor(inputs)\n self._input_dtype = inputs.dtype\n return super(CategoryLookup, self).__call__(inputs, *args, **kwargs)\n\n def build(self, input_shape):\n # categorical with vocabulary list.\n if isinstance(self.vocabulary, (tuple, list, np.ndarray)):\n self.table = lookup_ops.index_table_from_tensor(\n vocabulary_list=self.vocabulary,\n num_oov_buckets=self.num_oov_tokens,\n dtype=self._input_dtype)\n # categorical with vocabulary file.\n elif self.vocabulary:\n self.table = lookup_ops.index_table_from_file(\n vocabulary_file=self.vocabulary,\n num_oov_buckets=self.num_oov_tokens,\n key_dtype=self._input_dtype)\n\n def call(self, inputs):\n return self.table.lookup(inputs)\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n def compute_output_signature(self, input_spec):\n output_shape = self.compute_output_shape(input_spec.shape.as_list())\n output_dtype = dtypes.int64\n if isinstance(input_spec, sparse_tensor.SparseTensorSpec):\n return sparse_tensor.SparseTensorSpec(\n shape=output_shape, dtype=output_dtype)\n else:\n return tensor_spec.TensorSpec(shape=output_shape, dtype=output_dtype)\n\n def get_config(self):\n config = {\n 'max_tokens': self.max_tokens,\n 'num_oov_tokens': self.num_oov_tokens,\n 'vocabulary': self.vocabulary\n }\n base_config = super(CategoryLookup, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass CategoryCrossing(Layer):\n \"\"\"Category crossing layer.\n\n This layer transforms multiple categorical inputs to categorical outputs\n by Cartesian product, and hash the output if necessary. Without hashing\n (`num_bins=None`) the output dtype is string, with hashing the output dtype\n is int64.\n\n Arguments:\n depth: depth of input crossing. By default None, all inputs are crossed into\n one output. It can also be an int or tuple/list of ints. Passing an\n integer will create combinations of crossed outputs with depth up to that\n integer, i.e., [1, 2, ..., `depth`), and passing a tuple of integers will\n create crossed outputs with depth for the specified values in the tuple,\n i.e., `depth`=(N1, N2) will create all possible crossed outputs with depth\n equal to N1 or N2. Passing `None` means a single crossed output with all\n inputs. For example, with inputs `a`, `b` and `c`, `depth=2` means the\n output will be [a;b;c;cross(a, b);cross(bc);cross(ca)].\n num_bins: Number of hash bins. 
By default None, no hashing is performed.\n name: Name to give to the layer.\n **kwargs: Keyword arguments to construct a layer.\n\n Input shape: a list of string or int tensors or sparse tensors of shape\n `[batch_size, d1, ..., dm]`\n\n Output shape: a single string or int tensor or sparse tensor of shape\n `[batch_size, d1, ..., dm]`\n\n Example: (`depth`=None)\n If the layer receives three inputs:\n `a=[[1], [4]]`, `b=[[2], [5]]`, `c=[[3], [6]]`\n the output will be a string tensor if not hashed:\n `[[b'1_X_2_X_3'], [b'4_X_5_X_6']]`\n the output will be an int64 tensor if hashed:\n `[[hash(b'1_X_2_X_3')], [hash(b'4_X_5_X_6')]]`\n\n Example: (`depth` is an integer)\n With the same input above, and if `depth`=2,\n the output will be a list of 6 string tensors if not hashed:\n `[[b'1'], [b'4']]`\n `[[b'2'], [b'5']]`\n `[[b'3'], [b'6']]`\n `[[b'1_X_2'], [b'4_X_5']]`,\n `[[b'2_X_3'], [b'5_X_6']]`,\n `[[b'3_X_1'], [b'6_X_4']]`\n the output will be a list of 6 int64 tensors if hashed:\n `[[hash(b'1')], [hash(b'4')]]`\n `[[hash(b'2')], [hash(b'5')]]`\n `[[hash(b'3')], [hash(b'6')]]`\n `[[hash(b'1_X_2')], [hash(b'4_X_5')]]`,\n `[[hash(b'2_X_3')], [hash(b'5_X_6')]]`,\n `[[hash(b'3_X_1')], [hash(b'6_X_4')]]`\n\n Example: (`depth` is a tuple/list of integers)\n With the same input above, and if `depth`=(2, 3)\n the output will be a list of 4 string tensors if not hashed:\n `[[b'1_X_2'], [b'4_X_5']]`,\n `[[b'2_X_3'], [b'5_X_6']]`,\n `[[b'3_X_1'], [b'6_X_4']]`,\n `[[b'1_X_2_X_3'], [b'4_X_5_X_6']]`\n the output will be a list of 4 int64 tensors if hashed:\n `[[hash(b'1_X_2')], [hash(b'4_X_5')]]`,\n `[[hash(b'2_X_3')], [hash(b'5_X_6')]]`,\n `[[hash(b'3_X_1')], [hash(b'6_X_4')]]`,\n `[[hash(b'1_X_2_X_3')], [hash(b'4_X_5_X_6')]]`\n \"\"\"\n\n def __init__(self, depth=None, num_bins=None, name=None, **kwargs):\n # TODO(tanzheny): Add support for depth.\n # TODO(tanzheny): Consider making seperator configurable.\n if depth is not None:\n raise NotImplementedError('`depth` is not supported yet.')\n self.num_bins = num_bins\n self.depth = depth\n super(CategoryCrossing, self).__init__(name=name, **kwargs)\n\n def call(self, inputs):\n sparse_output = False\n if any([isinstance(inp, sparse_tensor.SparseTensor) for inp in inputs]):\n sparse_output = True\n if self.num_bins is not None:\n output = sparse_ops.sparse_cross_hashed(\n inputs, num_buckets=self.num_bins)\n else:\n output = sparse_ops.sparse_cross(inputs)\n if not sparse_output:\n output = sparse_ops.sparse_tensor_to_dense(output)\n return output\n\n def compute_output_shape(self, input_shape):\n if not isinstance(input_shape, (tuple, list)):\n raise ValueError('A `CategoryCrossing` layer should be called '\n 'on a list of inputs.')\n input_shapes = input_shape\n batch_size = None\n for inp_shape in input_shapes:\n inp_tensor_shape = tensor_shape.TensorShape(inp_shape).as_list()\n if len(inp_tensor_shape) != 2:\n raise ValueError('Inputs must be rank 2, get {}'.format(input_shapes))\n if batch_size is None:\n batch_size = inp_tensor_shape[0]\n # The second dimension is dynamic based on inputs.\n output_shape = [batch_size, None]\n return tensor_shape.TensorShape(output_shape)\n\n def compute_output_signature(self, input_spec):\n input_shapes = [x.shape for x in input_spec]\n output_shape = self.compute_output_shape(input_shapes)\n output_dtype = dtypes.int64 if self.num_bins else dtypes.string\n return sparse_tensor.SparseTensorSpec(\n shape=output_shape, dtype=output_dtype)\n\n def get_config(self):\n config = {'depth': self.depth, 'num_bins': 
self.num_bins}\n base_config = super(CategoryCrossing, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass Hashing(Layer):\n \"\"\"Implements categorical feature hashing, also known as \"hashing trick\".\n\n This layer transforms categorical inputs to hashed output. It converts a\n sequence of int or string to a sequence of int. The stable hash function uses\n tensorflow::ops::Fingerprint to produce universal output that is consistent\n across platforms.\n\n Usage:\n ```python\n layer = Hashing(num_bins=3)\n inp = np.asarray([['A', 'B'], ['C', 'A']])\n layer(inputs)\n [[0, 0], [1, 0]]\n ```\n\n Arguments:\n num_bins: Number of hash bins.\n name: Name to give to the layer.\n **kwargs: Keyword arguments to construct a layer.\n\n Input shape: A string, int32 or int64 tensor of shape\n `[batch_size, d1, ..., dm]`\n\n Output shape: An int64 tensor of shape `[batch_size, d1, ..., dm]`\n\n Example:\n If the input is a 5 by 1 string tensor '[['A'], ['B'], ['C'], ['D'], ['E']]'\n with `num_bins=2`, then output is 5 by 1 integer tensor\n [[hash('A')], [hash('B')], [hash('C')], [hash('D')], [hash('E')]].\n \"\"\"\n\n def __init__(self, num_bins, name=None, **kwargs):\n # TODO(tanzheny): consider adding strong hash variant.\n self._num_bins = num_bins\n super(Hashing, self).__init__(name=name, **kwargs)\n\n def call(self, inputs):\n # TODO(tanzheny): Add ragged support.\n # TODO(tanzheny): Add int support.\n if isinstance(inputs, sparse_tensor.SparseTensor):\n sparse_values = inputs.values\n sparse_hashed_values = string_ops.string_to_hash_bucket_fast(\n sparse_values, self._num_bins, name='lookup')\n return sparse_tensor.SparseTensor(\n indices=inputs.indices,\n values=sparse_hashed_values,\n dense_shape=inputs.dense_shape)\n # string_to_hash_bucket_fast uses FarmHash as hash function.\n return string_ops.string_to_hash_bucket_fast(\n inputs, self._num_bins, name='lookup')\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n def compute_output_signature(self, input_spec):\n output_shape = self.compute_output_shape(input_spec.shape.as_list())\n output_dtype = dtypes.int64\n if isinstance(input_spec, sparse_tensor.SparseTensorSpec):\n return sparse_tensor.SparseTensorSpec(\n shape=output_shape, dtype=output_dtype)\n else:\n return tensor_spec.TensorSpec(shape=output_shape, dtype=output_dtype)\n\n def get_config(self):\n config = {'num_bins': self._num_bins}\n base_config = super(Hashing, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n"
] | [
[
"tensorflow.python.ops.lookup_ops.index_table_from_file",
"tensorflow.python.framework.tensor_spec.TensorSpec",
"tensorflow.python.ops.sparse_ops.sparse_cross_hashed",
"tensorflow.python.framework.sparse_tensor.SparseTensorSpec",
"tensorflow.python.ops.sparse_ops.sparse_tensor_to_dense",
"tensorflow.python.framework.sparse_tensor.SparseTensor",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.ops.lookup_ops.index_table_from_tensor",
"tensorflow.python.ops.sparse_ops.sparse_cross",
"tensorflow.python.ops.string_ops.string_to_hash_bucket_fast",
"tensorflow.python.framework.ops.convert_to_tensor"
]
] |
stillmatic/pandas | [
"da067b2fe4cdc43eac5349e0648cfbbe4b96dbbd"
] | [
"pandas/tests/categorical/test_algos.py"
] | [
"import pytest\nimport numpy as np\n\nimport pandas as pd\nimport pandas.util.testing as tm\n\n\[email protected]('ordered', [True, False])\[email protected]('categories', [\n ['b', 'a', 'c'],\n ['a', 'b', 'c', 'd'],\n])\ndef test_factorize(categories, ordered):\n cat = pd.Categorical(['b', 'b', 'a', 'c', None],\n categories=categories,\n ordered=ordered)\n labels, uniques = pd.factorize(cat)\n expected_labels = np.array([0, 0, 1, 2, -1], dtype=np.intp)\n expected_uniques = pd.Categorical(['b', 'a', 'c'],\n categories=categories,\n ordered=ordered)\n\n tm.assert_numpy_array_equal(labels, expected_labels)\n tm.assert_categorical_equal(uniques, expected_uniques)\n\n\ndef test_factorized_sort():\n cat = pd.Categorical(['b', 'b', None, 'a'])\n labels, uniques = pd.factorize(cat, sort=True)\n expected_labels = np.array([1, 1, -1, 0], dtype=np.intp)\n expected_uniques = pd.Categorical(['a', 'b'])\n\n tm.assert_numpy_array_equal(labels, expected_labels)\n tm.assert_categorical_equal(uniques, expected_uniques)\n\n\ndef test_factorized_sort_ordered():\n cat = pd.Categorical(['b', 'b', None, 'a'],\n categories=['c', 'b', 'a'],\n ordered=True)\n\n labels, uniques = pd.factorize(cat, sort=True)\n expected_labels = np.array([0, 0, -1, 1], dtype=np.intp)\n expected_uniques = pd.Categorical(['b', 'a'],\n categories=['c', 'b', 'a'],\n ordered=True)\n\n tm.assert_numpy_array_equal(labels, expected_labels)\n tm.assert_categorical_equal(uniques, expected_uniques)\n"
] | [
[
"pandas.util.testing.assert_numpy_array_equal",
"pandas.Categorical",
"numpy.array",
"pandas.util.testing.assert_categorical_equal",
"pandas.factorize"
]
] |
supersamdam/ConversationalAI | [
"bb6013c33f6332aee57abbae310577c056c6fdc1"
] | [
"Prototype.py"
] | [
"import numpy as np\nimport pandas as pd\nimport re\nimport nltk\nnltk.download('stopwords')\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import PorterStemmer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.metrics import confusion_matrix, accuracy_score\nimport pickle\nimport joblib\n\n# Class starts from here\nclass CONVAI:\n #this is the empty vocabulary (vectorizer)\n cv = CountVectorizer(max_features = 20000) #change in no of features will result in how many different/unique words it will have\n classifier = GaussianNB() #this is the main algorith which works on probablistic approach\n no = 1000 #change this to change the number of data in terms of line you want to fed in model\n \n def init(self): #basic function \n dataset = pd.read_csv('data.csv') #dataset loaded\n no=self.no\n corpus = [] #corpus will have cleaned data\n for i in range(0, no):\n review = re.sub('[^a-zA-Z]', ' ', dataset['0'][i])\n review = review.lower()\n review = review.split()\n ps = PorterStemmer()\n all_stopwords = stopwords.words('english')\n all_stopwords.remove('not')\n review = [ps.stem(word) for word in review if not word in set(all_stopwords)]\n review = ' '.join(review)\n corpus.append(review)\n \n print(corpus)\n \n \n X = self.cv.fit_transform(corpus).toarray() #divided dataset into 2 parts this will be like questions\n y = dataset.iloc[0:no, 2].values #this will be like answer to the abouve question\n # print(X)\n \n\n \n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0) #splitted dataset into train and test\n \n \n \n sav = self.classifier.fit(X_train, y_train) \n \n y_pred = self.classifier.predict(X_test) #all the action is done here\n print(np.concatenate((y_pred.reshape(len(y_pred),1,), y_test.reshape(len(y_test),1)),1),) #printing the current actions\n \n\n cm = confusion_matrix(y_test, y_pred) \n print(cm)\n a = accuracy_score(y_test, y_pred)\n print(a)\n joblib.dump(self.cv, \"vectorizer1.pkl\") #vocabulary is saved here\n joblib.dump(self.classifier, \"classifier1.pkl\") #algorithm is saved here\n\n\n # with open('model.pkl', 'wb') as fout:\n # pickle.dump((cv, classifier), fout)\n\n # filename = 'finalized_model.sav'\n # pickle.dump(cv, open(filename, 'wb'))\n # filename = 'finalized.sav' \n # pickle.dump(cv, open(filename, 'wb'))\n\n\n # saved_model = pickle.dumps(classifier)\n\n \n def Test(self,query): #this is the function for implementation of new inputs\n vectorizer = joblib.load(\"vectorizer.pkl\") #vocabulary is loaded\n classifier = joblib.load(\"classifier.pkl\") #algoritm is loaded\n\n # with open('model.pkl', 'rb') as fin:\n # cv, classifier = pickle.load(fin)\n \n #This is known as preprocessing the data\n cv = self.cv\n classifier = self.classifier\n #query = input()\n new_review = query\n new_review = re.sub('[^a-zA-Z]', ' ', new_review)\n new_review = new_review.lower() \n new_review = new_review.split()\n ps = PorterStemmer()\n all_stopwords = stopwords.words('english')\n all_stopwords.remove('not')\n new_review = [ps.stem(word) for word in new_review if not word in set(all_stopwords)]\n new_review = ' '.join(new_review)\n new_corpus = [new_review]\n new_X_test = cv.transform(new_corpus).toarray() \n new_y_pred = classifier.predict(new_X_test)\n print(new_y_pred) #output from the algorithm is printed\n return new_y_pred #output from the algorithm is returned\n \nif __name__ == \"__main__\": 
#main class\n a=CONVAI() #created instance(object) of the class CONVAI\n a.init() #called the function which will start training\n a.Test(\"hello\") #enter different type of input here to get new output results \n\n"
] | [
[
"sklearn.feature_extraction.text.CountVectorizer",
"pandas.read_csv",
"sklearn.metrics.confusion_matrix",
"sklearn.metrics.accuracy_score",
"sklearn.model_selection.train_test_split",
"sklearn.naive_bayes.GaussianNB"
]
] |
XiaotingChen/tfmodisco | [
"17cbafe806942304a02e8134fe10224bdff38b0c"
] | [
"modisco/value_provider.py"
] | [
"from __future__ import division, print_function, absolute_import\nimport numpy as np\nimport scipy.stats\n\n\nclass AbstractValueProvider(object):\n\n def __call__(self, seqlet):\n raise NotImplementedError()\n\n @classmethod\n def from_hdf5(cls, grp):\n the_class = eval(grp.attrs[\"class\"])\n return the_class.from_hdf5(grp) \n\n\nclass CoorScoreValueProvider(AbstractValueProvider):\n\n def __call__(self, seqlet):\n return seqlet.coor.score \n\n def save_hdf5(self, grp):\n grp.attrs[\"class\"] = type(self).__name__\n\n @classmethod\n def from_hdf5(cls, grp):\n return cls()\n\n\nclass TransformCentralWindowValueProvider(AbstractValueProvider):\n\n def __init__(self, track_name, central_window, val_transformer):\n if isinstance(track_name, str):\n self.track_name = track_name\n else: \n self.track_name = track_name.decode('utf-8')\n self.central_window = central_window\n self.val_transformer = val_transformer\n\n def __call__(self, seqlet):\n val = self.get_val(seqlet=seqlet)\n return self.val_transformer(val=val)\n\n def get_val(self, seqlet):\n flank_to_ignore = int(0.5*(len(seqlet)-self.central_window))\n track_values = seqlet[self.track_name]\\\n .fwd[flank_to_ignore:(len(seqlet)-flank_to_ignore)]\n return np.sum(track_values)\n\n def save_hdf5(self, grp):\n grp.attrs[\"class\"] = type(self).__name__\n grp.attrs[\"track_name\"] = self.track_name\n grp.attrs[\"central_window\"] = self.central_window\n self.val_transformer.save_hdf5(grp.create_group(\"val_transformer\")) \n\n @classmethod\n def from_hdf5(cls, grp):\n if isinstance(grp.attrs[\"track_name\"], str):\n track_name = grp.attrs[\"track_name\"]\n else:\n track_name = grp.attrs[\"track_name\"].decode('utf-8')\n central_window = grp.attrs[\"central_window\"] \n val_transformer = AbstractValTransformer.from_hdf5(\n grp[\"val_transformer\"]) \n return cls(track_name=track_name,\n central_window=central_window,\n val_transformer=val_transformer)\n\n\nclass AbstractValTransformer(object):\n\n def __call__(self, val):\n raise NotImplementedError()\n\n @classmethod\n def from_hdf5(cls, grp):\n the_class = eval(grp.attrs[\"class\"])\n return the_class.from_hdf5(grp) \n\n\nclass AbsPercentileValTransformer(AbstractValTransformer):\n\n def __init__(self, distribution):\n self.distribution = np.array(sorted(np.abs(distribution)))\n\n @classmethod\n def from_hdf5(cls, grp):\n distribution = np.array(grp[\"distribution\"][:])\n return cls(distribution=distribution) \n\n def save_hdf5(self, grp):\n grp.attrs[\"class\"] = type(self).__name__\n grp.create_dataset(\"distribution\", data=self.distribution)\n\n def __call__(self, val):\n return np.sign(val)*np.searchsorted(\n a=self.distribution,\n v=abs(val))/float(len(self.distribution))\n\n\nclass SignedPercentileValTransformer(AbstractValTransformer):\n\n def __init__(self, distribution):\n self.distribution = np.array(distribution)\n self.pos_dist = np.array(sorted(\n [x for x in self.distribution if x > 0]))\n self.abs_neg_dist = np.array(sorted(\n [abs(x) for x in self.distribution if x < 0]))\n\n @classmethod\n def from_hdf5(cls, grp):\n distribution = np.array(grp[\"distribution\"][:])\n return cls(distribution=distribution) \n\n def save_hdf5(self, grp):\n grp.attrs[\"class\"] = type(self).__name__\n grp.create_dataset(\"distribution\", data=self.distribution)\n\n def __call__(self, val):\n if (val == 0):\n return 0\n elif (val > 0):\n #add 1E-7 for complicated numerical stability issues \n # basically need robustness when dealing with ties\n return np.searchsorted(\n a=self.pos_dist, 
v=(val+1E-7))/float(len(self.pos_dist))\n else:\n #add 1E-7 for complicated numerical stability issues \n # basically need robustness when dealing with ties\n return np.searchsorted(\n a=self.abs_neg_dist, v=(abs(val)+1E-7))/float(\n len(self.abs_neg_dist))\n"
] | [
[
"numpy.sum",
"numpy.sign",
"numpy.searchsorted",
"numpy.abs",
"numpy.array"
]
] |
bgalbraith/macarico | [
"448e3e7f088dde0f4eb016fbdee857221b9523fb"
] | [
"macarico/actors/bow.py"
] | [
"from __future__ import division, generators, print_function\n\nimport torch\nimport torch.nn as nn\n\nimport macarico\nimport macarico.util as util\nfrom macarico.util import Var, Varng\n\nclass BOWActor(macarico.Actor):\n def __init__(self, attention, n_actions, act_history_length=1, obs_history_length=0):\n self.att_dim = sum((att.dim for att in attention))\n super().__init__(n_actions,\n self.att_dim + \n act_history_length * n_actions + \\\n obs_history_length * self.att_dim,\n attention)\n self.act_history_length = act_history_length\n self.obs_history_length = obs_history_length\n self._reset()\n\n def _forward(self, state, x):\n feats = x[:]\n if self.act_history_length > 0:\n f = util.zeros(self, 1, self.act_history_length * self.n_actions)\n for i in range(min(self.act_history_length, len(state._trajectory))):\n a = state._trajectory[-i]\n f[0, i * self.n_actions + a] = 1\n feats.append(Varng(f))\n if self.obs_history_length > 0:\n for i in range(self.obs_history_length):\n feats.append(Varng(self.obs_history[(self.obs_history_pos+i) % self.obs_history_length]))\n # update history\n self.obs_history[self.obs_history_pos] = torch.cat(x, dim=1).data\n self.obs_history_pos = (self.obs_history_pos + 1) % self.obs_history_length\n return torch.cat(feats, dim=1)\n\n def _reset(self):\n self.obs_history = []\n for _ in range(self.obs_history_length):\n self.obs_history.append(util.zeros(self, 1, self.att_dim))\n self.obs_history_pos = 0\n \n"
] | [
[
"torch.cat"
]
] |
FrancisLiang/models-1 | [
"e14d5bc1ab36d0dd11977f27cff54605bf99c945"
] | [
"PaddleNLP/emotion_detection/run_classifier.py"
] | [
"\"\"\"\nEmotion Detection Task\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport time\nimport argparse\nimport multiprocessing\nimport sys\nsys.path.append(\"../\")\n\nimport paddle\nimport paddle.fluid as fluid\nimport numpy as np\n\nfrom models.classification import nets\nimport reader\nimport config\nimport utils\n\nparser = argparse.ArgumentParser(__doc__)\nmodel_g = utils.ArgumentGroup(parser, \"model\", \"model configuration and paths.\")\nmodel_g.add_arg(\"config_path\", str, None, \"Path to the json file for EmoTect model config.\")\nmodel_g.add_arg(\"init_checkpoint\", str, None, \"Init checkpoint to resume training from.\")\nmodel_g.add_arg(\"output_dir\", str, None, \"Directory path to save checkpoints\")\n\ntrain_g = utils.ArgumentGroup(parser, \"training\", \"training options.\")\ntrain_g.add_arg(\"epoch\", int, 10, \"Number of epoches for training.\")\ntrain_g.add_arg(\"save_steps\", int, 10000, \"The steps interval to save checkpoints.\")\ntrain_g.add_arg(\"validation_steps\", int, 1000, \"The steps interval to evaluate model performance.\")\ntrain_g.add_arg(\"lr\", float, 0.002, \"The Learning rate value for training.\")\n\nlog_g = utils.ArgumentGroup(parser, \"logging\", \"logging related\")\nlog_g.add_arg(\"skip_steps\", int, 10, \"The steps interval to print loss.\")\nlog_g.add_arg(\"verbose\", bool, False, \"Whether to output verbose log\")\n\ndata_g = utils.ArgumentGroup(parser, \"data\", \"Data paths, vocab paths and data processing options\")\ndata_g.add_arg(\"data_dir\", str, None, \"Directory path to training data.\")\ndata_g.add_arg(\"vocab_path\", str, None, \"Vocabulary path.\")\ndata_g.add_arg(\"batch_size\", int, 256, \"Total examples' number in batch for training.\")\ndata_g.add_arg(\"random_seed\", int, 0, \"Random seed.\")\n\nrun_type_g = utils.ArgumentGroup(parser, \"run_type\", \"running type options.\")\nrun_type_g.add_arg(\"use_cuda\", bool, False, \"If set, use GPU for training.\")\nrun_type_g.add_arg(\"task_name\", str, None, \"The name of task to perform sentiment classification.\")\nrun_type_g.add_arg(\"do_train\", bool, False, \"Whether to perform training.\")\nrun_type_g.add_arg(\"do_val\", bool, False, \"Whether to perform evaluation.\")\nrun_type_g.add_arg(\"do_infer\", bool, False, \"Whether to perform inference.\")\n\nparser.add_argument('--enable_ce', action='store_true', help='If set, run the task with continuous evaluation logs.')\n\nargs = parser.parse_args()\n\ndef create_model(args,\n pyreader_name,\n emotect_config,\n num_labels,\n is_infer=False):\n \"\"\"\n Create Model for sentiment classification\n \"\"\"\n if is_infer:\n pyreader = fluid.layers.py_reader(\n capacity=16,\n shapes=[[-1, 1]],\n dtypes=['int64'],\n lod_levels=[1],\n name=pyreader_name,\n use_double_buffer=False)\n else:\n pyreader = fluid.layers.py_reader(\n capacity=16,\n shapes=([-1, 1], [-1, 1]),\n dtypes=('int64', 'int64'),\n lod_levels=(1, 0),\n name=pyreader_name,\n use_double_buffer=False)\n\n if emotect_config['model_type'] == \"cnn_net\":\n network = nets.cnn_net\n elif emotect_config['model_type'] == \"bow_net\":\n network = nets.bow_net\n elif emotect_config['model_type'] == \"lstm_net\":\n network = nets.lstm_net\n elif emotect_config['model_type'] == \"bilstm_net\":\n network = nets.bilstm_net\n elif emotect_config['model_type'] == \"gru_net\":\n network = nets.gru_net\n elif emotect_config['model_type'] == \"textcnn_net\":\n network = nets.textcnn_net\n else:\n 
raise ValueError(\"Unknown network type!\")\n\n if is_infer:\n data = fluid.layers.read_file(pyreader)\n probs = network(data, None, emotect_config[\"vocab_size\"], class_dim=num_labels, is_infer=True)\n return pyreader, probs\n\n data, label = fluid.layers.read_file(pyreader)\n avg_loss, probs = network(data, label, emotect_config[\"vocab_size\"], class_dim=num_labels)\n num_seqs = fluid.layers.create_tensor(dtype='int64')\n accuracy = fluid.layers.accuracy(input=probs, label=label, total=num_seqs)\n return pyreader, avg_loss, accuracy, num_seqs\n\n\ndef evaluate(exe, test_program, test_pyreader, fetch_list, eval_phase):\n \"\"\"\n Evaluation Function\n \"\"\"\n test_pyreader.start()\n total_cost, total_acc, total_num_seqs = [], [], []\n time_begin = time.time()\n while True:\n try:\n np_loss, np_acc, np_num_seqs = exe.run(program=test_program,\n fetch_list=fetch_list,\n return_numpy=False)\n np_loss = np.array(np_loss)\n np_acc = np.array(np_acc)\n np_num_seqs = np.array(np_num_seqs)\n total_cost.extend(np_loss * np_num_seqs)\n total_acc.extend(np_acc * np_num_seqs)\n total_num_seqs.extend(np_num_seqs)\n except fluid.core.EOFException:\n test_pyreader.reset()\n break\n time_end = time.time()\n print(\"[%s evaluation] avg loss: %f, avg acc: %f, elapsed time: %f s\" %\n (eval_phase, np.sum(total_cost) / np.sum(total_num_seqs),\n np.sum(total_acc) / np.sum(total_num_seqs), time_end - time_begin))\n\n\ndef infer(exe, infer_program, infer_pyreader, fetch_list, infer_phase):\n infer_pyreader.start()\n time_begin = time.time()\n while True:\n try:\n batch_probs = exe.run(program=infer_program,\n fetch_list=fetch_list,\n return_numpy=True)\n for probs in batch_probs[0]:\n print(\"%d\\t%f\\t%f\\t%f\" % (np.argmax(probs), probs[0], probs[1], probs[2]))\n except fluid.core.EOFException as e:\n infer_pyreader.reset()\n break\n time_end = time.time()\n print(\"[%s] elapsed time: %f s\" % (infer_phase, time_end - time_begin))\n\n\ndef main(args):\n \"\"\"\n Main Function\n \"\"\"\n emotect_config = config.EmoTectConfig(args.config_path)\n\n if args.use_cuda:\n place = fluid.CUDAPlace(int(os.getenv('FLAGS_selected_gpus', '0')))\n else:\n place = fluid.CPUPlace()\n exe = fluid.Executor(place)\n\n task_name = args.task_name.lower()\n processor = reader.EmoTectProcessor(data_dir=args.data_dir,\n vocab_path=args.vocab_path,\n random_seed=args.random_seed)\n num_labels = len(processor.get_labels())\n\n if not (args.do_train or args.do_val or args.do_infer):\n raise ValueError(\"For args `do_train`, `do_val` and `do_infer`, at \"\n \"least one of them must be True.\")\n\n startup_prog = fluid.Program()\n if args.random_seed is not None:\n startup_prog.random_seed = args.random_seed\n\n if args.do_train:\n train_data_generator = processor.data_generator(\n batch_size=args.batch_size,\n phase='train',\n epoch=args.epoch)\n\n num_train_examples = processor.get_num_examples(phase=\"train\")\n max_train_steps = args.epoch * num_train_examples // args.batch_size + 1\n\n print(\"Num train examples: %d\" % num_train_examples)\n print(\"Max train steps: %d\" % max_train_steps)\n\n train_program = fluid.Program()\n if args.random_seed is not None:\n train_program.random_seed = args.random_seed\n\n with fluid.program_guard(train_program, startup_prog):\n with fluid.unique_name.guard():\n train_pyreader, loss, accuracy, num_seqs = create_model(\n args,\n pyreader_name='train_reader',\n emotect_config=emotect_config,\n num_labels=num_labels,\n is_infer=False)\n\n sgd_optimizer = 
fluid.optimizer.Adagrad(learning_rate=args.lr)\n sgd_optimizer.minimize(loss)\n\n if args.verbose:\n lower_mem, upper_mem, unit = fluid.contrib.memory_usage(\n program=train_program, batch_size=args.batch_size)\n print(\"Theoretical memory usage in training: %.3f - %.3f %s\" %\n (lower_mem, upper_mem, unit))\n\n if args.do_val:\n test_prog = fluid.Program()\n with fluid.program_guard(test_prog, startup_prog):\n with fluid.unique_name.guard():\n test_pyreader, loss, accuracy, num_seqs = create_model(\n args,\n pyreader_name='test_reader',\n emotect_config=emotect_config,\n num_labels=num_labels,\n is_infer=False)\n test_prog = test_prog.clone(for_test=True)\n\n if args.do_infer:\n test_prog = fluid.Program()\n with fluid.program_guard(test_prog, startup_prog):\n with fluid.unique_name.guard():\n infer_pyreader, probs = create_model(\n args,\n pyreader_name='infer_reader',\n emotect_config=emotect_config,\n num_labels=num_labels,\n is_infer=True)\n test_prog = test_prog.clone(for_test=True)\n\n exe.run(startup_prog)\n\n if args.do_train:\n if args.init_checkpoint:\n utils.init_checkpoint(\n exe,\n args.init_checkpoint,\n main_program=startup_prog)\n elif args.do_val or args.do_infer:\n if not args.init_checkpoint:\n raise ValueError(\"args 'init_checkpoint' should be set if\"\n \"only doing validation or infer!\")\n utils.init_checkpoint(\n exe,\n args.init_checkpoint,\n main_program=test_prog)\n\n if args.do_train:\n train_exe = exe\n train_pyreader.decorate_paddle_reader(train_data_generator)\n else:\n train_exe = None\n if args.do_val or args.do_infer:\n test_exe = exe\n\n if args.do_train:\n train_pyreader.start()\n steps = 0\n total_cost, total_acc, total_num_seqs = [], [], []\n time_begin = time.time()\n ce_info = []\n while True:\n try:\n steps += 1\n if steps % args.skip_steps == 0:\n fetch_list = [loss.name, accuracy.name, num_seqs.name]\n else:\n fetch_list = []\n\n outputs = train_exe.run(program=train_program,\n fetch_list=fetch_list,\n return_numpy=False)\n if steps % args.skip_steps == 0:\n np_loss, np_acc, np_num_seqs = outputs\n np_loss = np.array(np_loss)\n np_acc = np.array(np_acc)\n np_num_seqs = np.array(np_num_seqs)\n total_cost.extend(np_loss * np_num_seqs)\n total_acc.extend(np_acc * np_num_seqs)\n total_num_seqs.extend(np_num_seqs)\n\n if args.verbose:\n verbose = \"train pyreader queue size: %d, \" % train_pyreader.queue.size()\n print(verbose)\n\n time_end = time.time()\n used_time = time_end - time_begin\n print(\"step: %d, avg loss: %f, \"\n \"avg acc: %f, speed: %f steps/s\" %\n (steps, np.sum(total_cost) / np.sum(total_num_seqs),\n np.sum(total_acc) / np.sum(total_num_seqs),\n args.skip_steps / used_time))\n ce_info.append([np.sum(total_cost) / np.sum(total_num_seqs), np.sum(total_acc) / np.sum(total_num_seqs), used_time])\n total_cost, total_acc, total_num_seqs = [], [], []\n time_begin = time.time()\n\n if steps % args.save_steps == 0:\n save_path = os.path.join(args.output_dir, \"step_\" + str(steps))\n fluid.io.save_persistables(exe, save_path, train_program)\n\n if steps % args.validation_steps == 0:\n # evaluate on dev set\n if args.do_val:\n test_pyreader.decorate_paddle_reader(\n processor.data_generator(\n batch_size=args.batch_size,\n phase='dev',\n epoch=1))\n evaluate(test_exe, test_prog, test_pyreader,\n [loss.name, accuracy.name, num_seqs.name],\n \"dev\")\n\n except fluid.core.EOFException:\n save_path = os.path.join(args.output_dir, \"step_\" + str(steps))\n fluid.io.save_persistables(exe, save_path, train_program)\n train_pyreader.reset()\n 
break\n\n if args.do_train and args.enable_ce:\n card_num = get_cards()\n ce_loss = 0\n ce_acc = 0\n ce_time = 0\n try:\n ce_loss = ce_info[-2][0]\n ce_acc = ce_info[-2][1]\n ce_time = ce_info[-2][2]\n except:\n print(\"ce info error\")\n print(\"kpis\\teach_step_duration_%s_card%s\\t%s\" %\n (task_name, card_num, ce_time))\n print(\"kpis\\ttrain_loss_%s_card%s\\t%f\" %\n (task_name, card_num, ce_loss))\n print(\"kpis\\ttrain_acc_%s_card%s\\t%f\" %\n (task_name, card_num, ce_acc))\n\n # evaluate on test set\n if not args.do_train and args.do_val:\n test_pyreader.decorate_paddle_reader(\n processor.data_generator(\n batch_size=args.batch_size,\n phase='test',\n epoch=1))\n print(\"Final test result:\")\n evaluate(test_exe, test_prog, test_pyreader,\n [loss.name, accuracy.name, num_seqs.name],\n \"test\")\n\n # infer\n if args.do_infer:\n infer_pyreader.decorate_paddle_reader(\n processor.data_generator(\n batch_size=args.batch_size,\n phase='infer',\n epoch=1))\n infer(test_exe, test_prog, infer_pyreader,\n [probs.name], \"infer\")\n\n\ndef get_cards():\n num = 0\n cards = os.environ.get('CUDA_VISIBLE_DEVICES', '')\n if cards != '':\n num = len(cards.split(\",\"))\n return num\n\n\nif __name__ == \"__main__\":\n utils.print_arguments(args)\n main(args)\n"
] | [
[
"numpy.array",
"numpy.sum",
"numpy.argmax"
]
] |
ice-blaze/simple-captcha-deeplearning | [
"16960249bf316bef8fe6b9d86113c902309b36c5"
] | [
"deep_learning.py"
] | [
"from generate_captchas import CHAR_POSSIBILITIES\nfrom generate_captchas import generate_captcha\nfrom generate_captchas import get_random_captcha_names_and_lines\nfrom digital_processing_image_approach import clean_image_kernel4\nimport keras\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, Dropout\nimport os\nimport imageio\nimport random\nimport numpy as np\nnp.random.seed(123) # for reproducibility\n\n\ndef add_dict(a, b):\n \"\"\"\n :param a dict: Dictionary we will merge with b\n :param b dict: Dictionary that will be merged into a\n :return a dict: Merged dictionary of a and b\n \"\"\"\n for key in b:\n a[key] = a.get(key, 0) + b[key]\n\n return a\n\n\ndef similar(real, predicted):\n \"\"\"\n Compare if the captcha code predicted is close to the real one\n :param real string: Real captcha string\n :param predicted string: Predicted captcha string\n :return\n wrong_letter_count float: Percentage of wrong letter\n wrong_letter_dict dict: Dict of all wrong letters as key and a counter\n of failed as value\n \"\"\"\n wrong_letter_count = 0\n\n wrong_letter_dict = {}\n for real_letter, preddicted_letter in zip(real, predicted):\n if real_letter != preddicted_letter:\n wrong_letter_dict[real_letter] = \\\n wrong_letter_dict.get(real_letter, 0) + 1\n wrong_letter_count += 1\n\n wrong_letter_count /= len(real)\n wrong_letter_count = 1.0 - wrong_letter_count\n\n return wrong_letter_count, wrong_letter_dict\n\n\ndef create_model(input_shape, number_of_classes):\n \"\"\"\n :param input_shape numpy1d: Shape of the image\n :param number_of_classes int: Class number the model should handle\n :return model Model: Keras model\n \"\"\"\n model = Sequential()\n model.add(Conv2D(\n 20,\n kernel_size=(5, 5),\n padding=\"same\",\n strides=(1, 1),\n activation='relu',\n input_shape=(input_shape)\n ))\n\n model.add(Conv2D(32, (3, 3), padding=\"same\", activation='relu'))\n model.add(Conv2D(32, (3, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(4, 4), strides=(4, 4)))\n model.add(Dropout(0.25))\n\n model.add(Conv2D(64, (3, 3), padding=\"same\", activation='relu'))\n model.add(Conv2D(64, (3, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n model.add(Dropout(0.25))\n model.add(Conv2D(128, (3, 3), padding=\"same\", activation='relu'))\n model.add(Conv2D(128, (3, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Flatten())\n model.add(Dense(64*8*8, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(number_of_classes, activation='softmax'))\n\n model.compile(\n loss=keras.losses.categorical_crossentropy,\n optimizer=\"Adamax\",\n metrics=['accuracy']\n )\n\n return model\n\n\ndef chunks(array, chunk_size):\n \"\"\"\n Convert a 1D list into a 2D list with length of the array of array equal\n to chunk_size\n :param array list: list of object\n :param chunk_size int: length of the chunks\n :return 2d list:\n \"\"\"\n for i in range(0, len(array), chunk_size):\n yield array[i:i + chunk_size]\n\n\ndef one_label(char):\n \"\"\"\n Convert one char into a binarized label\n :param char string: one character\n :return zeros list int: binarized label\n \"\"\"\n zeros = [0.0] * len(CHAR_POSSIBILITIES)\n char_index = CHAR_POSSIBILITIES.index(char)\n zeros[char_index] = 1.0\n return zeros\n\n\ndef char_to_num(captcha_name):\n \"\"\"\n Convert catpcha character to binarized labels\n :param captcha_name string: code of the 
captcha\n :return all_labels list int: name transform into binarized labels\n \"\"\"\n all_labels = []\n for char in captcha_name:\n all_labels += one_label(char)\n return all_labels\n\n\ndef num_to_char(captcha_binarized_label, char_count):\n \"\"\"\n Convert catpcha binarized labels to char\n :param captcha_binarized_label list int: captcha binarized\n :param char_count int: length of the original captcha name\n :return captcha_name string: captcha code\n \"\"\"\n captcha_name = \"\"\n\n for x in range(char_count):\n length = len(CHAR_POSSIBILITIES)\n char_range = captcha_binarized_label[x * length:(x + 1) * length]\n char_index = np.argmax(char_range)\n captcha_name += CHAR_POSSIBILITIES[char_index]\n\n return captcha_name\n\n\ndef load_data_no_generator(generated_captcha_path, captchas, char_count):\n \"\"\"\n :param generated_captcha_path strig: folder containing captchas\n :param catpchas list string: All captcha names\n :param char_count int: Length of the catpcha name\n \"\"\"\n x = np.array([\n clean_image_kernel4(imageio.imread(generated_captcha_path + captcha))\n for captcha in captchas\n ])\n\n # Binarizide the labels (multi class)\n label_in_list = [\n list(captcha[:char_count])\n for captcha in captchas\n ]\n label_in_numlist = [\n char_to_num(label)\n for label in label_in_list\n ]\n # label need to be list [0,1,0,0,1,...]\n y = np.array(label_in_numlist)\n\n # 5. Preprocess input data\n x = x.astype(float)\n x /= np.max(x) # normalize\n\n return x, y\n\n\ndef load_data(captchas):\n \"\"\"\n :param captchas list string: Captcha names\n :return list tuple numpy2d,labels: Tuple of image and labels binarized\n \"\"\"\n while True:\n for captcha_chunk in captchas:\n x = np.array([\n # TODO opti possible\n clean_image_kernel4(generate_captcha(\n captcha.split(\"-\")[0], captcha.split(\"-\")[1])\n )\n for captcha in captcha_chunk\n ])\n\n # Binarizide the labels (multi class)\n label_in_list = [\n list(captcha.split(\"-\")[0])\n for captcha in captcha_chunk\n ]\n label_in_numlist = [\n char_to_num(label)\n for label in label_in_list\n ]\n # label need to be list [0,1,0,0,1,...]\n y = np.array(label_in_numlist)\n\n # 5. 
Preprocess input data\n x = x.astype(float)\n x /= np.max(x) # normalize\n\n yield x, y\n\n\ndef train_and_test_model(number_of_captchas=10, model_path=None):\n \"\"\"\n :param number_of_captchas int: Number of captcha we want to for the train\n :param model_path string: Path of the model if it exist\n :return None: Print test result\n \"\"\"\n number_of_classes = len(CHAR_POSSIBILITIES)\n captchas = list(get_random_captcha_names_and_lines(number_of_captchas))\n random.shuffle(captchas)\n char_count = len(captchas[0].split(\"-\")[0])\n batch_size = 250\n\n pivot = int(len(captchas) / 10)\n x_five, y_five = next(load_data([captchas[:1]]))\n\n captchas_train = list(chunks(captchas[pivot:], batch_size))\n captchas_test = list(chunks(captchas[:pivot], batch_size))\n\n if os.path.exists(model_path):\n model = load_model(model_path)\n else:\n model = create_model(x_five[0].shape, number_of_classes * char_count)\n\n epochs = 1\n model.fit_generator(\n load_data(captchas_train),\n steps_per_epoch=len(captchas_train),\n epochs=epochs,\n verbose=1,\n )\n\n # Save model\n model.save(model_path)\n\n score = model.evaluate_generator(\n load_data(captchas_test),\n steps=batch_size,\n )\n\n print(score)\n print('Test loss:', score[0])\n print('Test accuracy:', score[1])\n\n # Test with real captchas\n path = \"./real-captchas/\"\n real_captchas = os.listdir(path)\n print_test(model, path, real_captchas, char_count, 100)\n\n\ndef print_test(model, path, captchas, char_count, max_size=100):\n \"\"\"\n :param model Model: Keras model to read captchas\n :param path string: Path where are stored real captchas\n :param catpchas list string: All captcha names\n :param char_count int: Length of the catpcha name\n :param max_size int: Number of captcha we want to test\n :return None: Print captcha test results\n \"\"\"\n print(\"Real captcha test\")\n data = load_data_no_generator(path, captchas, char_count)\n x = data[0]\n y = data[1]\n allx = model.predict(x)\n\n predicted = [\n num_to_char(predict, char_count) for predict in allx[:max_size]\n ]\n real = [num_to_char(real_label, char_count) for real_label in y[:max_size]]\n ziper = zip(real, predicted)\n correct = 0\n mean_similar = 0\n error_dict = {}\n for z in ziper:\n sim, sim_dict = similar(z[0], z[1])\n mean_similar += sim\n error_dict = add_dict(error_dict, sim_dict)\n if z[0] == z[1]:\n correct += 1\n print(str(z[0] == z[1]) + \" \" + str(z) + \" simili: \" + str(sim))\n print(\"overall: \" + str(correct/len(predicted)))\n print(\"overall similarity: \" + str(mean_similar / len(predicted)))\n print(error_dict)\n print(sorted(error_dict.keys()))\n\n\nif __name__ == \"__main__\":\n model_path = \"model.h5\"\n # train_and_test_model(1600000, model_path)\n train_and_test_model(800000, model_path)\n"
] | [
[
"numpy.array",
"numpy.max",
"numpy.random.seed",
"numpy.argmax"
]
] |
Samteymoori/pepper | [
"734d226de47a855952e3b58145c1fcfbe221d3b4"
] | [
"pepper_variant/modules/python/models/predict_distributed_cpu.py"
] | [
"import sys\nimport os\nimport torch\nimport torch.onnx\nimport torch.distributed as dist\nimport torch.nn as nn\nimport onnxruntime\nfrom datetime import datetime\nfrom torch.utils.data import DataLoader\nimport torch.multiprocessing as mp\n\nfrom pepper_variant.modules.python.models.dataloader_predict import SequenceDataset\nfrom pepper_variant.modules.python.models.ModelHander import ModelHandler\nfrom pepper_variant.modules.python.Options import ImageSizeOptions, TrainOptions\nfrom pepper_variant.modules.python.DataStorePredict import DataStore\n\n\ndef predict(input_filepath, file_chunks, output_filepath, model_path, batch_size, num_workers, threads, thread_id):\n # session options\n sess_options = onnxruntime.SessionOptions()\n sess_options.intra_op_num_threads = threads\n sess_options.execution_mode = onnxruntime.ExecutionMode.ORT_SEQUENTIAL\n sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL\n\n ort_session = onnxruntime.InferenceSession(model_path + \".onnx\", sess_options=sess_options)\n torch.set_num_threads(threads)\n\n # create output file\n output_filename = output_filepath + \"pepper_prediction_\" + str(thread_id) + \".hdf\"\n prediction_data_file = DataStore(output_filename, mode='w')\n\n # data loader\n input_data = SequenceDataset(input_filepath, file_chunks)\n\n data_loader = DataLoader(input_data,\n batch_size=batch_size,\n shuffle=False,\n num_workers=num_workers)\n\n batch_completed = 0\n total_batches = len(data_loader)\n with torch.no_grad():\n for contig, contig_start, contig_end, chunk_id, images, position, index in data_loader:\n images = images.type(torch.FloatTensor)\n hidden = torch.zeros(images.size(0), 2 * TrainOptions.GRU_LAYERS, TrainOptions.HIDDEN_SIZE)\n\n prediction_base_tensor = torch.zeros((images.size(0), images.size(1), ImageSizeOptions.TOTAL_LABELS))\n\n for i in range(0, ImageSizeOptions.SEQ_LENGTH, TrainOptions.WINDOW_JUMP):\n if i + TrainOptions.TRAIN_WINDOW > ImageSizeOptions.SEQ_LENGTH:\n break\n chunk_start = i\n chunk_end = i + TrainOptions.TRAIN_WINDOW\n # chunk all the data\n image_chunk = images[:, chunk_start:chunk_end]\n\n # run inference on onnx mode, which takes numpy inputs\n ort_inputs = {ort_session.get_inputs()[0].name: image_chunk.cpu().numpy(),\n ort_session.get_inputs()[1].name: hidden.cpu().numpy()}\n output_base, hidden = ort_session.run(None, ort_inputs)\n output_base = torch.from_numpy(output_base)\n hidden = torch.from_numpy(hidden)\n\n # now calculate how much padding is on the top and bottom of this chunk so we can do a simple\n # add operation\n top_zeros = chunk_start\n bottom_zeros = ImageSizeOptions.SEQ_LENGTH - chunk_end\n\n # do softmax and get prediction\n # we run a softmax a padding to make the output tensor compatible for adding\n inference_layers = nn.Sequential(\n nn.Softmax(dim=2),\n nn.ZeroPad2d((0, 0, top_zeros, bottom_zeros))\n )\n\n # run the softmax and padding layers\n base_prediction = (inference_layers(output_base) * 10).type(torch.IntTensor)\n\n # now simply add the tensor to the global counter\n prediction_base_tensor = torch.add(prediction_base_tensor, base_prediction)\n\n # base_values, base_labels = torch.max(prediction_base_tensor, 2)\n #\n # predicted_base_labels = base_labels.cpu().numpy()\n prediction_base_tensor = prediction_base_tensor.cpu().numpy().astype(int)\n\n for i in range(images.size(0)):\n prediction_data_file.write_prediction(contig[i],\n contig_start[i],\n contig_end[i],\n chunk_id[i],\n position[i],\n index[i],\n 
prediction_base_tensor[i])\n batch_completed += 1\n\n if thread_id == 0 and batch_completed % 5 == 0:\n sys.stderr.write(\"[\" + str(datetime.now().strftime('%m-%d-%Y %H:%M:%S')) + \"] \" +\n \"INFO: BATCHES PROCESSED \" + str(batch_completed) + \"/\" + str(total_batches) + \".\\n\")\n sys.stderr.flush()\n\n\ndef cleanup():\n dist.destroy_process_group()\n\n\ndef setup(rank, total_callers, args, all_input_files):\n os.environ['MASTER_ADDR'] = 'localhost'\n os.environ['MASTER_PORT'] = '12355'\n\n # initialize the process group\n dist.init_process_group(\"gloo\", rank=rank, world_size=total_callers)\n\n filepath, output_filepath, model_path, batch_size, threads, num_workers = args\n\n # Explicitly setting seed to make sure that models created in two processes\n # start from same random weights and biases.\n predict(filepath, all_input_files[rank], output_filepath, model_path, batch_size, num_workers, threads, rank)\n cleanup()\n\n\ndef predict_distributed_cpu(filepath, file_chunks, output_filepath, model_path, batch_size, callers, threads, num_workers):\n \"\"\"\n Create a prediction table/dictionary of an images set using a trained model.\n :param filepath: Path to image files to predict on\n :param file_chunks: Path to chunked files\n :param batch_size: Batch size used for prediction\n :param model_path: Path to a trained model\n :param output_filepath: Path to output directory\n :param callers: Number of callers to start\n :param threads: Number of threads per caller.\n :param num_workers: Number of workers to be used by the dataloader\n :return: Prediction dictionary\n \"\"\"\n transducer_model, hidden_size, gru_layers, prev_ite = \\\n ModelHandler.load_simple_model_for_training(model_path,\n input_channels=ImageSizeOptions.IMAGE_CHANNELS,\n image_features=ImageSizeOptions.IMAGE_HEIGHT,\n seq_len=ImageSizeOptions.SEQ_LENGTH,\n num_classes=ImageSizeOptions.TOTAL_LABELS)\n\n transducer_model.eval()\n\n sys.stderr.write(\"[\" + str(datetime.now().strftime('%m-%d-%Y %H:%M:%S')) + \"] INFO: MODEL LOADING TO ONNX\\n\")\n x = torch.zeros(1, TrainOptions.TRAIN_WINDOW, ImageSizeOptions.IMAGE_HEIGHT)\n h = torch.zeros(1, 2 * TrainOptions.GRU_LAYERS, TrainOptions.HIDDEN_SIZE)\n\n if not os.path.isfile(model_path + \".onnx\"):\n sys.stderr.write(\"[\" + str(datetime.now().strftime('%m-%d-%Y %H:%M:%S')) + \"] INFO: SAVING MODEL TO ONNX\\n\")\n torch.onnx.export(transducer_model, (x, h),\n model_path + \".onnx\",\n training=False,\n opset_version=10,\n do_constant_folding=True,\n input_names=['input_image', 'input_hidden'],\n output_names=['output_pred', 'output_hidden'],\n dynamic_axes={'input_image': {0: 'batch_size'},\n 'input_hidden': {0: 'batch_size'},\n 'output_pred': {0: 'batch_size'},\n 'output_hidden': {0: 'batch_size'}})\n\n transducer_model.eval()\n args = (filepath, output_filepath, model_path, batch_size, threads, num_workers)\n\n mp.spawn(setup,\n args=(callers, args, file_chunks),\n nprocs=callers,\n join=True)\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.onnx.export",
"torch.multiprocessing.spawn",
"torch.add",
"torch.distributed.init_process_group",
"torch.no_grad",
"torch.nn.Softmax",
"torch.set_num_threads",
"torch.from_numpy",
"torch.zeros",
"torch.nn.ZeroPad2d",
"torch.distributed.destroy_process_group"
]
] |
thanever/SOC | [
"9f30d1a9c7610a68de9c178a1170bdf1c8ca11d4"
] | [
"Data/scigrid-de/pypower/scigrid_2011_01_07_01.py"
] | [
"from numpy import array\ndef scigrid_2011_01_07_01():\n\tppc = {\"version\": '2'}\n\tppc[\"baseMVA\"] = 100.0\n\tppc[\"bus\"] = array([\n\t\t[586,\t\t3,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[589,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[590,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[593,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[595,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[598,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[599,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[602,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[603,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[607,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[608,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[609,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[612,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[614,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[616,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[617,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[618,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[619,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[624,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[629,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[632,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[637,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[638,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[640,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[641,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[642,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[643,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[647,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[652,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[655,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[663,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[666,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[670,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[672,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[676,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t
\t0.9\t\t],\n\t\t[681,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[683,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[687,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[694,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[695,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[697,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[698,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[702,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[705,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[707,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[714,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[716,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[717,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[722,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[724,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[730,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[732,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[735,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[741,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[742,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[743,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[747,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[749,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[750,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[753,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[761,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[762,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[765,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[767,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[772,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[774,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[777,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[778,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[781,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[784,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[785,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[788,\t\t2
,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[789,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[791,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[792,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[795,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[800,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[801,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[802,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[805,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[806,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[808,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[809,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[811,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[814,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[816,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[817,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[821,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[826,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[834,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[835,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[836,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[837,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[839,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[841,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[843,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[844,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[850,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[851,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[853,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[856,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[857,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[858,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[860,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[865,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[867,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[869,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[870,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t
\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[872,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[874,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[875,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[882,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[883,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[885,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[886,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[889,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[890,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[893,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[894,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[895,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[896,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[898,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[902,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[903,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[905,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[906,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[907,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[909,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[917,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[918,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[920,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[921,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[922,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[923,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[925,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[931,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[936,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[937,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[939,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[940,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[944,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[950,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[952,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[958,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0
,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[959,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[960,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[963,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[965,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[967,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[969,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999644,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[971,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[978,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[982,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[983,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[984,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[985,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[986,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[987,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[988,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[993,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[994,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[995,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[997,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[999,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1002,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1007,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1010,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1011,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1012,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1014,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1027,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1028,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1029,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1030,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1031,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1032,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1033,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1034,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1035,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1036,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0
,\t\t1.1,\t\t0.9\t\t],\n\t\t[1037,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1038,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1039,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1040,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1041,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1042,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1043,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1044,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1045,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1046,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1047,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1048,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1049,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1050,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1051,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1052,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1053,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1054,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1055,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1056,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1057,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1058,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1059,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1060,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1061,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1062,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1063,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1064,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1065,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1066,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1067,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1068,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1069,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1070,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1071,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1072,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t38
0.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1073,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1074,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1075,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1076,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1077,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1078,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1079,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1080,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1081,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1082,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1083,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1084,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1085,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1086,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1087,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1088,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1089,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1090,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1091,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1092,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1093,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1096,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1097,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1098,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1099,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1100,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1101,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1102,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1103,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1105,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1106,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1107,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1108,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1109,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1110,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1111,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\
t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1113,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1114,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1115,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1116,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1117,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1118,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1119,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1120,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1121,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1122,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1123,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1124,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1125,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1126,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1127,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1128,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1129,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1130,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1131,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1133,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1134,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1135,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1136,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1137,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1138,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1139,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1140,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1142,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1143,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1144,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1145,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1146,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1147,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1148,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1149,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1150,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t
\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1151,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1152,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1155,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1157,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1160,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1161,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1162,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1163,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1164,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1165,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1166,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1168,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1169,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1171,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1172,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1173,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1175,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1176,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1177,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1178,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1179,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1181,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1182,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1183,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1184,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1186,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1187,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1188,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1189,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1190,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1191,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1192,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1193,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1194,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1195,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1196,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0
,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1197,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1198,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1199,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1200,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1201,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1202,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1203,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1204,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1205,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1206,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1207,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1208,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1209,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1210,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1211,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1212,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1213,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1214,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1215,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1216,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1217,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1218,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1219,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1220,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1221,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1222,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1223,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1224,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1225,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1226,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1227,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1228,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1229,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1230,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1231,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1232,\t\t2,\t\t0,\t\t0,\t
\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1233,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1235,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1236,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1237,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1238,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1239,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1240,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1241,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1242,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1243,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1244,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1245,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1246,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1247,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1248,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1249,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1250,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1251,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1252,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1253,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1254,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1255,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1256,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1257,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1258,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1259,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1260,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1261,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1262,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1263,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1264,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1265,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1266,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1267,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1268,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1269,\t\t2,\t\t0
,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1270,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1271,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1272,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1273,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1274,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1275,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1276,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1277,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1278,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1279,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1280,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1281,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1282,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1283,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1284,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1285,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1286,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1287,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1288,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1289,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1290,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1291,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1292,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1293,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1294,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1295,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1296,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1297,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1298,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1299,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1300,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1301,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1302,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1303,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1304,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1305,\t
\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1306,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1307,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1308,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1309,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1310,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1311,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1312,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1313,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1314,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1315,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1316,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1317,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1318,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1319,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1320,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1321,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1322,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1323,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1324,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1325,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1326,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1327,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1328,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1329,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1330,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1332,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1333,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1334,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1335,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1336,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1337,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1338,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1339,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1340,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1341,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\
t[1342,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1343,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1344,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1345,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1346,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1347,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1348,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1349,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1350,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1351,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1352,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1355,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1356,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1357,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1358,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1359,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1363,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1364,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1365,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1366,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1367,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1368,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1369,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1370,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1371,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1372,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1373,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1374,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1375,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1376,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1377,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1378,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1379,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1381,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1382,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1383,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t
\t],\n\t\t[1387,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1390,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1391,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1393,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1394,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1395,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1396,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1397,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1398,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1399,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1400,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1401,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1402,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1403,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1404,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1405,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1406,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1407,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1408,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1409,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1410,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1411,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1412,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1413,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1414,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1415,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1416,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1417,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1418,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1419,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1420,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1421,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999644,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1422,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1423,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1424,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1425,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\
t1.1,\t\t0.9\t\t],\n\t\t[1426,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1427,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1428,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1429,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1430,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1431,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1432,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1433,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1434,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1435,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1436,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1437,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1438,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1439,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1440,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1441,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1442,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1443,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1444,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1445,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1446,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1447,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1448,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1449,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1450,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1451,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1452,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1453,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1454,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1455,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1456,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1459,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1460,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1461,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1463,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1464,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,
\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1466,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1467,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1468,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1469,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1470,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1471,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1472,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1473,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1474,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1475,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1476,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1477,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1479,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1480,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1481,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1482,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1483,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1484,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1485,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1486,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1487,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1488,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1489,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1490,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1491,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1492,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1493,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1494,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1495,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1496,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1497,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1498,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1499,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1500,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1501,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1502,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\
t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1503,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1504,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1505,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1506,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1507,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1508,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1510,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1511,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1512,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1513,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1514,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1516,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1517,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1518,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1519,\t\t2,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[1,\t\t1,\t\t231.535683,\t\t46.307137,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[2,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.000015,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[3,\t\t1,\t\t40.581977,\t\t8.116395,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[4,\t\t1,\t\t66.738408,\t\t13.347682,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[5,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.998829,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[6,\t\t1,\t\t195.97163,\t\t39.194326,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[7,\t\t1,\t\t147.688993,\t\t29.537799,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[8,\t\t1,\t\t123.575597,\t\t24.715119,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[9,\t\t1,\t\t83.572245,\t\t16.714449,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[10,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.001864,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[11,\t\t1,\t\t73.223533,\t\t14.644707,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[12,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.000997,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[13,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.000519,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[14,\t\t1,\t\t175.12383,\t\t35.024766,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[15,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.000477,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[16,\t\t1,\t\t298.667302,\t\t59.73346,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[17,\t\t1,\t\t70.343995,\t\t14.068799,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[18,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.002785,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[19,\t\t1,\t\t173.793495,\t\t34.758699,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220
.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[20,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.998624,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[21,\t\t1,\t\t747.338688,\t\t149.467738,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[22,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.000541,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[23,\t\t1,\t\t97.851973,\t\t19.570395,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[24,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999995,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[25,\t\t1,\t\t46.803281,\t\t9.360656,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[26,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.000745,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[27,\t\t1,\t\t57.452323,\t\t11.490465,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[28,\t\t1,\t\t169.754403,\t\t33.950881,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[29,\t\t1,\t\t62.354326,\t\t12.470865,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[30,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999264,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[31,\t\t1,\t\t122.711704,\t\t24.542341,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[32,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.995193,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[33,\t\t1,\t\t153.857417,\t\t30.771483,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[34,\t\t1,\t\t30.52459,\t\t6.104918,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[35,\t\t1,\t\t2.020889,\t\t0.404178,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[36,\t\t1,\t\t6.690873,\t\t1.338175,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[37,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.002691,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[38,\t\t1,\t\t161.19808,\t\t32.239616,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[39,\t\t1,\t\t52.784066,\t\t10.556813,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[40,\t\t1,\t\t55.134608,\t\t11.026922,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[41,\t\t1,\t\t59.257208,\t\t11.851442,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[42,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.001586,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[43,\t\t1,\t\t90.873598,\t\t18.17472,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[44,\t\t1,\t\t116.259296,\t\t23.251859,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[45,\t\t1,\t\t61.713034,\t\t12.342607,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[46,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.000336,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[47,\t\t1,\t\t268.333226,\t\t53.666645,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[48,\t\t1,\t\t184.443359,\t\t36.888672,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[49,\t\t1,\t\t46.654864,\t\t9.330973,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[50,\t\t1,\t\t67.93578,\t\t13.587156,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[51,\t\t1,\t\t88.040336,\t\t17.608067,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[52,
\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0001,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[53,\t\t1,\t\t133.58711,\t\t26.717422,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[54,\t\t1,\t\t67.87003,\t\t13.574006,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[55,\t\t1,\t\t66.560665,\t\t13.312133,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[56,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999841,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[57,\t\t1,\t\t79.452642,\t\t15.890528,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[58,\t\t1,\t\t181.99836,\t\t36.399672,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[59,\t\t1,\t\t51.979844,\t\t10.395969,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[60,\t\t1,\t\t27.405216,\t\t5.481043,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[61,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999477,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[62,\t\t1,\t\t208.931319,\t\t41.786264,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[63,\t\t1,\t\t123.330369,\t\t24.666074,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[64,\t\t1,\t\t1308.785147,\t\t261.757029,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[65,\t\t1,\t\t4.360894,\t\t0.872179,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[66,\t\t1,\t\t138.366196,\t\t27.673239,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[67,\t\t1,\t\t296.818798,\t\t59.36376,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[68,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.998332,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[69,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.00075,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[70,\t\t1,\t\t561.513466,\t\t112.302693,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[71,\t\t1,\t\t130.488497,\t\t26.097699,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[72,\t\t1,\t\t213.722252,\t\t42.74445,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[73,\t\t1,\t\t68.420546,\t\t13.684109,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[74,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.003789,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[75,\t\t1,\t\t85.276082,\t\t17.055216,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[76,\t\t1,\t\t82.310129,\t\t16.462026,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[77,\t\t1,\t\t79.722985,\t\t15.944597,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[78,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.995035,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[79,\t\t1,\t\t82.320126,\t\t16.464025,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[80,\t\t1,\t\t87.436676,\t\t17.487335,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[81,\t\t1,\t\t98.704099,\t\t19.74082,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[82,\t\t1,\t\t3.28493,\t\t0.656986,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[83,\t\t1,\t\t219.786066,\t\t43.957213,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[84,\t\t1,\t\t21.6365
82,\t\t4.327316,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[85,\t\t1,\t\t75.031466,\t\t15.006293,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[86,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999969,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[87,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999273,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[88,\t\t1,\t\t60.560337,\t\t12.112067,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[89,\t\t1,\t\t75.134368,\t\t15.026874,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[90,\t\t1,\t\t86.776878,\t\t17.355376,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[91,\t\t1,\t\t30.141967,\t\t6.028393,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[92,\t\t1,\t\t32.89546,\t\t6.579092,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[93,\t\t1,\t\t32.263856,\t\t6.452771,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[94,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999174,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[95,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.000263,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[96,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999998,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[97,\t\t1,\t\t4.53767,\t\t0.907534,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[98,\t\t1,\t\t83.429506,\t\t16.685901,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[99,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.001151,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[100,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.001527,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[101,\t\t1,\t\t59.076598,\t\t11.81532,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[102,\t\t1,\t\t114.34551,\t\t22.869102,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[103,\t\t1,\t\t133.692027,\t\t26.738405,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[104,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999922,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[105,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999928,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[106,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.99986,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[107,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999995,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[108,\t\t1,\t\t94.303426,\t\t18.860685,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[109,\t\t1,\t\t38.181848,\t\t7.63637,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[110,\t\t1,\t\t49.561569,\t\t9.912314,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[111,\t\t1,\t\t87.340876,\t\t17.468175,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[112,\t\t1,\t\t44.205493,\t\t8.841099,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[113,\t\t1,\t\t69.683871,\t\t13.936774,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[114,\t\t1,\t\t102.627302,\t\t20.52546,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[115,\t\t1,\t\t66.157788,\t\t13.231558,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[116,\t\t1,\t\t110.70596,\t\t22.141192,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220
.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[117,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.000816,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[118,\t\t1,\t\t171.412339,\t\t34.282468,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[119,\t\t1,\t\t33.22675,\t\t6.64535,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[120,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.001279,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[121,\t\t1,\t\t45.121942,\t\t9.024388,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[122,\t\t1,\t\t39.503802,\t\t7.90076,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[123,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.000268,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[124,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.000006,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[125,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999914,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[126,\t\t1,\t\t207.119414,\t\t41.423883,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[127,\t\t1,\t\t160.125097,\t\t32.025019,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[128,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.001323,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[129,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999999,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[130,\t\t1,\t\t220.78338,\t\t44.156676,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[131,\t\t1,\t\t48.748779,\t\t9.749756,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[132,\t\t1,\t\t126.934451,\t\t25.38689,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[133,\t\t1,\t\t42.518068,\t\t8.503614,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[134,\t\t1,\t\t42.343957,\t\t8.468791,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[135,\t\t1,\t\t42.400098,\t\t8.48002,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[136,\t\t1,\t\t41.074226,\t\t8.214845,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[137,\t\t1,\t\t32.8556,\t\t6.57112,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[138,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999263,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[139,\t\t1,\t\t64.360791,\t\t12.872158,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[140,\t\t1,\t\t44.508243,\t\t8.901649,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[141,\t\t1,\t\t52.734412,\t\t10.546882,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[142,\t\t1,\t\t58.026678,\t\t11.605336,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[143,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.99998,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[144,\t\t1,\t\t52.856304,\t\t10.571261,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[145,\t\t1,\t\t153.760388,\t\t30.752078,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[146,\t\t1,\t\t198.226065,\t\t39.645213,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[147,\t\t1,\t\t121.500905,\t\t24.300181,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[148,\t\t1,\t\t171.460082,\t\t34.292016,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,
\t\t0.9\t\t],\n\t\t[149,\t\t1,\t\t110.539074,\t\t22.107815,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[150,\t\t1,\t\t144.320239,\t\t28.864048,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[151,\t\t1,\t\t34.008844,\t\t6.801769,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[152,\t\t1,\t\t70.598833,\t\t14.119767,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[153,\t\t1,\t\t125.9598,\t\t25.19196,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[154,\t\t1,\t\t129.385711,\t\t25.877142,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[155,\t\t1,\t\t134.766653,\t\t26.953331,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[156,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999992,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[157,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.000087,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[158,\t\t1,\t\t35.506525,\t\t7.101305,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[159,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.001066,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[160,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999999,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[161,\t\t1,\t\t110.227427,\t\t22.045485,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[162,\t\t1,\t\t164.757336,\t\t32.951467,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[163,\t\t1,\t\t32.949911,\t\t6.589982,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[164,\t\t1,\t\t33.082423,\t\t6.616485,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[165,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999998,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[166,\t\t1,\t\t38.678704,\t\t7.735741,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[167,\t\t1,\t\t54.411201,\t\t10.88224,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[168,\t\t1,\t\t37.13495,\t\t7.42699,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[169,\t\t1,\t\t127.123641,\t\t25.424728,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[170,\t\t1,\t\t95.522697,\t\t19.104539,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[171,\t\t1,\t\t81.528586,\t\t16.305717,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[172,\t\t1,\t\t40.012009,\t\t8.002402,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[173,\t\t1,\t\t38.223311,\t\t7.644662,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[174,\t\t1,\t\t57.359494,\t\t11.471899,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[175,\t\t1,\t\t38.198259,\t\t7.639652,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[176,\t\t1,\t\t133.106751,\t\t26.62135,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[177,\t\t1,\t\t21.704995,\t\t4.340999,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[178,\t\t1,\t\t114.954978,\t\t22.990996,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[179,\t\t1,\t\t42.356942,\t\t8.471388,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[180,\t\t1,\t\t37.232836,\t\t7.446567,\t\t0,\t\t0,\t\t0,\t\t1.0,\
t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[181,\t\t1,\t\t28.102272,\t\t5.620454,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[182,\t\t1,\t\t1.273046,\t\t0.254609,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[183,\t\t1,\t\t381.062729,\t\t76.212546,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[184,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999954,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[185,\t\t1,\t\t81.488061,\t\t16.297612,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[186,\t\t1,\t\t43.880897,\t\t8.776179,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[187,\t\t1,\t\t25.665856,\t\t5.133171,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[188,\t\t1,\t\t38.198259,\t\t7.639652,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[189,\t\t1,\t\t140.163669,\t\t28.032734,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[190,\t\t1,\t\t185.392677,\t\t37.078535,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[191,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[192,\t\t1,\t\t44.648172,\t\t8.929634,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[193,\t\t1,\t\t38.136642,\t\t7.627328,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[194,\t\t1,\t\t26.326335,\t\t5.265267,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[195,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999999,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[196,\t\t1,\t\t36.934313,\t\t7.386863,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[197,\t\t1,\t\t58.517517,\t\t11.703503,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[198,\t\t1,\t\t34.627533,\t\t6.925507,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[199,\t\t1,\t\t44.581796,\t\t8.916359,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[200,\t\t1,\t\t38.199146,\t\t7.639829,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[201,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.997871,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[202,\t\t1,\t\t39.143281,\t\t7.828656,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[203,\t\t1,\t\t5.157478,\t\t1.031496,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[204,\t\t1,\t\t151.164654,\t\t30.232931,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[205,\t\t1,\t\t75.589132,\t\t15.117826,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[206,\t\t1,\t\t36.277501,\t\t7.2555,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[207,\t\t1,\t\t107.873663,\t\t21.574733,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[208,\t\t1,\t\t31.76454,\t\t6.352908,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[209,\t\t1,\t\t44.14161,\t\t8.828322,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[210,\t\t1,\t\t50.710449,\t\t10.14209,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[211,\t\t1,\t\t178.207882,\t\t35.641576,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[212,\t\t1,\t\t44.665292,\t\t8.933058,\t
\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[213,\t\t1,\t\t209.380904,\t\t41.876181,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[214,\t\t1,\t\t140.886808,\t\t28.177362,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[215,\t\t1,\t\t297.912187,\t\t59.582437,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[216,\t\t1,\t\t100.452037,\t\t20.090407,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[217,\t\t1,\t\t32.1884,\t\t6.43768,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[218,\t\t1,\t\t98.063081,\t\t19.612616,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[219,\t\t1,\t\t157.599323,\t\t31.519865,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[220,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999672,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[221,\t\t1,\t\t89.903024,\t\t17.980605,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[222,\t\t1,\t\t0.0,\t\t0.0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[223,\t\t1,\t\t89.099462,\t\t17.819892,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[224,\t\t1,\t\t103.6104,\t\t20.72208,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[225,\t\t1,\t\t186.038417,\t\t37.207683,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[226,\t\t1,\t\t64.988967,\t\t12.997793,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[227,\t\t1,\t\t80.963073,\t\t16.192615,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[228,\t\t1,\t\t79.38182,\t\t15.876364,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[229,\t\t1,\t\t175.658429,\t\t35.131686,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[230,\t\t1,\t\t42.132923,\t\t8.426585,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[231,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.000936,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[232,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999991,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[233,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999606,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[234,\t\t1,\t\t150.082157,\t\t30.016431,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[235,\t\t1,\t\t48.804717,\t\t9.760943,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[236,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999981,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[237,\t\t1,\t\t0.403914,\t\t0.080783,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[238,\t\t1,\t\t55.223425,\t\t11.044685,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[239,\t\t1,\t\t76.298087,\t\t15.259617,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[240,\t\t1,\t\t481.273697,\t\t96.254739,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[241,\t\t1,\t\t356.125818,\t\t71.225164,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[242,\t\t1,\t\t129.671855,\t\t25.934371,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[243,\t\t1,\t\t104.619329,\t\t20.923866,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[244,\t\t1,\t\
t124.646159,\t\t24.929232,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[245,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.001786,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[246,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999913,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[247,\t\t1,\t\t24.735326,\t\t4.947065,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[248,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999998,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[249,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999997,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[250,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999995,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[251,\t\t1,\t\t61.387468,\t\t12.277494,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[252,\t\t1,\t\t157.430773,\t\t31.486155,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[253,\t\t1,\t\t69.118117,\t\t13.823623,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[254,\t\t1,\t\t22.068268,\t\t4.413654,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[255,\t\t1,\t\t108.529902,\t\t21.70598,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[256,\t\t1,\t\t124.464912,\t\t24.892982,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[257,\t\t1,\t\t60.06952,\t\t12.013904,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[258,\t\t1,\t\t195.759311,\t\t39.151862,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[259,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999581,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[260,\t\t1,\t\t121.832905,\t\t24.366581,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[261,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.002014,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[262,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.99968,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[263,\t\t1,\t\t174.769144,\t\t34.953829,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[264,\t\t1,\t\t226.248083,\t\t45.249617,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[265,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.000009,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[266,\t\t1,\t\t109.036505,\t\t21.807301,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[267,\t\t1,\t\t137.907521,\t\t27.581504,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[268,\t\t1,\t\t47.956289,\t\t9.591258,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[269,\t\t1,\t\t38.510698,\t\t7.70214,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[270,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[271,\t\t1,\t\t0.0,\t\t0.0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[272,\t\t1,\t\t0.78576,\t\t0.157152,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[273,\t\t1,\t\t107.453062,\t\t21.490612,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[274,\t\t1,\t\t208.874596,\t\t41.774919,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[275,\t\t1,\t\t39.102465,\t\t7.820493,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[276,\t\t1,\t\t152.431348,\t\t30.48627,\t\t0,\t\
t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[277,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.998577,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[278,\t\t1,\t\t118.997587,\t\t23.799517,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[279,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.998164,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[280,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999529,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[281,\t\t1,\t\t157.181561,\t\t31.436312,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[282,\t\t1,\t\t222.279069,\t\t44.455814,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[283,\t\t1,\t\t89.099103,\t\t17.819821,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[284,\t\t1,\t\t135.167465,\t\t27.033493,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[285,\t\t1,\t\t60.279948,\t\t12.05599,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[286,\t\t1,\t\t126.337034,\t\t25.267407,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[287,\t\t1,\t\t77.649516,\t\t15.529903,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[288,\t\t1,\t\t49.943628,\t\t9.988726,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[289,\t\t1,\t\t78.546842,\t\t15.709368,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[290,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.004907,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[291,\t\t1,\t\t51.690749,\t\t10.33815,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[292,\t\t1,\t\t101.905943,\t\t20.381189,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[293,\t\t1,\t\t89.813561,\t\t17.962712,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[294,\t\t1,\t\t23.933957,\t\t4.786791,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[295,\t\t1,\t\t50.078174,\t\t10.015635,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[296,\t\t1,\t\t142.172054,\t\t28.434411,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[297,\t\t1,\t\t149.424424,\t\t29.884885,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[298,\t\t1,\t\t78.899066,\t\t15.779813,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[299,\t\t1,\t\t76.413221,\t\t15.282644,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[300,\t\t1,\t\t208.170304,\t\t41.634061,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[301,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999525,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[302,\t\t1,\t\t175.358016,\t\t35.071603,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[303,\t\t1,\t\t90.068963,\t\t18.013793,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[304,\t\t1,\t\t77.342281,\t\t15.468456,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[305,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.99979,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[306,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999891,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[307,\t\t1,\t\t91.735133,\t\t18.347027,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[308,\t\t1,\t\t113.097197,\
t\t22.619439,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[309,\t\t1,\t\t185.042919,\t\t37.008584,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[310,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.000041,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[311,\t\t1,\t\t157.177116,\t\t31.435423,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[312,\t\t1,\t\t70.686923,\t\t14.137385,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[313,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.001149,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[314,\t\t1,\t\t218.943091,\t\t43.788618,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[315,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.001529,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[316,\t\t1,\t\t85.78475,\t\t17.15695,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[317,\t\t1,\t\t115.506023,\t\t23.101205,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[318,\t\t1,\t\t189.819037,\t\t37.963807,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[319,\t\t1,\t\t6.800077,\t\t1.360015,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[320,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999995,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[321,\t\t1,\t\t160.858437,\t\t32.171687,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[322,\t\t1,\t\t20.478315,\t\t4.095663,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[323,\t\t1,\t\t2.130594,\t\t0.426119,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[324,\t\t1,\t\t376.637527,\t\t75.327505,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[325,\t\t1,\t\t122.691298,\t\t24.53826,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[326,\t\t1,\t\t9.94743,\t\t1.989486,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[327,\t\t1,\t\t85.604424,\t\t17.120885,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[328,\t\t1,\t\t145.883095,\t\t29.176619,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[329,\t\t1,\t\t219.42118,\t\t43.884236,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[330,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.001641,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[331,\t\t1,\t\t17.421295,\t\t3.484259,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[332,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.994883,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[333,\t\t1,\t\t183.050164,\t\t36.610033,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[334,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.99946,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[335,\t\t1,\t\t186.816503,\t\t37.363301,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[336,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.998019,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[337,\t\t1,\t\t74.310127,\t\t14.862025,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[338,\t\t1,\t\t201.688244,\t\t40.337649,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[339,\t\t1,\t\t124.74139,\t\t24.948278,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[340,\t\t1,\t\t105.466
324,\t\t21.093265,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[341,\t\t1,\t\t95.343664,\t\t19.068733,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[342,\t\t1,\t\t165.389884,\t\t33.077977,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[343,\t\t1,\t\t90.735302,\t\t18.14706,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[344,\t\t1,\t\t227.495134,\t\t45.499027,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[345,\t\t1,\t\t248.756971,\t\t49.751394,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[346,\t\t1,\t\t246.952253,\t\t49.390451,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[347,\t\t1,\t\t86.363489,\t\t17.272698,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[348,\t\t1,\t\t225.759849,\t\t45.15197,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[349,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.001361,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[350,\t\t1,\t\t118.436912,\t\t23.687382,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[351,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.001141,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[352,\t\t1,\t\t783.968775,\t\t156.793755,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[353,\t\t1,\t\t2.356872,\t\t0.471374,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[354,\t\t1,\t\t16.012385,\t\t3.202477,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[355,\t\t1,\t\t0.0,\t\t0.0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[356,\t\t1,\t\t0.0,\t\t0.0,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[357,\t\t1,\t\t0.040138,\t\t0.008028,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[358,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.00082,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[359,\t\t1,\t\t2.343515,\t\t0.468703,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[360,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.000685,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[361,\t\t1,\t\t59.980163,\t\t11.996033,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[362,\t\t1,\t\t170.974507,\t\t34.194901,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[363,\t\t1,\t\t251.729885,\t\t50.345977,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[364,\t\t1,\t\t59.3922,\t\t11.87844,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[365,\t\t1,\t\t53.307654,\t\t10.661531,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[366,\t\t1,\t\t105.6556,\t\t21.13112,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[367,\t\t1,\t\t51.069528,\t\t10.213906,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[368,\t\t1,\t\t25.147475,\t\t5.029495,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[369,\t\t1,\t\t20.664524,\t\t4.132905,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[370,\t\t1,\t\t60.836949,\t\t12.16739,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[371,\t\t1,\t\t306.104743,\t\t61.220949,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[37
2,\t\t1,\t\t177.514538,\t\t35.502908,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[373,\t\t1,\t\t119.786939,\t\t23.957388,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[374,\t\t1,\t\t61.424714,\t\t12.284943,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[375,\t\t1,\t\t201.49439,\t\t40.298878,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[376,\t\t1,\t\t221.001397,\t\t44.200279,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[377,\t\t1,\t\t158.145186,\t\t31.629037,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[378,\t\t1,\t\t157.840789,\t\t31.568158,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[379,\t\t1,\t\t54.400959,\t\t10.880192,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[380,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999989,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[381,\t\t1,\t\t181.920125,\t\t36.384025,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[382,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.000287,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[383,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999356,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[384,\t\t1,\t\t64.195093,\t\t12.839019,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[385,\t\t1,\t\t81.026806,\t\t16.205361,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[386,\t\t1,\t\t65.10261,\t\t13.020522,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[387,\t\t1,\t\t132.584124,\t\t26.516825,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[388,\t\t1,\t\t711.974806,\t\t142.394961,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[389,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999953,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[390,\t\t1,\t\t58.786094,\t\t11.757219,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[391,\t\t1,\t\t66.962375,\t\t13.392475,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[392,\t\t1,\t\t128.500124,\t\t25.700025,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[393,\t\t1,\t\t160.472614,\t\t32.094523,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[394,\t\t1,\t\t57.717386,\t\t11.543477,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[395,\t\t1,\t\t79.99273,\t\t15.998546,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[396,\t\t1,\t\t56.658032,\t\t11.331606,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[397,\t\t1,\t\t454.335008,\t\t90.867002,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[398,\t\t1,\t\t196.782306,\t\t39.356461,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[399,\t\t1,\t\t83.843594,\t\t16.768719,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[400,\t\t1,\t\t44.670462,\t\t8.934092,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[401,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.000557,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[402,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t1.000356,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[403,\t\t1,\t\t22.179923,\t\t4.435985,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,
\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[404,\t\t1,\t\t78.141243,\t\t15.628249,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[405,\t\t1,\t\t589.107715,\t\t117.821543,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[406,\t\t1,\t\t44.635096,\t\t8.927019,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[407,\t\t1,\t\t88.356151,\t\t17.67123,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[408,\t\t1,\t\t255.47644,\t\t51.095288,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[409,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999926,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[410,\t\t1,\t\t33.07651,\t\t6.615302,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[411,\t\t1,\t\t31.275194,\t\t6.255039,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[412,\t\t1,\t\t2.19674,\t\t0.439348,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[413,\t\t1,\t\t109.665229,\t\t21.933046,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[414,\t\t1,\t\t9.311764,\t\t1.862353,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[415,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999523,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[416,\t\t1,\t\t132.609322,\t\t26.521864,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[417,\t\t1,\t\t5.18875,\t\t1.03775,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[418,\t\t1,\t\t108.130419,\t\t21.626084,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[419,\t\t1,\t\t57.79494,\t\t11.558988,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[420,\t\t1,\t\t58.18776,\t\t11.637552,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[421,\t\t1,\t\t83.817984,\t\t16.763597,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[422,\t\t1,\t\t61.407864,\t\t12.281573,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[423,\t\t1,\t\t128.970085,\t\t25.794017,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[424,\t\t1,\t\t9.298411,\t\t1.859682,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[425,\t\t1,\t\t76.363415,\t\t15.272683,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[426,\t\t1,\t\t6.326944,\t\t1.265389,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[427,\t\t1,\t\t53.17174,\t\t10.634348,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[428,\t\t1,\t\t23.840558,\t\t4.768112,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[429,\t\t1,\t\t269.035043,\t\t53.807009,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[430,\t\t1,\t\t143.305714,\t\t28.661143,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[431,\t\t1,\t\t95.830732,\t\t19.166146,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[432,\t\t1,\t\t112.020247,\t\t22.404049,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[433,\t\t1,\t\t57.261764,\t\t11.452353,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[434,\t\t1,\t\t29.801811,\t\t5.960362,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n
\t\t[435,\t\t1,\t\t119.188482,\t\t23.837696,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[436,\t\t1,\t\t63.632731,\t\t12.726546,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[437,\t\t1,\t\t14.491687,\t\t2.898337,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[438,\t\t1,\t\t38.891719,\t\t7.778344,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[439,\t\t1,\t\t72.411353,\t\t14.482271,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[440,\t\t1,\t\t61.194993,\t\t12.238999,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[441,\t\t1,\t\t46.914161,\t\t9.382832,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[442,\t\t1,\t\t62.083316,\t\t12.416663,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[443,\t\t1,\t\t134.602474,\t\t26.920495,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[444,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999997,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[445,\t\t1,\t\t61.161808,\t\t12.232362,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[446,\t\t1,\t\t28.360182,\t\t5.672036,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[447,\t\t1,\t\t53.918247,\t\t10.783649,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[448,\t\t1,\t\t39.624436,\t\t7.924887,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[449,\t\t1,\t\t199.799824,\t\t39.959965,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[450,\t\t1,\t\t122.267959,\t\t24.453592,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[451,\t\t1,\t\t52.245702,\t\t10.44914,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[452,\t\t1,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0,\t\t0.999998,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[453,\t\t1,\t\t35.014757,\t\t7.002951,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[454,\t\t1,\t\t24.428604,\t\t4.885721,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[455,\t\t1,\t\t39.828783,\t\t7.965757,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[456,\t\t1,\t\t39.828783,\t\t7.965757,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[457,\t\t1,\t\t122.144889,\t\t24.428978,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[458,\t\t1,\t\t116.175191,\t\t23.235038,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[459,\t\t1,\t\t141.38953,\t\t28.277906,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[460,\t\t1,\t\t185.814973,\t\t37.162995,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[461,\t\t1,\t\t193.287865,\t\t38.657573,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[462,\t\t1,\t\t59.12776,\t\t11.825552,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[463,\t\t1,\t\t30.297434,\t\t6.059487,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[464,\t\t1,\t\t30.334057,\t\t6.066811,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[465,\t\t1,\t\t48.997793,\t\t9.799559,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n
\t\t[466,\t\t1,\t\t39.780009,\t\t7.956002,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[467,\t\t1,\t\t36.710361,\t\t7.342072,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[468,\t\t1,\t\t60.190482,\t\t12.038096,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[469,\t\t1,\t\t37.298836,\t\t7.459767,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[470,\t\t1,\t\t94.98582,\t\t18.997164,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[471,\t\t1,\t\t93.522105,\t\t18.704421,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[472,\t\t1,\t\t32.711213,\t\t6.542243,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[473,\t\t1,\t\t60.065587,\t\t12.013117,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[474,\t\t1,\t\t31.023248,\t\t6.20465,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[475,\t\t1,\t\t30.444615,\t\t6.088923,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[476,\t\t1,\t\t34.407424,\t\t6.881485,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[477,\t\t1,\t\t55.52614,\t\t11.105228,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[478,\t\t1,\t\t69.750952,\t\t13.95019,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[479,\t\t1,\t\t126.404216,\t\t25.280843,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[480,\t\t1,\t\t55.405258,\t\t11.081052,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[481,\t\t1,\t\t48.116491,\t\t9.623298,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[482,\t\t1,\t\t54.634205,\t\t10.926841,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[483,\t\t1,\t\t46.462388,\t\t9.292478,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[484,\t\t1,\t\t36.424252,\t\t7.28485,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[485,\t\t1,\t\t54.408192,\t\t10.881638,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[486,\t\t1,\t\t500.528791,\t\t100.105758,\t\t0,\t\t0,\t\t0,\t\t0.999644,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[487,\t\t1,\t\t126.831682,\t\t25.366336,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[488,\t\t1,\t\t365.459497,\t\t73.091899,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[489,\t\t1,\t\t96.1879,\t\t19.23758,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t380.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[490,\t\t1,\t\t29.930087,\t\t5.986017,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[491,\t\t1,\t\t41.154254,\t\t8.230851,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[492,\t\t1,\t\t64.176373,\t\t12.835275,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[493,\t\t1,\t\t82.715663,\t\t16.543133,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[494,\t\t1,\t\t113.049619,\t\t22.609924,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[495,\t\t1,\t\t88.990255,\t\t17.798051,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[496,\t\t1,\t\t6.303328,\t\t1.260666,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n
\t\t[497,\t\t1,\t\t788.229231,\t\t157.645846,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[498,\t\t1,\t\t36.96724,\t\t7.393448,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[499,\t\t1,\t\t51.600211,\t\t10.320042,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[500,\t\t1,\t\t28.250508,\t\t5.650102,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[501,\t\t1,\t\t47.794989,\t\t9.558998,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[502,\t\t1,\t\t188.636924,\t\t37.727385,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[503,\t\t1,\t\t57.772131,\t\t11.554426,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[504,\t\t1,\t\t37.831905,\t\t7.566381,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[505,\t\t1,\t\t268.333226,\t\t53.666645,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[506,\t\t1,\t\t84.226497,\t\t16.845299,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[507,\t\t1,\t\t80.117224,\t\t16.023445,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[508,\t\t1,\t\t116.472908,\t\t23.294582,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[509,\t\t1,\t\t153.488191,\t\t30.697638,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[510,\t\t1,\t\t96.96766,\t\t19.393532,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[511,\t\t1,\t\t84.585425,\t\t16.917085,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[512,\t\t1,\t\t55.873895,\t\t11.174779,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[513,\t\t1,\t\t30.780554,\t\t6.156111,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[514,\t\t1,\t\t76.60982,\t\t15.321964,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[515,\t\t1,\t\t68.340511,\t\t13.668102,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[516,\t\t1,\t\t76.45695,\t\t15.29139,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[517,\t\t1,\t\t35.91366,\t\t7.182732,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[518,\t\t1,\t\t202.268006,\t\t40.453601,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[519,\t\t1,\t\t19.906875,\t\t3.981375,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[520,\t\t1,\t\t80.37176,\t\t16.074352,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[521,\t\t1,\t\t72.602992,\t\t14.520598,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[522,\t\t1,\t\t62.16327,\t\t12.432654,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[523,\t\t1,\t\t33.461781,\t\t6.692356,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[524,\t\t1,\t\t97.122526,\t\t19.424505,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[525,\t\t1,\t\t115.705825,\t\t23.141165,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[526,\t\t1,\t\t35.07983,\t\t7.015966,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[527,\t\t1,\t\t38.515188,\t\t7.703038,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[528,\t\t1,\t\t84.063,\t\t16.8126,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[529,\t\t1,\t\t107.756318,\t
\t21.551264,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[530,\t\t1,\t\t45.662726,\t\t9.132545,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[531,\t\t1,\t\t46.426928,\t\t9.285386,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[532,\t\t1,\t\t44.561758,\t\t8.912352,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[533,\t\t1,\t\t39.932712,\t\t7.986542,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[534,\t\t1,\t\t110.156768,\t\t22.031354,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[535,\t\t1,\t\t137.909203,\t\t27.581841,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[536,\t\t1,\t\t108.702172,\t\t21.740434,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[537,\t\t1,\t\t36.160733,\t\t7.232147,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[538,\t\t1,\t\t27.031297,\t\t5.406259,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[539,\t\t1,\t\t28.681868,\t\t5.736374,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[540,\t\t1,\t\t25.826762,\t\t5.165352,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[541,\t\t1,\t\t66.712756,\t\t13.342551,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[542,\t\t1,\t\t91.642706,\t\t18.328541,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[543,\t\t1,\t\t50.054795,\t\t10.010959,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[544,\t\t1,\t\t93.227759,\t\t18.645552,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[545,\t\t1,\t\t200.734654,\t\t40.146931,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[546,\t\t1,\t\t100.61124,\t\t20.122248,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[547,\t\t1,\t\t130.046639,\t\t26.009328,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[548,\t\t1,\t\t42.096635,\t\t8.419327,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[549,\t\t1,\t\t35.996222,\t\t7.199244,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[550,\t\t1,\t\t29.703005,\t\t5.940601,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[551,\t\t1,\t\t28.63298,\t\t5.726596,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[552,\t\t1,\t\t142.188155,\t\t28.437631,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[553,\t\t1,\t\t0.983722,\t\t0.196744,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[554,\t\t1,\t\t144.051445,\t\t28.810289,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[555,\t\t1,\t\t54.885195,\t\t10.977039,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[556,\t\t1,\t\t84.909223,\t\t16.981845,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[557,\t\t1,\t\t180.401553,\t\t36.080311,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[558,\t\t1,\t\t106.375344,\t\t21.275069,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[559,\t\t1,\t\t56.93106,\t\t11.386212,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[560,\t\t1,\t\t88.939784,\t\t17.787957,\t\t0,\t\t0,\t\t
0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[561,\t\t1,\t\t48.771981,\t\t9.754396,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[562,\t\t1,\t\t133.241398,\t\t26.64828,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[563,\t\t1,\t\t93.679562,\t\t18.735912,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[564,\t\t1,\t\t184.970556,\t\t36.994111,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[565,\t\t1,\t\t139.56945,\t\t27.91389,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[566,\t\t1,\t\t0.224178,\t\t0.044836,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[567,\t\t1,\t\t226.8764,\t\t45.37528,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[568,\t\t1,\t\t209.805777,\t\t41.961155,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[569,\t\t1,\t\t147.620818,\t\t29.524164,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[570,\t\t1,\t\t230.46268,\t\t46.092536,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[571,\t\t1,\t\t169.684163,\t\t33.936833,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[572,\t\t1,\t\t299.294532,\t\t59.858906,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[573,\t\t1,\t\t87.120714,\t\t17.424143,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[574,\t\t1,\t\t165.99823,\t\t33.199646,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[575,\t\t1,\t\t3.119404,\t\t0.623881,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[576,\t\t1,\t\t201.852734,\t\t40.370547,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[577,\t\t1,\t\t222.521596,\t\t44.504319,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[578,\t\t1,\t\t212.456169,\t\t42.491234,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[579,\t\t1,\t\t77.509809,\t\t15.501962,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[580,\t\t1,\t\t16.136389,\t\t3.227278,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[581,\t\t1,\t\t0.092721,\t\t0.018544,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[582,\t\t1,\t\t58.381537,\t\t11.676307,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[583,\t\t1,\t\t66.961478,\t\t13.392296,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[584,\t\t1,\t\t38.419289,\t\t7.683858,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t],\n\t\t[585,\t\t1,\t\t66.700613,\t\t13.340123,\t\t0,\t\t0,\t\t0,\t\t1.0,\t\t0,\t\t220.0,\t\t0,\t\t1.1,\t\t0.9\t\t]\n\t])\n\tppc[\"gen\"] = array([\n\t\t[586,\t\t0.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t272.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[589,\t\t63.1,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t63.1,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[590,\t\t38.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t38.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[593,\t\t11.1,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t11.1,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[595,\t\t1466.614612,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t4730.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0],\n\t\t[598,\t\t12.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t12.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[599,\t\t9.3,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t9.3,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[602,\t\t24.6,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t24.6,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[603,\t\t1363.789945,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t3455.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[607,\t\t1800.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1800.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[608,\t\t24.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t24.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[609,\t\t36.4,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t36.4,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[612,\t\t30.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t30.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[614,\t\t30.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t30.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[616,\t\t29.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t29.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[617,\t\t137.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t137.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[618,\t\t33.4,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t33.4,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[619,\t\t118.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t118.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[624,\t\t27.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t27.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[629,\t\t75.3,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t75.3,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[632,\t\t45.1,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t45.1,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[637,\t\t53.7,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t53.7,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[638,\t\t128.7,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t128.7,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[640,\t\t12.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t12.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[641,\t\t12.6,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t12.6,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[642,\t\t28.9,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t28.9,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[643,\t\t857.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t857.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[647,\t\t14.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t14.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[652,\t\t46.9,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t46.9,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[655,\t\t61.5,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t61.5,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[663,\t\t15.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t15.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[666,\t\t28.9,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t28.9,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[670,\t\t24.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t24.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[672,\t\t33.1,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t33.1,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0],\n\t\t[676,\t\t370.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t370.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[681,\t\t40.1,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t40.1,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[683,\t\t27.5,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t27.5,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[687,\t\t1329.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1329.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[694,\t\t16.4,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t16.4,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[695,\t\t14.7,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t14.7,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[697,\t\t11.6,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t11.6,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[698,\t\t24.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t24.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[702,\t\t73.4,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t73.4,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[705,\t\t17.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t17.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[707,\t\t34.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t34.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[714,\t\t15.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t15.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[716,\t\t0.1,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.1,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[717,\t\t11.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t11.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[722,\t\t20.7,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t20.7,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[724,\t\t12.1,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t12.1,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[730,\t\t633.2,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t633.2,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[732,\t\t14.6,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t14.6,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[735,\t\t84.8,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t84.8,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[741,\t\t214.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t214.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[742,\t\t9.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t9.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[743,\t\t1227.688539,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1410.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[747,\t\t12.5,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t12.5,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[749,\t\t16.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t16.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[750,\t\t90.8,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t90.8,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[753,\t\t311.8,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t311.8,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[761,\t\t15.7,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t15.7,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[762,\t\t1076.088882,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1105.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[765,\t\t59.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t59.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0],\n\t\t[767,\t\t11.2,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t11.2,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[772,\t\t18.8,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t18.8,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[774,\t\t33.5,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t33.5,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[777,\t\t79.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t79.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[778,\t\t14.7,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t14.7,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[781,\t\t945.392426,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1310.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[784,\t\t1059.960906,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1275.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[785,\t\t3.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t3.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[788,\t\t700.494671,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t875.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[789,\t\t77.4,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t77.4,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[791,\t\t10.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t10.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[792,\t\t62.7,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t62.7,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[795,\t\t13.6,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t13.6,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[800,\t\t36.5,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t36.5,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[801,\t\t50.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t50.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[802,\t\t500.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t500.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[805,\t\t693.813273,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1410.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[806,\t\t35.8,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t35.8,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[808,\t\t217.5,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t217.5,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[809,\t\t12.5,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t12.5,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[811,\t\t25.2,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t25.2,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[814,\t\t89.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t89.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[816,\t\t80.1,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t80.1,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[817,\t\t54.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t54.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[821,\t\t82.5,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t82.5,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[826,\t\t58.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t58.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[834,\t\t23.3,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t23.3,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[835,\t\t63.7,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t63.7,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[836,\t\t25.5,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t25.5,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0],\n\t\t[837,\t\t472.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t472.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[839,\t\t73.3,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t73.3,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[841,\t\t23.3,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t23.3,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[843,\t\t333.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t333.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[844,\t\t40.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t40.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[850,\t\t16.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t16.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[851,\t\t79.5,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t79.5,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[853,\t\t11.6,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t11.6,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[856,\t\t36.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t36.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[857,\t\t1402.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1402.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[858,\t\t56.8,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t56.8,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[860,\t\t25.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t25.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[865,\t\t11.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t11.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[867,\t\t264.697826,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t769.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[869,\t\t1360.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1360.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[870,\t\t58.4,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t58.4,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[872,\t\t22.5,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t22.5,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[874,\t\t20.7,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t20.7,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[875,\t\t24.4,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t24.4,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[882,\t\t17.4,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t17.4,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[883,\t\t18.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t18.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[885,\t\t34.740146,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t490.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[886,\t\t2572.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t2572.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[889,\t\t9.5,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t9.5,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[890,\t\t48.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t48.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[893,\t\t60.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t60.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[894,\t\t158.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t158.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[895,\t\t19.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t19.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[896,\t\t24.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t24.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0],\n\t\t[898,\t\t84.6,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t84.6,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[902,\t\t19.5,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t19.5,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[903,\t\t20.1,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t20.1,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[905,\t\t137.3,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t137.3,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[906,\t\t66.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t66.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[907,\t\t67.3,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t67.3,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[909,\t\t36.8,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t36.8,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[917,\t\t17.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t17.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[918,\t\t38.5,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t38.5,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[920,\t\t12.8,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t12.8,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[921,\t\t124.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t124.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[922,\t\t164.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t164.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[923,\t\t146.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t146.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[925,\t\t26.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t26.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[931,\t\t217.1,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t217.1,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[936,\t\t104.4,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t104.4,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[937,\t\t30.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t30.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[939,\t\t0.1,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.1,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[940,\t\t29.6,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t29.6,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[944,\t\t25.4,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t25.4,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[950,\t\t16.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t16.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[952,\t\t31.7,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t31.7,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[958,\t\t66.7,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t66.7,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[959,\t\t45.5,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t45.5,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[960,\t\t26.5,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t26.5,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[963,\t\t687.931579,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t875.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[965,\t\t352.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t352.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[967,\t\t37.5,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t37.5,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[969,\t\t56.9,\t\t0,\t\t9999,\t\t-9999,\t\t0.999644,\t\t100,\t\t1,\t\t56.9,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0],\n\t\t[971,\t\t20.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t20.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[978,\t\t4.6,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t4.6,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[982,\t\t9.9,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t9.9,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[983,\t\t44.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t44.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[984,\t\t465.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t465.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[985,\t\t22.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t22.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[986,\t\t11.2,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t11.2,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[987,\t\t164.5,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t164.5,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[988,\t\t5.1,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t5.1,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[993,\t\t392.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t392.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[994,\t\t33.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t33.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[995,\t\t4.2,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t4.2,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[997,\t\t18.8,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t18.8,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[999,\t\t15.6,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t15.6,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1002,\t\t9.9,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t9.9,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1007,\t\t23.3,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t23.3,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1010,\t\t750.0,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t750.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1011,\t\t18.7,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t18.7,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1012,\t\t810.029779,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t2835.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1014,\t\t599.602726,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t750.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1027,\t\t10.460207,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t48.3,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1028,\t\t292.918282,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t400.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1029,\t\t27.465302,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t60.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1030,\t\t533.877229,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1018.0,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1031,\t\t1002.917112,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1447.2,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1032,\t\t79.932691,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t153.510391,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1033,\t\t20.55676,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t50.164506,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1034,\t\t36.699953,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t84.262779,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0],\n\t\t[1035,\t\t35.271451,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t49.886469,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1036,\t\t46.753001,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t67.223077,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1037,\t\t40.25786,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t94.684044,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1038,\t\t37.755525,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t85.798525,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1039,\t\t101.893155,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t132.724114,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1040,\t\t0.018424,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.064179,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1041,\t\t153.223357,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t204.187624,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1042,\t\t40.87186,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t52.70053,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1043,\t\t1.823835,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t6.035538,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1044,\t\t11.076386,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t36.163532,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1045,\t\t12.693234,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t61.836204,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1046,\t\t18.636555,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t106.787063,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1047,\t\t2.990521,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t13.029581,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1048,\t\t13.95159,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t71.656883,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1049,\t\t198.425639,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t293.755375,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1050,\t\t39.486108,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t52.781606,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1051,\t\t285.38149,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t304.42978,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1052,\t\t5.143615,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t20.66869,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1053,\t\t4.192271,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t16.368087,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1054,\t\t65.843261,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t273.855776,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1055,\t\t2.569306,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t2.856069,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1056,\t\t432.936564,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t603.943953,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1057,\t\t130.808026,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t426.979979,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1058,\t\t549.489833,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1055.735174,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1059,\t\t360.823263,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t414.871332,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1060,\t\t9.16295,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t10.351632,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1061,\t\t154.755519,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t161.862597,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0],\n\t\t[1062,\t\t2.358253,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t2.878561,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1063,\t\t6.654734,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t8.670916,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1064,\t\t154.89402,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t209.786524,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1065,\t\t250.621857,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t339.421643,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1066,\t\t68.904322,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t134.399019,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1067,\t\t6.260048,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t32.653526,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1068,\t\t2.977816,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t5.009022,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1069,\t\t1.620267,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t3.190759,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1070,\t\t0.473903,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.788599,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1071,\t\t2.394921,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t4.328696,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1072,\t\t36.154158,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t112.606433,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1073,\t\t20.275153,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t77.81765,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1074,\t\t48.536291,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t153.592986,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1075,\t\t8.668695,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t15.783448,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1076,\t\t0.719805,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t2.29551,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1077,\t\t18.059078,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t26.120041,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1078,\t\t14.921952,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t34.413246,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1079,\t\t22.955211,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t72.327992,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1080,\t\t44.741318,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t132.149983,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1081,\t\t388.316194,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t405.642115,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1082,\t\t485.516098,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t510.054159,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1083,\t\t613.766095,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t633.681488,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1084,\t\t522.770891,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t602.719371,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1085,\t\t37.272877,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t113.714399,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1086,\t\t69.300753,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t225.59917,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1087,\t\t107.585832,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t116.66597,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1088,\t\t35.327353,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t36.782492,\t\t0.0,\t\t0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1089,\t\t297.558685,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t384.449592,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1090,\t\t23.576709,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t89.140897,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1091,\t\t7.850455,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t45.7939,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1092,\t\t5.88887,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t54.002032,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1093,\t\t10.655098,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t155.605298,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1096,\t\t7.860251,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t84.50612,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1097,\t\t0.394111,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t4.601122,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1098,\t\t9.296361,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t71.025499,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1099,\t\t54.610258,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t290.937198,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1100,\t\t0.003509,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.026696,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1101,\t\t24.535269,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t83.930665,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1102,\t\t117.607859,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t350.979988,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1103,\t\t93.242905,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t245.381701,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1105,\t\t0.002734,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t2.178593,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1106,\t\t0.001842,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t2.289793,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1107,\t\t7.627584,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t76.221615,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1108,\t\t84.395325,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t320.422751,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1109,\t\t0.005786,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.77821,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1110,\t\t0.001346,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1.654557,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1111,\t\t11.638705,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t89.637993,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1113,\t\t0.000435,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t3.536361,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1114,\t\t2.594751,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t13.446889,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1115,\t\t0.024181,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t50.575278,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1116,\t\t0.003557,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t32.601142,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1117,\t\t1.0211,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t90.792541,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1118,\t\t0.126568,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t8.725012,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1119,\t\t0.06487,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t43.254023,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0],\n\t\t[1120,\t\t0.003805,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t2.416001,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1121,\t\t0.000463,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.540589,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1122,\t\t0.001107,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1.462883,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1123,\t\t0.000619,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1.464336,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1124,\t\t0.001002,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1.288283,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1125,\t\t0.961999,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t25.818899,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1126,\t\t1.24405,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t29.154893,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1127,\t\t0.204465,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t105.296621,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1128,\t\t0.003399,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t3.06139,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1129,\t\t0.004899,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t4.738747,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1130,\t\t0.00018,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1.025754,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1131,\t\t0.002931,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t2.897078,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1133,\t\t0.000617,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.719597,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1134,\t\t0.000436,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.508453,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1135,\t\t0.027822,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t8.117819,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1136,\t\t0.000284,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.4027,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1137,\t\t0.098149,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t3.669012,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1138,\t\t0.002053,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1.254278,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1139,\t\t0.000241,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t19.822769,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1140,\t\t4.49627,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t28.389457,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1142,\t\t0.00236,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1.215733,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1143,\t\t0.502306,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t25.239356,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1144,\t\t0.030776,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t52.527382,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1145,\t\t40.324835,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t175.889627,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1146,\t\t0.000738,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.861317,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1147,\t\t0.011916,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t45.703707,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1148,\t\t0.651323,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t17.645529,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0],\n\t\t[1149,\t\t0.135893,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t8.556784,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1150,\t\t0.021433,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t3.62256,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1151,\t\t0.019427,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t13.036113,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1152,\t\t0.00013,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.116518,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1155,\t\t0.000865,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.609451,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1157,\t\t0.006459,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t4.354147,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1160,\t\t61.129181,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t238.377761,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1161,\t\t2.896951,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t25.263391,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1162,\t\t273.439171,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t502.409178,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1163,\t\t206.24686,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t330.03194,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1164,\t\t143.533861,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t285.625412,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1165,\t\t29.685091,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t57.188579,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1166,\t\t32.175395,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t83.277163,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1168,\t\t0.000743,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1.345774,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1169,\t\t0.003458,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t2.721845,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1171,\t\t1.967453,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t9.029885,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1172,\t\t0.631482,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t3.584043,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1173,\t\t82.749143,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t254.253327,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1175,\t\t0.000868,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.855454,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1176,\t\t0.000324,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.23222,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1177,\t\t0.126674,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t27.87401,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1178,\t\t0.165025,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t3.167999,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1179,\t\t0.011629,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1.306293,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1181,\t\t13.535858,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t85.739557,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1182,\t\t8.79188,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t99.319579,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1183,\t\t0.981738,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t38.222575,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1184,\t\t0.008347,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t4.219005,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0],\n\t\t[1186,\t\t3.535988,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t38.916368,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1187,\t\t0.27759,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t9.814574,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1188,\t\t56.68999,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t179.712741,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1189,\t\t8.957888,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t20.261805,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1190,\t\t210.457608,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t220.533673,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1191,\t\t70.653669,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t73.079413,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1192,\t\t8.195868,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t21.454569,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1193,\t\t0.865781,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t2.399953,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1194,\t\t3.340189,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t8.986036,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1195,\t\t0.071729,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.202359,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1196,\t\t49.815385,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t160.697956,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1197,\t\t26.370587,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t90.592266,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1198,\t\t8.079646,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t39.819157,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1199,\t\t43.056892,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t201.421956,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1200,\t\t11.02043,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t56.012408,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1201,\t\t17.382661,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t25.166667,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1202,\t\t20.92899,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t49.89238,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1203,\t\t143.537583,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t182.623256,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1204,\t\t23.95278,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t47.541821,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1205,\t\t0.219444,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.548843,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1206,\t\t1.467907,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t3.806894,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1207,\t\t1.289842,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t3.575453,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1208,\t\t1.785392,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t2.242031,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1209,\t\t0.039688,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1.268261,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1210,\t\t0.579627,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t9.02599,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1211,\t\t13.976304,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t18.005229,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1212,\t\t74.870478,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t91.171888,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0],\n\t\t[1213,\t\t46.121501,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t57.342704,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1214,\t\t2.447531,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t4.505907,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1215,\t\t0.708893,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t2.252965,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1216,\t\t16.428571,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t67.754469,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1217,\t\t32.069234,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t35.871617,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1218,\t\t0.793403,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.980482,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1219,\t\t0.548688,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t12.33953,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1220,\t\t2.817267,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t30.597849,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1221,\t\t292.553779,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t593.230436,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1222,\t\t166.288529,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t211.057769,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1223,\t\t3.615447,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t3.806101,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1224,\t\t54.949188,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t160.523778,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1225,\t\t21.684328,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t34.931481,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1226,\t\t1.849094,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t3.982858,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1227,\t\t9.902281,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t17.482807,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1228,\t\t0.730895,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t3.021367,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1229,\t\t9.347805,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t51.244222,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1230,\t\t0.238773,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1.681276,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1231,\t\t4.366652,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t33.55478,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1232,\t\t11.333033,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t75.075088,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1233,\t\t150.178408,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t575.36828,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1235,\t\t2.638187,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t9.03734,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1236,\t\t22.763423,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t82.225035,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1237,\t\t2.778775,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t14.605409,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1238,\t\t72.798024,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t188.691049,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1239,\t\t0.564342,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t2.267706,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1240,\t\t241.248703,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t339.51051,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0],\n\t\t[1241,\t\t364.295435,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t385.361595,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1242,\t\t4.834243,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t27.074038,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1243,\t\t37.302558,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t83.079842,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1244,\t\t79.039372,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t323.472536,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1245,\t\t2.700683,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t8.080896,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1246,\t\t11.614519,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t57.127825,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1247,\t\t4.672328,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t21.833396,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1248,\t\t11.023432,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t91.958275,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1249,\t\t65.703041,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t76.135177,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1250,\t\t28.580821,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t30.830519,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1251,\t\t21.224131,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t23.404345,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1252,\t\t14.138152,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t14.887727,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1253,\t\t50.455721,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t64.502694,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1254,\t\t28.780508,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t82.278695,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1255,\t\t3.003121,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t3.818419,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1256,\t\t12.275602,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t15.091842,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1257,\t\t65.168323,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t88.95288,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1258,\t\t68.145193,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t235.487329,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1259,\t\t85.172922,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t109.288719,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1260,\t\t6.875991,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t20.168717,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1261,\t\t173.495737,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t201.699555,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1262,\t\t0.309635,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.524108,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1263,\t\t0.24441,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.352421,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1264,\t\t64.013359,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t82.035361,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1265,\t\t4.784966,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t6.654727,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1266,\t\t103.17248,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t119.710849,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1267,\t\t38.430186,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t39.469006,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0],\n\t\t[1268,\t\t2.034979,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t3.4295,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1269,\t\t2.322702,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t5.105829,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1270,\t\t25.03907,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t38.950511,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1271,\t\t23.845798,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t47.371792,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1272,\t\t0.422069,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1.23166,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1273,\t\t0.244404,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t2.169201,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1274,\t\t50.377516,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t53.095629,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1275,\t\t87.392367,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t99.0753,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1276,\t\t24.185119,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t25.655641,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1277,\t\t52.100619,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t65.611252,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1278,\t\t146.059023,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t170.437781,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1279,\t\t0.000154,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.004344,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1280,\t\t0.06616,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.626494,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1281,\t\t0.401488,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t2.51246,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1282,\t\t0.613544,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t4.363037,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1283,\t\t402.284475,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1297.764428,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1284,\t\t16.498159,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t28.426322,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1285,\t\t0.402632,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t2.937048,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1286,\t\t9.779237,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t17.872201,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1287,\t\t90.378036,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t93.199628,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1288,\t\t144.534188,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t148.402692,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1289,\t\t165.62078,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t184.149235,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1290,\t\t3.310598,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t4.901974,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1291,\t\t91.035472,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t98.293351,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1292,\t\t31.980176,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t41.682074,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1293,\t\t2.251511,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t2.402107,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1294,\t\t4.500984,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t5.39743,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0],\n\t\t[1295,\t\t5.035929,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t5.873666,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1296,\t\t6.542922,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t27.356489,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1297,\t\t69.476429,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t177.778742,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1298,\t\t0.892933,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t4.014603,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1299,\t\t0.650887,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t2.158207,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1300,\t\t10.924264,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t23.74405,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1301,\t\t29.938353,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t60.863304,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1302,\t\t3.756946,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t4.877299,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1303,\t\t3.548349,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t4.335516,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1304,\t\t6.98833,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t9.594319,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1305,\t\t0.004134,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.004567,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1306,\t\t0.013051,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1.827014,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1307,\t\t0.000269,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.29894,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1308,\t\t3.092704,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t3.278321,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1309,\t\t1.952844,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t3.34909,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1310,\t\t0.96121,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1.64589,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1311,\t\t0.915033,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t11.854004,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1312,\t\t61.692105,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t262.264924,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1313,\t\t23.4633,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t30.836748,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1314,\t\t9.723847,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t12.003987,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1315,\t\t7.484353,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t7.879027,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1316,\t\t0.342208,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t2.757497,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1317,\t\t2.443039,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t23.958574,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1318,\t\t1.145435,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1.956332,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1319,\t\t5.754202,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t17.708276,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1320,\t\t10.408423,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t20.75859,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1321,\t\t0.058081,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.161123,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0],\n\t\t[1322,\t\t0.553533,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.929763,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1323,\t\t111.607065,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t199.111909,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1324,\t\t7.765494,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t13.063258,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1325,\t\t55.916254,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t90.497559,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1326,\t\t15.960037,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t56.928865,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1327,\t\t14.515435,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t50.796895,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1328,\t\t4.734532,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t16.063343,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1329,\t\t189.523369,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t218.675424,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1330,\t\t19.894995,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t30.131028,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1332,\t\t16.042068,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t26.293088,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1333,\t\t36.231617,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t45.650254,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1334,\t\t0.134934,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1.215341,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1335,\t\t2.182146,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t3.306939,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1336,\t\t25.507951,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t29.773035,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1337,\t\t17.701639,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t121.31241,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1338,\t\t0.295098,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.832524,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1339,\t\t2.095095,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t10.086482,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1340,\t\t9.678742,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t70.098327,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1341,\t\t32.05516,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t205.513321,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1342,\t\t0.019287,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.734589,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1343,\t\t0.027263,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1.102108,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1344,\t\t0.080101,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.226057,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1345,\t\t2.810638,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t3.971188,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1346,\t\t78.824561,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t214.719215,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1347,\t\t115.323366,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t414.115976,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1348,\t\t4.836222,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t22.707927,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1349,\t\t8.174869,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t42.352342,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0],\n\t\t[1350,\t\t0.009739,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.094971,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1351,\t\t0.000376,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.015958,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1352,\t\t0.034671,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.83726,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1355,\t\t0.989078,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1.688324,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1356,\t\t57.296262,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t73.486231,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1357,\t\t39.855184,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t56.459913,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1358,\t\t0.144939,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.247293,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1359,\t\t64.298242,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t70.633589,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1363,\t\t0.004007,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.036158,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1364,\t\t0.008183,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.061068,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1365,\t\t6.2e-05,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.000456,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1366,\t\t1.0371,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1.229992,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1367,\t\t8.708215,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t43.863891,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1368,\t\t0.162393,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t3.298243,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1369,\t\t4.676329,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t7.968859,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1370,\t\t0.206453,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.343308,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1371,\t\t15.125702,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t81.767208,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1372,\t\t187.012409,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t192.966588,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1373,\t\t34.669274,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t35.200257,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1374,\t\t22.833303,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t108.220146,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1375,\t\t13.048539,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t61.223816,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1376,\t\t16.260883,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t176.213655,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1377,\t\t91.898696,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t234.376272,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1378,\t\t100.492585,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t246.029906,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1379,\t\t0.001688,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.805984,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1381,\t\t0.003024,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1.01257,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1382,\t\t60.392773,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t138.839906,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0],\n\t\t[1383,\t\t21.830255,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t109.821439,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1387,\t\t0.003612,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t3.493561,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1390,\t\t0.003859,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t3.732816,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1391,\t\t0.003146,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.521719,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1393,\t\t0.000486,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1.376509,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1394,\t\t0.000481,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1.077886,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1395,\t\t0.000515,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.073776,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1396,\t\t0.000342,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.026112,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1397,\t\t0.017188,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t25.084545,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1398,\t\t0.002611,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t2.779641,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1399,\t\t0.888247,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t17.868157,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1400,\t\t0.000449,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1.297197,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1401,\t\t9.673835,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t89.339497,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1402,\t\t1.995463,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t26.328902,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1403,\t\t53.765488,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t119.651672,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1404,\t\t51.552063,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t134.800518,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1405,\t\t3.911245,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t29.550802,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1406,\t\t1.823208,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t10.763987,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1407,\t\t0.020768,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.211614,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1408,\t\t27.750555,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t41.078698,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1409,\t\t6.125989,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t12.019786,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1410,\t\t16.580102,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t37.466518,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1411,\t\t29.991893,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t39.395367,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1412,\t\t1.247754,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t5.987601,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1413,\t\t1.161805,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t5.679791,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1414,\t\t7.260981,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t25.992489,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1415,\t\t1.902862,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t7.454501,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0],\n\t\t[1416,\t\t1.697076,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t7.958002,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1417,\t\t0.000225,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.001311,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1418,\t\t31.771568,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t88.264613,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1419,\t\t13.601182,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t33.260903,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1420,\t\t1.057952,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1.399757,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1421,\t\t4.889225,\t\t0,\t\t9999,\t\t-9999,\t\t0.999644,\t\t100,\t\t1,\t\t6.972369,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1422,\t\t3.591055,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t4.730495,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1423,\t\t1.379632,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1.931017,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1424,\t\t52.568259,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t219.092115,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1425,\t\t7.570898,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t21.366402,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1426,\t\t53.646053,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t68.762602,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1427,\t\t426.696884,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t480.698671,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1428,\t\t229.292533,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t334.885743,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1429,\t\t4.000522,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t13.279826,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1430,\t\t0.00361,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.034248,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1431,\t\t82.661441,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t227.662022,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1432,\t\t3.068396,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t12.058931,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1433,\t\t353.343587,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1289.241188,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1434,\t\t12.901546,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t99.440014,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1435,\t\t16.366899,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t86.713217,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1436,\t\t25.427054,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t98.434116,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1437,\t\t233.567574,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t238.321958,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1438,\t\t303.313525,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t392.815158,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1439,\t\t27.439294,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t99.103164,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1440,\t\t0.682349,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.833609,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1441,\t\t0.102576,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.171578,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1442,\t\t0.287662,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.715522,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0],\n\t\t[1443,\t\t24.01603,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t103.005076,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1444,\t\t5.78705,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t8.981696,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1445,\t\t8.939839,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t25.036799,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1446,\t\t665.560328,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t758.547933,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1447,\t\t71.232954,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t89.477411,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1448,\t\t0.635617,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t7.523578,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1449,\t\t4.007945,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t95.437673,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1450,\t\t11.695201,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t59.256809,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1451,\t\t11.056834,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t68.198838,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1452,\t\t2.209088,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t24.068921,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1453,\t\t62.829218,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t64.93775,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1454,\t\t103.053623,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t155.126607,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1455,\t\t0.000929,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.654438,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1456,\t\t0.807723,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t50.054822,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1459,\t\t0.001899,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t5.309059,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1460,\t\t8.582365,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t101.498473,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1461,\t\t0.00048,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t17.951737,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1463,\t\t0.000661,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.711207,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1464,\t\t103.699065,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t218.884211,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1466,\t\t0.008472,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t5.685017,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1467,\t\t0.01035,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t2.096155,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1468,\t\t0.015871,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t23.789171,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1469,\t\t9.519658,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t65.007467,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1470,\t\t18.665435,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t78.965265,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1471,\t\t37.296985,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t159.165074,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1472,\t\t0.506929,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t11.980182,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1473,\t\t5.4e-05,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t8.362608,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0],\n\t\t[1474,\t\t0.001949,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1.398948,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1475,\t\t0.000397,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.39088,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1476,\t\t101.327203,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t250.480113,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1477,\t\t2.856374,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t12.122974,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1479,\t\t3.294063,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t5.592606,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1480,\t\t10.202484,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t18.681964,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1481,\t\t0.018812,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.053146,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1482,\t\t4.22506,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t17.51083,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1483,\t\t0.032046,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t3.599649,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1484,\t\t0.00133,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.02991,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1485,\t\t0.025059,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.563547,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1486,\t\t0.128922,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t2.89934,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1487,\t\t0.399374,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1.142917,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1488,\t\t0.557496,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t5.569856,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1489,\t\t0.000102,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.118938,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1490,\t\t153.87342,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t782.463701,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1491,\t\t79.356319,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t84.622838,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1492,\t\t222.647124,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t229.927503,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1493,\t\t81.369208,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t83.557175,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1494,\t\t322.728735,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t404.486733,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1495,\t\t25.969556,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t66.920717,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1496,\t\t5.1e-05,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.000282,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1497,\t\t71.545947,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t89.070006,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1498,\t\t92.120695,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t105.800802,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1499,\t\t0.748238,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t2.286676,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1500,\t\t0.028955,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.154817,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1501,\t\t1.053275,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t8.165333,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0],\n\t\t[1502,\t\t0.10328,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.938928,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1503,\t\t29.240906,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t45.972187,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1504,\t\t122.968061,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t188.822836,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1505,\t\t7.645825,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t26.765913,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1506,\t\t21.720319,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t56.406717,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1507,\t\t3.842405,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t15.438042,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1508,\t\t0.06199,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.065259,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1510,\t\t80.0538,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t107.008141,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1511,\t\t112.671979,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t155.22192,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1512,\t\t52.731338,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t64.130052,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1513,\t\t20.534213,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t23.051786,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1514,\t\t0.001102,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.027711,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1516,\t\t0.010731,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.02881,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1517,\t\t0.893235,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t1.286804,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1518,\t\t0.001327,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.670542,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[1519,\t\t9.2e-05,\t\t0,\t\t9999,\t\t-9999,\t\t1.0,\t\t100,\t\t1,\t\t0.04654,\t\t0.0,\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\t])\n\tppc[\"branch\"] = 
array([\n\t\t[586,\t\t1,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[589,\t\t108,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[590,\t\t108,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[593,\t\t112,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[595,\t\t115,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[598,\t\t118,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[599,\t\t119,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[602,\t\t121,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[603,\t\t526,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[607,\t\t127,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[608,\t\t127,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[609,\t\t529,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[612,\t\t493,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[614,\t\t130,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[616,\t\t132,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[617,\t\t133,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[618,\t\t133,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[619,\t\t134,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[624,\t\t14,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[629,\t\t145,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[632,\t\t145,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[637,\t\t148,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[638,\t\t149,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[640,\t\t153,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[641,\t\t155,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[642,\t\t533,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[643,\t\t534,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[647,\t\t536,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[652,\t\t167,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[655,\t\t170,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[663,\t\t178,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[666,\t\t180,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[670,\t\t183,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,
\t\t360\t\t],\n\t\t[672,\t\t185,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[676,\t\t19,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[681,\t\t197,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[683,\t\t200,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[687,\t\t202,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[694,\t\t21,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[695,\t\t210,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[697,\t\t211,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[698,\t\t212,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[702,\t\t215,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[705,\t\t217,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[707,\t\t219,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[714,\t\t225,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[716,\t\t226,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[717,\t\t227,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[722,\t\t545,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[724,\t\t238,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[730,\t\t547,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[732,\t\t247,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[735,\t\t253,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[741,\t\t264,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[742,\t\t264,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[743,\t\t500,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[747,\t\t273,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[749,\t\t274,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[750,\t\t557,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[753,\t\t28,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[761,\t\t288,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[762,\t\t289,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[765,\t\t560,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[767,\t\t292,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[772,\t\t3,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[774,\t\t300,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-
360,\t\t360\t\t],\n\t\t[777,\t\t300,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[778,\t\t300,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[781,\t\t303,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[784,\t\t563,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[785,\t\t501,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[788,\t\t311,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[789,\t\t565,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[791,\t\t314,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[792,\t\t316,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[795,\t\t319,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[800,\t\t326,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[801,\t\t327,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[802,\t\t327,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[805,\t\t328,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[806,\t\t328,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[808,\t\t329,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[809,\t\t329,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[811,\t\t568,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[814,\t\t570,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[816,\t\t335,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[817,\t\t571,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[821,\t\t338,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[826,\t\t339,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[834,\t\t572,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[835,\t\t572,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[836,\t\t572,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[837,\t\t350,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[839,\t\t350,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[841,\t\t573,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[843,\t\t352,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[844,\t\t352,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[850,\t\t574,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[851,\t\t575,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t
\t1,\t\t-360,\t\t360\t\t],\n\t\t[853,\t\t362,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[856,\t\t363,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[857,\t\t365,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[858,\t\t368,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[860,\t\t371,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[865,\t\t375,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[867,\t\t376,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[869,\t\t503,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[870,\t\t503,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[872,\t\t378,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[874,\t\t576,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[875,\t\t381,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[882,\t\t388,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[883,\t\t388,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[885,\t\t393,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[886,\t\t394,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[889,\t\t397,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[890,\t\t40,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[893,\t\t400,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[894,\t\t400,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[895,\t\t580,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[896,\t\t581,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[898,\t\t403,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[902,\t\t405,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[903,\t\t406,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[905,\t\t413,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[906,\t\t414,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[907,\t\t583,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[909,\t\t417,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[917,\t\t43,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[918,\t\t424,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[920,\t\t428,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[921,\t\t428,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\
t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[922,\t\t429,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[923,\t\t432,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[925,\t\t44,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[931,\t\t439,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[936,\t\t445,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[937,\t\t447,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[939,\t\t450,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[940,\t\t451,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[944,\t\t458,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[950,\t\t462,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[952,\t\t47,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[958,\t\t478,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[959,\t\t478,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[960,\t\t479,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[963,\t\t481,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[965,\t\t49,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[967,\t\t49,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[969,\t\t486,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[971,\t\t51,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[978,\t\t491,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[982,\t\t62,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[983,\t\t62,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[984,\t\t63,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[985,\t\t63,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[986,\t\t64,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[987,\t\t65,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[988,\t\t66,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[993,\t\t67,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[994,\t\t67,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[995,\t\t509,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[997,\t\t510,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[999,\t\t70,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1002,\t\t71,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\
t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1007,\t\t511,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1010,\t\t79,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1011,\t\t79,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1012,\t\t81,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1014,\t\t83,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1027,\t\t218,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1028,\t\t221,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1029,\t\t268,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1030,\t\t269,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1031,\t\t498,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1032,\t\t1,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1033,\t\t3,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1034,\t\t4,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1035,\t\t6,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1036,\t\t7,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1037,\t\t8,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1038,\t\t9,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1039,\t\t11,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1040,\t\t14,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1041,\t\t16,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1042,\t\t17,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1043,\t\t19,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1044,\t\t21,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1045,\t\t23,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1046,\t\t25,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1047,\t\t27,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1048,\t\t28,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1049,\t\t29,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1050,\t\t31,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1051,\t\t33,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1052,\t\t34,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1053,\t\t35,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1054,\t\t36,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0
,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1055,\t\t38,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1056,\t\t39,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1057,\t\t40,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1058,\t\t41,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1059,\t\t43,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1060,\t\t44,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1061,\t\t45,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1062,\t\t47,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1063,\t\t48,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1064,\t\t49,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1065,\t\t50,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1066,\t\t51,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1067,\t\t53,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1068,\t\t54,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1069,\t\t55,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1070,\t\t57,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1071,\t\t58,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1072,\t\t59,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1073,\t\t60,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1074,\t\t62,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1075,\t\t63,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1076,\t\t64,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1077,\t\t65,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1078,\t\t66,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1079,\t\t67,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1080,\t\t70,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1081,\t\t71,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1082,\t\t72,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1083,\t\t73,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1084,\t\t75,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1085,\t\t76,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1086,\t\t77,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1087,\t\t79,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9
999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1088,\t\t80,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1089,\t\t81,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1090,\t\t82,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1091,\t\t83,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1092,\t\t84,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1093,\t\t85,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1096,\t\t90,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1097,\t\t91,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1098,\t\t92,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1099,\t\t93,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1100,\t\t97,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1101,\t\t98,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1102,\t\t101,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1103,\t\t102,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1105,\t\t108,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1106,\t\t109,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1107,\t\t110,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1108,\t\t111,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1109,\t\t112,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1110,\t\t113,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1111,\t\t114,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1113,\t\t116,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1114,\t\t118,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1115,\t\t119,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1116,\t\t121,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1117,\t\t122,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1118,\t\t126,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1119,\t\t127,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1120,\t\t130,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1121,\t\t131,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1122,\t\t132,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1123,\t\t133,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1124,\t\t134,\t\t0,\t\t1e-05
,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1125,\t\t135,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1126,\t\t136,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1127,\t\t137,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1128,\t\t139,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1129,\t\t140,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1130,\t\t141,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1131,\t\t142,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1133,\t\t145,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1134,\t\t146,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1135,\t\t147,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1136,\t\t148,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1137,\t\t149,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1138,\t\t150,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1139,\t\t151,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1140,\t\t152,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1142,\t\t154,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1143,\t\t155,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1144,\t\t158,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1145,\t\t161,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1146,\t\t162,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1147,\t\t163,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1148,\t\t164,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1149,\t\t166,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1150,\t\t167,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1151,\t\t168,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1152,\t\t169,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1155,\t\t172,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1157,\t\t174,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1160,\t\t177,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1161,\t\t178,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1162,\t\t179,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1163,\t\t180,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t36
0\t\t],\n\t\t[1164,\t\t181,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1165,\t\t182,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1166,\t\t183,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1168,\t\t186,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1169,\t\t187,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1171,\t\t189,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1172,\t\t190,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1173,\t\t192,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1175,\t\t194,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1176,\t\t196,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1177,\t\t197,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1178,\t\t198,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1179,\t\t199,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1181,\t\t202,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1182,\t\t203,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1183,\t\t204,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1184,\t\t205,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1186,\t\t207,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1187,\t\t208,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1188,\t\t209,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1189,\t\t210,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1190,\t\t211,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1191,\t\t212,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1192,\t\t213,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1193,\t\t214,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1194,\t\t215,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1195,\t\t216,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1196,\t\t217,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1197,\t\t218,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1198,\t\t219,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1199,\t\t221,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1200,\t\t222,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1201,\t\t223,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,
\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1202,\t\t224,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1203,\t\t225,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1204,\t\t226,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1205,\t\t227,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1206,\t\t228,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1207,\t\t229,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1208,\t\t230,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1209,\t\t234,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1210,\t\t235,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1211,\t\t237,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1212,\t\t238,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1213,\t\t239,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1214,\t\t240,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1215,\t\t241,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1216,\t\t242,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1217,\t\t243,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1218,\t\t244,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1219,\t\t247,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1220,\t\t251,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1221,\t\t252,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1222,\t\t253,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1223,\t\t254,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1224,\t\t255,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1225,\t\t256,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1226,\t\t257,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1227,\t\t258,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1228,\t\t260,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1229,\t\t263,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1230,\t\t264,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1231,\t\t266,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1232,\t\t267,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1233,\t\t268,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1235,\t\t27
1,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1236,\t\t272,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1237,\t\t273,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1238,\t\t274,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1239,\t\t275,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1240,\t\t276,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1241,\t\t278,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1242,\t\t281,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1243,\t\t282,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1244,\t\t283,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1245,\t\t284,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1246,\t\t285,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1247,\t\t286,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1248,\t\t287,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1249,\t\t288,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1250,\t\t289,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1251,\t\t291,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1252,\t\t292,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1253,\t\t293,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1254,\t\t294,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1255,\t\t295,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1256,\t\t296,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1257,\t\t297,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1258,\t\t298,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1259,\t\t299,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1260,\t\t300,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1261,\t\t302,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1262,\t\t303,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1263,\t\t304,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1264,\t\t307,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1265,\t\t308,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1266,\t\t309,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1267,\t\t311,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t
1,\t\t-360,\t\t360\t\t],\n\t\t[1268,\t\t312,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1269,\t\t314,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1270,\t\t316,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1271,\t\t317,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1272,\t\t318,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1273,\t\t319,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1274,\t\t321,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1275,\t\t322,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1276,\t\t323,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1277,\t\t324,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1278,\t\t325,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1279,\t\t326,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1280,\t\t327,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1281,\t\t328,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1282,\t\t329,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1283,\t\t331,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1284,\t\t333,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1285,\t\t335,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1286,\t\t337,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1287,\t\t338,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1288,\t\t339,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1289,\t\t340,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1290,\t\t341,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1291,\t\t342,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1292,\t\t343,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1293,\t\t344,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1294,\t\t345,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1295,\t\t346,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1296,\t\t347,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1297,\t\t348,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1298,\t\t350,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1299,\t\t352,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1300,\t\t353,\t\t0,\t\t1e-05,\t\t0,\
t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1301,\t\t354,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1302,\t\t355,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1303,\t\t356,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1304,\t\t357,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1305,\t\t359,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1306,\t\t361,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1307,\t\t362,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1308,\t\t363,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1309,\t\t364,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1310,\t\t365,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1311,\t\t366,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1312,\t\t367,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1313,\t\t368,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1314,\t\t369,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1315,\t\t370,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1316,\t\t371,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1317,\t\t372,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1318,\t\t373,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1319,\t\t374,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1320,\t\t375,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1321,\t\t376,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1322,\t\t377,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1323,\t\t378,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1324,\t\t379,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1325,\t\t381,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1326,\t\t384,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1327,\t\t385,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1328,\t\t386,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1329,\t\t387,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1330,\t\t388,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1332,\t\t391,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1333,\t\t392,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\
n\t\t[1334,\t\t393,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1335,\t\t394,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1336,\t\t395,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1337,\t\t396,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1338,\t\t397,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1339,\t\t398,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1340,\t\t399,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1341,\t\t400,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1342,\t\t403,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1343,\t\t404,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1344,\t\t405,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1345,\t\t406,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1346,\t\t407,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1347,\t\t408,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1348,\t\t410,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1349,\t\t411,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1350,\t\t412,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1351,\t\t413,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1352,\t\t414,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1355,\t\t418,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1356,\t\t419,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1357,\t\t420,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1358,\t\t421,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1359,\t\t422,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1363,\t\t426,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1364,\t\t427,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1365,\t\t428,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1366,\t\t429,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1367,\t\t430,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1368,\t\t431,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1369,\t\t432,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1370,\t\t433,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1371,\t\t434,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999
,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1372,\t\t435,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1373,\t\t436,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1374,\t\t437,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1375,\t\t438,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1376,\t\t439,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1377,\t\t440,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1378,\t\t441,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1379,\t\t442,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1381,\t\t445,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1382,\t\t446,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1383,\t\t447,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1387,\t\t451,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1390,\t\t455,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1391,\t\t456,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1393,\t\t458,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1394,\t\t459,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1395,\t\t460,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1396,\t\t461,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1397,\t\t462,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1398,\t\t463,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1399,\t\t464,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1400,\t\t465,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1401,\t\t466,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1402,\t\t467,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1403,\t\t468,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1404,\t\t469,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1405,\t\t470,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1406,\t\t471,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1407,\t\t472,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1408,\t\t473,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1409,\t\t474,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1410,\t\t475,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1411,\t\t476,\t\t0,
\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1412,\t\t477,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1413,\t\t478,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1414,\t\t479,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1415,\t\t480,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1416,\t\t481,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1417,\t\t482,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1418,\t\t483,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1419,\t\t484,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1420,\t\t485,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1421,\t\t486,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1422,\t\t487,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1423,\t\t488,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1424,\t\t489,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1425,\t\t490,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1426,\t\t491,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1427,\t\t492,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1428,\t\t493,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1429,\t\t494,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1430,\t\t495,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1431,\t\t496,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1432,\t\t497,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1433,\t\t498,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1434,\t\t499,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1435,\t\t500,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1436,\t\t501,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1437,\t\t502,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1438,\t\t503,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1439,\t\t504,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1440,\t\t505,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1441,\t\t506,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1442,\t\t507,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1443,\t\t508,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-3
60,\t\t360\t\t],\n\t\t[1444,\t\t509,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1445,\t\t510,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1446,\t\t511,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1447,\t\t512,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1448,\t\t513,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1449,\t\t514,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1450,\t\t515,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1451,\t\t516,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1452,\t\t517,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1453,\t\t518,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1454,\t\t519,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1455,\t\t520,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1456,\t\t521,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1459,\t\t524,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1460,\t\t525,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1461,\t\t526,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1463,\t\t528,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1464,\t\t529,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1466,\t\t531,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1467,\t\t532,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1468,\t\t533,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1469,\t\t534,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1470,\t\t535,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1471,\t\t536,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1472,\t\t537,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1473,\t\t538,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1474,\t\t539,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1475,\t\t540,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1476,\t\t541,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1477,\t\t542,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1479,\t\t544,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1480,\t\t545,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1481,\t\t546,\t\t0,\t\t1e-05,\t\t0,\t\t9999,
\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1482,\t\t547,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1483,\t\t548,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1484,\t\t549,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1485,\t\t550,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1486,\t\t551,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1487,\t\t552,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1488,\t\t554,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1489,\t\t555,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1490,\t\t556,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1491,\t\t557,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1492,\t\t558,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1493,\t\t559,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1494,\t\t560,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1495,\t\t561,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1496,\t\t562,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1497,\t\t563,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1498,\t\t564,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1499,\t\t565,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1500,\t\t566,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1501,\t\t567,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1502,\t\t568,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1503,\t\t569,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1504,\t\t570,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1505,\t\t571,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1506,\t\t572,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1507,\t\t573,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1508,\t\t574,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1510,\t\t576,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1511,\t\t577,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1512,\t\t578,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1513,\t\t579,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1514,\t\t580,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[15
16,\t\t582,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1517,\t\t583,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1518,\t\t584,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1519,\t\t585,\t\t0,\t\t1e-05,\t\t0,\t\t9999,\t\t9999,\t\t9999,\t\t0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[1,\t\t490,\t\t0,\t\t0.01433884297520661,\t\t0.151691958358336,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t43.375\t\t],\n\t\t[3,\t\t4,\t\t0,\t\t0.006291637811634348,\t\t0.903417549506624,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t72.681\t\t],\n\t\t[491,\t\t6,\t\t0,\t\t0.011200661157024791,\t\t0.118492839955776,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t33.882\t\t],\n\t\t[7,\t\t5,\t\t0,\t\t0.005794840720221606,\t\t0.20802058859584005,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t33.471\t\t],\n\t\t[8,\t\t9,\t\t0,\t\t0.0024379328254847646,\t\t0.350063268897336,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t28.163\t\t],\n\t\t[492,\t\t11,\t\t0,\t\t0.018224793388429753,\t\t0.0482004476327704,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t27.565\t\t],\n\t\t[11,\t\t493,\t\t0,\t\t0.030286942148760328,\t\t0.08010209706571599,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t45.809\t\t],\n\t\t[492,\t\t493,\t\t0,\t\t0.04521652892561983,\t\t0.11958747011094399,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t68.39\t\t],\n\t\t[494,\t\t14,\t\t0,\t\t0.012990743801652892,\t\t0.137430291356512,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t39.297\t\t],\n\t\t[13,\t\t15,\t\t0,\t\t0.007681959833795014,\t\t0.27576354266704156,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t44.371\t\t],\n\t\t[16,\t\t5,\t\t0,\t\t0.006275623268698061,\t\t0.22527950450957998,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t36.248000000000005\t\t],\n\t\t[17,\t\t18,\t\t0,\t\t0.04623522622347646,\t\t0.9335989000302801,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t200.291\t\t],\n\t\t[17,\t\t12,\t\t0,\t\t0.0056020313942728535,\t\t0.113118303398186,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t24.268\t\t],\n\t\t[14,\t\t495,\t\t0,\t\t0.0017957024793388433,\t\t0.018996904156819597,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t5.432\t\t],\n\t\t[494,\t\t19,\t\t0,\t\t0.010246611570247935,\t\t0.10839986031771602,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t30.996\t\t],\n\t\t[20,\t\t21,\t\t0,\t\t0.005415685595567867,\t\t0.19440984828307922,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t31.281\t\t],\n\t\t[20,\t\t22,\t\t0,\t\t0.0049706544321329645,\t\t0.713737278110032,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t57.42100000000001\t\t],\n\t\t[497,\t\t23,\t\t0,\t\t0.002190413223140496,\t\t0.005793146490362,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t3.313\t\t],\n\t\t[23,\t\t499,\t\t0,\t\t0.020799669421487598,\t\t0.22004164444829602,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t62.919\t\t],\n\t\t[25,\t\t26,\t\t0,\t\t0.00141845567867036,\t\t0.050919084651523595,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t8.193\t\t],\n\t\t[25,\t\t22,\t\t0,\t\t0.0035578254847645433,\t\t0.0319293051869808,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t10.275\t\t],\n\t\t[23,\t\t27,\t\t0,\t\t0.027738181818181818
,\t\t0.073361203699828,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t41.95399999999999\t\t],\n\t\t[28,\t\t23,\t\t0,\t\t0.012841652892561981,\t\t0.0339632611780132,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t19.423\t\t],\n\t\t[8,\t\t21,\t\t0,\t\t0.004948753462603878,\t\t0.17764812836304802,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t28.584\t\t],\n\t\t[9,\t\t29,\t\t0,\t\t0.002212863573407202,\t\t0.31774552934092004,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t25.563000000000002\t\t],\n\t\t[30,\t\t25,\t\t0,\t\t0.019958795013850415,\t\t0.17911796401827998,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t57.641000000000005\t\t],\n\t\t[31,\t\t32,\t\t0,\t\t0.0299776084949446,\t\t0.605319030583196,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t129.863\t\t],\n\t\t[32,\t\t33,\t\t0,\t\t0.016762234533725762,\t\t0.33846927983213604,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t72.61399999999999\t\t],\n\t\t[34,\t\t35,\t\t0,\t\t0.001931900826446281,\t\t0.020437759184893597,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t5.843999999999999\t\t],\n\t\t[35,\t\t36,\t\t0,\t\t0.0008730578512396695,\t\t0.0092361605077588,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t2.641\t\t],\n\t\t[490,\t\t6,\t\t0,\t\t0.049352066115702475,\t\t0.130525028606764,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t74.645\t\t],\n\t\t[37,\t\t10,\t\t0,\t\t0.02404639889196676,\t\t0.485553838251812,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t104.169\t\t],\n\t\t[10,\t\t38,\t\t0,\t\t0.006848799630657894,\t\t0.13829351176534158,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t29.669\t\t],\n\t\t[37,\t\t38,\t\t0,\t\t0.01437834718372576,\t\t1.1613317560186958,\t\t2567.0,\t\t2567.0,\t\t2567.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t124.574\t\t],\n\t\t[39,\t\t40,\t\t0,\t\t0.04521629732222991,\t\t0.913024308337812,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t195.877\t\t],\n\t\t[39,\t\t41,\t\t0,\t\t0.017466989843005543,\t\t0.35269996139852006,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t75.667\t\t],\n\t\t[42,\t\t41,\t\t0,\t\t0.031145429362880884,\t\t0.6289001042979919,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t134.922\t\t],\n\t\t[18,\t\t42,\t\t0,\t\t0.03439750692520776,\t\t0.6945672650962679,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t149.01\t\t],\n\t\t[492,\t\t43,\t\t0,\t\t0.01819173553719008,\t\t0.192452068436848,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t55.03\t\t],\n\t\t[44,\t\t45,\t\t0,\t\t0.02562314049586777,\t\t0.067767398802972,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t38.755\t\t],\n\t\t[44,\t\t505,\t\t0,\t\t0.006061487603305785,\t\t0.0160312607980052,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t9.168\t\t],\n\t\t[46,\t\t12,\t\t0,\t\t0.0014741170360110802,\t\t0.2116687641962416,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t17.029\t\t],\n\t\t[47,\t\t48,\t\t0,\t\t0.005344182825484765,\t\t0.01199019212302604,\t\t428.0,\t\t428.0,\t\t428.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t7.7170000000000005\t\t],\n\t\t[49,\t\t50,\t\t0,\t\t0.0019151662049861494,\t\t0.0171874439892256,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t5.531000000000001\t\t],\n\t\t[31,\t\t33,\t\t0,\t\t0.013475992613088641,\t\t0.27211225959163604,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t58.378\t\t],\n\t\t[31,\t\t51,\t\t0,\t\t
0.003518611495844875,\t\t0.5052381383693519,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t40.647\t\t],\n\t\t[52,\t\t53,\t\t0,\t\t0.010464421745152355,\t\t1.5025884408875438,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t120.885\t\t],\n\t\t[52,\t\t54,\t\t0,\t\t0.0076126500461911354,\t\t0.1537174637168,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t32.978\t\t],\n\t\t[506,\t\t55,\t\t0,\t\t0.012634380165289257,\t\t0.133660287181212,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t38.219\t\t],\n\t\t[506,\t\t507,\t\t0,\t\t0.044157355371900825,\t\t0.11678619613628,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t66.788\t\t],\n\t\t[57,\t\t506,\t\t0,\t\t0.004687272727272727,\t\t0.049587095736244,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t14.179\t\t],\n\t\t[57,\t\t58,\t\t0,\t\t0.014436363636363634,\t\t0.0381809096340232,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t21.835\t\t],\n\t\t[58,\t\t506,\t\t0,\t\t0.019797685950413223,\t\t0.052360391943288,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t29.944000000000003\t\t],\n\t\t[59,\t\t60,\t\t0,\t\t0.019407548476454296,\t\t0.174170863885556,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t56.049\t\t],\n\t\t[508,\t\t62,\t\t0,\t\t0.051111404958677685,\t\t0.03379452026753001,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t38.653\t\t],\n\t\t[30,\t\t61,\t\t0,\t\t0.03143698060941828,\t\t0.28212765137935203,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t90.79\t\t],\n\t\t[63,\t\t506,\t\t0,\t\t0.027457190082644623,\t\t0.072618044249872,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t41.528999999999996\t\t],\n\t\t[13,\t\t64,\t\t0,\t\t0.0014816481994459833,\t\t0.2127501654814608,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t17.116\t\t],\n\t\t[65,\t\t66,\t\t0,\t\t0.03778185595567867,\t\t0.7629053006222161,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t163.671\t\t],\n\t\t[59,\t\t67,\t\t0,\t\t0.0051880193905817175,\t\t0.046559297286324804,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t14.982999999999999\t\t],\n\t\t[61,\t\t67,\t\t0,\t\t0.012931440443213295,\t\t0.1160517597580644,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t37.346\t\t],\n\t\t[68,\t\t69,\t\t0,\t\t0.011149584487534626,\t\t0.4002427745096039,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t64.4\t\t],\n\t\t[70,\t\t69,\t\t0,\t\t0.009625346260387812,\t\t0.345526355460808,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t55.596000000000004\t\t],\n\t\t[71,\t\t72,\t\t0,\t\t0.008878635734072021,\t\t0.318721276477736,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t51.283\t\t],\n\t\t[73,\t\t74,\t\t0,\t\t0.012529547553116345,\t\t0.253001288604392,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t54.278\t\t],\n\t\t[37,\t\t75,\t\t0,\t\t0.027459141274238225,\t\t0.5544652029066119,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t118.95299999999999\t\t],\n\t\t[72,\t\t75,\t\t0,\t\t0.006688711911357341,\t\t0.240108375006292,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t38.634\t\t],\n\t\t[37,\t\t72,\t\t0,\t\t0.036222068328739615,\t\t0.7314094881920841,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t156.914\t\t],\n\t\t[76,\t\t77,\t\t0,\t\t0.004683777700831025,\t\t0.6725445900750401,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t54.107\t\t],\n\t\t[77,\t\t51,\t\t0,\t\t0.
00363183864265928,\t\t0.5214964473447999,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t41.955\t\t],\n\t\t[73,\t\t72,\t\t0,\t\t0.025475069252077563,\t\t0.514402082018968,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t110.35799999999999\t\t],\n\t\t[18,\t\t40,\t\t0,\t\t0.01302770083102493,\t\t0.26306018504072,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t56.43600000000001\t\t],\n\t\t[492,\t\t45,\t\t0,\t\t0.0308703030303719,\t\t0.18370114733484796,\t\t743.0,\t\t743.0,\t\t743.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t70.03699999999999\t\t],\n\t\t[10,\t\t74,\t\t0,\t\t0.030167359187465374,\t\t0.609150547206812,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t130.685\t\t],\n\t\t[45,\t\t511,\t\t0,\t\t0.08203371900826446,\t\t0.05424014819960001,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t62.038000000000004\t\t],\n\t\t[78,\t\t32,\t\t0,\t\t0.013458795013850415,\t\t0.48313777647302397,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t77.738\t\t],\n\t\t[79,\t\t80,\t\t0,\t\t0.0038086911357340715,\t\t0.1367226831743568,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t21.999000000000002\t\t],\n\t\t[81,\t\t79,\t\t0,\t\t0.010767832409972299,\t\t0.3865388099484561,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t62.195\t\t],\n\t\t[34,\t\t82,\t\t0,\t\t0.0015497520661157025,\t\t0.00409874294399768,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t2.344\t\t],\n\t\t[83,\t\t84,\t\t0,\t\t0.00902611570247934,\t\t0.0238720301499152,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t13.652000000000001\t\t],\n\t\t[83,\t\t499,\t\t0,\t\t0.04179570247933885,\t\t0.0276350398834796,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t31.608\t\t],\n\t\t[85,\t\t86,\t\t0,\t\t0.00802354570637119,\t\t0.28802563884886,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t46.343999999999994\t\t],\n\t\t[87,\t\t86,\t\t0,\t\t0.01904968836565097,\t\t0.683837154069184,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t110.031\t\t],\n\t\t[88,\t\t89,\t\t0,\t\t0.00380297520661157,\t\t0.010058007429140002,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t5.752000000000001\t\t],\n\t\t[90,\t\t86,\t\t0,\t\t0.012097818559556786,\t\t0.434282055192244,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t69.877\t\t],\n\t\t[91,\t\t86,\t\t0,\t\t9.26246537396122e-05,\t\t0.013299992817559201,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t1.07\t\t],\n\t\t[86,\t\t92,\t\t0,\t\t0.0001852493074792244,\t\t0.0066499964087796005,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t1.07\t\t],\n\t\t[86,\t\t93,\t\t0,\t\t0.008152181440443215,\t\t0.292643346635492,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t47.086999999999996\t\t],\n\t\t[94,\t\t86,\t\t0,\t\t0.012883829639889197,\t\t0.46249792780547194,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t74.417\t\t],\n\t\t[86,\t\t95,\t\t0,\t\t0.010421052631578947,\t\t0.37409026526870803,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t60.192\t\t],\n\t\t[513,\t\t517,\t\t0,\t\t0.0008733884297520661,\t\t0.0023099144321748,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t1.321\t\t],\n\t\t[97,\t\t66,\t\t0,\t\t0.03812777008310249,\t\t0.34217338998058805,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t110.113\t\t],\n\t\t[42,\t\t98,\t\t0,\t\t0.003091759002770083,\t\t0.44394630230884,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t
\t-360,\t\t35.716\t\t],\n\t\t[99,\t\t100,\t\t0,\t\t0.016371537396121884,\t\t0.587698093837988,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t94.56200000000001\t\t],\n\t\t[42,\t\t101,\t\t0,\t\t0.008165339335180054,\t\t0.29311568282888,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t47.163000000000004\t\t],\n\t\t[102,\t\t42,\t\t0,\t\t0.012403047091412742,\t\t0.44523901189173193,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t71.64\t\t],\n\t\t[103,\t\t87,\t\t0,\t\t0.007073060941828254,\t\t0.25390556381756,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t40.854\t\t],\n\t\t[104,\t\t103,\t\t0,\t\t0.0028852146814404432,\t\t0.1035721403291428,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t16.665\t\t],\n\t\t[105,\t\t87,\t\t0,\t\t0.006406682825484765,\t\t0.22998422159488002,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t37.005\t\t],\n\t\t[106,\t\t107,\t\t0,\t\t0.005714219759923823,\t\t0.11538365264216799,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t24.754\t\t],\n\t\t[108,\t\t107,\t\t0,\t\t0.0025427631578947367,\t\t0.09127896939786201,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t14.687000000000001\t\t],\n\t\t[109,\t\t106,\t\t0,\t\t0.003030470914127424,\t\t0.10878648330773438,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t17.504\t\t],\n\t\t[110,\t\t111,\t\t0,\t\t0.019821849030470913,\t\t0.7115558306889919,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t114.491\t\t],\n\t\t[87,\t\t112,\t\t0,\t\t0.006135907202216068,\t\t0.220264039928212,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t35.441\t\t],\n\t\t[113,\t\t87,\t\t0,\t\t0.003981648199445983,\t\t0.14293141813921081,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t22.998\t\t],\n\t\t[87,\t\t85,\t\t0,\t\t0.011046225761772853,\t\t0.3965324494097,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t63.803000000000004\t\t],\n\t\t[110,\t\t114,\t\t0,\t\t0.011665339335180056,\t\t0.418757110306188,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t67.37899999999999\t\t],\n\t\t[115,\t\t116,\t\t0,\t\t0.007048925619834712,\t\t0.07457124214588401,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t21.323\t\t],\n\t\t[117,\t\t118,\t\t0,\t\t0.005987534626038782,\t\t0.21493782785077598,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t34.584\t\t],\n\t\t[117,\t\t119,\t\t0,\t\t0.0038738746537396117,\t\t0.5562504472696961,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t44.751000000000005\t\t],\n\t\t[117,\t\t120,\t\t0,\t\t0.005886686288088643,\t\t0.8452704781039522,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t68.003\t\t],\n\t\t[121,\t\t122,\t\t0,\t\t0.0021170360110803325,\t\t0.0759964075574972,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t12.228\t\t],\n\t\t[123,\t\t124,\t\t0,\t\t0.0018386426592797783,\t\t0.0660027680945204,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t10.62\t\t],\n\t\t[125,\t\t126,\t\t0,\t\t0.004941135734072022,\t\t0.17737467056702802,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t28.54\t\t],\n\t\t[127,\t\t119,\t\t0,\t\t0.0029027008310249305,\t\t0.1041998502705648,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t16.766\t\t],\n\t\t[118,\t\t128,\t\t0,\t\t0.007397160664819945,\t\t0.265539950057812,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t42.726000000000006\t\t],\n\t\t[121,\t\t119,\t\t0,\t\t0.00
2552458448753463,\t\t0.0916270065931116,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t14.743\t\t],\n\t\t[530,\t\t527,\t\t0,\t\t0.022726611570247933,\t\t0.060106736329903994,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t34.374\t\t],\n\t\t[125,\t\t130,\t\t0,\t\t0.002931440443213297,\t\t0.105231531956442,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t16.932000000000002\t\t],\n\t\t[125,\t\t123,\t\t0,\t\t0.0019078081717451524,\t\t0.2739425623421336,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t22.039\t\t],\n\t\t[131,\t\t132,\t\t0,\t\t0.0035744459833795014,\t\t0.12831385593973843,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t20.646\t\t],\n\t\t[133,\t\t123,\t\t0,\t\t0.003864439058171745,\t\t0.13872389704704202,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t22.320999999999998\t\t],\n\t\t[524,\t\t134,\t\t0,\t\t0.008092231404958678,\t\t0.08560847143881999,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t24.479\t\t],\n\t\t[135,\t\t136,\t\t0,\t\t0.005242901662049862,\t\t0.1882073282678,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t30.283\t\t],\n\t\t[123,\t\t131,\t\t0,\t\t0.003138331024930748,\t\t0.1126583971045252,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t18.127\t\t],\n\t\t[117,\t\t128,\t\t0,\t\t0.010800034626038782,\t\t0.38769479063117196,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t62.381\t\t],\n\t\t[137,\t\t521,\t\t0,\t\t0.013832396694214875,\t\t0.14633421587532003,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t41.843\t\t],\n\t\t[531,\t\t514,\t\t0,\t\t0.0059504132231404955,\t\t0.035409362037522,\t\t743.0,\t\t743.0,\t\t743.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t13.5\t\t],\n\t\t[139,\t\t521,\t\t0,\t\t0.021257520661157023,\t\t0.05622132386323199,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t32.152\t\t],\n\t\t[140,\t\t514,\t\t0,\t\t0.018527603305785127,\t\t0.04900131122836401,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t28.023000000000003\t\t],\n\t\t[522,\t\t141,\t\t0,\t\t0.012168595041322314,\t\t0.032183175718526795,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t18.405\t\t],\n\t\t[142,\t\t523,\t\t0,\t\t0.007060165289256198,\t\t0.0746901476577608,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t21.357\t\t],\n\t\t[530,\t\t526,\t\t0,\t\t0.020281652892561983,\t\t0.053640374808152,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t30.676\t\t],\n\t\t[140,\t\t532,\t\t0,\t\t0.004669090909090909,\t\t0.0123486871461184,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t7.062\t\t],\n\t\t[142,\t\t144,\t\t0,\t\t0.006678126721756199,\t\t0.0397397958689204,\t\t743.0,\t\t743.0,\t\t743.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t15.151\t\t],\n\t\t[140,\t\t522,\t\t0,\t\t0.020450247933884298,\t\t0.05408627047793199,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t30.930999999999997\t\t],\n\t\t[145,\t\t146,\t\t0,\t\t0.028527603305785125,\t\t0.07544904460236,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t43.148\t\t],\n\t\t[147,\t\t523,\t\t0,\t\t0.02461289256198347,\t\t0.0650955220034416,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t37.227\t\t],\n\t\t[144,\t\t523,\t\t0,\t\t0.008479338842975206,\t\t0.0224259292904064,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t12.825\t\t],\n\t\t[139,\t\t523,\t\t0,\t\t0.029245619834710742,\t\t0.0193370088934308,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t22.116999999999997\t\
t],\n\t\t[140,\t\t141,\t\t0,\t\t0.008362975206611572,\t\t0.022118173847506,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t12.649000000000001\t\t],\n\t\t[528,\t\t526,\t\t0,\t\t0.015389090909090908,\t\t0.0407006573227188,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t23.276\t\t],\n\t\t[528,\t\t148,\t\t0,\t\t0.014306115702479338,\t\t0.0378364333712244,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t21.638\t\t],\n\t\t[149,\t\t150,\t\t0,\t\t0.013604628099173552,\t\t0.035981157661543604,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t20.576999999999998\t\t],\n\t\t[145,\t\t528,\t\t0,\t\t0.00320595041322314,\t\t0.0084790121737992,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t4.849\t\t],\n\t\t[530,\t\t151,\t\t0,\t\t0.013144462809917355,\t\t0.0347641247737036,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t19.881\t\t],\n\t\t[524,\t\t152,\t\t0,\t\t0.014598347107438016,\t\t0.03860931919944,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t22.08\t\t],\n\t\t[149,\t\t525,\t\t0,\t\t0.016897190082644627,\t\t0.17875695122823998,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t51.114\t\t],\n\t\t[139,\t\t514,\t\t0,\t\t0.007824132231404959,\t\t0.020693056313687997,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t11.834000000000001\t\t],\n\t\t[126,\t\t120,\t\t0,\t\t0.012780297783933518,\t\t0.458781387757004,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t73.819\t\t],\n\t\t[530,\t\t153,\t\t0,\t\t0.02254545454545455,\t\t0.059627617060924,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t34.1\t\t],\n\t\t[528,\t\t147,\t\t0,\t\t0.15786710743801652,\t\t0.104380679149868,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t119.387\t\t],\n\t\t[528,\t\t154,\t\t0,\t\t0.006528264462809917,\t\t0.017265779790547203,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t9.874\t\t],\n\t\t[130,\t\t120,\t\t0,\t\t0.01450502077562327,\t\t0.5206947188067639,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t83.781\t\t],\n\t\t[528,\t\t155,\t\t0,\t\t0.16064132231404957,\t\t0.1062149715341,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t121.485\t\t],\n\t\t[524,\t\t533,\t\t0,\t\t0.004432727272727273,\t\t0.0468942356109744,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t13.409\t\t],\n\t\t[524,\t\t149,\t\t0,\t\t0.0056413223140495865,\t\t0.05968007537478799,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t17.065\t\t],\n\t\t[154,\t\t150,\t\t0,\t\t0.007539173553719007,\t\t0.0199394052006688,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t11.402999999999999\t\t],\n\t\t[157,\t\t110,\t\t0,\t\t0.009962084487534625,\t\t0.357614433044424,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t57.541000000000004\t\t],\n\t\t[119,\t\t158,\t\t0,\t\t0.0002490189289012004,\t\t0.08045252664623159,\t\t5134.0,\t\t5134.0,\t\t5134.0,\t\t0,\t\t3,\t\t1,\t\t-360,\t\t4.315\t\t],\n\t\t[159,\t\t60,\t\t0,\t\t0.010967451523545706,\t\t0.0984261617997728,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t31.674\t\t],\n\t\t[536,\t\t161,\t\t0,\t\t0.021314380165289255,\t\t0.056371704363524,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t32.238\t\t],\n\t\t[115,\t\t151,\t\t0,\t\t0.00379404958677686,\t\t0.0401376047510724,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t11.477\t\t],\n\t\t[162,\t\t134,\t\t0,\t\t0.0015910743801652895,\t\t0.016832124393744,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\
t4.813\t\t],\n\t\t[115,\t\t526,\t\t0,\t\t0.0037884297520661154,\t\t0.010019537998747198,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t5.73\t\t],\n\t\t[138,\t\t87,\t\t0,\t\t0.0011838642659279777,\t\t0.16999131006813442,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t13.675999999999998\t\t],\n\t\t[123,\t\t163,\t\t0,\t\t0.0022778739612188364,\t\t0.08177009602828919,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t13.157\t\t],\n\t\t[112,\t\t164,\t\t0,\t\t0.0008672957063711912,\t\t0.12453516639176802,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t10.019\t\t],\n\t\t[112,\t\t165,\t\t0,\t\t0.005989439058171744,\t\t0.21500619230086396,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t34.595\t\t],\n\t\t[166,\t\t165,\t\t0,\t\t0.002632790858725762,\t\t0.09451074335350361,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t15.207\t\t],\n\t\t[167,\t\t537,\t\t0,\t\t0.00832595041322314,\t\t0.08808100664460242,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t25.186\t\t],\n\t\t[168,\t\t104,\t\t0,\t\t0.002552458448753463,\t\t0.0916270065931116,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t14.743\t\t],\n\t\t[531,\t\t520,\t\t0,\t\t0.016156694214876033,\t\t0.042730794079516396,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t24.436999999999998\t\t],\n\t\t[139,\t\t520,\t\t0,\t\t0.010682314049586776,\t\t0.0282522993797748,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t16.157\t\t],\n\t\t[520,\t\t169,\t\t0,\t\t0.0011328925619834712,\t\t0.0119849761681232,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t3.427\t\t],\n\t\t[168,\t\t105,\t\t0,\t\t0.007340893351800554,\t\t0.26352009133553606,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t42.401\t\t],\n\t\t[520,\t\t170,\t\t0,\t\t0.005842644628099174,\t\t0.015452470732151198,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t8.837\t\t],\n\t\t[171,\t\t89,\t\t0,\t\t0.005505454545454546,\t\t0.058242717567848004,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t16.654\t\t],\n\t\t[521,\t\t172,\t\t0,\t\t0.006304793388429752,\t\t0.06669899780522001,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t19.072\t\t],\n\t\t[123,\t\t173,\t\t0,\t\t0.005247403047091413,\t\t0.18836891696656402,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t30.309\t\t],\n\t\t[521,\t\t174,\t\t0,\t\t0.013300495867768597,\t\t0.035176796844864404,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t20.117\t\t],\n\t\t[37,\t\t39,\t\t0,\t\t0.004338873499549862,\t\t0.35044859579205606,\t\t2567.0,\t\t2567.0,\t\t2567.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t37.592\t\t],\n\t\t[530,\t\t175,\t\t0,\t\t0.013128595041322313,\t\t0.0347221581224188,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t19.857\t\t],\n\t\t[530,\t\t176,\t\t0,\t\t0.005685289256198347,\t\t0.01503630144005,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t8.599\t\t],\n\t\t[88,\t\t530,\t\t0,\t\t0.006015867768595041,\t\t0.0159106066755372,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t9.099\t\t],\n\t\t[177,\t\t496,\t\t0,\t\t0.018632066115702478,\t\t0.19711036673178398,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t56.361999999999995\t\t],\n\t\t[178,\t\t525,\t\t0,\t\t0.03106842975206612,\t\t0.08216895464241199,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t46.99100000000001\t\t],\n\t\t[179,\t\t493,\t\t0,\t\t0.057079669421487594,\t\t0.15096278779194802,\t\t495.0,\t\t495.0,\t\t4
95.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t86.333\t\t],\n\t\t[180,\t\t181,\t\t0,\t\t0.041027438016528923,\t\t0.10850827416682,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t62.053999999999995\t\t],\n\t\t[182,\t\t180,\t\t0,\t\t0.00866314049586777,\t\t0.09164817200545601,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t26.206\t\t],\n\t\t[179,\t\t181,\t\t0,\t\t0.01957223140495868,\t\t0.051764115772731996,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t29.603\t\t],\n\t\t[180,\t\t493,\t\t0,\t\t0.06676561983471074,\t\t0.17657993119175203,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t100.98299999999999\t\t],\n\t\t[183,\t\t30,\t\t0,\t\t0.0024804362880886427,\t\t0.356166349712776,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t28.654\t\t],\n\t\t[183,\t\t21,\t\t0,\t\t0.0025647506925207757,\t\t0.36827307214930394,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t29.628\t\t],\n\t\t[538,\t\t185,\t\t0,\t\t0.018631404958677687,\t\t0.0123189607681008,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t14.09\t\t],\n\t\t[538,\t\t89,\t\t0,\t\t0.014509752066115702,\t\t0.038375005396288,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t21.945999999999998\t\t],\n\t\t[184,\t\t186,\t\t0,\t\t0.0016554709141274237,\t\t0.059427351084826,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t9.562000000000001\t\t],\n\t\t[184,\t\t187,\t\t0,\t\t0.002698753462603878,\t\t0.09687863927102919,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t15.588\t\t],\n\t\t[520,\t\t172,\t\t0,\t\t0.0034188429752066113,\t\t0.0361682589818792,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t10.342\t\t],\n\t\t[89,\t\t175,\t\t0,\t\t0.0037309090909090903,\t\t0.0098674088877672,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t5.643\t\t],\n\t\t[185,\t\t89,\t\t0,\t\t0.005812892561983471,\t\t0.0153737832609196,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t8.792\t\t],\n\t\t[89,\t\t188,\t\t0,\t\t0.003108760330578513,\t\t0.008221966434607202,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t4.702\t\t],\n\t\t[189,\t\t190,\t\t0,\t\t0.008599492151454294,\t\t0.17364414688031998,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t37.253\t\t],\n\t\t[539,\t\t172,\t\t0,\t\t0.0021570247933884296,\t\t0.022819366646419197,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t6.525\t\t],\n\t\t[504,\t\t192,\t\t0,\t\t0.0003084297520661157,\t\t0.00326290713886456,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t0.9329999999999999\t\t],\n\t\t[105,\t\t186,\t\t0,\t\t0.003273372576177285,\t\t0.1175060580379876,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t18.907\t\t],\n\t\t[105,\t\t187,\t\t0,\t\t0.0021712257617728533,\t\t0.0779416868808324,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t12.540999999999999\t\t],\n\t\t[539,\t\t193,\t\t0,\t\t0.005608595041322314,\t\t0.01483346262541,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t8.482999999999999\t\t],\n\t\t[187,\t\t194,\t\t0,\t\t4.8649584487534626e-05,\t\t0.0069856037041576,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t0.562\t\t],\n\t\t[539,\t\t540,\t\t0,\t\t0.004394710743801653,\t\t0.0116230138006708,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t6.647\t\t],\n\t\t[539,\t\t196,\t\t0,\t\t0.00332297520661157,\t\t0.008788516227194,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t5.026\t\t],\n\t\t[197,\t\t540,\t\t0,\t\t0.0047371900826446
29,\t\t0.012528794024621601,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t7.165\t\t],\n\t\t[110,\t\t198,\t\t0,\t\t0.00018724030470914128,\t\t0.02688587333118328,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t2.1630000000000003\t\t],\n\t\t[197,\t\t539,\t\t0,\t\t0.009172231404958677,\t\t0.024258473063998802,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t13.873\t\t],\n\t\t[199,\t\t537,\t\t0,\t\t0.03612826446280991,\t\t0.0238877676441712,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t27.322\t\t],\n\t\t[134,\t\t526,\t\t0,\t\t0.007771239669421488,\t\t0.020553167475975197,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t11.754000000000001\t\t],\n\t\t[200,\t\t193,\t\t0,\t\t0.0009322314049586776,\t\t0.009862163056380801,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t2.82\t\t],\n\t\t[4,\t\t201,\t\t0,\t\t0.013726108033240996,\t\t0.49273365914097605,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t79.282\t\t],\n\t\t[202,\t\t86,\t\t0,\t\t0.00013365650969529087,\t\t0.00479794133417816,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.772\t\t],\n\t\t[85,\t\t203,\t\t0,\t\t0.0019011426592797783,\t\t0.2729854600553416,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t21.962\t\t],\n\t\t[147,\t\t204,\t\t0,\t\t0.0073874380165289254,\t\t0.0781523963903056,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t22.346999999999998\t\t],\n\t\t[147,\t\t205,\t\t0,\t\t0.005959669421487603,\t\t0.00394049369636956,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t4.507\t\t],\n\t\t[123,\t\t206,\t\t0,\t\t0.0005753116343490305,\t\t0.0826091142668064,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t6.646\t\t],\n\t\t[537,\t\t207,\t\t0,\t\t0.018456198347107437,\t\t0.048812461297776,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t27.915\t\t],\n\t\t[165,\t\t208,\t\t0,\t\t0.00414612188365651,\t\t0.14883562055771601,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t23.948\t\t],\n\t\t[4,\t\t94,\t\t0,\t\t0.013687673130193905,\t\t0.49135394025941603,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t79.06\t\t],\n\t\t[4,\t\t2,\t\t0,\t\t5.2054478301015697e-05,\t\t0.016817654469309,\t\t5134.0,\t\t5134.0,\t\t5134.0,\t\t0,\t\t3,\t\t1,\t\t-360,\t\t0.902\t\t],\n\t\t[209,\t\t4,\t\t0,\t\t0.0022369286703601107,\t\t0.32120104149338397,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t25.840999999999998\t\t],\n\t\t[119,\t\t163,\t\t0,\t\t0.003535145429362881,\t\t0.12690306230914922,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t20.419\t\t],\n\t\t[210,\t\t3,\t\t0,\t\t0.0003150969529085873,\t\t0.011311208844832242,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t1.82\t\t],\n\t\t[99,\t\t211,\t\t0,\t\t0.0035045013850415513,\t\t0.1258030161741948,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t20.242\t\t],\n\t\t[99,\t\t69,\t\t0,\t\t0.021717970914127423,\t\t0.7796219621557,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t125.443\t\t],\n\t\t[212,\t\t99,\t\t0,\t\t0.008453774238227147,\t\t0.30346978938770003,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t48.82899999999999\t\t],\n\t\t[213,\t\t214,\t\t0,\t\t0.01490115702479339,\t\t0.15764073118032798,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t45.076\t\t],\n\t\t[510,\t\t215,\t\t0,\t\t0.002174710743801653,\t\t0.09202587186721281,\t\t1981.0,\t\t1981.0,\t\t1981.0,\t\t0,\t\t4,\t\t1,\t\t-360,\t\t13.157\t
\t],\n\t\t[128,\t\t69,\t\t0,\t\t0.010711651662049862,\t\t1.538088234801848,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t123.741\t\t],\n\t\t[216,\t\t69,\t\t0,\t\t0.009628462603878117,\t\t1.3825528982351443,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t111.228\t\t],\n\t\t[217,\t\t98,\t\t0,\t\t0.0012787396121883656,\t\t0.045903620070299994,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t7.386\t\t],\n\t\t[504,\t\t218,\t\t0,\t\t0.027480991735537193,\t\t0.072680994226412,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t41.565\t\t],\n\t\t[177,\t\t504,\t\t0,\t\t0.07054809917355372,\t\t0.18658373169634002,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t106.704\t\t],\n\t\t[219,\t\t209,\t\t0,\t\t0.003938798476454294,\t\t0.5655728721401839,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t45.501000000000005\t\t],\n\t\t[219,\t\t220,\t\t0,\t\t0.0013026315789473684,\t\t0.1870451326342096,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t15.048\t\t],\n\t\t[94,\t\t95,\t\t0,\t\t0.01070740997229917,\t\t0.38436979242743197,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t61.846000000000004\t\t],\n\t\t[159,\t\t221,\t\t0,\t\t0.009937153739612188,\t\t0.356719480257712,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t57.397\t\t],\n\t\t[34,\t\t161,\t\t0,\t\t0.010965289256198347,\t\t0.116002818645824,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t33.17\t\t],\n\t\t[222,\t\t221,\t\t0,\t\t0.0046457756232686975,\t\t0.16677196601221997,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t26.834\t\t],\n\t\t[211,\t\t52,\t\t0,\t\t0.05267313019390582,\t\t0.472709090515552,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t152.12\t\t],\n\t\t[215,\t\t223,\t\t0,\t\t0.04873190082644628,\t\t0.128884831985184,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t73.707\t\t],\n\t\t[224,\t\t215,\t\t0,\t\t0.019086280991735535,\t\t0.050478887076288004,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t28.868000000000002\t\t],\n\t\t[225,\t\t224,\t\t0,\t\t0.04200925619834711,\t\t0.11110496071615601,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t63.538999999999994\t\t],\n\t\t[224,\t\t223,\t\t0,\t\t0.031061818181818183,\t\t0.082151468537468,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t46.981\t\t],\n\t\t[226,\t\t6,\t\t0,\t\t0.06420099173553719,\t\t0.0424492677936932,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t48.552\t\t],\n\t\t[7,\t\t3,\t\t0,\t\t0.009332929362880887,\t\t0.335029305054692,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t53.907\t\t],\n\t\t[216,\t\t227,\t\t0,\t\t0.01989941135734072,\t\t0.7143401282507,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t114.939\t\t],\n\t\t[228,\t\t229,\t\t0,\t\t0.010545454545454545,\t\t0.027890337012274,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t15.95\t\t],\n\t\t[227,\t\t230,\t\t0,\t\t0.003993074792243767,\t\t0.573366419334696,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t46.128\t\t],\n\t\t[231,\t\t53,\t\t0,\t\t0.007193213296398893,\t\t1.0328749562310842,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t83.096\t\t],\n\t\t[544,\t\t545,\t\t0,\t\t0.013061818181818181,\t\t0.034545548464856,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t19.756\t\t],\n\t\t[234,\t\t235,\t\t0,\t\t0.04608859504132231,\t\t0.121893887321888,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t
69.709\t\t],\n\t\t[546,\t\t214,\t\t0,\t\t0.057025454545454546,\t\t0.15081940173295602,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t86.251\t\t],\n\t\t[233,\t\t227,\t\t0,\t\t0.0029001038781163438,\t\t0.1041066260218888,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t16.750999999999998\t\t],\n\t\t[237,\t\t238,\t\t0,\t\t0.026324628099173554,\t\t0.06962267451304,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t39.816\t\t],\n\t\t[212,\t\t100,\t\t0,\t\t0.007955505540166205,\t\t0.285583163531816,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t45.951\t\t],\n\t\t[519,\t\t239,\t\t0,\t\t0.01740429752066116,\t\t0.046030422038308406,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t26.324\t\t],\n\t\t[238,\t\t519,\t\t0,\t\t0.015166280991735538,\t\t0.040111375593995205,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t22.939\t\t],\n\t\t[213,\t\t240,\t\t0,\t\t0.01665388429752066,\t\t0.04404574915373599,\t\t1200.0,\t\t1200.0,\t\t1200.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t25.189\t\t],\n\t\t[241,\t\t242,\t\t0,\t\t0.009862015235457064,\t\t0.3540221919932281,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t56.963\t\t],\n\t\t[70,\t\t241,\t\t0,\t\t0.003819858033240997,\t\t0.5484941897752321,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t44.126999999999995\t\t],\n\t\t[509,\t\t213,\t\t0,\t\t0.011363636363636364,\t\t0.120216969880216,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t34.375\t\t],\n\t\t[68,\t\t243,\t\t0,\t\t0.003611668975069252,\t\t0.1296500701715312,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t20.861\t\t],\n\t\t[243,\t\t244,\t\t0,\t\t0.0007699099722991691,\t\t0.027637882270859202,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t4.447\t\t],\n\t\t[68,\t\t244,\t\t0,\t\t0.004104051246537396,\t\t0.147325387728876,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t23.705\t\t],\n\t\t[544,\t\t547,\t\t0,\t\t0.02418776859504132,\t\t0.255884661882476,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t73.168\t\t],\n\t\t[245,\t\t227,\t\t0,\t\t0.012676419667590028,\t\t0.45505241780707606,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t73.219\t\t],\n\t\t[246,\t\t208,\t\t0,\t\t0.0010155817174515235,\t\t0.0364568961999408,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t5.8660000000000005\t\t],\n\t\t[112,\t\t208,\t\t0,\t\t0.0017927631578947367,\t\t0.0643558063672372,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t10.355\t\t],\n\t\t[165,\t\t247,\t\t0,\t\t0.0002113919667590028,\t\t0.0075884538459086,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t1.2209999999999999\t\t],\n\t\t[537,\t\t549,\t\t0,\t\t0.00032066115702479337,\t\t0.00084807607842936,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.485\t\t],\n\t\t[537,\t\t550,\t\t0,\t\t0.00032198347107438016,\t\t0.0008515732993697601,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.48700000000000004\t\t],\n\t\t[537,\t\t551,\t\t0,\t\t0.0002651239669421488,\t\t0.0007011927988648,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.401\t\t],\n\t\t[110,\t\t251,\t\t0,\t\t0.00023857340720221602,\t\t0.008564200982522441,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t1.3780000000000001\t\t],\n\t\t[510,\t\t252,\t\t0,\t\t0.08467702479338843,\t\t0.055987884365424005,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t64.03699999999999\t\t],\n\t\t[529,\t\t253,\t\t0,\t\t0.04859504132231405,
\t\t0.12852286961777998,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t73.5\t\t],\n\t\t[237,\t\t239,\t\t0,\t\t0.03309421487603306,\t\t0.08752669712542799,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t50.055\t\t],\n\t\t[254,\t\t238,\t\t0,\t\t0.07815008264462811,\t\t0.05167231372274401,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t59.101000000000006\t\t],\n\t\t[69,\t\t255,\t\t0,\t\t0.0009369806094182826,\t\t0.134541235754472,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t10.824000000000002\t\t],\n\t\t[510,\t\t225,\t\t0,\t\t0.021953719008264466,\t\t0.232250442756508,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t66.41\t\t],\n\t\t[256,\t\t257,\t\t0,\t\t0.010125619834710746,\t\t0.0267799693631888,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t15.315\t\t],\n\t\t[258,\t\t190,\t\t0,\t\t0.011717451523545707,\t\t0.10515695255750121,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t33.84\t\t],\n\t\t[258,\t\t259,\t\t0,\t\t0.015782548476454293,\t\t0.1416387085570408,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t45.58\t\t],\n\t\t[260,\t\t261,\t\t0,\t\t0.006791031855955679,\t\t0.9751256416231477,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t78.45\t\t],\n\t\t[554,\t\t553,\t\t0,\t\t0.17583338842975205,\t\t0.11625986438453201,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t132.974\t\t],\n\t\t[515,\t\t263,\t\t0,\t\t0.006987107438016529,\t\t0.0739172618295936,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t21.136\t\t],\n\t\t[14,\t\t264,\t\t0,\t\t0.01700694214876033,\t\t0.17991802858084,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t51.446000000000005\t\t],\n\t\t[116,\t\t555,\t\t0,\t\t0.0009768595041322315,\t\t0.0103342878835768,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t2.955\t\t],\n\t\t[151,\t\t116,\t\t0,\t\t0.007244958677685951,\t\t0.0191612735410668,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t10.958\t\t],\n\t\t[111,\t\t114,\t\t0,\t\t0.008806613573407202,\t\t0.3161358573133961,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t50.867\t\t],\n\t\t[77,\t\t111,\t\t0,\t\t0.00288452216066482,\t\t0.41418912211817605,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t33.321999999999996\t\t],\n\t\t[266,\t\t525,\t\t0,\t\t0.01042909090909091,\t\t0.027582581569373602,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t15.774000000000001\t\t],\n\t\t[267,\t\t120,\t\t0,\t\t0.013136945983379503,\t\t0.471584184581432,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t75.87899999999999\t\t],\n\t\t[268,\t\t269,\t\t0,\t\t0.0010327272727272726,\t\t0.0027313295556817604,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t1.5619999999999998\t\t],\n\t\t[556,\t\t271,\t\t0,\t\t0.052289586776859506,\t\t0.0345735262323792,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t39.544000000000004\t\t],\n\t\t[556,\t\t272,\t\t0,\t\t0.04685355371900827,\t\t0.030979257409249603,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t35.433\t\t],\n\t\t[529,\t\t273,\t\t0,\t\t0.0034604958677685953,\t\t0.009152227205140799,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t5.234\t\t],\n\t\t[128,\t\t274,\t\t0,\t\t0.0029350761772853184,\t\t0.1053620459045884,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t16.953\t\t],\n\t\t[34,\t\t275,\t\t0,\t\t0.0008290909090909092,\t\t0.00054818938265696,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0
.627\t\t],\n\t\t[503,\t\t276,\t\t0,\t\t0.006707438016528925,\t\t0.07095861291266,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t20.29\t\t],\n\t\t[503,\t\t504,\t\t0,\t\t0.06432727272727272,\t\t0.680524223098808,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t194.59\t\t],\n\t\t[177,\t\t218,\t\t0,\t\t0.04330380165289256,\t\t0.114528740018308,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t65.497\t\t],\n\t\t[277,\t\t278,\t\t0,\t\t0.007191135734072023,\t\t1.032576638635032,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t83.072\t\t],\n\t\t[557,\t\t558,\t\t0,\t\t0.04341289256198347,\t\t0.258338836678648,\t\t743.0,\t\t743.0,\t\t743.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t98.493\t\t],\n\t\t[557,\t\t559,\t\t0,\t\t0.03415867768595042,\t\t0.09034195998366001,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t51.665\t\t],\n\t\t[559,\t\t558,\t\t0,\t\t0.04474314049586777,\t\t0.11833546501370001,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t67.67399999999999\t\t],\n\t\t[277,\t\t78,\t\t0,\t\t0.03585768698060942,\t\t0.32180078416049196,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t103.557\t\t],\n\t\t[277,\t\t279,\t\t0,\t\t0.021390927977839334,\t\t0.191970480441328,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t61.777\t\t],\n\t\t[78,\t\t279,\t\t0,\t\t0.015811980609418283,\t\t0.1419028439283376,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t45.665\t\t],\n\t\t[281,\t\t282,\t\t0,\t\t0.0023178670360110803,\t\t0.08320574945862161,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t13.388\t\t],\n\t\t[283,\t\t161,\t\t0,\t\t0.036741157024793386,\t\t0.09717203248350399,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t55.571000000000005\t\t],\n\t\t[268,\t\t161,\t\t0,\t\t0.018883636363636366,\t\t0.199771751868832,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t57.123000000000005\t\t],\n\t\t[256,\t\t284,\t\t0,\t\t0.010755371900826446,\t\t0.113782083346976,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t32.535\t\t],\n\t\t[515,\t\t516,\t\t0,\t\t0.04071140495867769,\t\t0.107672438361532,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t61.576\t\t],\n\t\t[263,\t\t516,\t\t0,\t\t0.0030355371900826445,\t\t0.128452925198488,\t\t1981.0,\t\t1981.0,\t\t1981.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t18.365\t\t],\n\t\t[516,\t\t285,\t\t0,\t\t0.006908429752066116,\t\t0.018271230811372,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t10.449000000000002\t\t],\n\t\t[63,\t\t286,\t\t0,\t\t0.019088925619834708,\t\t0.050485881518556,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t28.872\t\t],\n\t\t[287,\t\t516,\t\t0,\t\t0.01732892561983471,\t\t0.011457770111127998,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t13.105\t\t],\n\t\t[8,\t\t102,\t\t0,\t\t0.015100069252077563,\t\t0.542055501663692,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t87.21799999999999\t\t],\n\t\t[8,\t\t101,\t\t0,\t\t0.019246883656509697,\t\t0.69091598202144,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t111.17\t\t],\n\t\t[80,\t\t288,\t\t0,\t\t0.007984072022160666,\t\t0.2866086302684072,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t46.11600000000001\t\t],\n\t\t[80,\t\t289,\t\t0,\t\t0.0003782317636201524,\t\t0.122198345223416,\t\t5134.0,\t\t5134.0,\t\t5134.0,\t\t0,\t\t4,\t\t1,\t\t-360,\t\t6.553999999999999\t\t],\n\t\t[276,\t\t560,\t\t0,\t\t0.01778314049586777,\t\t0.047032375838192794,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,
\t\t2,\t\t1,\t\t-360,\t\t26.897\t\t],\n\t\t[37,\t\t290,\t\t0,\t\t0.005629501385041551,\t\t0.4546919507138321,\t\t2567.0,\t\t2567.0,\t\t2567.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t48.773999999999994\t\t],\n\t\t[290,\t\t74,\t\t0,\t\t0.02071595106187673,\t\t1.673216783321968,\t\t2567.0,\t\t2567.0,\t\t2567.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t179.483\t\t],\n\t\t[512,\t\t291,\t\t0,\t\t0.0053299173553719,\t\t0.056385693247479204,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t16.123\t\t],\n\t\t[78,\t\t292,\t\t0,\t\t0.0058149815327908595,\t\t0.469673087481408,\t\t2567.0,\t\t2567.0,\t\t2567.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t50.381\t\t],\n\t\t[199,\t\t548,\t\t0,\t\t0.0015530578512396695,\t\t0.00410748599634868,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t2.349\t\t],\n\t\t[491,\t\t293,\t\t0,\t\t0.014176528925619833,\t\t0.009373426429729999,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t10.720999999999998\t\t],\n\t\t[4,\t\t294,\t\t0,\t\t9.669321329639889e-05,\t\t0.013884198109531681,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t1.117\t\t],\n\t\t[490,\t\t541,\t\t0,\t\t0.050580495867768596,\t\t0.133773946861896,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t76.503\t\t],\n\t\t[491,\t\t295,\t\t0,\t\t0.010613553719008264,\t\t0.028070443890777202,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t16.053\t\t],\n\t\t[491,\t\t296,\t\t0,\t\t0.004400661157024794,\t\t0.0116387512948784,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t6.656000000000001\t\t],\n\t\t[295,\t\t297,\t\t0,\t\t0.020297520661157024,\t\t0.053682341459340005,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t30.7\t\t],\n\t\t[508,\t\t161,\t\t0,\t\t0.023239669421487603,\t\t0.061463658055360006,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t35.15\t\t],\n\t\t[117,\t\t123,\t\t0,\t\t0.005876211911357341,\t\t0.21094161505628,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t33.941\t\t],\n\t\t[133,\t\t117,\t\t0,\t\t0.004469182825484764,\t\t0.0401081792747688,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t12.907\t\t],\n\t\t[71,\t\t74,\t\t0,\t\t0.03904524469065097,\t\t0.7884161162841721,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t169.144\t\t],\n\t\t[74,\t\t278,\t\t0,\t\t0.0077122576177285325,\t\t1.10740463560792,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t89.09200000000001\t\t],\n\t\t[298,\t\t515,\t\t0,\t\t0.021701157024793388,\t\t0.05739464148919599,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t32.823\t\t],\n\t\t[5,\t\t299,\t\t0,\t\t0.0016232686980609415,\t\t0.058271370400665996,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t9.376\t\t],\n\t\t[32,\t\t292,\t\t0,\t\t0.009679362880886427,\t\t0.34746541983297996,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t55.908\t\t],\n\t\t[5,\t\t29,\t\t0,\t\t0.00743395083102493,\t\t1.0674425076571843,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t85.87700000000001\t\t],\n\t\t[503,\t\t560,\t\t0,\t\t0.015140495867768593,\t\t0.160172719142436,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t45.8\t\t],\n\t\t[300,\t\t301,\t\t0,\t\t0.004892053324099723,\t\t0.7024509290644521,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t56.513000000000005\t\t],\n\t\t[51,\t\t300,\t\t0,\t\t0.002573493767313019,\t\t0.3695284920307039,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t29.729\t\t],\n\t\t[244,\t\t302,\t\t0,\t\t0.007714508310249307,\t\t1.10772781300
4004,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t89.118\t\t],\n\t\t[31,\t\t302,\t\t0,\t\t0.004369113573407203,\t\t0.6273619041941161,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t50.472\t\t],\n\t\t[51,\t\t282,\t\t0,\t\t0.006288434903047093,\t\t0.9029576432132521,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t72.64399999999999\t\t],\n\t\t[303,\t\t304,\t\t0,\t\t8.795013850415512e-05,\t\t0.000789298639172312,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.254\t\t],\n\t\t[305,\t\t304,\t\t0,\t\t0.003881117266849031,\t\t0.0783689646873844,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t16.813\t\t],\n\t\t[305,\t\t259,\t\t0,\t\t0.0025625,\t\t0.36794989475177603,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t29.601999999999997\t\t],\n\t\t[306,\t\t307,\t\t0,\t\t0.03223268698060942,\t\t0.289268628831688,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t93.088\t\t],\n\t\t[305,\t\t308,\t\t0,\t\t0.0024272853185595567,\t\t0.0217833994511184,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t7.01\t\t],\n\t\t[305,\t\t309,\t\t0,\t\t0.011014773776523545,\t\t0.22241441259921202,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t47.716\t\t],\n\t\t[310,\t\t309,\t\t0,\t\t0.009565962603878117,\t\t0.343394627639832,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t55.253\t\t],\n\t\t[306,\t\t309,\t\t0,\t\t0.035333795013850415,\t\t0.31709917455019604,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t102.044\t\t],\n\t\t[311,\t\t280,\t\t0,\t\t0.003433691135734072,\t\t0.1232611016590444,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t19.833\t\t],\n\t\t[280,\t\t278,\t\t0,\t\t0.009749769159764544,\t\t0.7874838737974121,\t\t2567.0,\t\t2567.0,\t\t2567.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t84.47200000000001\t\t],\n\t\t[311,\t\t32,\t\t0,\t\t0.01205909510619806,\t\t0.9740069506375919,\t\t2567.0,\t\t2567.0,\t\t2567.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t104.48\t\t],\n\t\t[13,\t\t312,\t\t0,\t\t0.0043324965373961214,\t\t0.622104056565324,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t50.049\t\t],\n\t\t[313,\t\t314,\t\t0,\t\t0.006092624653739613,\t\t0.218710302449316,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t35.191\t\t],\n\t\t[312,\t\t313,\t\t0,\t\t0.00893957756232687,\t\t0.32090893884734,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t51.635\t\t],\n\t\t[547,\t\t566,\t\t0,\t\t0.027035702479338848,\t\t0.286013220297816,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t81.783\t\t],\n\t\t[245,\t\t315,\t\t0,\t\t0.014162569252077564,\t\t0.508401547875772,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t81.803\t\t],\n\t\t[312,\t\t316,\t\t0,\t\t8.803670360110802e-05,\t\t0.01264120812658816,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t1.0170000000000001\t\t],\n\t\t[312,\t\t314,\t\t0,\t\t0.005339854570637119,\t\t0.191687700220296,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t30.843000000000004\t\t],\n\t\t[554,\t\t546,\t\t0,\t\t0.08174743801652892,\t\t0.21620344446439202,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t123.64299999999999\t\t],\n\t\t[262,\t\t216,\t\t0,\t\t0.042641966759002774,\t\t0.38268554099981195,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t123.15\t\t],\n\t\t[317,\t\t233,\t\t0,\t\t0.005647276084951523,\t\t0.114031901035644,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t24.464000000000002\t\t],\n\t\
t[318,\t\t317,\t\t0,\t\t0.008311634349030471,\t\t0.16783161497270002,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t36.006\t\t],\n\t\t[231,\t\t52,\t\t0,\t\t0.035263677285318554,\t\t1.2658796434850879,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t203.683\t\t],\n\t\t[319,\t\t567,\t\t0,\t\t0.006089586776859504,\t\t0.0644223069721,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t18.421\t\t],\n\t\t[557,\t\t321,\t\t0,\t\t0.010004628099173555,\t\t0.10583989458750401,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t30.264\t\t],\n\t\t[277,\t\t65,\t\t0,\t\t0.009430170821779778,\t\t0.7616700793261759,\t\t2567.0,\t\t2567.0,\t\t2567.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t81.703\t\t],\n\t\t[322,\t\t288,\t\t0,\t\t0.006545013850415513,\t\t0.528637424797136,\t\t2567.0,\t\t2567.0,\t\t2567.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t56.706\t\t],\n\t\t[322,\t\t323,\t\t0,\t\t0.0018503000923372577,\t\t0.14944779312484,\t\t2567.0,\t\t2567.0,\t\t2567.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t16.031\t\t],\n\t\t[277,\t\t324,\t\t0,\t\t0.019719529085872576,\t\t0.39818407235049996,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t85.425\t\t],\n\t\t[324,\t\t325,\t\t0,\t\t0.01103508771932133,\t\t0.22282459929396403,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t47.803999999999995\t\t],\n\t\t[277,\t\t325,\t\t0,\t\t0.008665743305609418,\t\t0.174981914850048,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t37.54\t\t],\n\t\t[326,\t\t327,\t\t0,\t\t0.007654214876033058,\t\t0.0202436634226288,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t11.577\t\t],\n\t\t[328,\t\t326,\t\t0,\t\t0.10300958677685952,\t\t0.068109252150368,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t77.90100000000001\t\t],\n\t\t[328,\t\t327,\t\t0,\t\t0.09827173553719008,\t\t0.064976616491468,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t74.318\t\t],\n\t\t[326,\t\t329,\t\t0,\t\t0.028062148760330575,\t\t0.07421802283046801,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t42.443999999999996\t\t],\n\t\t[568,\t\t329,\t\t0,\t\t0.05699900826446282,\t\t0.15074945731414802,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t86.211\t\t],\n\t\t[568,\t\t326,\t\t0,\t\t0.03218644628099173,\t\t0.08512585494846397,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t48.681999999999995\t\t],\n\t\t[332,\t\t78,\t\t0,\t\t0.006471029547541551,\t\t0.522661750455416,\t\t2567.0,\t\t2567.0,\t\t2567.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t56.065\t\t],\n\t\t[333,\t\t306,\t\t0,\t\t0.008580159279778392,\t\t0.308006702824228,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t49.559\t\t],\n\t\t[332,\t\t333,\t\t0,\t\t0.007504674515235457,\t\t0.26939943395502003,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t43.347\t\t],\n\t\t[332,\t\t334,\t\t0,\t\t0.017124653739612188,\t\t0.15368328149175597,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t49.456\t\t],\n\t\t[66,\t\t334,\t\t0,\t\t0.030625,\t\t0.27484062260471603,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t88.445\t\t],\n\t\t[330,\t\t335,\t\t0,\t\t0.00550536703601108,\t\t0.790516769355108,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t63.598\t\t],\n\t\t[336,\t\t66,\t\t0,\t\t0.015054362880886425,\t\t0.1351036887216764,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t43.477\t\t],\n\t\t[330,\t\t336,\t\t0,\t\t0.039036357340720224,\t\t0.350327404269788,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t112.737000
00000001\t\t],\n\t\t[68,\t\t70,\t\t0,\t\t0.016314058171745152,\t\t0.14640868261713597,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t47.115\t\t],\n\t\t[509,\t\t337,\t\t0,\t\t0.03494082644628099,\t\t0.09241056617056001,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t52.848\t\t],\n\t\t[324,\t\t288,\t\t0,\t\t0.012627423822714683,\t\t0.11332339674541761,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t36.468\t\t],\n\t\t[338,\t\t559,\t\t0,\t\t0.009228099173553718,\t\t0.097624922595552,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t27.915\t\t],\n\t\t[339,\t\t559,\t\t0,\t\t0.03560595041322315,\t\t0.023542417076125203,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t26.927\t\t],\n\t\t[339,\t\t340,\t\t0,\t\t0.08711537190082644,\t\t0.23040041287850396,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t131.762\t\t],\n\t\t[559,\t\t340,\t\t0,\t\t0.20983272727272728,\t\t0.138740000599684,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t158.686\t\t],\n\t\t[341,\t\t292,\t\t0,\t\t0.0009329409048961218,\t\t0.07535316024134399,\t\t2567.0,\t\t2567.0,\t\t2567.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t8.083\t\t],\n\t\t[557,\t\t342,\t\t0,\t\t0.006019834710743802,\t\t0.0636843933534336,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t18.21\t\t],\n\t\t[558,\t\t343,\t\t0,\t\t0.010650247933884296,\t\t0.11266996708783199,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t32.217\t\t],\n\t\t[502,\t\t340,\t\t0,\t\t0.021737520661157025,\t\t0.22996326026071198,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t65.756\t\t],\n\t\t[72,\t\t32,\t\t0,\t\t0.00675502077562327,\t\t0.969954803293024,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t78.03399999999999\t\t],\n\t\t[344,\t\t345,\t\t0,\t\t0.0005762927054480609,\t\t0.04654686738645321,\t\t2567.0,\t\t2567.0,\t\t2567.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t4.993\t\t],\n\t\t[346,\t\t47,\t\t0,\t\t0.0011340027700831024,\t\t0.04070792194158799,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t6.55\t\t],\n\t\t[46,\t\t47,\t\t0,\t\t0.0008975069252077563,\t\t0.0322183003580208,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t5.184\t\t],\n\t\t[346,\t\t345,\t\t0,\t\t0.0007217797783933517,\t\t0.025910126194627202,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t4.169\t\t],\n\t\t[347,\t\t328,\t\t0,\t\t0.029905454545454544,\t\t0.07909314882361201,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t45.232\t\t],\n\t\t[347,\t\t348,\t\t0,\t\t0.04883438016528925,\t\t0.129155866607944,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t73.862\t\t],\n\t\t[571,\t\t348,\t\t0,\t\t0.041548429752066116,\t\t0.10988617921762801,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t62.842\t\t],\n\t\t[347,\t\t572,\t\t0,\t\t0.016052231404958678,\t\t0.04245451362512801,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t24.279\t\t],\n\t\t[571,\t\t570,\t\t0,\t\t0.17379041322314048,\t\t0.11490906279551602,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t131.429\t\t],\n\t\t[14,\t\t350,\t\t0,\t\t0.02166743801652892,\t\t0.05730546235524,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t32.772\t\t],\n\t\t[350,\t\t573,\t\t0,\t\t0.026277685950413226,\t\t0.06949852316919598,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t39.745\t\t],\n\t\t[15,\t\t351,\t\t0,\t\t0.02639265927977839,\t\t0.236857956201204,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t76.222\t\t],\n\t\t[352,\t\
t15,\t\t0,\t\t0.0015260560941828254,\t\t0.219126704094076,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t17.629\t\t],\n\t\t[15,\t\t335,\t\t0,\t\t0.0035338758079432133,\t\t1.1417173740880242,\t\t5134.0,\t\t5134.0,\t\t5134.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t61.235\t\t],\n\t\t[232,\t\t227,\t\t0,\t\t5.5747922437673134e-05,\t\t0.000500303468136644,\t\t1200.0,\t\t1200.0,\t\t1200.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.161\t\t],\n\t\t[565,\t\t544,\t\t0,\t\t0.0394803305785124,\t\t0.10441652566461601,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t59.714\t\t],\n\t\t[235,\t\t567,\t\t0,\t\t0.02391404958677686,\t\t0.25298896294275997,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t72.34\t\t],\n\t\t[567,\t\t286,\t\t0,\t\t0.008068760330578512,\t\t0.34144067500694797,\t\t1981.0,\t\t1981.0,\t\t1981.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t48.816\t\t],\n\t\t[353,\t\t519,\t\t0,\t\t0.007621818181818182,\t\t0.080631926038356,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t23.055999999999997\t\t],\n\t\t[354,\t\t353,\t\t0,\t\t0.0008436363636363636,\t\t0.00892490784392768,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t2.552\t\t],\n\t\t[355,\t\t354,\t\t0,\t\t0.0068502479338842966,\t\t0.0181173530898976,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t10.360999999999999\t\t],\n\t\t[354,\t\t356,\t\t0,\t\t0.01855404958677686,\t\t0.049071255647172,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t28.063000000000002\t\t],\n\t\t[357,\t\t358,\t\t0,\t\t0.0034823407202216067,\t\t0.5000300103406239,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t40.228\t\t],\n\t\t[574,\t\t359,\t\t0,\t\t0.013352066115702478,\t\t0.0353131884615884,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t20.195\t\t],\n\t\t[235,\t\t575,\t\t0,\t\t0.007459504132231404,\t\t0.0789147905557,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t22.565\t\t],\n\t\t[167,\t\t361,\t\t0,\t\t0.000616198347107438,\t\t0.0065188198358579995,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t1.864\t\t],\n\t\t[528,\t\t362,\t\t0,\t\t0.0011960330578512398,\t\t0.012652945368078402,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t3.6180000000000003\t\t],\n\t\t[363,\t\t344,\t\t0,\t\t0.0002662742382271468,\t\t0.009558592968871479,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t1.538\t\t],\n\t\t[259,\t\t364,\t\t0,\t\t0.013069713758102496,\t\t0.26390852570525997,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t56.618\t\t],\n\t\t[54,\t\t56,\t\t0,\t\t0.007723337950138504,\t\t0.0693122289241068,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t22.305\t\t],\n\t\t[365,\t\t364,\t\t0,\t\t0.0049974607571537395,\t\t0.10091058802821559,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t21.649\t\t],\n\t\t[231,\t\t366,\t\t0,\t\t0.0013273891966759002,\t\t0.0476500209962672,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t7.667000000000001\t\t],\n\t\t[30,\t\t367,\t\t0,\t\t0.01126108033240997,\t\t0.1010613005635992,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t32.522\t\t],\n\t\t[61,\t\t367,\t\t0,\t\t0.020337603878116343,\t\t0.18251754162067196,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t58.735\t\t],\n\t\t[254,\t\t368,\t\t0,\t\t0.0004297520661157025,\t\t0.00454638722456732,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t1.3\t\t],\n\t\t[254,\t\t369,\t\t0,\t\t0.00015999999999999999,\t\t0.00169265493591832,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\
t1,\t\t-360,\t\t0.484\t\t],\n\t\t[254,\t\t370,\t\t0,\t\t0.0003669421487603306,\t\t0.0038819152455960805,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t1.11\t\t],\n\t\t[99,\t\t358,\t\t0,\t\t0.0020184383656509696,\t\t0.28982797432374396,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t23.316999999999997\t\t],\n\t\t[354,\t\t519,\t\t0,\t\t0.006762644628099174,\t\t0.07154264880985199,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t20.457\t\t],\n\t\t[571,\t\t371,\t\t0,\t\t0.023726942148760328,\t\t0.06275238397221199,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t35.887\t\t],\n\t\t[207,\t\t372,\t\t0,\t\t0.002329256198347108,\t\t0.006160354689297601,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t3.523\t\t],\n\t\t[57,\t\t373,\t\t0,\t\t0.0017725619834710745,\t\t0.0046880246727212796,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t2.681\t\t],\n\t\t[209,\t\t374,\t\t0,\t\t0.0010122922437673131,\t\t0.0363388121515216,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t5.847\t\t],\n\t\t[375,\t\t376,\t\t0,\t\t0.0045364727608518006,\t\t0.0916021467933684,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t19.652\t\t],\n\t\t[376,\t\t377,\t\t0,\t\t0.0030886426592797783,\t\t0.062367022394423606,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t13.38\t\t],\n\t\t[16,\t\t49,\t\t0,\t\t0.002266101108033241,\t\t0.32538991773524,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t26.178\t\t],\n\t\t[318,\t\t377,\t\t0,\t\t0.004755078485685596,\t\t0.0960163149704152,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t20.599\t\t],\n\t\t[378,\t\t297,\t\t0,\t\t0.01753917355371901,\t\t0.046387138574374404,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t26.528000000000002\t\t],\n\t\t[562,\t\t379,\t\t0,\t\t0.01802314049586777,\t\t0.047667121439141605,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t27.26\t\t],\n\t\t[576,\t\t563,\t\t0,\t\t0.001808264462809917,\t\t0.004782449638150801,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t2.735\t\t],\n\t\t[576,\t\t381,\t\t0,\t\t0.0034320661157024794,\t\t0.009077036954898,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t5.191\t\t],\n\t\t[577,\t\t576,\t\t0,\t\t0.06004495867768594,\t\t0.15880530575430396,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t90.818\t\t],\n\t\t[244,\t\t383,\t\t0,\t\t0.006845567867036011,\t\t0.1382282547912684,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t29.655\t\t],\n\t\t[244,\t\t306,\t\t0,\t\t0.02679108956599723,\t\t0.5409756541164079,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t116.059\t\t],\n\t\t[383,\t\t306,\t\t0,\t\t0.0300685595567867,\t\t0.269846910348376,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t86.838\t\t],\n\t\t[380,\t\t306,\t\t0,\t\t0.00025605955678670365,\t\t0.03676764369572,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t2.958\t\t],\n\t\t[252,\t\t225,\t\t0,\t\t0.062094545454545444,\t\t0.041056499553586,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t46.958999999999996\t\t],\n\t\t[220,\t\t76,\t\t0,\t\t0.002772074099722992,\t\t0.398042682239984,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t32.023\t\t],\n\t\t[542,\t\t384,\t\t0,\t\t0.007939834710743802,\t\t0.020999063146094,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t12.009\t\t],\n\t\t[385,\t\t384,\t\t0,\t\t0.053734876033057856,\t\t0.035529141854791196,\t\t248.0,\t\t248.0,\t\t248.0
,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t40.637\t\t],\n\t\t[542,\t\t385,\t\t0,\t\t0.011306115702479337,\t\t0.119608453436296,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t34.201\t\t],\n\t\t[386,\t\t385,\t\t0,\t\t0.003668760330578512,\t\t0.0388121580140316,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t11.097999999999999\t\t],\n\t\t[387,\t\t578,\t\t0,\t\t0.015444628099173553,\t\t0.16339016240905604,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t46.72\t\t],\n\t\t[332,\t\t388,\t\t0,\t\t0.014036184210526315,\t\t0.5038646344377999,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t81.07300000000001\t\t],\n\t\t[382,\t\t332,\t\t0,\t\t0.017764369806094183,\t\t0.637697365901468,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t102.60700000000001\t\t],\n\t\t[382,\t\t388,\t\t0,\t\t0.00476159972299169,\t\t0.17092976750548,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t27.503\t\t],\n\t\t[579,\t\t578,\t\t0,\t\t0.01911074380165289,\t\t0.050543585664,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t28.905\t\t],\n\t\t[577,\t\t387,\t\t0,\t\t0.07597818181818182,\t\t0.20094506949431204,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t114.917\t\t],\n\t\t[144,\t\t390,\t\t0,\t\t0.0004277685950413223,\t\t0.0011313509747276,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.647\t\t],\n\t\t[37,\t\t49,\t\t0,\t\t0.008441481994459835,\t\t0.303028527944352,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t48.758\t\t],\n\t\t[391,\t\t233,\t\t0,\t\t0.014211218836565096,\t\t0.1275369872004348,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t41.042\t\t],\n\t\t[392,\t\t310,\t\t0,\t\t0.007035318559556785,\t\t0.06313767618386361,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t20.317999999999998\t\t],\n\t\t[260,\t\t393,\t\t0,\t\t0.006341412742382271,\t\t0.0569102963692744,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t18.314\t\t],\n\t\t[394,\t\t230,\t\t0,\t\t0.0007590027700831025,\t\t0.00681158510656168,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t2.1919999999999997\t\t],\n\t\t[395,\t\t282,\t\t0,\t\t0.008762984764542936,\t\t0.314569689934484,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t50.615\t\t],\n\t\t[395,\t\t244,\t\t0,\t\t0.0034046052631578946,\t\t0.12221699007344,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t19.665\t\t],\n\t\t[25,\t\t396,\t\t0,\t\t0.008809037396121884,\t\t0.316222866612064,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t50.881\t\t],\n\t\t[81,\t\t74,\t\t0,\t\t0.0075207756232686974,\t\t0.26997742429652244,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t43.44\t\t],\n\t\t[278,\t\t80,\t\t0,\t\t0.016286011080332407,\t\t0.5846279085788,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t94.068\t\t],\n\t\t[81,\t\t278,\t\t0,\t\t0.021054016620498613,\t\t0.755787629231688,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t121.60799999999999\t\t],\n\t\t[569,\t\t570,\t\t0,\t\t0.03253950413223141,\t\t0.08605961294018,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t49.216\t\t],\n\t\t[397,\t\t552,\t\t0,\t\t0.006289586776859504,\t\t0.0166345314104904,\t\t1200.0,\t\t1200.0,\t\t1200.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t9.513\t\t],\n\t\t[542,\t\t398,\t\t0,\t\t0.0005580165289256199,\t\t0.0059033089500572,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t1.6880000000000002\t\t],\n\t\t[398,\t\t385,\t\t0,\t\t0.021893553719008262,\t\t0.0579
0348713648401,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t33.114000000000004\t\t],\n\t\t[399,\t\t499,\t\t0,\t\t0.03266380165289256,\t\t0.021597087927192803,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t24.701999999999998\t\t],\n\t\t[83,\t\t399,\t\t0,\t\t0.025700495867768593,\t\t0.016992996557050798,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t19.436\t\t],\n\t\t[498,\t\t400,\t\t0,\t\t0.012134214876033058,\t\t0.032092247974028,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t18.352999999999998\t\t],\n\t\t[518,\t\t239,\t\t0,\t\t0.04685289256198347,\t\t0.123915281026504,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t70.865\t\t],\n\t\t[575,\t\t543,\t\t0,\t\t0.0030307438016528923,\t\t0.032062521596058796,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t9.168\t\t],\n\t\t[401,\t\t360,\t\t0,\t\t0.007957063711911357,\t\t0.071409774520472,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t22.98\t\t],\n\t\t[580,\t\t581,\t\t0,\t\t0.007134545454545454,\t\t0.018869255592422397,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t10.790999999999999\t\t],\n\t\t[401,\t\t402,\t\t0,\t\t0.0033434903047091418,\t\t0.030005778188384805,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t9.656\t\t],\n\t\t[403,\t\t231,\t\t0,\t\t0.009592105263157893,\t\t0.08608327126915,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t27.701999999999998\t\t],\n\t\t[189,\t\t360,\t\t0,\t\t0.028456024930747923,\t\t0.255375399471348,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t82.181\t\t],\n\t\t[234,\t\t404,\t\t0,\t\t0.008092561983471074,\t\t0.0214029921648796,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t12.24\t\t],\n\t\t[235,\t\t404,\t\t0,\t\t0.05107504132231405,\t\t0.13508190749437998,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t77.251\t\t],\n\t\t[235,\t\t580,\t\t0,\t\t0.000580495867768595,\t\t0.00153527999352772,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.878\t\t],\n\t\t[216,\t\t259,\t\t0,\t\t0.0022115650969529088,\t\t0.079389770210892,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t12.774000000000001\t\t],\n\t\t[405,\t\t259,\t\t0,\t\t0.0052832409972299165,\t\t0.1896554115982928,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t30.516\t\t],\n\t\t[405,\t\t318,\t\t0,\t\t0.0066348684210526315,\t\t0.23817552558268398,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t38.323\t\t],\n\t\t[406,\t\t230,\t\t0,\t\t8.098164819944598e-05,\t\t0.046512685161986804,\t\t6845.0,\t\t6845.0,\t\t6845.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t1.871\t\t],\n\t\t[542,\t\t407,\t\t0,\t\t0.025569586776859506,\t\t0.067625761355152,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t38.674\t\t],\n\t\t[23,\t\t408,\t\t0,\t\t0.03224528925619835,\t\t0.08528148128033601,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t48.771\t\t],\n\t\t[577,\t\t348,\t\t0,\t\t0.012999008264462809,\t\t0.13751772188026398,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t39.321999999999996\t\t],\n\t\t[562,\t\t564,\t\t0,\t\t0.06921520661157024,\t\t0.18305853298686803,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t104.68799999999999\t\t],\n\t\t[582,\t\t507,\t\t0,\t\t0.006357685950413223,\t\t0.016814638289042002,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t9.616\t\t],\n\t\t[27,\t\t410,\t\t0,\t\t0.0030042975206611565,\t\t0.007945685980170399,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t4.544\t\
t],\n\t\t[501,\t\t27,\t\t0,\t\t0.003811570247933884,\t\t0.040322957460962,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t11.53\t\t],\n\t\t[27,\t\t411,\t\t0,\t\t0.004648595041322314,\t\t0.012294480221518,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t7.031000000000001\t\t],\n\t\t[411,\t\t410,\t\t0,\t\t0.002054214876033058,\t\t0.0054329327333556,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t3.1069999999999998\t\t],\n\t\t[403,\t\t360,\t\t0,\t\t0.008191481994459833,\t\t0.07351353506655639,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t23.656999999999996\t\t],\n\t\t[412,\t\t360,\t\t0,\t\t0.016761772853185596,\t\t0.15042664773666,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t48.408\t\t],\n\t\t[326,\t\t413,\t\t0,\t\t0.012077024793388432,\t\t0.12776397267356798,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t36.533\t\t],\n\t\t[414,\t\t413,\t\t0,\t\t0.008093223140495867,\t\t0.08561896310149601,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t24.482\t\t],\n\t\t[6,\t\t297,\t\t0,\t\t0.019472396694214876,\t\t0.0128750188978664,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t14.725999999999999\t\t],\n\t\t[554,\t\t580,\t\t0,\t\t0.07435371900826447,\t\t0.196648733567264,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t112.46\t\t],\n\t\t[262,\t\t401,\t\t0,\t\t0.03931232686980609,\t\t0.35280406181043206,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t113.53399999999999\t\t],\n\t\t[499,\t\t556,\t\t0,\t\t0.04185586776859504,\t\t0.11069928308639199,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t63.306999999999995\t\t],\n\t\t[224,\t\t229,\t\t0,\t\t0.004135206611570248,\t\t0.0437467367631624,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t12.509\t\t],\n\t\t[583,\t\t507,\t\t0,\t\t0.024632727272727268,\t\t0.065147980317596,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t37.257\t\t],\n\t\t[415,\t\t307,\t\t0,\t\t0.015675554016620498,\t\t0.1406784987952448,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t45.271\t\t],\n\t\t[416,\t\t507,\t\t0,\t\t0.0010555371900826446,\t\t0.011166626467730801,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t3.193\t\t],\n\t\t[284,\t\t561,\t\t0,\t\t0.015221487603305786,\t\t0.16102953827307598,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t46.045\t\t],\n\t\t[543,\t\t417,\t\t0,\t\t0.0006614876033057851,\t\t0.027991756419545603,\t\t1981.0,\t\t1981.0,\t\t1981.0,\t\t0,\t\t4,\t\t1,\t\t-360,\t\t4.002\t\t],\n\t\t[418,\t\t506,\t\t0,\t\t0.0009395041322314049,\t\t0.009939101917118,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t2.842\t\t],\n\t\t[220,\t\t157,\t\t0,\t\t0.004599549861495845,\t\t0.165112574384632,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t26.566999999999997\t\t],\n\t\t[295,\t\t419,\t\t0,\t\t0.0012023140495867769,\t\t0.012719392565946,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t3.637\t\t],\n\t\t[295,\t\t420,\t\t0,\t\t0.0008003305785123967,\t\t0.008466771900532,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t2.421\t\t],\n\t\t[541,\t\t62,\t\t0,\t\t0.05133355371900827,\t\t0.0339414035471236,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t38.821\t\t],\n\t\t[52,\t\t421,\t\t0,\t\t0.00013885041551246538,\t\t0.004984389831631239,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.802\t\t],\n\t\t[60,\t\t160,\t\t0,\t\t6.128808864265928e-05,\t\t0.000550023067454096,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0
,\t\t2,\t\t1,\t\t-360,\t\t0.177\t\t],\n\t\t[535,\t\t161,\t\t0,\t\t3.735537190082645e-05,\t\t0.00039518596644331203,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t0.113\t\t],\n\t\t[267,\t\t282,\t\t0,\t\t0.0065652700831024926,\t\t0.235677115717012,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t37.921\t\t],\n\t\t[52,\t\t365,\t\t0,\t\t0.007655586334279779,\t\t0.15458444922992,\t\t1283.0,\t\t1283.0,\t\t1283.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t33.164\t\t],\n\t\t[28,\t\t27,\t\t0,\t\t0.015726942148760328,\t\t0.041594197273402404,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t23.787\t\t],\n\t\t[30,\t\t201,\t\t0,\t\t0.009128289473684211,\t\t0.327683234253536,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t52.725\t\t],\n\t\t[422,\t\t81,\t\t0,\t\t0.0004226685133887349,\t\t0.13655487952674,\t\t5134.0,\t\t5134.0,\t\t5134.0,\t\t0,\t\t6,\t\t1,\t\t-360,\t\t7.324\t\t],\n\t\t[119,\t\t425,\t\t0,\t\t0.003579120498614958,\t\t0.1284816595874996,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t20.673000000000002\t\t],\n\t\t[423,\t\t425,\t\t0,\t\t0.0006518351800554017,\t\t0.0233992864289392,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t3.765\t\t],\n\t\t[424,\t\t425,\t\t0,\t\t0.005922957063711911,\t\t0.21261965153389198,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t34.211\t\t],\n\t\t[426,\t\t428,\t\t0,\t\t0.013948429752066116,\t\t0.14756174042535197,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t42.193999999999996\t\t],\n\t\t[427,\t\t428,\t\t0,\t\t0.0002664462809917355,\t\t0.0028187600792304794,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t0.8059999999999999\t\t],\n\t\t[19,\t\t428,\t\t0,\t\t0.023607603305785128,\t\t0.24974703912892798,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t71.413\t\t],\n\t\t[45,\t\t429,\t\t0,\t\t0.02562314049586777,\t\t0.067767398802972,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t38.755\t\t],\n\t\t[44,\t\t429,\t\t0,\t\t5.289256198347107e-05,\t\t0.00013988883767892,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.08\t\t],\n\t\t[505,\t\t429,\t\t0,\t\t0.006012561983471073,\t\t0.015901863623161996,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t9.094\t\t],\n\t\t[231,\t\t431,\t\t0,\t\t0.011677285318559558,\t\t0.4191859418495199,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t67.44800000000001\t\t],\n\t\t[190,\t\t431,\t\t0,\t\t0.009600761772853185,\t\t0.34464383257266795,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t55.45399999999999\t\t],\n\t\t[430,\t\t431,\t\t0,\t\t0.0028100761772853187,\t\t0.1008748520662472,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t16.230999999999998\t\t],\n\t\t[286,\t\t433,\t\t0,\t\t0.01568694214876033,\t\t0.16595362535967603,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t47.453\t\t],\n\t\t[432,\t\t433,\t\t0,\t\t0.00010049586776859504,\t\t0.00106315516636076,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.304\t\t],\n\t\t[506,\t\t433,\t\t0,\t\t0.0065904132231404955,\t\t0.06972059669946801,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t19.936\t\t],\n\t\t[23,\t\t434,\t\t0,\t\t0.02613685950413223,\t\t0.069126069139116,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t39.532\t\t],\n\t\t[400,\t\t434,\t\t0,\t\t0.008155371900826446,\t\t0.021569110159669603,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t12.335\t\t],\n\t\t[500,\t\t434,\t\t0,\t\t0.006338512396694216,\t\t0.
0167639285853336,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t9.587\t\t],\n\t\t[32,\t\t436,\t\t0,\t\t0.0044813019390581715,\t\t0.16086776359270402,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t25.884\t\t],\n\t\t[435,\t\t436,\t\t0,\t\t0.0006634349030470914,\t\t0.023815688073266,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t3.832\t\t],\n\t\t[78,\t\t436,\t\t0,\t\t0.00897680055401662,\t\t0.32224515307884394,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t51.85\t\t],\n\t\t[86,\t\t438,\t\t0,\t\t0.014693213296398892,\t\t0.52745036936438,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t84.868\t\t],\n\t\t[437,\t\t438,\t\t0,\t\t1.0387811634349031e-05,\t\t0.0003728969948845,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.06\t\t],\n\t\t[221,\t\t438,\t\t0,\t\t0.002280124653739612,\t\t0.081850890377238,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t13.17\t\t],\n\t\t[207,\t\t439,\t\t0,\t\t0.055703801652892564,\t\t0.0368309823503996,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t42.126000000000005\t\t],\n\t\t[516,\t\t439,\t\t0,\t\t0.05448462809917355,\t\t0.03602487292327441,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t41.20399999999999\t\t],\n\t\t[513,\t\t439,\t\t0,\t\t0.046726611570247926,\t\t0.0308953241066316,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t35.336999999999996\t\t],\n\t\t[181,\t\t441,\t\t0,\t\t0.040805289256198356,\t\t0.10792074104825197,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t61.718\t\t],\n\t\t[440,\t\t441,\t\t0,\t\t0.0001322314049586777,\t\t0.000349722094197784,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.2\t\t],\n\t\t[504,\t\t441,\t\t0,\t\t0.05916099173553719,\t\t0.156467413554364,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t89.48100000000001\t\t],\n\t\t[135,\t\t442,\t\t0,\t\t0.004956890581717451,\t\t0.177940231009092,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t28.631\t\t],\n\t\t[109,\t\t442,\t\t0,\t\t0.0015380886426592797,\t\t0.055213615042649204,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t8.884\t\t],\n\t\t[112,\t\t442,\t\t0,\t\t0.0027304362880886425,\t\t0.09801597510545401,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t15.770999999999999\t\t],\n\t\t[113,\t\t443,\t\t0,\t\t0.0019885734072022164,\t\t0.07138491472072879,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t11.485999999999999\t\t],\n\t\t[132,\t\t443,\t\t0,\t\t0.006788434903047091,\t\t0.24368818615747198,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t39.21\t\t],\n\t\t[107,\t\t443,\t\t0,\t\t2.2333795013850418e-05,\t\t0.000801728539002036,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.129\t\t],\n\t\t[444,\t\t445,\t\t0,\t\t7.877423822714682e-05,\t\t0.00282780221121528,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.455\t\t],\n\t\t[112,\t\t445,\t\t0,\t\t0.002816135734072022,\t\t0.101092375313206,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t16.266\t\t],\n\t\t[109,\t\t445,\t\t0,\t\t0.0014354224376731304,\t\t0.0515281497432104,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t8.291\t\t],\n\t\t[119,\t\t447,\t\t0,\t\t0.005212690443213296,\t\t0.74849127803204,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t60.217\t\t],\n\t\t[100,\t\t447,\t\t0,\t\t0.0050695117728531865,\t\t0.7279322237145921,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t
58.563\t\t],\n\t\t[446,\t\t447,\t\t0,\t\t2.9518698060941832e-05,\t\t0.00423859584186224,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t0.341\t\t],\n\t\t[124,\t\t448,\t\t0,\t\t6.509695290858726e-05,\t\t0.00233682116794768,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.376\t\t],\n\t\t[125,\t\t448,\t\t0,\t\t0.00615148891966759,\t\t0.22082338542026803,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t35.531\t\t],\n\t\t[131,\t\t448,\t\t0,\t\t3.912742382271468e-05,\t\t0.0014045786807313759,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.226\t\t],\n\t\t[449,\t\t450,\t\t0,\t\t0.0023614958448753462,\t\t0.08477191683710039,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t13.64\t\t],\n\t\t[173,\t\t450,\t\t0,\t\t0.002862361495844876,\t\t0.10275176694050518,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t16.533\t\t],\n\t\t[184,\t\t450,\t\t0,\t\t0.004022853185595568,\t\t0.14441057621844403,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t23.236\t\t],\n\t\t[144,\t\t451,\t\t0,\t\t0.007672727272727273,\t\t0.020292624515794402,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t11.605\t\t],\n\t\t[140,\t\t451,\t\t0,\t\t0.006991074380165291,\t\t0.018489807120219602,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t10.574000000000002\t\t],\n\t\t[514,\t\t451,\t\t0,\t\t0.01149289256198347,\t\t0.030396095817207994,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t17.383\t\t],\n\t\t[537,\t\t585,\t\t0,\t\t0.05072595041322314,\t\t0.134158641165824,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t76.723\t\t],\n\t\t[141,\t\t585,\t\t0,\t\t0.007994710743801653,\t\t0.0211441978151932,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t12.092\t\t],\n\t\t[584,\t\t585,\t\t0,\t\t9.256198347107438e-05,\t\t0.000244805465938352,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.14\t\t],\n\t\t[522,\t\t454,\t\t0,\t\t0.0035008264462809916,\t\t0.0092588924438956,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t5.295\t\t],\n\t\t[144,\t\t454,\t\t0,\t\t0.00452892561983471,\t\t0.011977981726290799,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t6.85\t\t],\n\t\t[453,\t\t454,\t\t0,\t\t0.001114710743801653,\t\t0.0029481572540882,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t1.686\t\t],\n\t\t[199,\t\t456,\t\t0,\t\t0.013063140495867768,\t\t0.0086372614214612,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t9.879\t\t],\n\t\t[140,\t\t456,\t\t0,\t\t0.005061818181818182,\t\t0.013387361765852802,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t7.656000000000001\t\t],\n\t\t[455,\t\t456,\t\t0,\t\t0.0011365289256198346,\t\t0.00300586139962416,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t1.719\t\t],\n\t\t[537,\t\t456,\t\t0,\t\t0.039058512396694216,\t\t0.025825228046024003,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t29.538\t\t],\n\t\t[538,\t\t457,\t\t0,\t\t0.027927272727272728,\t\t0.0184653265736368,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t21.12\t\t],\n\t\t[153,\t\t457,\t\t0,\t\t0.030093223140495867,\t\t0.019897438549384,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t22.758000000000003\t\t],\n\t\t[176,\t\t457,\t\t0,\t\t0.004579173553719009,\t\t0.0030277190305137603,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t3.463\t\t],\n\t\t[524,\t\t459,\t\t0,\t\t0.004318677685950414,\t\t0.011421923596476799,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\
t1,\t\t1,\t\t-360,\t\t6.532\t\t],\n\t\t[458,\t\t459,\t\t0,\t\t0.001993388429752066,\t\t0.0052720605700488,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t3.015\t\t],\n\t\t[134,\t\t459,\t\t0,\t\t0.011813553719008265,\t\t0.031244171895617998,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t17.868\t\t],\n\t\t[460,\t\t461,\t\t0,\t\t6.611570247933885e-05,\t\t0.000174861047098892,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.1\t\t],\n\t\t[150,\t\t461,\t\t0,\t\t0.008018512396694214,\t\t0.021207147792120403,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t12.128\t\t],\n\t\t[149,\t\t461,\t\t0,\t\t0.005586115702479339,\t\t0.0147740098693748,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t8.449\t\t],\n\t\t[521,\t\t463,\t\t0,\t\t0.014348429752066114,\t\t0.009487086110365599,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t10.850999999999999\t\t],\n\t\t[462,\t\t463,\t\t0,\t\t0.007197355371900825,\t\t0.0047588433967958406,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t5.443\t\t],\n\t\t[538,\t\t463,\t\t0,\t\t0.012211570247933883,\t\t0.0080742088497664,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t9.235\t\t],\n\t\t[110,\t\t464,\t\t0,\t\t0.0025753116343490306,\t\t0.0924473799817492,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t14.875\t\t],\n\t\t[90,\t\t464,\t\t0,\t\t0.007328947368421053,\t\t0.26309125979076,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t42.332\t\t],\n\t\t[165,\t\t464,\t\t0,\t\t0.002152527700831025,\t\t0.0772704722900764,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t12.433\t\t],\n\t\t[458,\t\t465,\t\t0,\t\t0.002003305785123967,\t\t0.0052982897270776,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t3.03\t\t],\n\t\t[134,\t\t465,\t\t0,\t\t0.011838677685950413,\t\t0.031310619093534,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t17.906\t\t],\n\t\t[524,\t\t465,\t\t0,\t\t0.004293553719008264,\t\t0.0113554763986092,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t6.494\t\t],\n\t\t[466,\t\t467,\t\t0,\t\t0.0023509349030470914,\t\t0.084392804892244,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t13.579\t\t],\n\t\t[110,\t\t467,\t\t0,\t\t0.0025337603878116343,\t\t0.09095579200221118,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t14.635\t\t],\n\t\t[165,\t\t467,\t\t0,\t\t0.0022891274238227145,\t\t0.08217406777274441,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t13.222000000000001\t\t],\n\t\t[468,\t\t469,\t\t0,\t\t0.0005269421487603305,\t\t0.0013936425453786,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.797\t\t],\n\t\t[541,\t\t469,\t\t0,\t\t0.022390743801652895,\t\t0.05921844221026801,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t33.866\t\t],\n\t\t[490,\t\t469,\t\t0,\t\t0.028243305785123966,\t\t0.07469714209944801,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t42.718\t\t],\n\t\t[263,\t\t471,\t\t0,\t\t0.0371900826446281,\t\t0.0245898347482832,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t28.125\t\t],\n\t\t[470,\t\t471,\t\t0,\t\t0.001570909090909091,\t\t0.0010386746197682802,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t1.188\t\t],\n\t\t[534,\t\t471,\t\t0,\t\t0.024497190082644622,\t\t0.0161973787927468,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t18.526\t\t],\n\t\t[136,\t\t472,\t\t0,\t\t0.0007079293628808865,\t\t0.025412930201351602,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t
\t1,\t\t1,\t\t-360,\t\t4.0889999999999995\t\t],\n\t\t[110,\t\t472,\t\t0,\t\t0.00019511772853185596,\t\t0.0070042485539216805,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t1.127\t\t],\n\t\t[251,\t\t472,\t\t0,\t\t4.207063711911357e-05,\t\t0.00151023282928764,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.243\t\t],\n\t\t[226,\t\t474,\t\t0,\t\t0.017639669421487602,\t\t0.011663231841509601,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t13.34\t\t],\n\t\t[473,\t\t474,\t\t0,\t\t0.003467107438016529,\t\t0.00916971330986216,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t5.244\t\t],\n\t\t[257,\t\t474,\t\t0,\t\t0.020264462809917356,\t\t0.053594910935781594,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t2,\t\t1,\t\t-360,\t\t30.65\t\t],\n\t\t[6,\t\t474,\t\t0,\t\t0.08066247933884299,\t\t0.05333349367016,\t\t248.0,\t\t248.0,\t\t248.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t61.001000000000005\t\t],\n\t\t[299,\t\t475,\t\t0,\t\t0.013238227146814403,\t\t0.47521993028123993,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t76.464\t\t],\n\t\t[3,\t\t475,\t\t0,\t\t0.0002794321329639889,\t\t0.010030929162389441,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t1.614\t\t],\n\t\t[210,\t\t475,\t\t0,\t\t0.0001481994459833795,\t\t0.00531999712702368,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.856\t\t],\n\t\t[297,\t\t476,\t\t0,\t\t0.0193500826446281,\t\t0.05117658265464801,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t29.267\t\t],\n\t\t[296,\t\t476,\t\t0,\t\t0.005596694214876033,\t\t0.014801987636898,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t8.465\t\t],\n\t\t[295,\t\t476,\t\t0,\t\t0.0009474380165289256,\t\t0.00250575880492432,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t1.433\t\t],\n\t\t[313,\t\t478,\t\t0,\t\t0.008696849030470914,\t\t0.31219557906752804,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t50.233000000000004\t\t],\n\t\t[477,\t\t478,\t\t0,\t\t1.5235457063711912e-05,\t\t0.0005469155924977479,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.08800000000000001\t\t],\n\t\t[245,\t\t478,\t\t0,\t\t0.005264542936288089,\t\t0.188984197007248,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t30.408\t\t],\n\t\t[479,\t\t481,\t\t0,\t\t0.028420495867768597,\t\t0.07516576970575199,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t42.986000000000004\t\t],\n\t\t[565,\t\t481,\t\t0,\t\t0.024842314049586776,\t\t0.065702289836964,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t37.574\t\t],\n\t\t[480,\t\t481,\t\t0,\t\t7.735537190082645e-05,\t\t0.000204587425105844,\t\t495.0,\t\t495.0,\t\t495.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.11699999999999999\t\t],\n\t\t[415,\t\t482,\t\t0,\t\t0.011021814404432133,\t\t0.0989140353680364,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t31.831\t\t],\n\t\t[56,\t\t482,\t\t0,\t\t0.002630886426592798,\t\t0.0236105947261788,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t7.598\t\t],\n\t\t[409,\t\t482,\t\t0,\t\t0.0007635041551246537,\t\t0.0068519822810072005,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t2.205\t\t],\n\t\t[483,\t\t484,\t\t0,\t\t9.037396121883656e-05,\t\t0.000811050963873968,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.261\t\t],\n\t\t[3,\t\t484,\t\t0,\t\t0.010022160664819944,\t\t0.08994275516621358,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t28.944000000000003\t\t],\n\t\t[301,\t\t484,\t\t0,\t\t0.009665166
20498615,\t\t0.08673894848517479,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t27.913\t\t],\n\t\t[233,\t\t485,\t\t0,\t\t0.01410180055401662,\t\t0.1265550251138996,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t40.726\t\t],\n\t\t[392,\t\t485,\t\t0,\t\t0.00914819944598338,\t\t0.0820994883738036,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t26.42\t\t],\n\t\t[391,\t\t485,\t\t0,\t\t8.518005540166207e-05,\t\t0.000764438839512864,\t\t856.0,\t\t856.0,\t\t856.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.24600000000000002\t\t],\n\t\t[579,\t\t488,\t\t0,\t\t0.004636473829194215,\t\t0.11036180126571601,\t\t1486.0,\t\t1486.0,\t\t1486.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t21.038\t\t],\n\t\t[486,\t\t488,\t\t0,\t\t0.00016969696969690082,\t\t0.00403929018798184,\t\t1486.0,\t\t1486.0,\t\t1486.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.77\t\t],\n\t\t[487,\t\t488,\t\t0,\t\t0.00014567493112954544,\t\t0.00346749456396992,\t\t1486.0,\t\t1486.0,\t\t1486.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.6609999999999999\t\t],\n\t\t[270,\t\t489,\t\t0,\t\t0.0001745152354570637,\t\t0.0062646695140596,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t1.008\t\t],\n\t\t[331,\t\t489,\t\t0,\t\t0.003002943213296399,\t\t0.10779830627119119,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t17.345\t\t],\n\t\t[396,\t\t489,\t\t0,\t\t0.01124792243767313,\t\t0.40377286606072005,\t\t1711.0,\t\t1711.0,\t\t1711.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t64.968\t\t],\n\t\t[519,\t\t253,\t\t0,\t\t0.013353485337561985,\t\t0.141267767926912,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t40.394293146100004\t\t],\n\t\t[382,\t\t349,\t\t0,\t\t0.009091647380263157,\t\t1.30547149138788,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t105.02671053600001\t\t],\n\t\t[349,\t\t351,\t\t0,\t\t0.0005858117819605263,\t\t0.0841168325920224,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t6.76729770521\t\t],\n\t\t[459,\t\t465,\t\t0,\t\t1.578788789911157e-05,\t\t0.00016702153987596,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.047758360894800005\t\t],\n\t\t[549,\t\t550,\t\t0,\t\t3.680432518409091e-05,\t\t0.000389356391787088,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.111333083682\t\t],\n\t\t[550,\t\t551,\t\t0,\t\t5.755645674710744e-05,\t\t0.0006088951287918401,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.17410828165999997\t\t],\n\t\t[194,\t\t195,\t\t0,\t\t1.7560672583171745e-05,\t\t0.00252154053805592,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.202860889681\t\t],\n\t\t[247,\t\t248,\t\t0,\t\t2.1755213937811637e-05,\t\t0.0031238355819477198,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.25131623141\t\t],\n\t\t[2,\t\t294,\t\t0,\t\t2.3531392658518004e-05,\t\t0.003378877444715,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.271834647991\t\t],\n\t\t[549,\t\t551,\t\t0,\t\t9.265809538429751e-05,\t\t0.0009802386406577602,\t\t991.0,\t\t991.0,\t\t991.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.28029073853799996\t\t],\n\t\t[54,\t\t365,\t\t0,\t\t2.573045189134349e-05,\t\t0.00369464080598484,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.297238180249\t\t],\n\t\t[131,\t\t265,\t\t0,\t\t2.7616389041343487e-05,\t\t0.00396544290388756,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.319024526206\t\t],\n\t\t[91,\t\t92,\t\t0,\t\t2.8945628197853184e-05,\t\t0.0041563086239824396,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.33437989694200004\t\t],\n\t\t[2
47,\t\t249,\t\t0,\t\t3.098840072160664e-05,\t\t0.00444963074500788,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.357978005136\t\t],\n\t\t[186,\t\t191,\t\t0,\t\t3.1591661821191135e-05,\t\t0.00453625312865552,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.36494687735799997\t\t],\n\t\t[129,\t\t173,\t\t0,\t\t3.202671277479225e-05,\t\t0.00459872218332188,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.369972585975\t\t],\n\t\t[96,\t\t202,\t\t0,\t\t3.5971247867797784e-05,\t\t0.00516511877739804,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.415539855369\t\t],\n\t\t[53,\t\t320,\t\t0,\t\t3.784209581142659e-05,\t\t0.00543375421308236,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.437151890814\t\t],\n\t\t[24,\t\t396,\t\t0,\t\t4.144748602818559e-05,\t\t0.005951452925597279,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.47880135859800005\t\t],\n\t\t[133,\t\t156,\t\t0,\t\t4.431754564044322e-05,\t\t0.0063635653674415605,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.511956287238\t\t],\n\t\t[442,\t\t452,\t\t0,\t\t4.483572190450138e-05,\t\t0.006437970402313801,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.517942259441\t\t],\n\t\t[445,\t\t452,\t\t0,\t\t4.490753296371191e-05,\t\t0.0064482817668697215,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.518771820797\t\t],\n\t\t[247,\t\t250,\t\t0,\t\t4.594910768732687e-05,\t\t0.00659784169268824,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.530804092004\t\t],\n\t\t[187,\t\t195,\t\t0,\t\t4.755760376239612e-05,\t\t0.006828805970367921,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.549385438663\t\t],\n\t\t[216,\t\t236,\t\t0,\t\t5.03353075283241e-05,\t\t0.00722765701751724,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.581473472567\t\t],\n\t\t[244,\t\t389,\t\t0,\t\t5.1633313019736845e-05,\t\t0.007414037889302401,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.596468032004\t\t],\n\t\t[394,\t\t406,\t\t0,\t\t5.6346419007686985e-05,\t\t0.008090793734075721,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.650913832377\t\t],\n\t\t[442,\t\t445,\t\t0,\t\t6.388070648310249e-05,\t\t0.00917264360085512,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.737949921293\t\t],\n\t\t[442,\t\t444,\t\t0,\t\t6.584378362735456e-05,\t\t0.00945452224616264,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.760627388463\t\t],\n\t\t[198,\t\t472,\t\t0,\t\t8.37554210498615e-05,\t\t0.0120264578966664,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.967542623967\t\t],\n\t\t[464,\t\t467,\t\t0,\t\t8.460287496468144e-05,\t\t0.01214814397621276,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t0.977332411594\t\t],\n\t\t[198,\t\t251,\t\t0,\t\t8.83613182396122e-05,\t\t0.012687819608389479,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t1.0207499483\t\t],\n\t\t[112,\t\t143,\t\t0,\t\t9.049653833033241e-05,\t\t0.012994416294241841,\t\t3423.0,\t\t3423.0,\t\t3423.0,\t\t0,\t\t1,\t\t1,\t\t-360,\t\t1.04541601079\t\t],\n\t\t[2,\t\t490,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[5,\t\t491,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[10,\t\t492,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[12,\t\t493,\
t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[13,\t\t494,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[15,\t\t495,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[18,\t\t496,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[20,\t\t497,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[22,\t\t498,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[24,\t\t499,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[26,\t\t500,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[30,\t\t501,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[32,\t\t502,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[37,\t\t503,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[42,\t\t504,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[46,\t\t505,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[52,\t\t506,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[56,\t\t507,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[61,\t\t508,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[68,\t\t509,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[69,\t\t510,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[74,\t\t511,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[78,\t\t512,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[86,\t\t513,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[87,\t\t514,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[94,\t\t515,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[95,\t\t516,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[96,\t\t517,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[99,\t\t518,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[100,\t\t519,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[104,\t\t520,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[105,\t\t521,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[106,\t\t522,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[107,\t\t523,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t20
00.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[117,\t\t524,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[120,\t\t525,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[123,\t\t526,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[124,\t\t527,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[125,\t\t528,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[128,\t\t529,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[129,\t\t530,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[138,\t\t531,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[143,\t\t532,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[156,\t\t533,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[157,\t\t534,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[159,\t\t535,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[160,\t\t536,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[165,\t\t537,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[184,\t\t538,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[191,\t\t539,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[195,\t\t540,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[201,\t\t541,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[220,\t\t542,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[231,\t\t543,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[232,\t\t544,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[233,\t\t545,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[236,\t\t546,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[245,\t\t547,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[246,\t\t548,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[248,\t\t549,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[249,\t\t550,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[250,\t\t551,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[259,\t\t552,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[261,\t\t553,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0
,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[262,\t\t554,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[265,\t\t555,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[270,\t\t556,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[277,\t\t557,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[279,\t\t558,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[280,\t\t559,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[290,\t\t560,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[301,\t\t561,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[305,\t\t562,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[306,\t\t563,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[310,\t\t564,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[313,\t\t565,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[315,\t\t566,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[320,\t\t567,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[330,\t\t568,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[332,\t\t569,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[334,\t\t570,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[336,\t\t571,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[349,\t\t572,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[351,\t\t573,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[358,\t\t574,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[360,\t\t575,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[380,\t\t576,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[382,\t\t577,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[383,\t\t578,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[389,\t\t579,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[401,\t\t580,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[402,\t\t581,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[409,\t\t582,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[415,\t\t583,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,
\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[444,\t\t584,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t],\n\t\t[452,\t\t585,\t\t0,\t\t0.005,\t\t0.0,\t\t2000.0,\t\t2000.0,\t\t2000.0,\t\t1.0,\t\t0,\t\t1,\t\t-360,\t\t360\t\t]\n\t])\n\tppc[\"gen_control\"] = array([\n\t\t[586,\t\t1,\t\t0.08658028904199107,\t\t4.329014452099554,\t\t0, 0, 0],\n\t\t[589,\t\t1,\t\t0.010042676909098597,\t\t0.5021338454549299,\t\t0, 0, 0],\n\t\t[590,\t\t1,\t\t0.012095775674984046,\t\t0.6047887837492023,\t\t0, 0, 0],\n\t\t[593,\t\t1,\t\t0.0017666198683200384,\t\t0.08833099341600192,\t\t0, 0, 0],\n\t\t[595,\t\t1,\t\t1.50560576164933,\t\t75.2802880824665,\t\t0, 0, 0],\n\t\t[598,\t\t1,\t\t0.0038197186342054878,\t\t0.1909859317102744,\t\t0, 0, 0],\n\t\t[599,\t\t1,\t\t0.0029602819415092537,\t\t0.1480140970754627,\t\t0, 0, 0],\n\t\t[602,\t\t1,\t\t0.007830423200121252,\t\t0.39152116000606263,\t\t0, 0, 0],\n\t\t[603,\t\t1,\t\t1.0997606567649967,\t\t54.98803283824984,\t\t0, 0, 0],\n\t\t[607,\t\t1,\t\t0.5729577951308232,\t\t28.64788975654116,\t\t0, 0, 0],\n\t\t[608,\t\t1,\t\t0.0076394372684109755,\t\t0.3819718634205488,\t\t0, 0, 0],\n\t\t[609,\t\t1,\t\t0.0057932399285449895,\t\t0.2896619964272495,\t\t0, 0, 0],\n\t\t[612,\t\t1,\t\t0.00954929658551372,\t\t0.477464829275686,\t\t0, 0, 0],\n\t\t[614,\t\t1,\t\t0.00954929658551372,\t\t0.477464829275686,\t\t0, 0, 0],\n\t\t[616,\t\t1,\t\t0.0046154933496649645,\t\t0.23077466748324824,\t\t0, 0, 0],\n\t\t[617,\t\t1,\t\t0.04360845440717932,\t\t2.1804227203589663,\t\t0, 0, 0],\n\t\t[618,\t\t1,\t\t0.010631550198538607,\t\t0.5315775099269304,\t\t0, 0, 0],\n\t\t[619,\t\t1,\t\t0.037560566569687294,\t\t1.8780283284843649,\t\t0, 0, 0],\n\t\t[624,\t\t1,\t\t0.004297183463481174,\t\t0.21485917317405873,\t\t0, 0, 0],\n\t\t[629,\t\t1,\t\t0.023968734429639437,\t\t1.198436721481972,\t\t0, 0, 0],\n\t\t[632,\t\t1,\t\t0.01435577586688896,\t\t0.717788793344448,\t\t0, 0, 0],\n\t\t[637,\t\t1,\t\t0.017093240888069558,\t\t0.854662044403478,\t\t0, 0, 0],\n\t\t[638,\t\t1,\t\t0.02048324117592693,\t\t1.0241620587963465,\t\t0, 0, 0],\n\t\t[640,\t\t1,\t\t0.0038197186342054878,\t\t0.1909859317102744,\t\t0, 0, 0],\n\t\t[641,\t\t1,\t\t0.0040107045659157625,\t\t0.20053522829578813,\t\t0, 0, 0],\n\t\t[642,\t\t1,\t\t0.00919915571071155,\t\t0.4599577855355775,\t\t0, 0, 0],\n\t\t[643,\t\t1,\t\t0.27279157245950864,\t\t13.639578622975431,\t\t0, 0, 0],\n\t\t[647,\t\t1,\t\t0.00445633840657307,\t\t0.2228169203286535,\t\t0, 0, 0],\n\t\t[652,\t\t1,\t\t0.00746436683100989,\t\t0.37321834155049455,\t\t0, 0, 0],\n\t\t[655,\t\t1,\t\t0.019576058000303126,\t\t0.9788029000151565,\t\t0, 0, 0],\n\t\t[663,\t\t1,\t\t0.00238732414637843,\t\t0.1193662073189215,\t\t0, 0, 0],\n\t\t[666,\t\t1,\t\t0.00919915571071155,\t\t0.4599577855355775,\t\t0, 0, 0],\n\t\t[670,\t\t1,\t\t0.0076394372684109755,\t\t0.3819718634205488,\t\t0, 0, 0],\n\t\t[672,\t\t1,\t\t0.010536057232683471,\t\t0.5268028616341736,\t\t0, 0, 0],\n\t\t[676,\t\t1,\t\t0.11777465788800255,\t\t5.888732894400127,\t\t0, 0, 0],\n\t\t[681,\t\t1,\t\t0.0063821132179850025,\t\t0.31910566089925013,\t\t0, 0, 0],\n\t\t[683,\t\t1,\t\t0.008753521870054244,\t\t0.4376760935027122,\t\t0, 0, 0],\n\t\t[687,\t\t1,\t\t0.42303383873825773,\t\t21.151691936912886,\t\t0, 0, 0],\n\t\t[694,\t\t1,\t\t0.005220282133414166,\t\t0.2610141066707083,\t\t0, 0, 0],\n\t\t[695,\t\t1,\t\t0.004679155326901723,\t\t0.23395776634508614,\t\t0, 0, 0],\n\t\t[697,\t\t1,\t\t0.0036923946797319715,\t\t0.1846197339865986,\t\t0, 0, 
0],\n\t\t[698,\t\t1,\t\t0.0038197186342054878,\t\t0.1909859317102744,\t\t0, 0, 0],\n\t\t[702,\t\t1,\t\t0.023363945645890238,\t\t1.168197282294512,\t\t0, 0, 0],\n\t\t[705,\t\t1,\t\t0.005411268065124442,\t\t0.27056340325622213,\t\t0, 0, 0],\n\t\t[707,\t\t1,\t\t0.010822536130248884,\t\t0.5411268065124443,\t\t0, 0, 0],\n\t\t[714,\t\t1,\t\t0.00477464829275686,\t\t0.238732414637843,\t\t0, 0, 0],\n\t\t[716,\t\t1,\t\t1.5915494309189534e-05,\t\t0.0007957747154594768,\t\t0, 0, 0],\n\t\t[717,\t\t1,\t\t0.0017507043740108488,\t\t0.08753521870054244,\t\t0, 0, 0],\n\t\t[722,\t\t1,\t\t0.006589014644004467,\t\t0.3294507322002233,\t\t0, 0, 0],\n\t\t[724,\t\t1,\t\t0.0019257748114119334,\t\t0.09628874057059668,\t\t0, 0, 0],\n\t\t[730,\t\t1,\t\t0.10077690996578814,\t\t5.038845498289407,\t\t0, 0, 0],\n\t\t[732,\t\t1,\t\t0.004647324338283344,\t\t0.2323662169141672,\t\t0, 0, 0],\n\t\t[735,\t\t1,\t\t0.013496339174192726,\t\t0.6748169587096363,\t\t0, 0, 0],\n\t\t[741,\t\t1,\t\t0.0340591578216656,\t\t1.7029578910832803,\t\t0, 0, 0],\n\t\t[742,\t\t1,\t\t0.0028647889756541157,\t\t0.14323944878270578,\t\t0, 0, 0],\n\t\t[743,\t\t1,\t\t0.44881693951914486,\t\t22.440846975957243,\t\t0, 0, 0],\n\t\t[747,\t\t1,\t\t0.0039788735772973835,\t\t0.1989436788648692,\t\t0, 0, 0],\n\t\t[749,\t\t1,\t\t0.0025464790894703256,\t\t0.12732395447351627,\t\t0, 0, 0],\n\t\t[750,\t\t1,\t\t0.028902537665488188,\t\t1.4451268832744095,\t\t0, 0, 0],\n\t\t[753,\t\t1,\t\t0.049624511256052974,\t\t2.4812255628026487,\t\t0, 0, 0],\n\t\t[761,\t\t1,\t\t0.004997465213085514,\t\t0.2498732606542757,\t\t0, 0, 0],\n\t\t[762,\t\t1,\t\t0.3517324242330887,\t\t17.586621211654435,\t\t0, 0, 0],\n\t\t[765,\t\t1,\t\t0.018780283284843647,\t\t0.9390141642421824,\t\t0, 0, 0],\n\t\t[767,\t\t1,\t\t0.0035650707252584553,\t\t0.17825353626292276,\t\t0, 0, 0],\n\t\t[772,\t\t1,\t\t0.002992112930127632,\t\t0.1496056465063816,\t\t0, 0, 0],\n\t\t[774,\t\t1,\t\t0.010663381187156987,\t\t0.5331690593578494,\t\t0, 0, 0],\n\t\t[777,\t\t1,\t\t0.012573240504259732,\t\t0.6286620252129866,\t\t0, 0, 0],\n\t\t[778,\t\t1,\t\t0.004679155326901723,\t\t0.23395776634508614,\t\t0, 0, 0],\n\t\t[781,\t\t1,\t\t0.4169859509007658,\t\t20.84929754503829,\t\t0, 0, 0],\n\t\t[784,\t\t1,\t\t0.4058451048843331,\t\t20.292255244216655,\t\t0, 0, 0],\n\t\t[785,\t\t1,\t\t0.00047746482927568597,\t\t0.0238732414637843,\t\t0, 0, 0],\n\t\t[788,\t\t1,\t\t0.2785211504108168,\t\t13.926057520540843,\t\t0, 0, 0],\n\t\t[789,\t\t1,\t\t0.0123185925953127,\t\t0.615929629765635,\t\t0, 0, 0],\n\t\t[791,\t\t1,\t\t0.0031830988618379067,\t\t0.15915494309189535,\t\t0, 0, 0],\n\t\t[792,\t\t1,\t\t0.009979014931861837,\t\t0.49895074659309185,\t\t0, 0, 0],\n\t\t[795,\t\t1,\t\t0.004329014452099553,\t\t0.2164507226049777,\t\t0, 0, 0],\n\t\t[800,\t\t1,\t\t0.0058091554228541795,\t\t0.290457771142709,\t\t0, 0, 0],\n\t\t[801,\t\t1,\t\t0.007957747154594767,\t\t0.3978873577297384,\t\t0, 0, 0],\n\t\t[802,\t\t1,\t\t0.07957747154594767,\t\t3.9788735772973833,\t\t0, 0, 0],\n\t\t[805,\t\t1,\t\t0.44881693951914486,\t\t22.440846975957243,\t\t0, 0, 0],\n\t\t[806,\t\t1,\t\t0.005697746962689853,\t\t0.2848873481344927,\t\t0, 0, 0],\n\t\t[808,\t\t1,\t\t0.034616200122487235,\t\t1.7308100061243619,\t\t0, 0, 0],\n\t\t[809,\t\t1,\t\t0.0039788735772973835,\t\t0.1989436788648692,\t\t0, 0, 0],\n\t\t[811,\t\t1,\t\t0.0040107045659157625,\t\t0.20053522829578813,\t\t0, 0, 0],\n\t\t[814,\t\t1,\t\t0.014164789935178685,\t\t0.7082394967589343,\t\t0, 0, 0],\n\t\t[816,\t\t1,\t\t0.012748310941660816,\t\t0.6374155470830408,\t\t0, 0, 
0],\n\t\t[817,\t\t1,\t\t0.017188733853924696,\t\t0.8594366926962349,\t\t0, 0, 0],\n\t\t[821,\t\t1,\t\t0.013130282805081364,\t\t0.6565141402540683,\t\t0, 0, 0],\n\t\t[826,\t\t1,\t\t0.018461973398659858,\t\t0.9230986699329929,\t\t0, 0, 0],\n\t\t[834,\t\t1,\t\t0.007416620348082323,\t\t0.37083101740411617,\t\t0, 0, 0],\n\t\t[835,\t\t1,\t\t0.010138169874953733,\t\t0.5069084937476867,\t\t0, 0, 0],\n\t\t[836,\t\t1,\t\t0.008116902097686661,\t\t0.4058451048843331,\t\t0, 0, 0],\n\t\t[837,\t\t1,\t\t0.15024226627874918,\t\t7.512113313937459,\t\t0, 0, 0],\n\t\t[839,\t\t1,\t\t0.011666057328635928,\t\t0.5833028664317964,\t\t0, 0, 0],\n\t\t[841,\t\t1,\t\t0.0037083101740411615,\t\t0.18541550870205808,\t\t0, 0, 0],\n\t\t[843,\t\t1,\t\t0.10599719209920229,\t\t5.2998596049601145,\t\t0, 0, 0],\n\t\t[844,\t\t1,\t\t0.012732395447351627,\t\t0.6366197723675814,\t\t0, 0, 0],\n\t\t[850,\t\t1,\t\t0.005092958178940651,\t\t0.25464790894703254,\t\t0, 0, 0],\n\t\t[851,\t\t1,\t\t0.01265281797580568,\t\t0.632640898790284,\t\t0, 0, 0],\n\t\t[853,\t\t1,\t\t0.0036923946797319715,\t\t0.1846197339865986,\t\t0, 0, 0],\n\t\t[856,\t\t1,\t\t0.011459155902616463,\t\t0.5729577951308231,\t\t0, 0, 0],\n\t\t[857,\t\t1,\t\t0.4462704604296745,\t\t22.313523021483725,\t\t0, 0, 0],\n\t\t[858,\t\t1,\t\t0.01808000153523931,\t\t0.9040000767619655,\t\t0, 0, 0],\n\t\t[860,\t\t1,\t\t0.0039788735772973835,\t\t0.1989436788648692,\t\t0, 0, 0],\n\t\t[865,\t\t1,\t\t0.0035014087480216977,\t\t0.17507043740108488,\t\t0, 0, 0],\n\t\t[867,\t\t1,\t\t0.24478030247533505,\t\t12.239015123766753,\t\t0, 0, 0],\n\t\t[869,\t\t1,\t\t0.4329014452099553,\t\t21.645072260497766,\t\t0, 0, 0],\n\t\t[870,\t\t1,\t\t0.018589297353133374,\t\t0.9294648676566688,\t\t0, 0, 0],\n\t\t[872,\t\t1,\t\t0.00716197243913529,\t\t0.3580986219567645,\t\t0, 0, 0],\n\t\t[874,\t\t1,\t\t0.006589014644004467,\t\t0.3294507322002233,\t\t0, 0, 0],\n\t\t[875,\t\t1,\t\t0.007766761222884492,\t\t0.38833806114422464,\t\t0, 0, 0],\n\t\t[882,\t\t1,\t\t0.005538592019597957,\t\t0.2769296009798979,\t\t0, 0, 0],\n\t\t[883,\t\t1,\t\t0.005729577951308231,\t\t0.28647889756541156,\t\t0, 0, 0],\n\t\t[885,\t\t1,\t\t0.15597184423005742,\t\t7.798592211502871,\t\t0, 0, 0],\n\t\t[886,\t\t1,\t\t0.8186930272647096,\t\t40.93465136323548,\t\t0, 0, 0],\n\t\t[889,\t\t1,\t\t0.0030239439187460114,\t\t0.15119719593730058,\t\t0, 0, 0],\n\t\t[890,\t\t1,\t\t0.0076394372684109755,\t\t0.3819718634205488,\t\t0, 0, 0],\n\t\t[893,\t\t1,\t\t0.00954929658551372,\t\t0.477464829275686,\t\t0, 0, 0],\n\t\t[894,\t\t1,\t\t0.025146481008519465,\t\t1.2573240504259733,\t\t0, 0, 0],\n\t\t[895,\t\t1,\t\t0.0030239439187460114,\t\t0.15119719593730058,\t\t0, 0, 0],\n\t\t[896,\t\t1,\t\t0.0038197186342054878,\t\t0.1909859317102744,\t\t0, 0, 0],\n\t\t[898,\t\t1,\t\t0.013464508185574344,\t\t0.6732254092787172,\t\t0, 0, 0],\n\t\t[902,\t\t1,\t\t0.006207042780583919,\t\t0.31035213902919595,\t\t0, 0, 0],\n\t\t[903,\t\t1,\t\t0.0031990143561470966,\t\t0.15995071780735484,\t\t0, 0, 0],\n\t\t[905,\t\t1,\t\t0.021851973686517232,\t\t1.0925986843258617,\t\t0, 0, 0],\n\t\t[906,\t\t1,\t\t0.010504226244065093,\t\t0.5252113122032547,\t\t0, 0, 0],\n\t\t[907,\t\t1,\t\t0.02142225534016911,\t\t1.0711127670084555,\t\t0, 0, 0],\n\t\t[909,\t\t1,\t\t0.005856901905781748,\t\t0.2928450952890874,\t\t0, 0, 0],\n\t\t[917,\t\t1,\t\t0.005411268065124442,\t\t0.27056340325622213,\t\t0, 0, 0],\n\t\t[918,\t\t1,\t\t0.012254930618075942,\t\t0.612746530903797,\t\t0, 0, 0],\n\t\t[920,\t\t1,\t\t0.0020371832715762603,\t\t0.10185916357881303,\t\t0, 0, 
0],\n\t\t[921,\t\t1,\t\t0.019735212943395024,\t\t0.9867606471697512,\t\t0, 0, 0],\n\t\t[922,\t\t1,\t\t0.05220282133414166,\t\t2.6101410667070835,\t\t0, 0, 0],\n\t\t[923,\t\t1,\t\t0.023236621691416718,\t\t1.161831084570836,\t\t0, 0, 0],\n\t\t[925,\t\t1,\t\t0.008276057040778557,\t\t0.4138028520389279,\t\t0, 0, 0],\n\t\t[931,\t\t1,\t\t0.03455253814525047,\t\t1.7276269072625237,\t\t0, 0, 0],\n\t\t[936,\t\t1,\t\t0.016615776058793875,\t\t0.8307888029396938,\t\t0, 0, 0],\n\t\t[937,\t\t1,\t\t0.00477464829275686,\t\t0.238732414637843,\t\t0, 0, 0],\n\t\t[939,\t\t1,\t\t1.5915494309189534e-05,\t\t0.0007957747154594768,\t\t0, 0, 0],\n\t\t[940,\t\t1,\t\t0.009421972631040205,\t\t0.47109863155201026,\t\t0, 0, 0],\n\t\t[944,\t\t1,\t\t0.004042535554534142,\t\t0.2021267777267071,\t\t0, 0, 0],\n\t\t[950,\t\t1,\t\t0.005092958178940651,\t\t0.25464790894703254,\t\t0, 0, 0],\n\t\t[952,\t\t1,\t\t0.005045211696013082,\t\t0.2522605848006541,\t\t0, 0, 0],\n\t\t[958,\t\t1,\t\t0.010615634704229418,\t\t0.530781735211471,\t\t0, 0, 0],\n\t\t[959,\t\t1,\t\t0.007241549910681238,\t\t0.3620774955340619,\t\t0, 0, 0],\n\t\t[960,\t\t1,\t\t0.004217605991935227,\t\t0.21088029959676136,\t\t0, 0, 0],\n\t\t[963,\t\t1,\t\t0.2785211504108168,\t\t13.926057520540843,\t\t0, 0, 0],\n\t\t[965,\t\t1,\t\t0.11204507993669433,\t\t5.602253996834716,\t\t0, 0, 0],\n\t\t[967,\t\t1,\t\t0.01193662073189215,\t\t0.5968310365946076,\t\t0, 0, 0],\n\t\t[969,\t\t1,\t\t0.018111832523857688,\t\t0.9055916261928845,\t\t0, 0, 0],\n\t\t[971,\t\t1,\t\t0.0031830988618379067,\t\t0.15915494309189535,\t\t0, 0, 0],\n\t\t[978,\t\t1,\t\t0.0007321127382227185,\t\t0.03660563691113593,\t\t0, 0, 0],\n\t\t[982,\t\t1,\t\t0.0015756339366097638,\t\t0.07878169683048819,\t\t0, 0, 0],\n\t\t[983,\t\t1,\t\t0.01400563499208679,\t\t0.7002817496043395,\t\t0, 0, 0],\n\t\t[984,\t\t1,\t\t0.14801409707546268,\t\t7.400704853773133,\t\t0, 0, 0],\n\t\t[985,\t\t1,\t\t0.0035014087480216977,\t\t0.17507043740108488,\t\t0, 0, 0],\n\t\t[986,\t\t1,\t\t0.0017825353626292277,\t\t0.08912676813146138,\t\t0, 0, 0],\n\t\t[987,\t\t1,\t\t0.02618098813861678,\t\t1.3090494069308392,\t\t0, 0, 0],\n\t\t[988,\t\t1,\t\t0.0008116902097686662,\t\t0.04058451048843331,\t\t0, 0, 0],\n\t\t[993,\t\t1,\t\t0.06238873769202297,\t\t3.119436884601149,\t\t0, 0, 0],\n\t\t[994,\t\t1,\t\t0.010504226244065093,\t\t0.5252113122032547,\t\t0, 0, 0],\n\t\t[995,\t\t1,\t\t0.0006684507609859605,\t\t0.033422538049298026,\t\t0, 0, 0],\n\t\t[997,\t\t1,\t\t0.005984225860255264,\t\t0.2992112930127632,\t\t0, 0, 0],\n\t\t[999,\t\t1,\t\t0.004965634224467135,\t\t0.24828171122335674,\t\t0, 0, 0],\n\t\t[1002,\t\t1,\t\t0.0031512678732195276,\t\t0.15756339366097638,\t\t0, 0, 0],\n\t\t[1007,\t\t1,\t\t0.007416620348082323,\t\t0.37083101740411617,\t\t0, 0, 0],\n\t\t[1010,\t\t1,\t\t0.238732414637843,\t\t11.93662073189215,\t\t0, 0, 0],\n\t\t[1011,\t\t1,\t\t0.005952394871636886,\t\t0.2976197435818443,\t\t0, 0, 0],\n\t\t[1012,\t\t1,\t\t0.9024085273310466,\t\t45.12042636655233,\t\t0, 0, 0],\n\t\t[1014,\t\t1,\t\t0.238732414637843,\t\t11.93662073189215,\t\t0, 0, 0],\n\t\t[1027,\t\t3,\t\t0.003074873500535418,\t\t0.15374367502677092,\t\t2.22, 61.69, 0.004502],\n\t\t[1028,\t\t2,\t\t0.025464790894703257,\t\t1.273239544735163,\t\t0, 0, 0],\n\t\t[1029,\t\t2,\t\t0.003819718634205488,\t\t0.19098593171027442,\t\t0, 0, 0],\n\t\t[1030,\t\t2,\t\t0.06480789282701978,\t\t3.2403946413509894,\t\t0, 0, 0],\n\t\t[1031,\t\t2,\t\t0.0921316134570364,\t\t4.60658067285182,\t\t0, 0, 0],\n\t\t[1032,\t\t2,\t\t0.009772775025341927,\t\t0.4886387512670964,\t\t0, 0, 
0],\n\t\t[1033,\t\t2,\t\t0.0031935716694765437,\t\t0.15967858347382718,\t\t0, 0, 0],\n\t\t[1034,\t\t2,\t\t0.005364335122251813,\t\t0.26821675611259066,\t\t0, 0, 0],\n\t\t[1035,\t\t3,\t\t0.00317587127473044,\t\t0.158793563736522,\t\t2.22, 61.69, 0.004502],\n\t\t[1036,\t\t2,\t\t0.0042795539826391196,\t\t0.21397769913195597,\t\t0, 0, 0],\n\t\t[1037,\t\t2,\t\t0.0060277734620055035,\t\t0.3013886731002752,\t\t0, 0, 0],\n\t\t[1038,\t\t2,\t\t0.005462103769994554,\t\t0.2731051884997277,\t\t0, 0, 0],\n\t\t[1039,\t\t2,\t\t0.008449479506347874,\t\t0.42247397531739384,\t\t0, 0, 0],\n\t\t[1040,\t\t3,\t\t4.085784833929019e-06,\t\t0.00020428924169645096,\t\t2.22, 61.69, 0.004502],\n\t\t[1041,\t\t2,\t\t0.012998987840239671,\t\t0.6499493920119837,\t\t0, 0, 0],\n\t\t[1042,\t\t2,\t\t0.00335501991632689,\t\t0.1677509958163445,\t\t0, 0, 0],\n\t\t[1043,\t\t3,\t\t0.00038423431443050963,\t\t0.019211715721525482,\t\t2.22, 61.69, 0.004502],\n\t\t[1044,\t\t3,\t\t0.0023022419250361527,\t\t0.11511209625180763,\t\t2.22, 61.69, 0.004502],\n\t\t[1045,\t\t2,\t\t0.003936615026511589,\t\t0.19683075132557948,\t\t0, 0, 0],\n\t\t[1046,\t\t2,\t\t0.006045611128115316,\t\t0.30228055640576584,\t\t0, 0, 0],\n\t\t[1047,\t\t3,\t\t0.0008294889076348922,\t\t0.04147444538174461,\t\t2.22, 61.69, 0.004502],\n\t\t[1048,\t\t2,\t\t0.00445182315071625,\t\t0.22259115753581254,\t\t0, 0, 0],\n\t\t[1049,\t\t2,\t\t0.01870104799381521,\t\t0.9350523996907605,\t\t0, 0, 0],\n\t\t[1050,\t\t2,\t\t0.0033601814151550304,\t\t0.1680090707577515,\t\t0, 0, 0],\n\t\t[1051,\t\t2,\t\t0.019380601737792977,\t\t0.969030086889649,\t\t0, 0, 0],\n\t\t[1052,\t\t3,\t\t0.001315809692296204,\t\t0.06579048461481019,\t\t2.22, 61.69, 0.004502],\n\t\t[1053,\t\t3,\t\t0.001042024786453249,\t\t0.05210123932266245,\t\t2.22, 61.69, 0.004502],\n\t\t[1054,\t\t2,\t\t0.017434200209443074,\t\t0.8717100104721537,\t\t0, 0, 0],\n\t\t[1055,\t\t3,\t\t0.0001818229987415119,\t\t0.009091149937075596,\t\t2.22, 61.69, 0.004502],\n\t\t[1056,\t\t2,\t\t0.0384482661909012,\t\t1.9224133095450602,\t\t0, 0, 0],\n\t\t[1057,\t\t2,\t\t0.02718238967557453,\t\t1.3591194837787268,\t\t0, 0, 0],\n\t\t[1058,\t\t2,\t\t0.06721018861714274,\t\t3.3605094308571375,\t\t0, 0, 0],\n\t\t[1059,\t\t2,\t\t0.02641152929543176,\t\t1.320576464771588,\t\t0, 0, 0],\n\t\t[1060,\t\t3,\t\t0.0006590053340983933,\t\t0.03295026670491967,\t\t2.22, 61.69, 0.004502],\n\t\t[1061,\t\t2,\t\t0.010304492946979937,\t\t0.5152246473489969,\t\t0, 0, 0],\n\t\t[1062,\t\t3,\t\t0.00018325491392786168,\t\t0.009162745696393085,\t\t2.22, 61.69, 0.004502],\n\t\t[1063,\t\t3,\t\t0.0005520076745724519,\t\t0.0276003837286226,\t\t2.22, 61.69, 0.004502],\n\t\t[1064,\t\t2,\t\t0.013355424896304362,\t\t0.667771244815218,\t\t0, 0, 0],\n\t\t[1065,\t\t2,\t\t0.021608252882636087,\t\t1.0804126441318045,\t\t0, 0, 0],\n\t\t[1066,\t\t2,\t\t0.008556107291276397,\t\t0.4278053645638199,\t\t0, 0, 0],\n\t\t[1067,\t\t3,\t\t0.002000933756260183,\t\t0.10004668781300916,\t\t2.22, 61.69, 0.004502],\n\t\t[1068,\t\t3,\t\t0.0003188842576981683,\t\t0.015944212884908417,\t\t2.22, 61.69, 0.004502],\n\t\t[1069,\t\t3,\t\t0.00020313001706596343,\t\t0.010156500853298172,\t\t2.22, 61.69, 0.004502],\n\t\t[1070,\t\t3,\t\t5.020379247175116e-05,\t\t0.0025101896235875582,\t\t2.22, 61.69, 0.004502],\n\t\t[1071,\t\t3,\t\t0.0002755733400308117,\t\t0.013778667001540588,\t\t2.22, 61.69, 0.004502],\n\t\t[1072,\t\t2,\t\t0.007168748144119091,\t\t0.3584374072059546,\t\t0, 0, 0],\n\t\t[1073,\t\t2,\t\t0.004954025493475761,\t\t0.24770127467378808,\t\t0, 0, 
0],\n\t\t[1074,\t\t2,\t\t0.009778033156939965,\t\t0.48890165784699824,\t\t0, 0, 0],\n\t\t[1075,\t\t3,\t\t0.0010048055180333312,\t\t0.05024027590166657,\t\t2.22, 61.69, 0.004502],\n\t\t[1076,\t\t3,\t\t0.00014613668285460223,\t\t0.007306834142730112,\t\t2.22, 61.69, 0.004502],\n\t\t[1077,\t\t3,\t\t0.0016628534246063698,\t\t0.08314267123031849,\t\t2.22, 61.69, 0.004502],\n\t\t[1078,\t\t3,\t\t0.0021908153060440304,\t\t0.10954076530220153,\t\t2.22, 61.69, 0.004502],\n\t\t[1079,\t\t2,\t\t0.004604543003215469,\t\t0.23022715016077344,\t\t0, 0, 0],\n\t\t[1080,\t\t2,\t\t0.008412929217414397,\t\t0.4206464608707199,\t\t0, 0, 0],\n\t\t[1081,\t\t2,\t\t0.025823979083824652,\t\t1.2911989541912325,\t\t0, 0, 0],\n\t\t[1082,\t\t2,\t\t0.03247105626963941,\t\t1.623552813481971,\t\t0, 0, 0],\n\t\t[1083,\t\t2,\t\t0.04034141649573272,\t\t2.017070824786636,\t\t0, 0, 0],\n\t\t[1084,\t\t2,\t\t0.0383703068502718,\t\t1.9185153425135901,\t\t0, 0, 0],\n\t\t[1085,\t\t2,\t\t0.007239283505967098,\t\t0.3619641752983549,\t\t0, 0, 0],\n\t\t[1086,\t\t2,\t\t0.01436208920263519,\t\t0.7181044601317595,\t\t0, 0, 0],\n\t\t[1087,\t\t2,\t\t0.007427186304799236,\t\t0.3713593152399618,\t\t0, 0, 0],\n\t\t[1088,\t\t3,\t\t0.0023416461987310717,\t\t0.11708230993655358,\t\t2.22, 61.69, 0.004502],\n\t\t[1089,\t\t2,\t\t0.024474821190373128,\t\t1.2237410595186564,\t\t0, 0, 0],\n\t\t[1090,\t\t2,\t\t0.005674885746854652,\t\t0.2837442873427326,\t\t0, 0, 0],\n\t\t[1091,\t\t3,\t\t0.0025559246387118852,\t\t0.12779623193559428,\t\t2.22, 61.69, 0.004502],\n\t\t[1092,\t\t2,\t\t0.0022614569222204907,\t\t0.11307284611102454,\t\t0, 0, 0],\n\t\t[1093,\t\t2,\t\t0.005405735887485864,\t\t0.2702867943742932,\t\t0, 0, 0],\n\t\t[1096,\t\t2,\t\t0.0032869739467971857,\t\t0.16434869733985927,\t\t0, 0, 0],\n\t\t[1097,\t\t3,\t\t0.00017300345148886943,\t\t0.008650172574443471,\t\t2.22, 61.69, 0.004502],\n\t\t[1098,\t\t2,\t\t0.003289044333560044,\t\t0.1644522166780022,\t\t0, 0, 0],\n\t\t[1099,\t\t2,\t\t0.017502038182814306,\t\t0.8751019091407154,\t\t0, 0, 0],\n\t\t[1100,\t\t3,\t\t1.2394935240118277e-06,\t\t6.19746762005914e-05,\t\t2.22, 61.69, 0.004502],\n\t\t[1101,\t\t2,\t\t0.005343192104787693,\t\t0.2671596052393847,\t\t0, 0, 0],\n\t\t[1102,\t\t2,\t\t0.02234407998394998,\t\t1.1172039991974991,\t\t0, 0, 0],\n\t\t[1103,\t\t2,\t\t0.01562148424141561,\t\t0.7810742120707805,\t\t0, 0, 0],\n\t\t[1105,\t\t3,\t\t5.553489395638779e-05,\t\t0.0027767446978193898,\t\t2.22, 61.69, 0.004502],\n\t\t[1106,\t\t3,\t\t5.824860207634129e-05,\t\t0.0029124301038170645,\t\t2.22, 61.69, 0.004502],\n\t\t[1107,\t\t2,\t\t0.0030626723973069554,\t\t0.15313361986534774,\t\t0, 0, 0],\n\t\t[1108,\t\t2,\t\t0.02039874588539438,\t\t1.019937294269719,\t\t0, 0, 0],\n\t\t[1109,\t\t3,\t\t2.0410230979817453e-05,\t\t0.0010205115489908725,\t\t2.22, 61.69, 0.004502],\n\t\t[1110,\t\t3,\t\t4.209100319936101e-05,\t\t0.0021045501599680503,\t\t2.22, 61.69, 0.004502],\n\t\t[1111,\t\t2,\t\t0.004130994840039845,\t\t0.20654974200199225,\t\t0, 0, 0],\n\t\t[1113,\t\t3,\t\t8.967736039222342e-05,\t\t0.004483868019611171,\t\t2.22, 61.69, 0.004502],\n\t\t[1114,\t\t3,\t\t0.0008287580610983356,\t\t0.04143790305491678,\t\t2.22, 61.69, 0.004502],\n\t\t[1115,\t\t2,\t\t0.0012846199411427445,\t\t0.06423099705713722,\t\t0, 0, 0],\n\t\t[1116,\t\t3,\t\t0.0008266680607579276,\t\t0.04133340303789638,\t\t2.22, 61.69, 0.004502],\n\t\t[1117,\t\t2,\t\t0.002423390125278668,\t\t0.12116950626393344,\t\t0, 0, 0],\n\t\t[1118,\t\t3,\t\t0.0002364061774524349,\t\t0.011820308872621746,\t\t2.22, 61.69, 
0.004502],\n\t\t[1119,\t\t3,\t\t0.001103839988378201,\t\t0.05519199941891006,\t\t2.22, 61.69, 0.004502],\n\t\t[1120,\t\t3,\t\t6.167750655223761e-05,\t\t0.0030838753276118814,\t\t2.22, 61.69, 0.004502],\n\t\t[1121,\t\t3,\t\t1.3755046233043984e-05,\t\t0.0006877523116521993,\t\t2.22, 61.69, 0.004502],\n\t\t[1122,\t\t3,\t\t3.7205183102116836e-05,\t\t0.0018602591551058418,\t\t2.22, 61.69, 0.004502],\n\t\t[1123,\t\t3,\t\t3.718482927877816e-05,\t\t0.001859241463938908,\t\t2.22, 61.69, 0.004502],\n\t\t[1124,\t\t3,\t\t3.2767805859797654e-05,\t\t0.0016383902929898828,\t\t2.22, 61.69, 0.004502],\n\t\t[1125,\t\t3,\t\t0.0007768493279403406,\t\t0.038842466397017036,\t\t2.22, 61.69, 0.004502],\n\t\t[1126,\t\t3,\t\t0.0008993573657867038,\t\t0.04496786828933519,\t\t2.22, 61.69, 0.004502],\n\t\t[1127,\t\t2,\t\t0.002692639158359382,\t\t0.13463195791796911,\t\t0, 0, 0],\n\t\t[1128,\t\t3,\t\t7.798648051461309e-05,\t\t0.0038993240257306546,\t\t2.22, 61.69, 0.004502],\n\t\t[1129,\t\t3,\t\t0.00012067336277826449,\t\t0.006033668138913225,\t\t2.22, 61.69, 0.004502],\n\t\t[1130,\t\t3,\t\t2.6018013552869856e-05,\t\t0.0013009006776434928,\t\t2.22, 61.69, 0.004502],\n\t\t[1131,\t\t3,\t\t7.376731283474909e-05,\t\t0.0036883656417374547,\t\t2.22, 61.69, 0.004502],\n\t\t[1133,\t\t3,\t\t1.8309816678670237e-05,\t\t0.000915490833933512,\t\t2.22, 61.69, 0.004502],\n\t\t[1134,\t\t3,\t\t1.2937356389347597e-05,\t\t0.0006468678194673798,\t\t2.22, 61.69, 0.004502],\n\t\t[1135,\t\t3,\t\t0.0002090133345259136,\t\t0.01045066672629568,\t\t2.22, 61.69, 0.004502],\n\t\t[1136,\t\t3,\t\t1.0239317808798805e-05,\t\t0.0005119658904399403,\t\t2.22, 61.69, 0.004502],\n\t\t[1137,\t\t3,\t\t0.00010517941277154545,\t\t0.005258970638577273,\t\t2.22, 61.69, 0.004502],\n\t\t[1138,\t\t3,\t\t3.202927158114444e-05,\t\t0.0016014635790572223,\t\t2.22, 61.69, 0.004502],\n\t\t[1139,\t\t3,\t\t0.000502422140661582,\t\t0.0251211070330791,\t\t2.22, 61.69, 0.004502],\n\t\t[1140,\t\t3,\t\t0.0014920849297188569,\t\t0.07460424648594284,\t\t2.22, 61.69, 0.004502],\n\t\t[1142,\t\t3,\t\t3.108855958207156e-05,\t\t0.001554427979103578,\t\t2.22, 61.69, 0.004502],\n\t\t[1143,\t\t3,\t\t0.0007010706467170471,\t\t0.03505353233585236,\t\t2.22, 61.69, 0.004502],\n\t\t[1144,\t\t2,\t\t0.0013348659944216786,\t\t0.06674329972108395,\t\t0, 0, 0],\n\t\t[1145,\t\t2,\t\t0.011197481443497569,\t\t0.5598740721748785,\t\t0, 0, 0],\n\t\t[1146,\t\t3,\t\t2.1915822140241895e-05,\t\t0.0010957911070120948,\t\t2.22, 61.69, 0.004502],\n\t\t[1147,\t\t3,\t\t0.0011597195411981833,\t\t0.05798597705990917,\t\t2.22, 61.69, 0.004502],\n\t\t[1148,\t\t3,\t\t0.000530075604509743,\t\t0.026503780225487154,\t\t2.22, 61.69, 0.004502],\n\t\t[1149,\t\t3,\t\t0.00023332074897085096,\t\t0.011666037448542547,\t\t2.22, 61.69, 0.004502],\n\t\t[1150,\t\t3,\t\t9.434708716193637e-05,\t\t0.004717354358096819,\t\t2.22, 61.69, 0.004502],\n\t\t[1151,\t\t3,\t\t0.00033266619332396894,\t\t0.01663330966619845,\t\t2.22, 61.69, 0.004502],\n\t\t[1152,\t\t3,\t\t2.968290590764656e-06,\t\t0.00014841452953823282,\t\t2.22, 61.69, 0.004502],\n\t\t[1155,\t\t3,\t\t1.5547398540825696e-05,\t\t0.0007773699270412849,\t\t2.22, 61.69, 0.004502],\n\t\t[1157,\t\t3,\t\t0.00011110922316080263,\t\t0.005555461158040131,\t\t2.22, 61.69, 0.004502],\n\t\t[1160,\t\t2,\t\t0.015175599618213626,\t\t0.7587799809106813,\t\t0, 0, 0],\n\t\t[1161,\t\t3,\t\t0.0010857043774739259,\t\t0.054285218873696306,\t\t2.22, 61.69, 0.004502],\n\t\t[1162,\t\t2,\t\t0.031984361657767045,\t\t1.5992180828883522,\t\t0, 0, 
0],\n\t\t[1163,\t\t2,\t\t0.021010485834812704,\t\t1.0505242917406352,\t\t0, 0, 0],\n\t\t[1164,\t\t2,\t\t0.018183478445661972,\t\t0.9091739222830987,\t\t0, 0, 0],\n\t\t[1165,\t\t2,\t\t0.003640738012495192,\t\t0.18203690062475963,\t\t0, 0, 0],\n\t\t[1166,\t\t2,\t\t0.005301588846150501,\t\t0.26507944230752506,\t\t0, 0, 0],\n\t\t[1168,\t\t3,\t\t3.419450196278286e-05,\t\t0.0017097250981391431,\t\t2.22, 61.69, 0.004502],\n\t\t[1169,\t\t3,\t\t6.93880139226225e-05,\t\t0.003469400696131125,\t\t2.22, 61.69, 0.004502],\n\t\t[1171,\t\t3,\t\t0.0005748603194505088,\t\t0.02874301597252544,\t\t2.22, 61.69, 0.004502],\n\t\t[1172,\t\t3,\t\t0.00020447436337759674,\t\t0.010223718168879837,\t\t2.22, 61.69, 0.004502],\n\t\t[1173,\t\t2,\t\t0.01618626952698487,\t\t0.8093134763492436,\t\t0, 0, 0],\n\t\t[1175,\t\t3,\t\t2.1782391725402467e-05,\t\t0.0010891195862701233,\t\t2.22, 61.69, 0.004502],\n\t\t[1176,\t\t3,\t\t5.923360885186837e-06,\t\t0.0002961680442593419,\t\t2.22, 61.69, 0.004502],\n\t\t[1177,\t\t3,\t\t0.0007213874875701519,\t\t0.036069374378507595,\t\t2.22, 61.69, 0.004502],\n\t\t[1178,\t\t3,\t\t0.00010205808100824817,\t\t0.005102904050412409,\t\t2.22, 61.69, 0.004502],\n\t\t[1179,\t\t3,\t\t3.44925871051151e-05,\t\t0.0017246293552557552,\t\t2.22, 61.69, 0.004502],\n\t\t[1181,\t\t2,\t\t0.004495779034217764,\t\t0.2247889517108882,\t\t0, 0, 0],\n\t\t[1182,\t\t2,\t\t0.0037840530757545184,\t\t0.1892026537877259,\t\t0, 0, 0],\n\t\t[1183,\t\t3,\t\t0.00109035926940026,\t\t0.054517963470013,\t\t2.22, 61.69, 0.004502],\n\t\t[1184,\t\t3,\t\t0.00010790631226403063,\t\t0.005395315613201532,\t\t2.22, 61.69, 0.004502],\n\t\t[1186,\t\t3,\t\t0.001498769521577056,\t\t0.0749384760788528,\t\t2.22, 61.69, 0.004502],\n\t\t[1187,\t\t3,\t\t0.0002833468274902024,\t\t0.01416734137451012,\t\t2.22, 61.69, 0.004502],\n\t\t[1188,\t\t2,\t\t0.011440868435801076,\t\t0.5720434217900537,\t\t0, 0, 0],\n\t\t[1189,\t\t3,\t\t0.001289906586581014,\t\t0.06449532932905071,\t\t2.22, 61.69, 0.004502],\n\t\t[1190,\t\t2,\t\t0.01403960969000889,\t\t0.7019804845004446,\t\t0, 0, 0],\n\t\t[1191,\t\t2,\t\t0.004652379906159672,\t\t0.23261899530798363,\t\t0, 0, 0],\n\t\t[1192,\t\t3,\t\t0.0013658402687938922,\t\t0.06829201343969461,\t\t2.22, 61.69, 0.004502],\n\t\t[1193,\t\t3,\t\t0.00015278576957249078,\t\t0.007639288478624539,\t\t2.22, 61.69, 0.004502],\n\t\t[1194,\t\t3,\t\t0.0005720688022791215,\t\t0.028603440113956075,\t\t2.22, 61.69, 0.004502],\n\t\t[1195,\t\t3,\t\t1.2882573563174789e-05,\t\t0.0006441286781587394,\t\t2.22, 61.69, 0.004502],\n\t\t[1196,\t\t2,\t\t0.010230349597894291,\t\t0.5115174798947145,\t\t0, 0, 0],\n\t\t[1197,\t\t2,\t\t0.005767282789943071,\t\t0.2883641394971536,\t\t0, 0, 0],\n\t\t[1198,\t\t3,\t\t0.002534966273924786,\t\t0.12674831369623932,\t\t2.22, 61.69, 0.004502],\n\t\t[1199,\t\t2,\t\t0.012822920004466005,\t\t0.6411460002233003,\t\t0, 0, 0],\n\t\t[1200,\t\t2,\t\t0.003512885294685969,\t\t0.17564426473429848,\t\t0, 0, 0],\n\t\t[1201,\t\t3,\t\t0.0016021597716395785,\t\t0.08010798858197893,\t\t2.22, 61.69, 0.004502],\n\t\t[1202,\t\t3,\t\t0.0031762475555186724,\t\t0.15881237777593363,\t\t2.22, 61.69, 0.004502],\n\t\t[1203,\t\t2,\t\t0.011626157559117188,\t\t0.5813078779558594,\t\t0, 0, 0],\n\t\t[1204,\t\t3,\t\t0.0030266063343556363,\t\t0.15133031671778183,\t\t2.22, 61.69, 0.004502],\n\t\t[1205,\t\t3,\t\t3.4940417699210975e-05,\t\t0.0017470208849605492,\t\t2.22, 61.69, 0.004502],\n\t\t[1206,\t\t3,\t\t0.00024235441128435216,\t\t0.012117720564217609,\t\t2.22, 61.69, 
0.004502],\n\t\t[1207,\t\t3,\t\t0.00022762038155293296,\t\t0.011381019077646649,\t\t2.22, 61.69, 0.004502],\n\t\t[1208,\t\t3,\t\t0.0001427321512302434,\t\t0.007136607561512171,\t\t2.22, 61.69, 0.004502],\n\t\t[1209,\t\t3,\t\t3.712569506330662e-05,\t\t0.0018562847531653312,\t\t2.22, 61.69, 0.004502],\n\t\t[1210,\t\t3,\t\t0.00030747517943711223,\t\t0.015373758971855613,\t\t2.22, 61.69, 0.004502],\n\t\t[1211,\t\t3,\t\t0.0011462484513341364,\t\t0.057312422566706815,\t\t2.22, 61.69, 0.004502],\n\t\t[1212,\t\t2,\t\t0.005804182676892941,\t\t0.290209133844647,\t\t0, 0, 0],\n\t\t[1213,\t\t2,\t\t0.0036505499187602444,\t\t0.18252749593801224,\t\t0, 0, 0],\n\t\t[1214,\t\t3,\t\t0.0002868549194435664,\t\t0.014342745972178321,\t\t2.22, 61.69, 0.004502],\n\t\t[1215,\t\t3,\t\t0.00014342822681200328,\t\t0.0071714113406001635,\t\t2.22, 61.69, 0.004502],\n\t\t[1216,\t\t2,\t\t0.00431338348440427,\t\t0.21566917422021353,\t\t0, 0, 0],\n\t\t[1217,\t\t3,\t\t0.0022836580531031417,\t\t0.11418290265515707,\t\t2.22, 61.69, 0.004502],\n\t\t[1218,\t\t3,\t\t6.241945072080783e-05,\t\t0.003120972536040392,\t\t2.22, 61.69, 0.004502],\n\t\t[1219,\t\t3,\t\t0.00038380486709714475,\t\t0.01919024335485724,\t\t2.22, 61.69, 0.004502],\n\t\t[1220,\t\t3,\t\t0.0011850020268110609,\t\t0.05925010134055305,\t\t2.22, 61.69, 0.004502],\n\t\t[1221,\t\t2,\t\t0.0377662225422596,\t\t1.88831112711298,\t\t0, 0, 0],\n\t\t[1222,\t\t2,\t\t0.013436354905899806,\t\t0.6718177452949904,\t\t0, 0, 0],\n\t\t[1223,\t\t3,\t\t0.00024230393037435297,\t\t0.01211519651871765,\t\t2.22, 61.69, 0.004502],\n\t\t[1224,\t\t2,\t\t0.010219261097938644,\t\t0.5109630548969322,\t\t0, 0, 0],\n\t\t[1225,\t\t3,\t\t0.0022238071565315737,\t\t0.1111903578265787,\t\t2.22, 61.69, 0.004502],\n\t\t[1226,\t\t3,\t\t0.0002535566380389208,\t\t0.012677831901946041,\t\t2.22, 61.69, 0.004502],\n\t\t[1227,\t\t3,\t\t0.0011129900410750567,\t\t0.05564950205375283,\t\t2.22, 61.69, 0.004502],\n\t\t[1228,\t\t3,\t\t0.00019234621639044032,\t\t0.009617310819522017,\t\t2.22, 61.69, 0.004502],\n\t\t[1229,\t\t2,\t\t0.0030085590951324306,\t\t0.15042795475662155,\t\t0, 0, 0],\n\t\t[1230,\t\t3,\t\t8.1951485973486e-05,\t\t0.0040975742986743,\t\t2.22, 61.69, 0.004502],\n\t\t[1231,\t\t3,\t\t0.00154847626324508,\t\t0.077423813162254,\t\t2.22, 61.69, 0.004502],\n\t\t[1232,\t\t2,\t\t0.003813185361664286,\t\t0.19065926808321432,\t\t0, 0, 0],\n\t\t[1233,\t\t2,\t\t0.03662908231521014,\t\t1.831454115760507,\t\t0, 0, 0],\n\t\t[1235,\t\t3,\t\t0.0005753349157073776,\t\t0.028766745785368877,\t\t2.22, 61.69, 0.004502],\n\t\t[1236,\t\t2,\t\t0.005234608320670995,\t\t0.26173041603354974,\t\t0, 0, 0],\n\t\t[1237,\t\t3,\t\t0.0008890105844342532,\t\t0.04445052922171266,\t\t2.22, 61.69, 0.004502],\n\t\t[1238,\t\t2,\t\t0.012012445276594919,\t\t0.600622263829746,\t\t0, 0, 0],\n\t\t[1239,\t\t3,\t\t0.0001443666373276477,\t\t0.007218331866382386,\t\t2.22, 61.69, 0.004502],\n\t\t[1240,\t\t2,\t\t0.021613910382114798,\t\t1.08069551910574,\t\t0, 0, 0],\n\t\t[1241,\t\t2,\t\t0.024532881090784327,\t\t1.2266440545392163,\t\t0, 0, 0],\n\t\t[1242,\t\t3,\t\t0.0015615143972363894,\t\t0.07807571986181946,\t\t2.22, 61.69, 0.004502],\n\t\t[1243,\t\t2,\t\t0.005289026999236673,\t\t0.26445134996183367,\t\t0, 0, 0],\n\t\t[1244,\t\t2,\t\t0.020592901244747865,\t\t1.0296450622373932,\t\t0, 0, 0],\n\t\t[1245,\t\t3,\t\t0.0005144458090049472,\t\t0.025722290450247362,\t\t2.22, 61.69, 0.004502],\n\t\t[1246,\t\t2,\t\t0.003636870278584459,\t\t0.18184351392922293,\t\t0, 0, 0],\n\t\t[1247,\t\t3,\t\t0.0013899571448864774,\t\t0.06949785724432388,\t\t2.22, 61.69, 
0.004502],\n\t\t[1248,\t\t2,\t\t0.004047804296417853,\t\t0.2023902148208927,\t\t0, 0, 0],\n\t\t[1249,\t\t2,\t\t0.004846915908139961,\t\t0.24234579540699805,\t\t0, 0, 0],\n\t\t[1250,\t\t3,\t\t0.0019627317861894665,\t\t0.09813658930947333,\t\t2.22, 61.69, 0.004502],\n\t\t[1251,\t\t3,\t\t0.0014899668826355728,\t\t0.07449834413177864,\t\t2.22, 61.69, 0.004502],\n\t\t[1252,\t\t3,\t\t0.0009477821555247328,\t\t0.047389107776236644,\t\t2.22, 61.69, 0.004502],\n\t\t[1253,\t\t2,\t\t0.004106369053307717,\t\t0.20531845266538587,\t\t0, 0, 0],\n\t\t[1254,\t\t2,\t\t0.005238024431161238,\t\t0.2619012215580619,\t\t0, 0, 0],\n\t\t[1255,\t\t3,\t\t0.0002430881191708174,\t\t0.01215440595854087,\t\t2.22, 61.69, 0.004502],\n\t\t[1256,\t\t3,\t\t0.0009607764830526361,\t\t0.048038824152631804,\t\t2.22, 61.69, 0.004502],\n\t\t[1257,\t\t2,\t\t0.005662916214121937,\t\t0.28314581070609685,\t\t0, 0, 0],\n\t\t[1258,\t\t2,\t\t0.014991588973313675,\t\t0.7495794486656838,\t\t0, 0, 0],\n\t\t[1259,\t\t2,\t\t0.00695753592752513,\t\t0.34787679637625657,\t\t0, 0, 0],\n\t\t[1260,\t\t3,\t\t0.0012839803779623614,\t\t0.06419901889811806,\t\t2.22, 61.69, 0.004502],\n\t\t[1261,\t\t2,\t\t0.012840592447306919,\t\t0.6420296223653459,\t\t0, 0, 0],\n\t\t[1262,\t\t3,\t\t3.3365758929065435e-05,\t\t0.0016682879464532717,\t\t2.22, 61.69, 0.004502],\n\t\t[1263,\t\t3,\t\t2.243579925674327e-05,\t\t0.0011217899628371635,\t\t2.22, 61.69, 0.004502],\n\t\t[1264,\t\t2,\t\t0.005222533303161435,\t\t0.2611266651580718,\t\t0, 0, 0],\n\t\t[1265,\t\t3,\t\t0.0004236530619172327,\t\t0.021182653095861634,\t\t2.22, 61.69, 0.004502],\n\t\t[1266,\t\t2,\t\t0.007621029313600565,\t\t0.38105146568002835,\t\t0, 0, 0],\n\t\t[1267,\t\t3,\t\t0.002512674942558201,\t\t0.12563374712791006,\t\t2.22, 61.69, 0.004502],\n\t\t[1268,\t\t3,\t\t0.0002183287451274897,\t\t0.010916437256374485,\t\t2.22, 61.69, 0.004502],\n\t\t[1269,\t\t3,\t\t0.0003250471975980552,\t\t0.01625235987990276,\t\t2.22, 61.69, 0.004502],\n\t\t[1270,\t\t3,\t\t0.0024796665722395645,\t\t0.12398332861197821,\t\t2.22, 61.69, 0.004502],\n\t\t[1271,\t\t3,\t\t0.0030157819134425234,\t\t0.15078909567212617,\t\t2.22, 61.69, 0.004502],\n\t\t[1272,\t\t3,\t\t7.840992648188318e-05,\t\t0.003920496324094159,\t\t2.22, 61.69, 0.004502],\n\t\t[1273,\t\t3,\t\t9.236768632941541e-05,\t\t0.00461838431647077,\t\t2.22, 61.69, 0.004502],\n\t\t[1274,\t\t2,\t\t0.0033801727100761705,\t\t0.1690086355038085,\t\t0, 0, 0],\n\t\t[1275,\t\t2,\t\t0.006307329492962109,\t\t0.3153664746481055,\t\t0, 0, 0],\n\t\t[1276,\t\t3,\t\t0.001633288835647369,\t\t0.08166444178236844,\t\t2.22, 61.69, 0.004502],\n\t\t[1277,\t\t2,\t\t0.004176942042758357,\t\t0.20884710213791788,\t\t0, 0, 0],\n\t\t[1278,\t\t2,\t\t0.010850406134369231,\t\t0.5425203067184615,\t\t0, 0, 0],\n\t\t[1279,\t\t3,\t\t1.2957727984992993e-07,\t\t6.478863992496497e-06,\t\t2.22, 61.69, 0.004502],\n\t\t[1280,\t\t3,\t\t2.5822901719599235e-05,\t\t0.001291145085979962,\t\t2.22, 61.69, 0.004502],\n\t\t[1281,\t\t3,\t\t0.00013291594727662026,\t\t0.006645797363831013,\t\t2.22, 61.69, 0.004502],\n\t\t[1282,\t\t3,\t\t0.00021130763141584551,\t\t0.010565381570792277,\t\t2.22, 61.69, 0.004502],\n\t\t[1283,\t\t2,\t\t0.08261824948992594,\t\t4.130912474496298,\t\t0, 0, 0],\n\t\t[1284,\t\t3,\t\t0.0018096758437742202,\t\t0.09048379218871101,\t\t2.22, 61.69, 0.004502],\n\t\t[1285,\t\t3,\t\t0.0001399477244734882,\t\t0.006997386223674409,\t\t2.22, 61.69, 0.004502],\n\t\t[1286,\t\t3,\t\t0.0011377796471657795,\t\t0.05688898235828898,\t\t2.22, 61.69, 
0.004502],\n\t\t[1287,\t\t2,\t\t0.005933272587501368,\t\t0.29666362937506835,\t\t0, 0, 0],\n\t\t[1288,\t\t2,\t\t0.00944760882155904,\t\t0.472380441077952,\t\t0, 0, 0],\n\t\t[1289,\t\t2,\t\t0.011723304434111076,\t\t0.5861652217055537,\t\t0, 0, 0],\n\t\t[1290,\t\t3,\t\t0.0003120693634598793,\t\t0.015603468172993969,\t\t2.22, 61.69, 0.004502],\n\t\t[1291,\t\t2,\t\t0.0062575490505418305,\t\t0.31287745252709154,\t\t0, 0, 0],\n\t\t[1292,\t\t3,\t\t0.002653563231501149,\t\t0.13267816157505744,\t\t2.22, 61.69, 0.004502],\n\t\t[1293,\t\t3,\t\t0.00015292290721046804,\t\t0.007646145360523402,\t\t2.22, 61.69, 0.004502],\n\t\t[1294,\t\t3,\t\t0.0003436110439431119,\t\t0.017180552197155596,\t\t2.22, 61.69, 0.004502],\n\t\t[1295,\t\t3,\t\t0.00037392918854889465,\t\t0.01869645942744473,\t\t2.22, 61.69, 0.004502],\n\t\t[1296,\t\t3,\t\t0.0017415681822428924,\t\t0.08707840911214464,\t\t2.22, 61.69, 0.004502],\n\t\t[1297,\t\t2,\t\t0.011317746197608284,\t\t0.5658873098804141,\t\t0, 0, 0],\n\t\t[1298,\t\t3,\t\t0.00025557758136610396,\t\t0.0127788790683052,\t\t2.22, 61.69, 0.004502],\n\t\t[1299,\t\t3,\t\t0.00013739570556443013,\t\t0.006869785278221508,\t\t2.22, 61.69, 0.004502],\n\t\t[1300,\t\t3,\t\t0.001511593201166196,\t\t0.07557966005830981,\t\t2.22, 61.69, 0.004502],\n\t\t[1301,\t\t2,\t\t0.0038746782543149596,\t\t0.193733912715748,\t\t0, 0, 0],\n\t\t[1302,\t\t3,\t\t0.0003104985267932093,\t\t0.015524926339660468,\t\t2.22, 61.69, 0.004502],\n\t\t[1303,\t\t3,\t\t0.00027600750632746427,\t\t0.013800375316373212,\t\t2.22, 61.69, 0.004502],\n\t\t[1304,\t\t3,\t\t0.000610793340517708,\t\t0.030539667025885397,\t\t2.22, 61.69, 0.004502],\n\t\t[1305,\t\t3,\t\t2.9075695387122924e-07,\t\t1.4537847693561463e-05,\t\t2.22, 61.69, 0.004502],\n\t\t[1306,\t\t3,\t\t4.785298727192918e-05,\t\t0.002392649363596459,\t\t2.22, 61.69, 0.004502],\n\t\t[1307,\t\t3,\t\t7.607863985215967e-06,\t\t0.0003803931992607984,\t\t2.22, 61.69, 0.004502],\n\t\t[1308,\t\t3,\t\t0.00020870441847665842,\t\t0.010435220923832922,\t\t2.22, 61.69, 0.004502],\n\t\t[1309,\t\t3,\t\t0.0002132096944766602,\t\t0.01066048472383301,\t\t2.22, 61.69, 0.004502],\n\t\t[1310,\t\t3,\t\t0.00010478060392325507,\t\t0.005239030196162754,\t\t2.22, 61.69, 0.004502],\n\t\t[1311,\t\t3,\t\t0.00042867578463455237,\t\t0.02143378923172762,\t\t2.22, 61.69, 0.004502],\n\t\t[1312,\t\t2,\t\t0.016696303623916272,\t\t0.8348151811958137,\t\t0, 0, 0],\n\t\t[1313,\t\t3,\t\t0.0019631283227609974,\t\t0.09815641613804986,\t\t2.22, 61.69, 0.004502],\n\t\t[1314,\t\t3,\t\t0.0007641975650906521,\t\t0.038209878254532606,\t\t2.22, 61.69, 0.004502],\n\t\t[1315,\t\t3,\t\t0.0005015944131679134,\t\t0.02507972065839567,\t\t2.22, 61.69, 0.004502],\n\t\t[1316,\t\t3,\t\t0.00012376478287903607,\t\t0.006188239143951804,\t\t2.22, 61.69, 0.004502],\n\t\t[1317,\t\t3,\t\t0.0009711351173103039,\t\t0.048556755865515194,\t\t2.22, 61.69, 0.004502],\n\t\t[1318,\t\t3,\t\t0.00012454395408676328,\t\t0.0062271977043381645,\t\t2.22, 61.69, 0.004502],\n\t\t[1319,\t\t3,\t\t0.001127343871228203,\t\t0.05636719356141015,\t\t2.22, 61.69, 0.004502],\n\t\t[1320,\t\t3,\t\t0.0013215329138219017,\t\t0.06607664569109509,\t\t2.22, 61.69, 0.004502],\n\t\t[1321,\t\t3,\t\t1.025741798764967e-05,\t\t0.0005128708993824835,\t\t2.22, 61.69, 0.004502],\n\t\t[1322,\t\t3,\t\t5.919056262068799e-05,\t\t0.0029595281310344,\t\t2.22, 61.69, 0.004502],\n\t\t[1323,\t\t2,\t\t0.012675857799799822,\t\t0.6337928899899912,\t\t0, 0, 0],\n\t\t[1324,\t\t3,\t\t0.0008316328586631403,\t\t0.04158164293315702,\t\t2.22, 61.69, 
0.004502],\n\t\t[1325,\t\t2,\t\t0.0057612535388438385,\t\t0.2880626769421919,\t\t0, 0, 0],\n\t\t[1326,\t\t2,\t\t0.0036242041289439157,\t\t0.1812102064471958,\t\t0, 0, 0],\n\t\t[1327,\t\t2,\t\t0.0032338308031027566,\t\t0.16169154015513784,\t\t0, 0, 0],\n\t\t[1328,\t\t3,\t\t0.0010226241895011407,\t\t0.05113120947505704,\t\t2.22, 61.69, 0.004502],\n\t\t[1329,\t\t2,\t\t0.013921309839652627,\t\t0.6960654919826315,\t\t0, 0, 0],\n\t\t[1330,\t\t3,\t\t0.0019182008434651947,\t\t0.09591004217325974,\t\t2.22, 61.69, 0.004502],\n\t\t[1332,\t\t3,\t\t0.0016738699394560756,\t\t0.08369349697280379,\t\t2.22, 61.69, 0.004502],\n\t\t[1333,\t\t3,\t\t0.0029061854047842247,\t\t0.14530927023921122,\t\t2.22, 61.69, 0.004502],\n\t\t[1334,\t\t3,\t\t5.136054459913027e-05,\t\t0.0025680272299565135,\t\t2.22, 61.69, 0.004502],\n\t\t[1335,\t\t3,\t\t0.00021052629514022267,\t\t0.010526314757011134,\t\t2.22, 61.69, 0.004502],\n\t\t[1336,\t\t3,\t\t0.0018954102795459078,\t\t0.0947705139772954,\t\t2.22, 61.69, 0.004502],\n\t\t[1337,\t\t2,\t\t0.006020338798098282,\t\t0.3010169399049141,\t\t0, 0, 0],\n\t\t[1338,\t\t3,\t\t5.300015004820578e-05,\t\t0.0026500075024102894,\t\t2.22, 61.69, 0.004502],\n\t\t[1339,\t\t3,\t\t0.0006421253879349708,\t\t0.032106269396748544,\t\t2.22, 61.69, 0.004502],\n\t\t[1340,\t\t2,\t\t0.003355330861775994,\t\t0.1677665430887997,\t\t0, 0, 0],\n\t\t[1341,\t\t2,\t\t0.010682483732650976,\t\t0.5341241866325488,\t\t0, 0, 0],\n\t\t[1342,\t\t3,\t\t2.101043175532592e-05,\t\t0.0010505215877662961,\t\t2.22, 61.69, 0.004502],\n\t\t[1343,\t\t3,\t\t3.130239915703848e-05,\t\t0.0015651199578519243,\t\t2.22, 61.69, 0.004502],\n\t\t[1344,\t\t3,\t\t1.4391232894862565e-05,\t\t0.0007195616447431282,\t\t2.22, 61.69, 0.004502],\n\t\t[1345,\t\t3,\t\t0.00025281368060892654,\t\t0.012640684030446329,\t\t2.22, 61.69, 0.004502],\n\t\t[1346,\t\t2,\t\t0.013669449762218379,\t\t0.6834724881109189,\t\t0, 0, 0],\n\t\t[1347,\t\t2,\t\t0.02636344185792537,\t\t1.3181720928962688,\t\t0, 0, 0],\n\t\t[1348,\t\t3,\t\t0.0014456315404578254,\t\t0.07228157702289127,\t\t2.22, 61.69, 0.004502],\n\t\t[1349,\t\t3,\t\t0.002610949541382524,\t\t0.13054747706912617,\t\t2.22, 61.69, 0.004502],\n\t\t[1350,\t\t3,\t\t3.859851934953823e-06,\t\t0.00019299259674769115,\t\t2.22, 61.69, 0.004502],\n\t\t[1351,\t\t3,\t\t4.5085071524642273e-07,\t\t2.2542535762321137e-05,\t\t2.22, 61.69, 0.004502],\n\t\t[1352,\t\t3,\t\t2.5677954031977487e-05,\t\t0.0012838977015988745,\t\t2.22, 61.69, 0.004502],\n\t\t[1355,\t\t3,\t\t0.0001074820707981226,\t\t0.005374103539906131,\t\t2.22, 61.69, 0.004502],\n\t\t[1356,\t\t2,\t\t0.004678278776831856,\t\t0.23391393884159278,\t\t0, 0, 0],\n\t\t[1357,\t\t2,\t\t0.003594349677217709,\t\t0.17971748386088549,\t\t0, 0, 0],\n\t\t[1358,\t\t3,\t\t1.57431431082847e-05,\t\t0.0007871571554142351,\t\t2.22, 61.69, 0.004502],\n\t\t[1359,\t\t2,\t\t0.004496673943395517,\t\t0.22483369716977586,\t\t0, 0, 0],\n\t\t[1363,\t\t3,\t\t1.5265322222078787e-06,\t\t7.632661111039394e-05,\t\t2.22, 61.69, 0.004502],\n\t\t[1364,\t\t3,\t\t2.8687227851091924e-06,\t\t0.0001434361392554596,\t\t2.22, 61.69, 0.004502],\n\t\t[1365,\t\t3,\t\t2.1560465484574657e-08,\t\t1.078023274228733e-06,\t\t2.22, 61.69, 0.004502],\n\t\t[1366,\t\t3,\t\t7.830373844390861e-05,\t\t0.003915186922195431,\t\t2.22, 61.69, 0.004502],\n\t\t[1367,\t\t3,\t\t0.0027735977386081564,\t\t0.1386798869304078,\t\t2.22, 61.69, 0.004502],\n\t\t[1368,\t\t3,\t\t0.0001048661049437223,\t\t0.0052433052471861155,\t\t2.22, 61.69, 0.004502],\n\t\t[1369,\t\t3,\t\t0.0005073133310147165,\t\t0.025365666550735824,\t\t2.22, 
61.69, 0.004502],\n\t\t[1370,\t\t3,\t\t2.185563890765493e-05,\t\t0.0010927819453827466,\t\t2.22, 61.69, 0.004502],\n\t\t[1371,\t\t2,\t\t0.004857683053723355,\t\t0.24288415268616778,\t\t0, 0, 0],\n\t\t[1372,\t\t2,\t\t0.012284634505654547,\t\t0.6142317252827274,\t\t0, 0, 0],\n\t\t[1373,\t\t3,\t\t0.0022409179594482334,\t\t0.11204589797241167,\t\t2.22, 61.69, 0.004502],\n\t\t[1374,\t\t2,\t\t0.006889508467327262,\t\t0.3444754233663631,\t\t0, 0, 0],\n\t\t[1375,\t\t2,\t\t0.003897629175102736,\t\t0.1948814587551368,\t\t0, 0, 0],\n\t\t[1376,\t\t2,\t\t0.006830907337989802,\t\t0.3415453668994901,\t\t0, 0, 0],\n\t\t[1377,\t\t2,\t\t0.01492085689824784,\t\t0.7460428449123921,\t\t0, 0, 0],\n\t\t[1378,\t\t2,\t\t0.01566275025445262,\t\t0.783137512722631,\t\t0, 0, 0],\n\t\t[1379,\t\t3,\t\t2.062505175023466e-05,\t\t0.001031252587511733,\t\t2.22, 61.69, 0.004502],\n\t\t[1381,\t\t3,\t\t2.601825872991241e-05,\t\t0.0013009129364956204,\t\t2.22, 61.69, 0.004502],\n\t\t[1382,\t\t2,\t\t0.008838822964419164,\t\t0.4419411482209583,\t\t0, 0, 0],\n\t\t[1383,\t\t2,\t\t0.0069522653092041085,\t\t0.34761326546020543,\t\t0, 0, 0],\n\t\t[1387,\t\t3,\t\t8.89643885212391e-05,\t\t0.0044482194260619555,\t\t2.22, 61.69, 0.004502],\n\t\t[1390,\t\t3,\t\t9.505708471011321e-05,\t\t0.004752854235505661,\t\t2.22, 61.69, 0.004502],\n\t\t[1391,\t\t3,\t\t1.3594941515348555e-05,\t\t0.0006797470757674278,\t\t2.22, 61.69, 0.004502],\n\t\t[1393,\t\t3,\t\t3.4943392392534786e-05,\t\t0.0017471696196267393,\t\t2.22, 61.69, 0.004502],\n\t\t[1394,\t\t3,\t\t2.737439864388922e-05,\t\t0.001368719932194461,\t\t2.22, 61.69, 0.004502],\n\t\t[1395,\t\t3,\t\t1.9308633391493333e-06,\t\t9.654316695746669e-05,\t\t2.22, 61.69, 0.004502],\n\t\t[1396,\t\t3,\t\t7.028796859200431e-07,\t\t3.514398429600216e-05,\t\t2.22, 61.69, 0.004502],\n\t\t[1397,\t\t3,\t\t0.0006377592842944558,\t\t0.03188796421472279,\t\t2.22, 61.69, 0.004502],\n\t\t[1398,\t\t3,\t\t7.075339318186764e-05,\t\t0.003537669659093382,\t\t2.22, 61.69, 0.004502],\n\t\t[1399,\t\t3,\t\t0.0005693538555165958,\t\t0.02846769277582979,\t\t2.22, 61.69, 0.004502],\n\t\t[1400,\t\t3,\t\t3.292902158897971e-05,\t\t0.0016464510794489857,\t\t2.22, 61.69, 0.004502],\n\t\t[1401,\t\t2,\t\t0.0037280958540986705,\t\t0.18640479270493354,\t\t0, 0, 0],\n\t\t[1402,\t\t3,\t\t0.0009460030317753202,\t\t0.047300151588766014,\t\t2.22, 61.69, 0.004502],\n\t\t[1403,\t\t2,\t\t0.007617262031172502,\t\t0.38086310155862513,\t\t0, 0, 0],\n\t\t[1404,\t\t2,\t\t0.008581667499251882,\t\t0.42908337496259413,\t\t0, 0, 0],\n\t\t[1405,\t\t3,\t\t0.0013777254553245623,\t\t0.06888627276622811,\t\t2.22, 61.69, 0.004502],\n\t\t[1406,\t\t3,\t\t0.0005951329463718105,\t\t0.029756647318590523,\t\t2.22, 61.69, 0.004502],\n\t\t[1407,\t\t3,\t\t8.42762798103069e-06,\t\t0.00042138139905153457,\t\t2.22, 61.69, 0.004502],\n\t\t[1408,\t\t3,\t\t0.002615151153581973,\t\t0.13075755767909866,\t\t2.22, 61.69, 0.004502],\n\t\t[1409,\t\t3,\t\t0.0007652033584917757,\t\t0.038260167924588785,\t\t2.22, 61.69, 0.004502],\n\t\t[1410,\t\t3,\t\t0.002385192626051519,\t\t0.11925963130257596,\t\t2.22, 61.69, 0.004502],\n\t\t[1411,\t\t3,\t\t0.0025079869254713357,\t\t0.1253993462735668,\t\t2.22, 61.69, 0.004502],\n\t\t[1412,\t\t3,\t\t0.0003811825487857675,\t\t0.01905912743928838,\t\t2.22, 61.69, 0.004502],\n\t\t[1413,\t\t3,\t\t0.0003615867173212219,\t\t0.018079335866061096,\t\t2.22, 61.69, 0.004502],\n\t\t[1414,\t\t3,\t\t0.001654733253695335,\t\t0.08273666268476676,\t\t2.22, 61.69, 0.004502],\n\t\t[1415,\t\t3,\t\t0.0004745682686545623,\t\t0.023728413432728118,\t\t2.22, 61.69, 
0.004502],\n\t\t[1416,\t\t3,\t\t0.0005066221121186196,\t\t0.025331105605930982,\t\t2.22, 61.69, 0.004502],\n\t\t[1417,\t\t3,\t\t7.324966052452151e-08,\t\t3.662483026226075e-06,\t\t2.22, 61.69, 0.004502],\n\t\t[1418,\t\t2,\t\t0.005619099755523237,\t\t0.28095498777616185,\t\t0, 0, 0],\n\t\t[1419,\t\t3,\t\t0.00211745485704481,\t\t0.10587274285224049,\t\t2.22, 61.69, 0.004502],\n\t\t[1420,\t\t3,\t\t8.91112970779674e-05,\t\t0.00445556485389837,\t\t2.22, 61.69, 0.004502],\n\t\t[1421,\t\t3,\t\t0.00044387476697737416,\t\t0.02219373834886871,\t\t2.22, 61.69, 0.004502],\n\t\t[1422,\t\t3,\t\t0.00030115264331514286,\t\t0.015057632165757144,\t\t2.22, 61.69, 0.004502],\n\t\t[1423,\t\t3,\t\t0.00012293234040278847,\t\t0.006146617020139425,\t\t2.22, 61.69, 0.004502],\n\t\t[1424,\t\t2,\t\t0.01394783725195249,\t\t0.6973918625976245,\t\t0, 0, 0],\n\t\t[1425,\t\t3,\t\t0.0013602274146640447,\t\t0.06801137073320224,\t\t2.22, 61.69, 0.004502],\n\t\t[1426,\t\t2,\t\t0.004377563184547638,\t\t0.2188781592273819,\t\t0, 0, 0],\n\t\t[1427,\t\t2,\t\t0.03060222784928668,\t\t1.5301113924643341,\t\t0, 0, 0],\n\t\t[1428,\t\t2,\t\t0.021319488529000553,\t\t1.0659744264500277,\t\t0, 0, 0],\n\t\t[1429,\t\t3,\t\t0.000845419991215321,\t\t0.04227099956076605,\t\t2.22, 61.69, 0.004502],\n\t\t[1430,\t\t3,\t\t1.4103786308871584e-06,\t\t7.051893154435792e-05,\t\t2.22, 61.69, 0.004502],\n\t\t[1431,\t\t2,\t\t0.014493414492796078,\t\t0.724670724639804,\t\t0, 0, 0],\n\t\t[1432,\t\t3,\t\t0.0007676953741931287,\t\t0.03838476870965644,\t\t2.22, 61.69, 0.004502],\n\t\t[1433,\t\t2,\t\t0.08207564315805406,\t\t4.103782157902703,\t\t0, 0, 0],\n\t\t[1434,\t\t2,\t\t0.004580630870615056,\t\t0.2290315435307528,\t\t0, 0, 0],\n\t\t[1435,\t\t2,\t\t0.005241557112195593,\t\t0.2620778556097797,\t\t0, 0, 0],\n\t\t[1436,\t\t2,\t\t0.006266510483771511,\t\t0.31332552418857557,\t\t0, 0, 0],\n\t\t[1437,\t\t2,\t\t0.015172047044780135,\t\t0.7586023522390068,\t\t0, 0, 0],\n\t\t[1438,\t\t2,\t\t0.025007389641183632,\t\t1.2503694820591817,\t\t0, 0, 0],\n\t\t[1439,\t\t2,\t\t0.0063091033600462575,\t\t0.3154551680023129,\t\t0, 0, 0],\n\t\t[1440,\t\t3,\t\t5.306917668409132e-05,\t\t0.0026534588342045657,\t\t2.22, 61.69, 0.004502],\n\t\t[1441,\t\t3,\t\t1.0923020560921105e-05,\t\t0.0005461510280460552,\t\t2.22, 61.69, 0.004502],\n\t\t[1442,\t\t3,\t\t4.555157486056611e-05,\t\t0.0022775787430283057,\t\t2.22, 61.69, 0.004502],\n\t\t[1443,\t\t2,\t\t0.006557506818224797,\t\t0.3278753409112398,\t\t0, 0, 0],\n\t\t[1444,\t\t3,\t\t0.0005717925297728792,\t\t0.028589626488643962,\t\t2.22, 61.69, 0.004502],\n\t\t[1445,\t\t3,\t\t0.0015938921576921367,\t\t0.07969460788460683,\t\t2.22, 61.69, 0.004502],\n\t\t[1446,\t\t2,\t\t0.04829066125331256,\t\t2.414533062665628,\t\t0, 0, 0],\n\t\t[1447,\t\t2,\t\t0.005696308888305882,\t\t0.2848154444152941,\t\t0, 0, 0],\n\t\t[1448,\t\t3,\t\t0.0002813656970216781,\t\t0.014068284851083905,\t\t2.22, 61.69, 0.004502],\n\t\t[1449,\t\t2,\t\t0.0029348829924128405,\t\t0.14674414962064206,\t\t0, 0, 0],\n\t\t[1450,\t\t2,\t\t0.003726900047088699,\t\t0.18634500235443496,\t\t0, 0, 0],\n\t\t[1451,\t\t2,\t\t0.0036467833176776375,\t\t0.18233916588388188,\t\t0, 0, 0],\n\t\t[1452,\t\t3,\t\t0.0009308941175129764,\t\t0.046544705875648816,\t\t2.22, 61.69, 0.004502],\n\t\t[1453,\t\t2,\t\t0.004134065549943135,\t\t0.20670327749715672,\t\t0, 0, 0],\n\t\t[1454,\t\t2,\t\t0.009875666531734596,\t\t0.49378332658672985,\t\t0, 0, 0],\n\t\t[1455,\t\t3,\t\t1.66950830801293e-05,\t\t0.000834754154006465,\t\t2.22, 61.69, 
0.004502],\n\t\t[1456,\t\t2,\t\t0.0013664683513056725,\t\t0.06832341756528364,\t\t0, 0, 0],\n\t\t[1459,\t\t3,\t\t0.00013477613298625794,\t\t0.006738806649312897,\t\t2.22, 61.69, 0.004502],\n\t\t[1460,\t\t2,\t\t0.0037971068076197746,\t\t0.18985534038098878,\t\t0, 0, 0],\n\t\t[1461,\t\t3,\t\t0.00045503010222392685,\t\t0.022751505111196346,\t\t2.22, 61.69, 0.004502],\n\t\t[1463,\t\t3,\t\t1.810231431840124e-05,\t\t0.0009051157159200621,\t\t2.22, 61.69, 0.004502],\n\t\t[1464,\t\t2,\t\t0.013934601684842136,\t\t0.6967300842421068,\t\t0, 0, 0],\n\t\t[1466,\t\t3,\t\t0.0001450748986048064,\t\t0.00725374493024032,\t\t2.22, 61.69, 0.004502],\n\t\t[1467,\t\t3,\t\t5.434743301684746e-05,\t\t0.0027173716508423736,\t\t2.22, 61.69, 0.004502],\n\t\t[1468,\t\t3,\t\t0.0006047748176593424,\t\t0.03023874088296712,\t\t2.22, 61.69, 0.004502],\n\t\t[1469,\t\t2,\t\t0.003233867943910748,\t\t0.16169339719553738,\t\t0, 0, 0],\n\t\t[1470,\t\t2,\t\t0.005027084884666319,\t\t0.2513542442333159,\t\t0, 0, 0],\n\t\t[1471,\t\t2,\t\t0.010132763321185349,\t\t0.5066381660592674,\t\t0, 0, 0],\n\t\t[1472,\t\t3,\t\t0.00036895330016970505,\t\t0.018447665008485253,\t\t2.22, 61.69, 0.004502],\n\t\t[1473,\t\t3,\t\t0.00021195071858909128,\t\t0.010597535929454565,\t\t2.22, 61.69, 0.004502],\n\t\t[1474,\t\t3,\t\t3.568357370609641e-05,\t\t0.0017841786853048205,\t\t2.22, 61.69, 0.004502],\n\t\t[1475,\t\t3,\t\t9.952961021421813e-06,\t\t0.0004976480510710907,\t\t2.22, 61.69, 0.004502],\n\t\t[1476,\t\t2,\t\t0.015946059282369706,\t\t0.7973029641184852,\t\t0, 0, 0],\n\t\t[1477,\t\t3,\t\t0.0007717725169969112,\t\t0.03858862584984556,\t\t2.22, 61.69, 0.004502],\n\t\t[1479,\t\t3,\t\t0.00035603636123413484,\t\t0.01780181806170674,\t\t2.22, 61.69, 0.004502],\n\t\t[1480,\t\t3,\t\t0.0011893307912248102,\t\t0.05946653956124052,\t\t2.22, 61.69, 0.004502],\n\t\t[1481,\t\t3,\t\t3.3833873695351113e-06,\t\t0.00016916936847675558,\t\t2.22, 61.69, 0.004502],\n\t\t[1482,\t\t3,\t\t0.0011147740798471094,\t\t0.055738703992355476,\t\t2.22, 61.69, 0.004502],\n\t\t[1483,\t\t3,\t\t9.504850518132428e-05,\t\t0.004752425259066214,\t\t2.22, 61.69, 0.004502],\n\t\t[1484,\t\t3,\t\t9.303002951875421e-07,\t\t4.651501475937711e-05,\t\t2.22, 61.69, 0.004502],\n\t\t[1485,\t\t3,\t\t1.7528399459215098e-05,\t\t0.000876419972960755,\t\t2.22, 61.69, 0.004502],\n\t\t[1486,\t\t3,\t\t9.018017162430775e-05,\t\t0.0045090085812153876,\t\t2.22, 61.69, 0.004502],\n\t\t[1487,\t\t3,\t\t7.276038526853737e-05,\t\t0.0036380192634268686,\t\t2.22, 61.69, 0.004502],\n\t\t[1488,\t\t3,\t\t0.00022382432076245898,\t\t0.01119121603812295,\t\t2.22, 61.69, 0.004502],\n\t\t[1489,\t\t3,\t\t3.0263189463062935e-06,\t\t0.0001513159473153147,\t\t2.22, 61.69, 0.004502],\n\t\t[1490,\t\t2,\t\t0.04905115781427449,\t\t2.4525578907137247,\t\t0, 0, 0],\n\t\t[1491,\t\t2,\t\t0.005387257187745477,\t\t0.26936285938727383,\t\t0, 0, 0],\n\t\t[1492,\t\t2,\t\t0.014637639488319377,\t\t0.7318819744159688,\t\t0, 0, 0],\n\t\t[1493,\t\t2,\t\t0.005319414988695112,\t\t0.26597074943475557,\t\t0, 0, 0],\n\t\t[1494,\t\t2,\t\t0.0257504251653254,\t\t1.28752125826627,\t\t0, 0, 0],\n\t\t[1495,\t\t2,\t\t0.004260305180484296,\t\t0.2130152590242148,\t\t0, 0, 0],\n\t\t[1496,\t\t3,\t\t1.641562267503393e-08,\t\t8.207811337516965e-07,\t\t2.22, 61.69, 0.004502],\n\t\t[1497,\t\t2,\t\t0.005670372667342641,\t\t0.28351863336713207,\t\t0, 0, 0],\n\t\t[1498,\t\t2,\t\t0.006735488235440387,\t\t0.3367744117720194,\t\t0, 0, 0],\n\t\t[1499,\t\t3,\t\t0.00014557430965896176,\t\t0.0072787154829480885,\t\t2.22, 61.69, 
0.004502],\n\t\t[1500,\t\t3,\t\t9.284328907409222e-06,\t\t0.0004642164453704611,\t\t2.22, 61.69, 0.004502],\n\t\t[1501,\t\t3,\t\t0.00037483587777994396,\t\t0.018741793888997202,\t\t2.22, 61.69, 0.004502],\n\t\t[1502,\t\t3,\t\t3.9491818320371174e-05,\t\t0.0019745909160185583,\t\t2.22, 61.69, 0.004502],\n\t\t[1503,\t\t3,\t\t0.0029266803181735935,\t\t0.14633401590867967,\t\t2.22, 61.69, 0.004502],\n\t\t[1504,\t\t2,\t\t0.012020835078490423,\t\t0.6010417539245212,\t\t0, 0, 0],\n\t\t[1505,\t\t3,\t\t0.0017039709532498102,\t\t0.08519854766249052,\t\t2.22, 61.69, 0.004502],\n\t\t[1506,\t\t2,\t\t0.0035909631390018642,\t\t0.17954815695009319,\t\t0, 0, 0],\n\t\t[1507,\t\t3,\t\t0.000982816273068341,\t\t0.04914081365341705,\t\t2.22, 61.69, 0.004502],\n\t\t[1508,\t\t3,\t\t4.154538017488063e-06,\t\t0.00020772690087440316,\t\t2.22, 61.69, 0.004502],\n\t\t[1510,\t\t2,\t\t0.00681234986437375,\t\t0.34061749321868756,\t\t0, 0, 0],\n\t\t[1511,\t\t2,\t\t0.00988173435818505,\t\t0.4940867179092525,\t\t0, 0, 0],\n\t\t[1512,\t\t2,\t\t0.004082645917281524,\t\t0.20413229586407625,\t\t0, 0, 0],\n\t\t[1513,\t\t3,\t\t0.001467522271804366,\t\t0.07337611359021831,\t\t2.22, 61.69, 0.004502],\n\t\t[1514,\t\t3,\t\t8.434708679035484e-07,\t\t4.217354339517742e-05,\t\t2.22, 61.69, 0.004502],\n\t\t[1516,\t\t3,\t\t1.8340973111507537e-06,\t\t9.170486555753769e-05,\t\t2.22, 61.69, 0.004502],\n\t\t[1517,\t\t3,\t\t8.192048507877762e-05,\t\t0.0040960242539388805,\t\t2.22, 61.69, 0.004502],\n\t\t[1518,\t\t3,\t\t1.7149947944714273e-05,\t\t0.0008574973972357136,\t\t2.22, 61.69, 0.004502],\n\t\t[1519,\t\t3,\t\t1.1903058584033917e-06,\t\t5.951529292016959e-05,\t\t2.22, 61.69, 0.004502]\n\t])\n\tppc[\"branch_switch\"] = array([\n\t\t[586,\t\t1,\t\t0\t\t],\n\t\t[589,\t\t108,\t\t0\t\t],\n\t\t[590,\t\t108,\t\t0\t\t],\n\t\t[593,\t\t112,\t\t0\t\t],\n\t\t[595,\t\t115,\t\t0\t\t],\n\t\t[598,\t\t118,\t\t0\t\t],\n\t\t[599,\t\t119,\t\t0\t\t],\n\t\t[602,\t\t121,\t\t0\t\t],\n\t\t[603,\t\t526,\t\t0\t\t],\n\t\t[607,\t\t127,\t\t0\t\t],\n\t\t[608,\t\t127,\t\t0\t\t],\n\t\t[609,\t\t529,\t\t0\t\t],\n\t\t[612,\t\t493,\t\t0\t\t],\n\t\t[614,\t\t130,\t\t0\t\t],\n\t\t[616,\t\t132,\t\t0\t\t],\n\t\t[617,\t\t133,\t\t0\t\t],\n\t\t[618,\t\t133,\t\t0\t\t],\n\t\t[619,\t\t134,\t\t0\t\t],\n\t\t[624,\t\t14,\t\t0\t\t],\n\t\t[629,\t\t145,\t\t0\t\t],\n\t\t[632,\t\t145,\t\t0\t\t],\n\t\t[637,\t\t148,\t\t0\t\t],\n\t\t[638,\t\t149,\t\t0\t\t],\n\t\t[640,\t\t153,\t\t0\t\t],\n\t\t[641,\t\t155,\t\t0\t\t],\n\t\t[642,\t\t533,\t\t0\t\t],\n\t\t[643,\t\t534,\t\t0\t\t],\n\t\t[647,\t\t536,\t\t0\t\t],\n\t\t[652,\t\t167,\t\t0\t\t],\n\t\t[655,\t\t170,\t\t0\t\t],\n\t\t[663,\t\t178,\t\t0\t\t],\n\t\t[666,\t\t180,\t\t0\t\t],\n\t\t[670,\t\t183,\t\t0\t\t],\n\t\t[672,\t\t185,\t\t0\t\t],\n\t\t[676,\t\t19,\t\t0\t\t],\n\t\t[681,\t\t197,\t\t0\t\t],\n\t\t[683,\t\t200,\t\t0\t\t],\n\t\t[687,\t\t202,\t\t0\t\t],\n\t\t[694,\t\t21,\t\t0\t\t],\n\t\t[695,\t\t210,\t\t0\t\t],\n\t\t[697,\t\t211,\t\t0\t\t],\n\t\t[698,\t\t212,\t\t0\t\t],\n\t\t[702,\t\t215,\t\t0\t\t],\n\t\t[705,\t\t217,\t\t0\t\t],\n\t\t[707,\t\t219,\t\t0\t\t],\n\t\t[714,\t\t225,\t\t0\t\t],\n\t\t[716,\t\t226,\t\t0\t\t],\n\t\t[717,\t\t227,\t\t0\t\t],\n\t\t[722,\t\t545,\t\t0\t\t],\n\t\t[724,\t\t238,\t\t0\t\t],\n\t\t[730,\t\t547,\t\t0\t\t],\n\t\t[732,\t\t247,\t\t0\t\t],\n\t\t[735,\t\t253,\t\t0\t\t],\n\t\t[741,\t\t264,\t\t0\t\t],\n\t\t[742,\t\t264,\t\t0\t\t],\n\t\t[743,\t\t500,\t\t0\t\t],\n\t\t[747,\t\t273,\t\t0\t\t],\n\t\t[749,\t\t274,\t\t0\t\t],\n\t\t[750,\t\t557,\t\t0\t\t],\n\t\t[753,\t\t28,\t\t0\t\t],\n\t\t[761,\t\t288,\t\t0\t\t],\n\t\t[762,\t\t289,\t\t0\t\
t],\n\t\t[765,\t\t560,\t\t0\t\t],\n\t\t[767,\t\t292,\t\t0\t\t],\n\t\t[772,\t\t3,\t\t0\t\t],\n\t\t[774,\t\t300,\t\t0\t\t],\n\t\t[777,\t\t300,\t\t0\t\t],\n\t\t[778,\t\t300,\t\t0\t\t],\n\t\t[781,\t\t303,\t\t0\t\t],\n\t\t[784,\t\t563,\t\t0\t\t],\n\t\t[785,\t\t501,\t\t0\t\t],\n\t\t[788,\t\t311,\t\t0\t\t],\n\t\t[789,\t\t565,\t\t0\t\t],\n\t\t[791,\t\t314,\t\t0\t\t],\n\t\t[792,\t\t316,\t\t0\t\t],\n\t\t[795,\t\t319,\t\t0\t\t],\n\t\t[800,\t\t326,\t\t0\t\t],\n\t\t[801,\t\t327,\t\t0\t\t],\n\t\t[802,\t\t327,\t\t0\t\t],\n\t\t[805,\t\t328,\t\t0\t\t],\n\t\t[806,\t\t328,\t\t0\t\t],\n\t\t[808,\t\t329,\t\t0\t\t],\n\t\t[809,\t\t329,\t\t0\t\t],\n\t\t[811,\t\t568,\t\t0\t\t],\n\t\t[814,\t\t570,\t\t0\t\t],\n\t\t[816,\t\t335,\t\t0\t\t],\n\t\t[817,\t\t571,\t\t0\t\t],\n\t\t[821,\t\t338,\t\t0\t\t],\n\t\t[826,\t\t339,\t\t0\t\t],\n\t\t[834,\t\t572,\t\t0\t\t],\n\t\t[835,\t\t572,\t\t0\t\t],\n\t\t[836,\t\t572,\t\t0\t\t],\n\t\t[837,\t\t350,\t\t0\t\t],\n\t\t[839,\t\t350,\t\t0\t\t],\n\t\t[841,\t\t573,\t\t0\t\t],\n\t\t[843,\t\t352,\t\t0\t\t],\n\t\t[844,\t\t352,\t\t0\t\t],\n\t\t[850,\t\t574,\t\t0\t\t],\n\t\t[851,\t\t575,\t\t0\t\t],\n\t\t[853,\t\t362,\t\t0\t\t],\n\t\t[856,\t\t363,\t\t0\t\t],\n\t\t[857,\t\t365,\t\t0\t\t],\n\t\t[858,\t\t368,\t\t0\t\t],\n\t\t[860,\t\t371,\t\t0\t\t],\n\t\t[865,\t\t375,\t\t0\t\t],\n\t\t[867,\t\t376,\t\t0\t\t],\n\t\t[869,\t\t503,\t\t0\t\t],\n\t\t[870,\t\t503,\t\t0\t\t],\n\t\t[872,\t\t378,\t\t0\t\t],\n\t\t[874,\t\t576,\t\t0\t\t],\n\t\t[875,\t\t381,\t\t0\t\t],\n\t\t[882,\t\t388,\t\t0\t\t],\n\t\t[883,\t\t388,\t\t0\t\t],\n\t\t[885,\t\t393,\t\t0\t\t],\n\t\t[886,\t\t394,\t\t0\t\t],\n\t\t[889,\t\t397,\t\t0\t\t],\n\t\t[890,\t\t40,\t\t0\t\t],\n\t\t[893,\t\t400,\t\t0\t\t],\n\t\t[894,\t\t400,\t\t0\t\t],\n\t\t[895,\t\t580,\t\t0\t\t],\n\t\t[896,\t\t581,\t\t0\t\t],\n\t\t[898,\t\t403,\t\t0\t\t],\n\t\t[902,\t\t405,\t\t0\t\t],\n\t\t[903,\t\t406,\t\t0\t\t],\n\t\t[905,\t\t413,\t\t0\t\t],\n\t\t[906,\t\t414,\t\t0\t\t],\n\t\t[907,\t\t583,\t\t0\t\t],\n\t\t[909,\t\t417,\t\t0\t\t],\n\t\t[917,\t\t43,\t\t0\t\t],\n\t\t[918,\t\t424,\t\t0\t\t],\n\t\t[920,\t\t428,\t\t0\t\t],\n\t\t[921,\t\t428,\t\t0\t\t],\n\t\t[922,\t\t429,\t\t0\t\t],\n\t\t[923,\t\t432,\t\t0\t\t],\n\t\t[925,\t\t44,\t\t0\t\t],\n\t\t[931,\t\t439,\t\t0\t\t],\n\t\t[936,\t\t445,\t\t0\t\t],\n\t\t[937,\t\t447,\t\t0\t\t],\n\t\t[939,\t\t450,\t\t0\t\t],\n\t\t[940,\t\t451,\t\t0\t\t],\n\t\t[944,\t\t458,\t\t0\t\t],\n\t\t[950,\t\t462,\t\t0\t\t],\n\t\t[952,\t\t47,\t\t0\t\t],\n\t\t[958,\t\t478,\t\t0\t\t],\n\t\t[959,\t\t478,\t\t0\t\t],\n\t\t[960,\t\t479,\t\t0\t\t],\n\t\t[963,\t\t481,\t\t0\t\t],\n\t\t[965,\t\t49,\t\t0\t\t],\n\t\t[967,\t\t49,\t\t0\t\t],\n\t\t[969,\t\t486,\t\t0\t\t],\n\t\t[971,\t\t51,\t\t0\t\t],\n\t\t[978,\t\t491,\t\t0\t\t],\n\t\t[982,\t\t62,\t\t0\t\t],\n\t\t[983,\t\t62,\t\t0\t\t],\n\t\t[984,\t\t63,\t\t0\t\t],\n\t\t[985,\t\t63,\t\t0\t\t],\n\t\t[986,\t\t64,\t\t0\t\t],\n\t\t[987,\t\t65,\t\t0\t\t],\n\t\t[988,\t\t66,\t\t0\t\t],\n\t\t[993,\t\t67,\t\t0\t\t],\n\t\t[994,\t\t67,\t\t0\t\t],\n\t\t[995,\t\t509,\t\t0\t\t],\n\t\t[997,\t\t510,\t\t0\t\t],\n\t\t[999,\t\t70,\t\t0\t\t],\n\t\t[1002,\t\t71,\t\t0\t\t],\n\t\t[1007,\t\t511,\t\t0\t\t],\n\t\t[1010,\t\t79,\t\t0\t\t],\n\t\t[1011,\t\t79,\t\t0\t\t],\n\t\t[1012,\t\t81,\t\t0\t\t],\n\t\t[1014,\t\t83,\t\t0\t\t],\n\t\t[1027,\t\t218,\t\t0\t\t],\n\t\t[1028,\t\t221,\t\t0\t\t],\n\t\t[1029,\t\t268,\t\t0\t\t],\n\t\t[1030,\t\t269,\t\t0\t\t],\n\t\t[1031,\t\t498,\t\t0\t\t],\n\t\t[1032,\t\t1,\t\t0\t\t],\n\t\t[1033,\t\t3,\t\t0\t\t],\n\t\t[1034,\t\t4,\t\t0\t\t],\n\t\t[1035,\t\t6,\t\t0\t\t],\n\t\t[1036,\t\t7,\t\t0\t\t],\n\t\t[1037,\t\t8,\t\t0\t\t],\
n\t\t[1038,\t\t9,\t\t0\t\t],\n\t\t[1039,\t\t11,\t\t0\t\t],\n\t\t[1040,\t\t14,\t\t0\t\t],\n\t\t[1041,\t\t16,\t\t0\t\t],\n\t\t[1042,\t\t17,\t\t0\t\t],\n\t\t[1043,\t\t19,\t\t0\t\t],\n\t\t[1044,\t\t21,\t\t0\t\t],\n\t\t[1045,\t\t23,\t\t0\t\t],\n\t\t[1046,\t\t25,\t\t0\t\t],\n\t\t[1047,\t\t27,\t\t0\t\t],\n\t\t[1048,\t\t28,\t\t0\t\t],\n\t\t[1049,\t\t29,\t\t0\t\t],\n\t\t[1050,\t\t31,\t\t0\t\t],\n\t\t[1051,\t\t33,\t\t0\t\t],\n\t\t[1052,\t\t34,\t\t0\t\t],\n\t\t[1053,\t\t35,\t\t0\t\t],\n\t\t[1054,\t\t36,\t\t0\t\t],\n\t\t[1055,\t\t38,\t\t0\t\t],\n\t\t[1056,\t\t39,\t\t0\t\t],\n\t\t[1057,\t\t40,\t\t0\t\t],\n\t\t[1058,\t\t41,\t\t0\t\t],\n\t\t[1059,\t\t43,\t\t0\t\t],\n\t\t[1060,\t\t44,\t\t0\t\t],\n\t\t[1061,\t\t45,\t\t0\t\t],\n\t\t[1062,\t\t47,\t\t0\t\t],\n\t\t[1063,\t\t48,\t\t0\t\t],\n\t\t[1064,\t\t49,\t\t0\t\t],\n\t\t[1065,\t\t50,\t\t0\t\t],\n\t\t[1066,\t\t51,\t\t0\t\t],\n\t\t[1067,\t\t53,\t\t0\t\t],\n\t\t[1068,\t\t54,\t\t0\t\t],\n\t\t[1069,\t\t55,\t\t0\t\t],\n\t\t[1070,\t\t57,\t\t0\t\t],\n\t\t[1071,\t\t58,\t\t0\t\t],\n\t\t[1072,\t\t59,\t\t0\t\t],\n\t\t[1073,\t\t60,\t\t0\t\t],\n\t\t[1074,\t\t62,\t\t0\t\t],\n\t\t[1075,\t\t63,\t\t0\t\t],\n\t\t[1076,\t\t64,\t\t0\t\t],\n\t\t[1077,\t\t65,\t\t0\t\t],\n\t\t[1078,\t\t66,\t\t0\t\t],\n\t\t[1079,\t\t67,\t\t0\t\t],\n\t\t[1080,\t\t70,\t\t0\t\t],\n\t\t[1081,\t\t71,\t\t0\t\t],\n\t\t[1082,\t\t72,\t\t0\t\t],\n\t\t[1083,\t\t73,\t\t0\t\t],\n\t\t[1084,\t\t75,\t\t0\t\t],\n\t\t[1085,\t\t76,\t\t0\t\t],\n\t\t[1086,\t\t77,\t\t0\t\t],\n\t\t[1087,\t\t79,\t\t0\t\t],\n\t\t[1088,\t\t80,\t\t0\t\t],\n\t\t[1089,\t\t81,\t\t0\t\t],\n\t\t[1090,\t\t82,\t\t0\t\t],\n\t\t[1091,\t\t83,\t\t0\t\t],\n\t\t[1092,\t\t84,\t\t0\t\t],\n\t\t[1093,\t\t85,\t\t0\t\t],\n\t\t[1096,\t\t90,\t\t0\t\t],\n\t\t[1097,\t\t91,\t\t0\t\t],\n\t\t[1098,\t\t92,\t\t0\t\t],\n\t\t[1099,\t\t93,\t\t0\t\t],\n\t\t[1100,\t\t97,\t\t0\t\t],\n\t\t[1101,\t\t98,\t\t0\t\t],\n\t\t[1102,\t\t101,\t\t0\t\t],\n\t\t[1103,\t\t102,\t\t0\t\t],\n\t\t[1105,\t\t108,\t\t0\t\t],\n\t\t[1106,\t\t109,\t\t0\t\t],\n\t\t[1107,\t\t110,\t\t0\t\t],\n\t\t[1108,\t\t111,\t\t0\t\t],\n\t\t[1109,\t\t112,\t\t0\t\t],\n\t\t[1110,\t\t113,\t\t0\t\t],\n\t\t[1111,\t\t114,\t\t0\t\t],\n\t\t[1113,\t\t116,\t\t0\t\t],\n\t\t[1114,\t\t118,\t\t0\t\t],\n\t\t[1115,\t\t119,\t\t0\t\t],\n\t\t[1116,\t\t121,\t\t0\t\t],\n\t\t[1117,\t\t122,\t\t0\t\t],\n\t\t[1118,\t\t126,\t\t0\t\t],\n\t\t[1119,\t\t127,\t\t0\t\t],\n\t\t[1120,\t\t130,\t\t0\t\t],\n\t\t[1121,\t\t131,\t\t0\t\t],\n\t\t[1122,\t\t132,\t\t0\t\t],\n\t\t[1123,\t\t133,\t\t0\t\t],\n\t\t[1124,\t\t134,\t\t0\t\t],\n\t\t[1125,\t\t135,\t\t0\t\t],\n\t\t[1126,\t\t136,\t\t0\t\t],\n\t\t[1127,\t\t137,\t\t0\t\t],\n\t\t[1128,\t\t139,\t\t0\t\t],\n\t\t[1129,\t\t140,\t\t0\t\t],\n\t\t[1130,\t\t141,\t\t0\t\t],\n\t\t[1131,\t\t142,\t\t0\t\t],\n\t\t[1133,\t\t145,\t\t0\t\t],\n\t\t[1134,\t\t146,\t\t0\t\t],\n\t\t[1135,\t\t147,\t\t0\t\t],\n\t\t[1136,\t\t148,\t\t0\t\t],\n\t\t[1137,\t\t149,\t\t0\t\t],\n\t\t[1138,\t\t150,\t\t0\t\t],\n\t\t[1139,\t\t151,\t\t0\t\t],\n\t\t[1140,\t\t152,\t\t0\t\t],\n\t\t[1142,\t\t154,\t\t0\t\t],\n\t\t[1143,\t\t155,\t\t0\t\t],\n\t\t[1144,\t\t158,\t\t0\t\t],\n\t\t[1145,\t\t161,\t\t0\t\t],\n\t\t[1146,\t\t162,\t\t0\t\t],\n\t\t[1147,\t\t163,\t\t0\t\t],\n\t\t[1148,\t\t164,\t\t0\t\t],\n\t\t[1149,\t\t166,\t\t0\t\t],\n\t\t[1150,\t\t167,\t\t0\t\t],\n\t\t[1151,\t\t168,\t\t0\t\t],\n\t\t[1152,\t\t169,\t\t0\t\t],\n\t\t[1155,\t\t172,\t\t0\t\t],\n\t\t[1157,\t\t174,\t\t0\t\t],\n\t\t[1160,\t\t177,\t\t0\t\t],\n\t\t[1161,\t\t178,\t\t0\t\t],\n\t\t[1162,\t\t179,\t\t0\t\t],\n\t\t[1163,\t\t180,\t\t0\t\t],\n\t\t[1164,\t\t181,\t\t0\t\t],\n\t\t[1165,\t\t182,\t\
t0\t\t],\n\t\t[1166,\t\t183,\t\t0\t\t],\n\t\t[1168,\t\t186,\t\t0\t\t],\n\t\t[1169,\t\t187,\t\t0\t\t],\n\t\t[1171,\t\t189,\t\t0\t\t],\n\t\t[1172,\t\t190,\t\t0\t\t],\n\t\t[1173,\t\t192,\t\t0\t\t],\n\t\t[1175,\t\t194,\t\t0\t\t],\n\t\t[1176,\t\t196,\t\t0\t\t],\n\t\t[1177,\t\t197,\t\t0\t\t],\n\t\t[1178,\t\t198,\t\t0\t\t],\n\t\t[1179,\t\t199,\t\t0\t\t],\n\t\t[1181,\t\t202,\t\t0\t\t],\n\t\t[1182,\t\t203,\t\t0\t\t],\n\t\t[1183,\t\t204,\t\t0\t\t],\n\t\t[1184,\t\t205,\t\t0\t\t],\n\t\t[1186,\t\t207,\t\t0\t\t],\n\t\t[1187,\t\t208,\t\t0\t\t],\n\t\t[1188,\t\t209,\t\t0\t\t],\n\t\t[1189,\t\t210,\t\t0\t\t],\n\t\t[1190,\t\t211,\t\t0\t\t],\n\t\t[1191,\t\t212,\t\t0\t\t],\n\t\t[1192,\t\t213,\t\t0\t\t],\n\t\t[1193,\t\t214,\t\t0\t\t],\n\t\t[1194,\t\t215,\t\t0\t\t],\n\t\t[1195,\t\t216,\t\t0\t\t],\n\t\t[1196,\t\t217,\t\t0\t\t],\n\t\t[1197,\t\t218,\t\t0\t\t],\n\t\t[1198,\t\t219,\t\t0\t\t],\n\t\t[1199,\t\t221,\t\t0\t\t],\n\t\t[1200,\t\t222,\t\t0\t\t],\n\t\t[1201,\t\t223,\t\t0\t\t],\n\t\t[1202,\t\t224,\t\t0\t\t],\n\t\t[1203,\t\t225,\t\t0\t\t],\n\t\t[1204,\t\t226,\t\t0\t\t],\n\t\t[1205,\t\t227,\t\t0\t\t],\n\t\t[1206,\t\t228,\t\t0\t\t],\n\t\t[1207,\t\t229,\t\t0\t\t],\n\t\t[1208,\t\t230,\t\t0\t\t],\n\t\t[1209,\t\t234,\t\t0\t\t],\n\t\t[1210,\t\t235,\t\t0\t\t],\n\t\t[1211,\t\t237,\t\t0\t\t],\n\t\t[1212,\t\t238,\t\t0\t\t],\n\t\t[1213,\t\t239,\t\t0\t\t],\n\t\t[1214,\t\t240,\t\t0\t\t],\n\t\t[1215,\t\t241,\t\t0\t\t],\n\t\t[1216,\t\t242,\t\t0\t\t],\n\t\t[1217,\t\t243,\t\t0\t\t],\n\t\t[1218,\t\t244,\t\t0\t\t],\n\t\t[1219,\t\t247,\t\t0\t\t],\n\t\t[1220,\t\t251,\t\t0\t\t],\n\t\t[1221,\t\t252,\t\t0\t\t],\n\t\t[1222,\t\t253,\t\t0\t\t],\n\t\t[1223,\t\t254,\t\t0\t\t],\n\t\t[1224,\t\t255,\t\t0\t\t],\n\t\t[1225,\t\t256,\t\t0\t\t],\n\t\t[1226,\t\t257,\t\t0\t\t],\n\t\t[1227,\t\t258,\t\t0\t\t],\n\t\t[1228,\t\t260,\t\t0\t\t],\n\t\t[1229,\t\t263,\t\t0\t\t],\n\t\t[1230,\t\t264,\t\t0\t\t],\n\t\t[1231,\t\t266,\t\t0\t\t],\n\t\t[1232,\t\t267,\t\t0\t\t],\n\t\t[1233,\t\t268,\t\t0\t\t],\n\t\t[1235,\t\t271,\t\t0\t\t],\n\t\t[1236,\t\t272,\t\t0\t\t],\n\t\t[1237,\t\t273,\t\t0\t\t],\n\t\t[1238,\t\t274,\t\t0\t\t],\n\t\t[1239,\t\t275,\t\t0\t\t],\n\t\t[1240,\t\t276,\t\t0\t\t],\n\t\t[1241,\t\t278,\t\t0\t\t],\n\t\t[1242,\t\t281,\t\t0\t\t],\n\t\t[1243,\t\t282,\t\t0\t\t],\n\t\t[1244,\t\t283,\t\t0\t\t],\n\t\t[1245,\t\t284,\t\t0\t\t],\n\t\t[1246,\t\t285,\t\t0\t\t],\n\t\t[1247,\t\t286,\t\t0\t\t],\n\t\t[1248,\t\t287,\t\t0\t\t],\n\t\t[1249,\t\t288,\t\t0\t\t],\n\t\t[1250,\t\t289,\t\t0\t\t],\n\t\t[1251,\t\t291,\t\t0\t\t],\n\t\t[1252,\t\t292,\t\t0\t\t],\n\t\t[1253,\t\t293,\t\t0\t\t],\n\t\t[1254,\t\t294,\t\t0\t\t],\n\t\t[1255,\t\t295,\t\t0\t\t],\n\t\t[1256,\t\t296,\t\t0\t\t],\n\t\t[1257,\t\t297,\t\t0\t\t],\n\t\t[1258,\t\t298,\t\t0\t\t],\n\t\t[1259,\t\t299,\t\t0\t\t],\n\t\t[1260,\t\t300,\t\t0\t\t],\n\t\t[1261,\t\t302,\t\t0\t\t],\n\t\t[1262,\t\t303,\t\t0\t\t],\n\t\t[1263,\t\t304,\t\t0\t\t],\n\t\t[1264,\t\t307,\t\t0\t\t],\n\t\t[1265,\t\t308,\t\t0\t\t],\n\t\t[1266,\t\t309,\t\t0\t\t],\n\t\t[1267,\t\t311,\t\t0\t\t],\n\t\t[1268,\t\t312,\t\t0\t\t],\n\t\t[1269,\t\t314,\t\t0\t\t],\n\t\t[1270,\t\t316,\t\t0\t\t],\n\t\t[1271,\t\t317,\t\t0\t\t],\n\t\t[1272,\t\t318,\t\t0\t\t],\n\t\t[1273,\t\t319,\t\t0\t\t],\n\t\t[1274,\t\t321,\t\t0\t\t],\n\t\t[1275,\t\t322,\t\t0\t\t],\n\t\t[1276,\t\t323,\t\t0\t\t],\n\t\t[1277,\t\t324,\t\t0\t\t],\n\t\t[1278,\t\t325,\t\t0\t\t],\n\t\t[1279,\t\t326,\t\t0\t\t],\n\t\t[1280,\t\t327,\t\t0\t\t],\n\t\t[1281,\t\t328,\t\t0\t\t],\n\t\t[1282,\t\t329,\t\t0\t\t],\n\t\t[1283,\t\t331,\t\t0\t\t],\n\t\t[1284,\t\t333,\t\t0\t\t],\n\t\t[1285,\t\t335,\t\t0\t\t],\n\t\t[1286,\
t\t337,\t\t0\t\t],\n\t\t[1287,\t\t338,\t\t0\t\t],\n\t\t[1288,\t\t339,\t\t0\t\t],\n\t\t[1289,\t\t340,\t\t0\t\t],\n\t\t[1290,\t\t341,\t\t0\t\t],\n\t\t[1291,\t\t342,\t\t0\t\t],\n\t\t[1292,\t\t343,\t\t0\t\t],\n\t\t[1293,\t\t344,\t\t0\t\t],\n\t\t[1294,\t\t345,\t\t0\t\t],\n\t\t[1295,\t\t346,\t\t0\t\t],\n\t\t[1296,\t\t347,\t\t0\t\t],\n\t\t[1297,\t\t348,\t\t0\t\t],\n\t\t[1298,\t\t350,\t\t0\t\t],\n\t\t[1299,\t\t352,\t\t0\t\t],\n\t\t[1300,\t\t353,\t\t0\t\t],\n\t\t[1301,\t\t354,\t\t0\t\t],\n\t\t[1302,\t\t355,\t\t0\t\t],\n\t\t[1303,\t\t356,\t\t0\t\t],\n\t\t[1304,\t\t357,\t\t0\t\t],\n\t\t[1305,\t\t359,\t\t0\t\t],\n\t\t[1306,\t\t361,\t\t0\t\t],\n\t\t[1307,\t\t362,\t\t0\t\t],\n\t\t[1308,\t\t363,\t\t0\t\t],\n\t\t[1309,\t\t364,\t\t0\t\t],\n\t\t[1310,\t\t365,\t\t0\t\t],\n\t\t[1311,\t\t366,\t\t0\t\t],\n\t\t[1312,\t\t367,\t\t0\t\t],\n\t\t[1313,\t\t368,\t\t0\t\t],\n\t\t[1314,\t\t369,\t\t0\t\t],\n\t\t[1315,\t\t370,\t\t0\t\t],\n\t\t[1316,\t\t371,\t\t0\t\t],\n\t\t[1317,\t\t372,\t\t0\t\t],\n\t\t[1318,\t\t373,\t\t0\t\t],\n\t\t[1319,\t\t374,\t\t0\t\t],\n\t\t[1320,\t\t375,\t\t0\t\t],\n\t\t[1321,\t\t376,\t\t0\t\t],\n\t\t[1322,\t\t377,\t\t0\t\t],\n\t\t[1323,\t\t378,\t\t0\t\t],\n\t\t[1324,\t\t379,\t\t0\t\t],\n\t\t[1325,\t\t381,\t\t0\t\t],\n\t\t[1326,\t\t384,\t\t0\t\t],\n\t\t[1327,\t\t385,\t\t0\t\t],\n\t\t[1328,\t\t386,\t\t0\t\t],\n\t\t[1329,\t\t387,\t\t0\t\t],\n\t\t[1330,\t\t388,\t\t0\t\t],\n\t\t[1332,\t\t391,\t\t0\t\t],\n\t\t[1333,\t\t392,\t\t0\t\t],\n\t\t[1334,\t\t393,\t\t0\t\t],\n\t\t[1335,\t\t394,\t\t0\t\t],\n\t\t[1336,\t\t395,\t\t0\t\t],\n\t\t[1337,\t\t396,\t\t0\t\t],\n\t\t[1338,\t\t397,\t\t0\t\t],\n\t\t[1339,\t\t398,\t\t0\t\t],\n\t\t[1340,\t\t399,\t\t0\t\t],\n\t\t[1341,\t\t400,\t\t0\t\t],\n\t\t[1342,\t\t403,\t\t0\t\t],\n\t\t[1343,\t\t404,\t\t0\t\t],\n\t\t[1344,\t\t405,\t\t0\t\t],\n\t\t[1345,\t\t406,\t\t0\t\t],\n\t\t[1346,\t\t407,\t\t0\t\t],\n\t\t[1347,\t\t408,\t\t0\t\t],\n\t\t[1348,\t\t410,\t\t0\t\t],\n\t\t[1349,\t\t411,\t\t0\t\t],\n\t\t[1350,\t\t412,\t\t0\t\t],\n\t\t[1351,\t\t413,\t\t0\t\t],\n\t\t[1352,\t\t414,\t\t0\t\t],\n\t\t[1355,\t\t418,\t\t0\t\t],\n\t\t[1356,\t\t419,\t\t0\t\t],\n\t\t[1357,\t\t420,\t\t0\t\t],\n\t\t[1358,\t\t421,\t\t0\t\t],\n\t\t[1359,\t\t422,\t\t0\t\t],\n\t\t[1363,\t\t426,\t\t0\t\t],\n\t\t[1364,\t\t427,\t\t0\t\t],\n\t\t[1365,\t\t428,\t\t0\t\t],\n\t\t[1366,\t\t429,\t\t0\t\t],\n\t\t[1367,\t\t430,\t\t0\t\t],\n\t\t[1368,\t\t431,\t\t0\t\t],\n\t\t[1369,\t\t432,\t\t0\t\t],\n\t\t[1370,\t\t433,\t\t0\t\t],\n\t\t[1371,\t\t434,\t\t0\t\t],\n\t\t[1372,\t\t435,\t\t0\t\t],\n\t\t[1373,\t\t436,\t\t0\t\t],\n\t\t[1374,\t\t437,\t\t0\t\t],\n\t\t[1375,\t\t438,\t\t0\t\t],\n\t\t[1376,\t\t439,\t\t0\t\t],\n\t\t[1377,\t\t440,\t\t0\t\t],\n\t\t[1378,\t\t441,\t\t0\t\t],\n\t\t[1379,\t\t442,\t\t0\t\t],\n\t\t[1381,\t\t445,\t\t0\t\t],\n\t\t[1382,\t\t446,\t\t0\t\t],\n\t\t[1383,\t\t447,\t\t0\t\t],\n\t\t[1387,\t\t451,\t\t0\t\t],\n\t\t[1390,\t\t455,\t\t0\t\t],\n\t\t[1391,\t\t456,\t\t0\t\t],\n\t\t[1393,\t\t458,\t\t0\t\t],\n\t\t[1394,\t\t459,\t\t0\t\t],\n\t\t[1395,\t\t460,\t\t0\t\t],\n\t\t[1396,\t\t461,\t\t0\t\t],\n\t\t[1397,\t\t462,\t\t0\t\t],\n\t\t[1398,\t\t463,\t\t0\t\t],\n\t\t[1399,\t\t464,\t\t0\t\t],\n\t\t[1400,\t\t465,\t\t0\t\t],\n\t\t[1401,\t\t466,\t\t0\t\t],\n\t\t[1402,\t\t467,\t\t0\t\t],\n\t\t[1403,\t\t468,\t\t0\t\t],\n\t\t[1404,\t\t469,\t\t0\t\t],\n\t\t[1405,\t\t470,\t\t0\t\t],\n\t\t[1406,\t\t471,\t\t0\t\t],\n\t\t[1407,\t\t472,\t\t0\t\t],\n\t\t[1408,\t\t473,\t\t0\t\t],\n\t\t[1409,\t\t474,\t\t0\t\t],\n\t\t[1410,\t\t475,\t\t0\t\t],\n\t\t[1411,\t\t476,\t\t0\t\t],\n\t\t[1412,\t\t477,\t\t0\t\t],\n\t\t[1413,\t\t478,\t\t0\t\t],\n\
t\t[1414,\t\t479,\t\t0\t\t],\n\t\t[1415,\t\t480,\t\t0\t\t],\n\t\t[1416,\t\t481,\t\t0\t\t],\n\t\t[1417,\t\t482,\t\t0\t\t],\n\t\t[1418,\t\t483,\t\t0\t\t],\n\t\t[1419,\t\t484,\t\t0\t\t],\n\t\t[1420,\t\t485,\t\t0\t\t],\n\t\t[1421,\t\t486,\t\t0\t\t],\n\t\t[1422,\t\t487,\t\t0\t\t],\n\t\t[1423,\t\t488,\t\t0\t\t],\n\t\t[1424,\t\t489,\t\t0\t\t],\n\t\t[1425,\t\t490,\t\t0\t\t],\n\t\t[1426,\t\t491,\t\t0\t\t],\n\t\t[1427,\t\t492,\t\t0\t\t],\n\t\t[1428,\t\t493,\t\t0\t\t],\n\t\t[1429,\t\t494,\t\t0\t\t],\n\t\t[1430,\t\t495,\t\t0\t\t],\n\t\t[1431,\t\t496,\t\t0\t\t],\n\t\t[1432,\t\t497,\t\t0\t\t],\n\t\t[1433,\t\t498,\t\t0\t\t],\n\t\t[1434,\t\t499,\t\t0\t\t],\n\t\t[1435,\t\t500,\t\t0\t\t],\n\t\t[1436,\t\t501,\t\t0\t\t],\n\t\t[1437,\t\t502,\t\t0\t\t],\n\t\t[1438,\t\t503,\t\t0\t\t],\n\t\t[1439,\t\t504,\t\t0\t\t],\n\t\t[1440,\t\t505,\t\t0\t\t],\n\t\t[1441,\t\t506,\t\t0\t\t],\n\t\t[1442,\t\t507,\t\t0\t\t],\n\t\t[1443,\t\t508,\t\t0\t\t],\n\t\t[1444,\t\t509,\t\t0\t\t],\n\t\t[1445,\t\t510,\t\t0\t\t],\n\t\t[1446,\t\t511,\t\t0\t\t],\n\t\t[1447,\t\t512,\t\t0\t\t],\n\t\t[1448,\t\t513,\t\t0\t\t],\n\t\t[1449,\t\t514,\t\t0\t\t],\n\t\t[1450,\t\t515,\t\t0\t\t],\n\t\t[1451,\t\t516,\t\t0\t\t],\n\t\t[1452,\t\t517,\t\t0\t\t],\n\t\t[1453,\t\t518,\t\t0\t\t],\n\t\t[1454,\t\t519,\t\t0\t\t],\n\t\t[1455,\t\t520,\t\t0\t\t],\n\t\t[1456,\t\t521,\t\t0\t\t],\n\t\t[1459,\t\t524,\t\t0\t\t],\n\t\t[1460,\t\t525,\t\t0\t\t],\n\t\t[1461,\t\t526,\t\t0\t\t],\n\t\t[1463,\t\t528,\t\t0\t\t],\n\t\t[1464,\t\t529,\t\t0\t\t],\n\t\t[1466,\t\t531,\t\t0\t\t],\n\t\t[1467,\t\t532,\t\t0\t\t],\n\t\t[1468,\t\t533,\t\t0\t\t],\n\t\t[1469,\t\t534,\t\t0\t\t],\n\t\t[1470,\t\t535,\t\t0\t\t],\n\t\t[1471,\t\t536,\t\t0\t\t],\n\t\t[1472,\t\t537,\t\t0\t\t],\n\t\t[1473,\t\t538,\t\t0\t\t],\n\t\t[1474,\t\t539,\t\t0\t\t],\n\t\t[1475,\t\t540,\t\t0\t\t],\n\t\t[1476,\t\t541,\t\t0\t\t],\n\t\t[1477,\t\t542,\t\t0\t\t],\n\t\t[1479,\t\t544,\t\t0\t\t],\n\t\t[1480,\t\t545,\t\t0\t\t],\n\t\t[1481,\t\t546,\t\t0\t\t],\n\t\t[1482,\t\t547,\t\t0\t\t],\n\t\t[1483,\t\t548,\t\t0\t\t],\n\t\t[1484,\t\t549,\t\t0\t\t],\n\t\t[1485,\t\t550,\t\t0\t\t],\n\t\t[1486,\t\t551,\t\t0\t\t],\n\t\t[1487,\t\t552,\t\t0\t\t],\n\t\t[1488,\t\t554,\t\t0\t\t],\n\t\t[1489,\t\t555,\t\t0\t\t],\n\t\t[1490,\t\t556,\t\t0\t\t],\n\t\t[1491,\t\t557,\t\t0\t\t],\n\t\t[1492,\t\t558,\t\t0\t\t],\n\t\t[1493,\t\t559,\t\t0\t\t],\n\t\t[1494,\t\t560,\t\t0\t\t],\n\t\t[1495,\t\t561,\t\t0\t\t],\n\t\t[1496,\t\t562,\t\t0\t\t],\n\t\t[1497,\t\t563,\t\t0\t\t],\n\t\t[1498,\t\t564,\t\t0\t\t],\n\t\t[1499,\t\t565,\t\t0\t\t],\n\t\t[1500,\t\t566,\t\t0\t\t],\n\t\t[1501,\t\t567,\t\t0\t\t],\n\t\t[1502,\t\t568,\t\t0\t\t],\n\t\t[1503,\t\t569,\t\t0\t\t],\n\t\t[1504,\t\t570,\t\t0\t\t],\n\t\t[1505,\t\t571,\t\t0\t\t],\n\t\t[1506,\t\t572,\t\t0\t\t],\n\t\t[1507,\t\t573,\t\t0\t\t],\n\t\t[1508,\t\t574,\t\t0\t\t],\n\t\t[1510,\t\t576,\t\t0\t\t],\n\t\t[1511,\t\t577,\t\t0\t\t],\n\t\t[1512,\t\t578,\t\t0\t\t],\n\t\t[1513,\t\t579,\t\t0\t\t],\n\t\t[1514,\t\t580,\t\t0\t\t],\n\t\t[1516,\t\t582,\t\t0\t\t],\n\t\t[1517,\t\t583,\t\t0\t\t],\n\t\t[1518,\t\t584,\t\t0\t\t],\n\t\t[1519,\t\t585,\t\t0\t\t],\n\t\t[1,\t\t490,\t\t0\t\t],\n\t\t[3,\t\t4,\t\t1\t\t],\n\t\t[491,\t\t6,\t\t0\t\t],\n\t\t[7,\t\t5,\t\t0\t\t],\n\t\t[8,\t\t9,\t\t0\t\t],\n\t\t[492,\t\t11,\t\t0\t\t],\n\t\t[11,\t\t493,\t\t0\t\t],\n\t\t[492,\t\t493,\t\t1\t\t],\n\t\t[494,\t\t14,\t\t0\t\t],\n\t\t[13,\t\t15,\t\t0\t\t],\n\t\t[16,\t\t5,\t\t0\t\t],\n\t\t[17,\t\t18,\t\t1\t\t],\n\t\t[17,\t\t12,\t\t0\t\t],\n\t\t[14,\t\t495,\t\t0\t\t],\n\t\t[494,\t\t19,\t\t0\t\t],\n\t\t[20,\t\t21,\t\t0\t\t],\n\t\t[20,\t\t22,\t\t1\t\t],\n\t\t[497,\t
\t23,\t\t0\t\t],\n\t\t[23,\t\t499,\t\t1\t\t],\n\t\t[25,\t\t26,\t\t0\t\t],\n\t\t[25,\t\t22,\t\t0\t\t],\n\t\t[23,\t\t27,\t\t0\t\t],\n\t\t[28,\t\t23,\t\t0\t\t],\n\t\t[8,\t\t21,\t\t0\t\t],\n\t\t[9,\t\t29,\t\t0\t\t],\n\t\t[30,\t\t25,\t\t1\t\t],\n\t\t[31,\t\t32,\t\t1\t\t],\n\t\t[32,\t\t33,\t\t1\t\t],\n\t\t[34,\t\t35,\t\t0\t\t],\n\t\t[35,\t\t36,\t\t0\t\t],\n\t\t[490,\t\t6,\t\t1\t\t],\n\t\t[37,\t\t10,\t\t1\t\t],\n\t\t[10,\t\t38,\t\t0\t\t],\n\t\t[37,\t\t38,\t\t1\t\t],\n\t\t[39,\t\t40,\t\t1\t\t],\n\t\t[39,\t\t41,\t\t1\t\t],\n\t\t[42,\t\t41,\t\t1\t\t],\n\t\t[18,\t\t42,\t\t1\t\t],\n\t\t[492,\t\t43,\t\t1\t\t],\n\t\t[44,\t\t45,\t\t0\t\t],\n\t\t[44,\t\t505,\t\t0\t\t],\n\t\t[46,\t\t12,\t\t0\t\t],\n\t\t[47,\t\t48,\t\t0\t\t],\n\t\t[49,\t\t50,\t\t0\t\t],\n\t\t[31,\t\t33,\t\t1\t\t],\n\t\t[31,\t\t51,\t\t0\t\t],\n\t\t[52,\t\t53,\t\t1\t\t],\n\t\t[52,\t\t54,\t\t0\t\t],\n\t\t[506,\t\t55,\t\t0\t\t],\n\t\t[506,\t\t507,\t\t1\t\t],\n\t\t[57,\t\t506,\t\t0\t\t],\n\t\t[57,\t\t58,\t\t0\t\t],\n\t\t[58,\t\t506,\t\t0\t\t],\n\t\t[59,\t\t60,\t\t1\t\t],\n\t\t[508,\t\t62,\t\t0\t\t],\n\t\t[30,\t\t61,\t\t1\t\t],\n\t\t[63,\t\t506,\t\t0\t\t],\n\t\t[13,\t\t64,\t\t0\t\t],\n\t\t[65,\t\t66,\t\t1\t\t],\n\t\t[59,\t\t67,\t\t0\t\t],\n\t\t[61,\t\t67,\t\t0\t\t],\n\t\t[68,\t\t69,\t\t1\t\t],\n\t\t[70,\t\t69,\t\t1\t\t],\n\t\t[71,\t\t72,\t\t1\t\t],\n\t\t[73,\t\t74,\t\t1\t\t],\n\t\t[37,\t\t75,\t\t1\t\t],\n\t\t[72,\t\t75,\t\t0\t\t],\n\t\t[37,\t\t72,\t\t1\t\t],\n\t\t[76,\t\t77,\t\t1\t\t],\n\t\t[77,\t\t51,\t\t0\t\t],\n\t\t[73,\t\t72,\t\t1\t\t],\n\t\t[18,\t\t40,\t\t1\t\t],\n\t\t[492,\t\t45,\t\t1\t\t],\n\t\t[10,\t\t74,\t\t1\t\t],\n\t\t[45,\t\t511,\t\t1\t\t],\n\t\t[78,\t\t32,\t\t1\t\t],\n\t\t[79,\t\t80,\t\t0\t\t],\n\t\t[81,\t\t79,\t\t1\t\t],\n\t\t[34,\t\t82,\t\t0\t\t],\n\t\t[83,\t\t84,\t\t0\t\t],\n\t\t[83,\t\t499,\t\t0\t\t],\n\t\t[85,\t\t86,\t\t0\t\t],\n\t\t[87,\t\t86,\t\t1\t\t],\n\t\t[88,\t\t89,\t\t0\t\t],\n\t\t[90,\t\t86,\t\t1\t\t],\n\t\t[91,\t\t86,\t\t0\t\t],\n\t\t[86,\t\t92,\t\t0\t\t],\n\t\t[86,\t\t93,\t\t0\t\t],\n\t\t[94,\t\t86,\t\t1\t\t],\n\t\t[86,\t\t95,\t\t1\t\t],\n\t\t[513,\t\t517,\t\t0\t\t],\n\t\t[97,\t\t66,\t\t1\t\t],\n\t\t[42,\t\t98,\t\t0\t\t],\n\t\t[99,\t\t100,\t\t1\t\t],\n\t\t[42,\t\t101,\t\t0\t\t],\n\t\t[102,\t\t42,\t\t1\t\t],\n\t\t[103,\t\t87,\t\t0\t\t],\n\t\t[104,\t\t103,\t\t0\t\t],\n\t\t[105,\t\t87,\t\t0\t\t],\n\t\t[106,\t\t107,\t\t0\t\t],\n\t\t[108,\t\t107,\t\t0\t\t],\n\t\t[109,\t\t106,\t\t0\t\t],\n\t\t[110,\t\t111,\t\t1\t\t],\n\t\t[87,\t\t112,\t\t0\t\t],\n\t\t[113,\t\t87,\t\t0\t\t],\n\t\t[87,\t\t85,\t\t1\t\t],\n\t\t[110,\t\t114,\t\t1\t\t],\n\t\t[115,\t\t116,\t\t0\t\t],\n\t\t[117,\t\t118,\t\t0\t\t],\n\t\t[117,\t\t119,\t\t0\t\t],\n\t\t[117,\t\t120,\t\t1\t\t],\n\t\t[121,\t\t122,\t\t0\t\t],\n\t\t[123,\t\t124,\t\t0\t\t],\n\t\t[125,\t\t126,\t\t0\t\t],\n\t\t[127,\t\t119,\t\t0\t\t],\n\t\t[118,\t\t128,\t\t0\t\t],\n\t\t[121,\t\t119,\t\t0\t\t],\n\t\t[530,\t\t527,\t\t0\t\t],\n\t\t[125,\t\t130,\t\t0\t\t],\n\t\t[125,\t\t123,\t\t0\t\t],\n\t\t[131,\t\t132,\t\t0\t\t],\n\t\t[133,\t\t123,\t\t0\t\t],\n\t\t[524,\t\t134,\t\t0\t\t],\n\t\t[135,\t\t136,\t\t0\t\t],\n\t\t[123,\t\t131,\t\t0\t\t],\n\t\t[117,\t\t128,\t\t1\t\t],\n\t\t[137,\t\t521,\t\t0\t\t],\n\t\t[531,\t\t514,\t\t0\t\t],\n\t\t[139,\t\t521,\t\t0\t\t],\n\t\t[140,\t\t514,\t\t0\t\t],\n\t\t[522,\t\t141,\t\t0\t\t],\n\t\t[142,\t\t523,\t\t0\t\t],\n\t\t[530,\t\t526,\t\t0\t\t],\n\t\t[140,\t\t532,\t\t0\t\t],\n\t\t[142,\t\t144,\t\t0\t\t],\n\t\t[140,\t\t522,\t\t0\t\t],\n\t\t[145,\t\t146,\t\t0\t\t],\n\t\t[147,\t\t523,\t\t0\t\t],\n\t\t[144,\t\t523,\t\t0\t\t],\n\t\t[139,\t\t523,\t\t0\t\t],\n\t\t[140,\t\t141,\t\t0\
t\t],\n\t\t[528,\t\t526,\t\t0\t\t],\n\t\t[528,\t\t148,\t\t0\t\t],\n\t\t[149,\t\t150,\t\t0\t\t],\n\t\t[145,\t\t528,\t\t0\t\t],\n\t\t[530,\t\t151,\t\t0\t\t],\n\t\t[524,\t\t152,\t\t0\t\t],\n\t\t[149,\t\t525,\t\t1\t\t],\n\t\t[139,\t\t514,\t\t0\t\t],\n\t\t[126,\t\t120,\t\t1\t\t],\n\t\t[530,\t\t153,\t\t0\t\t],\n\t\t[528,\t\t147,\t\t1\t\t],\n\t\t[528,\t\t154,\t\t0\t\t],\n\t\t[130,\t\t120,\t\t1\t\t],\n\t\t[528,\t\t155,\t\t1\t\t],\n\t\t[524,\t\t533,\t\t0\t\t],\n\t\t[524,\t\t149,\t\t0\t\t],\n\t\t[154,\t\t150,\t\t0\t\t],\n\t\t[157,\t\t110,\t\t1\t\t],\n\t\t[119,\t\t158,\t\t0\t\t],\n\t\t[159,\t\t60,\t\t0\t\t],\n\t\t[536,\t\t161,\t\t0\t\t],\n\t\t[115,\t\t151,\t\t0\t\t],\n\t\t[162,\t\t134,\t\t0\t\t],\n\t\t[115,\t\t526,\t\t0\t\t],\n\t\t[138,\t\t87,\t\t0\t\t],\n\t\t[123,\t\t163,\t\t0\t\t],\n\t\t[112,\t\t164,\t\t0\t\t],\n\t\t[112,\t\t165,\t\t0\t\t],\n\t\t[166,\t\t165,\t\t0\t\t],\n\t\t[167,\t\t537,\t\t0\t\t],\n\t\t[168,\t\t104,\t\t0\t\t],\n\t\t[531,\t\t520,\t\t0\t\t],\n\t\t[139,\t\t520,\t\t0\t\t],\n\t\t[520,\t\t169,\t\t0\t\t],\n\t\t[168,\t\t105,\t\t0\t\t],\n\t\t[520,\t\t170,\t\t0\t\t],\n\t\t[171,\t\t89,\t\t0\t\t],\n\t\t[521,\t\t172,\t\t0\t\t],\n\t\t[123,\t\t173,\t\t0\t\t],\n\t\t[521,\t\t174,\t\t0\t\t],\n\t\t[37,\t\t39,\t\t0\t\t],\n\t\t[530,\t\t175,\t\t0\t\t],\n\t\t[530,\t\t176,\t\t0\t\t],\n\t\t[88,\t\t530,\t\t0\t\t],\n\t\t[177,\t\t496,\t\t1\t\t],\n\t\t[178,\t\t525,\t\t0\t\t],\n\t\t[179,\t\t493,\t\t1\t\t],\n\t\t[180,\t\t181,\t\t1\t\t],\n\t\t[182,\t\t180,\t\t0\t\t],\n\t\t[179,\t\t181,\t\t0\t\t],\n\t\t[180,\t\t493,\t\t1\t\t],\n\t\t[183,\t\t30,\t\t0\t\t],\n\t\t[183,\t\t21,\t\t0\t\t],\n\t\t[538,\t\t185,\t\t0\t\t],\n\t\t[538,\t\t89,\t\t0\t\t],\n\t\t[184,\t\t186,\t\t0\t\t],\n\t\t[184,\t\t187,\t\t0\t\t],\n\t\t[520,\t\t172,\t\t0\t\t],\n\t\t[89,\t\t175,\t\t0\t\t],\n\t\t[185,\t\t89,\t\t0\t\t],\n\t\t[89,\t\t188,\t\t0\t\t],\n\t\t[189,\t\t190,\t\t0\t\t],\n\t\t[539,\t\t172,\t\t0\t\t],\n\t\t[504,\t\t192,\t\t0\t\t],\n\t\t[105,\t\t186,\t\t0\t\t],\n\t\t[105,\t\t187,\t\t0\t\t],\n\t\t[539,\t\t193,\t\t0\t\t],\n\t\t[187,\t\t194,\t\t0\t\t],\n\t\t[539,\t\t540,\t\t0\t\t],\n\t\t[539,\t\t196,\t\t0\t\t],\n\t\t[197,\t\t540,\t\t0\t\t],\n\t\t[110,\t\t198,\t\t0\t\t],\n\t\t[197,\t\t539,\t\t0\t\t],\n\t\t[199,\t\t537,\t\t0\t\t],\n\t\t[134,\t\t526,\t\t0\t\t],\n\t\t[200,\t\t193,\t\t0\t\t],\n\t\t[4,\t\t201,\t\t1\t\t],\n\t\t[202,\t\t86,\t\t0\t\t],\n\t\t[85,\t\t203,\t\t0\t\t],\n\t\t[147,\t\t204,\t\t0\t\t],\n\t\t[147,\t\t205,\t\t0\t\t],\n\t\t[123,\t\t206,\t\t0\t\t],\n\t\t[537,\t\t207,\t\t0\t\t],\n\t\t[165,\t\t208,\t\t0\t\t],\n\t\t[4,\t\t94,\t\t1\t\t],\n\t\t[4,\t\t2,\t\t0\t\t],\n\t\t[209,\t\t4,\t\t0\t\t],\n\t\t[119,\t\t163,\t\t0\t\t],\n\t\t[210,\t\t3,\t\t0\t\t],\n\t\t[99,\t\t211,\t\t0\t\t],\n\t\t[99,\t\t69,\t\t1\t\t],\n\t\t[212,\t\t99,\t\t0\t\t],\n\t\t[213,\t\t214,\t\t0\t\t],\n\t\t[510,\t\t215,\t\t0\t\t],\n\t\t[128,\t\t69,\t\t1\t\t],\n\t\t[216,\t\t69,\t\t1\t\t],\n\t\t[217,\t\t98,\t\t0\t\t],\n\t\t[504,\t\t218,\t\t0\t\t],\n\t\t[177,\t\t504,\t\t1\t\t],\n\t\t[219,\t\t209,\t\t0\t\t],\n\t\t[219,\t\t220,\t\t0\t\t],\n\t\t[94,\t\t95,\t\t1\t\t],\n\t\t[159,\t\t221,\t\t1\t\t],\n\t\t[34,\t\t161,\t\t0\t\t],\n\t\t[222,\t\t221,\t\t0\t\t],\n\t\t[211,\t\t52,\t\t1\t\t],\n\t\t[215,\t\t223,\t\t1\t\t],\n\t\t[224,\t\t215,\t\t0\t\t],\n\t\t[225,\t\t224,\t\t1\t\t],\n\t\t[224,\t\t223,\t\t0\t\t],\n\t\t[226,\t\t6,\t\t0\t\t],\n\t\t[7,\t\t3,\t\t1\t\t],\n\t\t[216,\t\t227,\t\t1\t\t],\n\t\t[228,\t\t229,\t\t0\t\t],\n\t\t[227,\t\t230,\t\t0\t\t],\n\t\t[231,\t\t53,\t\t1\t\t],\n\t\t[544,\t\t545,\t\t0\t\t],\n\t\t[234,\t\t235,\t\t1\t\t],\n\t\t[546,\t\t214,\t\t1\t\t],\n\t\t[233,\t\t227,\t\t0\
t\t],\n\t\t[237,\t\t238,\t\t0\t\t],\n\t\t[212,\t\t100,\t\t0\t\t],\n\t\t[519,\t\t239,\t\t0\t\t],\n\t\t[238,\t\t519,\t\t0\t\t],\n\t\t[213,\t\t240,\t\t0\t\t],\n\t\t[241,\t\t242,\t\t1\t\t],\n\t\t[70,\t\t241,\t\t0\t\t],\n\t\t[509,\t\t213,\t\t0\t\t],\n\t\t[68,\t\t243,\t\t0\t\t],\n\t\t[243,\t\t244,\t\t0\t\t],\n\t\t[68,\t\t244,\t\t0\t\t],\n\t\t[544,\t\t547,\t\t1\t\t],\n\t\t[245,\t\t227,\t\t1\t\t],\n\t\t[246,\t\t208,\t\t0\t\t],\n\t\t[112,\t\t208,\t\t0\t\t],\n\t\t[165,\t\t247,\t\t0\t\t],\n\t\t[537,\t\t549,\t\t0\t\t],\n\t\t[537,\t\t550,\t\t0\t\t],\n\t\t[537,\t\t551,\t\t0\t\t],\n\t\t[110,\t\t251,\t\t0\t\t],\n\t\t[510,\t\t252,\t\t1\t\t],\n\t\t[529,\t\t253,\t\t1\t\t],\n\t\t[237,\t\t239,\t\t1\t\t],\n\t\t[254,\t\t238,\t\t1\t\t],\n\t\t[69,\t\t255,\t\t0\t\t],\n\t\t[510,\t\t225,\t\t1\t\t],\n\t\t[256,\t\t257,\t\t0\t\t],\n\t\t[258,\t\t190,\t\t0\t\t],\n\t\t[258,\t\t259,\t\t0\t\t],\n\t\t[260,\t\t261,\t\t1\t\t],\n\t\t[554,\t\t553,\t\t1\t\t],\n\t\t[515,\t\t263,\t\t0\t\t],\n\t\t[14,\t\t264,\t\t1\t\t],\n\t\t[116,\t\t555,\t\t0\t\t],\n\t\t[151,\t\t116,\t\t0\t\t],\n\t\t[111,\t\t114,\t\t1\t\t],\n\t\t[77,\t\t111,\t\t0\t\t],\n\t\t[266,\t\t525,\t\t0\t\t],\n\t\t[267,\t\t120,\t\t1\t\t],\n\t\t[268,\t\t269,\t\t0\t\t],\n\t\t[556,\t\t271,\t\t0\t\t],\n\t\t[556,\t\t272,\t\t0\t\t],\n\t\t[529,\t\t273,\t\t0\t\t],\n\t\t[128,\t\t274,\t\t0\t\t],\n\t\t[34,\t\t275,\t\t0\t\t],\n\t\t[503,\t\t276,\t\t0\t\t],\n\t\t[503,\t\t504,\t\t1\t\t],\n\t\t[177,\t\t218,\t\t1\t\t],\n\t\t[277,\t\t278,\t\t1\t\t],\n\t\t[557,\t\t558,\t\t1\t\t],\n\t\t[557,\t\t559,\t\t1\t\t],\n\t\t[559,\t\t558,\t\t1\t\t],\n\t\t[277,\t\t78,\t\t1\t\t],\n\t\t[277,\t\t279,\t\t1\t\t],\n\t\t[78,\t\t279,\t\t0\t\t],\n\t\t[281,\t\t282,\t\t0\t\t],\n\t\t[283,\t\t161,\t\t1\t\t],\n\t\t[268,\t\t161,\t\t1\t\t],\n\t\t[256,\t\t284,\t\t0\t\t],\n\t\t[515,\t\t516,\t\t1\t\t],\n\t\t[263,\t\t516,\t\t0\t\t],\n\t\t[516,\t\t285,\t\t0\t\t],\n\t\t[63,\t\t286,\t\t0\t\t],\n\t\t[287,\t\t516,\t\t0\t\t],\n\t\t[8,\t\t102,\t\t1\t\t],\n\t\t[8,\t\t101,\t\t1\t\t],\n\t\t[80,\t\t288,\t\t0\t\t],\n\t\t[80,\t\t289,\t\t0\t\t],\n\t\t[276,\t\t560,\t\t0\t\t],\n\t\t[37,\t\t290,\t\t0\t\t],\n\t\t[290,\t\t74,\t\t1\t\t],\n\t\t[512,\t\t291,\t\t0\t\t],\n\t\t[78,\t\t292,\t\t1\t\t],\n\t\t[199,\t\t548,\t\t0\t\t],\n\t\t[491,\t\t293,\t\t0\t\t],\n\t\t[4,\t\t294,\t\t0\t\t],\n\t\t[490,\t\t541,\t\t1\t\t],\n\t\t[491,\t\t295,\t\t0\t\t],\n\t\t[491,\t\t296,\t\t0\t\t],\n\t\t[295,\t\t297,\t\t0\t\t],\n\t\t[508,\t\t161,\t\t0\t\t],\n\t\t[117,\t\t123,\t\t0\t\t],\n\t\t[133,\t\t117,\t\t0\t\t],\n\t\t[71,\t\t74,\t\t1\t\t],\n\t\t[74,\t\t278,\t\t1\t\t],\n\t\t[298,\t\t515,\t\t0\t\t],\n\t\t[5,\t\t299,\t\t0\t\t],\n\t\t[32,\t\t292,\t\t1\t\t],\n\t\t[5,\t\t29,\t\t1\t\t],\n\t\t[503,\t\t560,\t\t0\t\t],\n\t\t[300,\t\t301,\t\t1\t\t],\n\t\t[51,\t\t300,\t\t0\t\t],\n\t\t[244,\t\t302,\t\t1\t\t],\n\t\t[31,\t\t302,\t\t1\t\t],\n\t\t[51,\t\t282,\t\t1\t\t],\n\t\t[303,\t\t304,\t\t0\t\t],\n\t\t[305,\t\t304,\t\t0\t\t],\n\t\t[305,\t\t259,\t\t0\t\t],\n\t\t[306,\t\t307,\t\t1\t\t],\n\t\t[305,\t\t308,\t\t0\t\t],\n\t\t[305,\t\t309,\t\t0\t\t],\n\t\t[310,\t\t309,\t\t1\t\t],\n\t\t[306,\t\t309,\t\t1\t\t],\n\t\t[311,\t\t280,\t\t0\t\t],\n\t\t[280,\t\t278,\t\t1\t\t],\n\t\t[311,\t\t32,\t\t1\t\t],\n\t\t[13,\t\t312,\t\t1\t\t],\n\t\t[313,\t\t314,\t\t0\t\t],\n\t\t[312,\t\t313,\t\t1\t\t],\n\t\t[547,\t\t566,\t\t1\t\t],\n\t\t[245,\t\t315,\t\t1\t\t],\n\t\t[312,\t\t316,\t\t0\t\t],\n\t\t[312,\t\t314,\t\t0\t\t],\n\t\t[554,\t\t546,\t\t1\t\t],\n\t\t[262,\t\t216,\t\t1\t\t],\n\t\t[317,\t\t233,\t\t0\t\t],\n\t\t[318,\t\t317,\t\t0\t\t],\n\t\t[231,\t\t52,\t\t1\t\t],\n\t\t[319,\t\t567,\t\t0\t\t],\n\t\t[557,\t\t3
21,\t\t0\t\t],\n\t\t[277,\t\t65,\t\t1\t\t],\n\t\t[322,\t\t288,\t\t1\t\t],\n\t\t[322,\t\t323,\t\t0\t\t],\n\t\t[277,\t\t324,\t\t1\t\t],\n\t\t[324,\t\t325,\t\t0\t\t],\n\t\t[277,\t\t325,\t\t0\t\t],\n\t\t[326,\t\t327,\t\t0\t\t],\n\t\t[328,\t\t326,\t\t1\t\t],\n\t\t[328,\t\t327,\t\t1\t\t],\n\t\t[326,\t\t329,\t\t0\t\t],\n\t\t[568,\t\t329,\t\t1\t\t],\n\t\t[568,\t\t326,\t\t0\t\t],\n\t\t[332,\t\t78,\t\t1\t\t],\n\t\t[333,\t\t306,\t\t0\t\t],\n\t\t[332,\t\t333,\t\t0\t\t],\n\t\t[332,\t\t334,\t\t0\t\t],\n\t\t[66,\t\t334,\t\t1\t\t],\n\t\t[330,\t\t335,\t\t1\t\t],\n\t\t[336,\t\t66,\t\t0\t\t],\n\t\t[330,\t\t336,\t\t1\t\t],\n\t\t[68,\t\t70,\t\t0\t\t],\n\t\t[509,\t\t337,\t\t1\t\t],\n\t\t[324,\t\t288,\t\t0\t\t],\n\t\t[338,\t\t559,\t\t0\t\t],\n\t\t[339,\t\t559,\t\t0\t\t],\n\t\t[339,\t\t340,\t\t1\t\t],\n\t\t[559,\t\t340,\t\t1\t\t],\n\t\t[341,\t\t292,\t\t0\t\t],\n\t\t[557,\t\t342,\t\t0\t\t],\n\t\t[558,\t\t343,\t\t0\t\t],\n\t\t[502,\t\t340,\t\t1\t\t],\n\t\t[72,\t\t32,\t\t1\t\t],\n\t\t[344,\t\t345,\t\t0\t\t],\n\t\t[346,\t\t47,\t\t0\t\t],\n\t\t[46,\t\t47,\t\t0\t\t],\n\t\t[346,\t\t345,\t\t0\t\t],\n\t\t[347,\t\t328,\t\t0\t\t],\n\t\t[347,\t\t348,\t\t1\t\t],\n\t\t[571,\t\t348,\t\t1\t\t],\n\t\t[347,\t\t572,\t\t0\t\t],\n\t\t[571,\t\t570,\t\t1\t\t],\n\t\t[14,\t\t350,\t\t0\t\t],\n\t\t[350,\t\t573,\t\t0\t\t],\n\t\t[15,\t\t351,\t\t1\t\t],\n\t\t[352,\t\t15,\t\t0\t\t],\n\t\t[15,\t\t335,\t\t1\t\t],\n\t\t[232,\t\t227,\t\t0\t\t],\n\t\t[565,\t\t544,\t\t1\t\t],\n\t\t[235,\t\t567,\t\t1\t\t],\n\t\t[567,\t\t286,\t\t0\t\t],\n\t\t[353,\t\t519,\t\t0\t\t],\n\t\t[354,\t\t353,\t\t0\t\t],\n\t\t[355,\t\t354,\t\t0\t\t],\n\t\t[354,\t\t356,\t\t0\t\t],\n\t\t[357,\t\t358,\t\t0\t\t],\n\t\t[574,\t\t359,\t\t0\t\t],\n\t\t[235,\t\t575,\t\t0\t\t],\n\t\t[167,\t\t361,\t\t0\t\t],\n\t\t[528,\t\t362,\t\t0\t\t],\n\t\t[363,\t\t344,\t\t0\t\t],\n\t\t[259,\t\t364,\t\t1\t\t],\n\t\t[54,\t\t56,\t\t0\t\t],\n\t\t[365,\t\t364,\t\t0\t\t],\n\t\t[231,\t\t366,\t\t0\t\t],\n\t\t[30,\t\t367,\t\t0\t\t],\n\t\t[61,\t\t367,\t\t1\t\t],\n\t\t[254,\t\t368,\t\t0\t\t],\n\t\t[254,\t\t369,\t\t0\t\t],\n\t\t[254,\t\t370,\t\t0\t\t],\n\t\t[99,\t\t358,\t\t0\t\t],\n\t\t[354,\t\t519,\t\t0\t\t],\n\t\t[571,\t\t371,\t\t0\t\t],\n\t\t[207,\t\t372,\t\t0\t\t],\n\t\t[57,\t\t373,\t\t0\t\t],\n\t\t[209,\t\t374,\t\t0\t\t],\n\t\t[375,\t\t376,\t\t0\t\t],\n\t\t[376,\t\t377,\t\t0\t\t],\n\t\t[16,\t\t49,\t\t0\t\t],\n\t\t[318,\t\t377,\t\t0\t\t],\n\t\t[378,\t\t297,\t\t0\t\t],\n\t\t[562,\t\t379,\t\t0\t\t],\n\t\t[576,\t\t563,\t\t0\t\t],\n\t\t[576,\t\t381,\t\t0\t\t],\n\t\t[577,\t\t576,\t\t1\t\t],\n\t\t[244,\t\t383,\t\t0\t\t],\n\t\t[244,\t\t306,\t\t1\t\t],\n\t\t[383,\t\t306,\t\t1\t\t],\n\t\t[380,\t\t306,\t\t0\t\t],\n\t\t[252,\t\t225,\t\t0\t\t],\n\t\t[220,\t\t76,\t\t0\t\t],\n\t\t[542,\t\t384,\t\t0\t\t],\n\t\t[385,\t\t384,\t\t0\t\t],\n\t\t[542,\t\t385,\t\t0\t\t],\n\t\t[386,\t\t385,\t\t0\t\t],\n\t\t[387,\t\t578,\t\t0\t\t],\n\t\t[332,\t\t388,\t\t1\t\t],\n\t\t[382,\t\t332,\t\t1\t\t],\n\t\t[382,\t\t388,\t\t0\t\t],\n\t\t[579,\t\t578,\t\t0\t\t],\n\t\t[577,\t\t387,\t\t1\t\t],\n\t\t[144,\t\t390,\t\t0\t\t],\n\t\t[37,\t\t49,\t\t0\t\t],\n\t\t[391,\t\t233,\t\t0\t\t],\n\t\t[392,\t\t310,\t\t0\t\t],\n\t\t[260,\t\t393,\t\t0\t\t],\n\t\t[394,\t\t230,\t\t0\t\t],\n\t\t[395,\t\t282,\t\t1\t\t],\n\t\t[395,\t\t244,\t\t0\t\t],\n\t\t[25,\t\t396,\t\t1\t\t],\n\t\t[81,\t\t74,\t\t0\t\t],\n\t\t[278,\t\t80,\t\t1\t\t],\n\t\t[81,\t\t278,\t\t1\t\t],\n\t\t[569,\t\t570,\t\t0\t\t],\n\t\t[397,\t\t552,\t\t0\t\t],\n\t\t[542,\t\t398,\t\t0\t\t],\n\t\t[398,\t\t385,\t\t0\t\t],\n\t\t[399,\t\t499,\t\t0\t\t],\n\t\t[83,\t\t399,\t\t0\t\t],\n\t\t[498,\t\t400,\t\t0\t\t],\n\
t\t[518,\t\t239,\t\t1\t\t],\n\t\t[575,\t\t543,\t\t0\t\t],\n\t\t[401,\t\t360,\t\t0\t\t],\n\t\t[580,\t\t581,\t\t0\t\t],\n\t\t[401,\t\t402,\t\t0\t\t],\n\t\t[403,\t\t231,\t\t0\t\t],\n\t\t[189,\t\t360,\t\t1\t\t],\n\t\t[234,\t\t404,\t\t0\t\t],\n\t\t[235,\t\t404,\t\t1\t\t],\n\t\t[235,\t\t580,\t\t0\t\t],\n\t\t[216,\t\t259,\t\t0\t\t],\n\t\t[405,\t\t259,\t\t0\t\t],\n\t\t[405,\t\t318,\t\t0\t\t],\n\t\t[406,\t\t230,\t\t0\t\t],\n\t\t[542,\t\t407,\t\t0\t\t],\n\t\t[23,\t\t408,\t\t0\t\t],\n\t\t[577,\t\t348,\t\t0\t\t],\n\t\t[562,\t\t564,\t\t1\t\t],\n\t\t[582,\t\t507,\t\t0\t\t],\n\t\t[27,\t\t410,\t\t0\t\t],\n\t\t[501,\t\t27,\t\t0\t\t],\n\t\t[27,\t\t411,\t\t0\t\t],\n\t\t[411,\t\t410,\t\t0\t\t],\n\t\t[403,\t\t360,\t\t0\t\t],\n\t\t[412,\t\t360,\t\t0\t\t],\n\t\t[326,\t\t413,\t\t0\t\t],\n\t\t[414,\t\t413,\t\t0\t\t],\n\t\t[6,\t\t297,\t\t0\t\t],\n\t\t[554,\t\t580,\t\t1\t\t],\n\t\t[262,\t\t401,\t\t1\t\t],\n\t\t[499,\t\t556,\t\t1\t\t],\n\t\t[224,\t\t229,\t\t0\t\t],\n\t\t[583,\t\t507,\t\t0\t\t],\n\t\t[415,\t\t307,\t\t0\t\t],\n\t\t[416,\t\t507,\t\t0\t\t],\n\t\t[284,\t\t561,\t\t0\t\t],\n\t\t[543,\t\t417,\t\t0\t\t],\n\t\t[418,\t\t506,\t\t0\t\t],\n\t\t[220,\t\t157,\t\t0\t\t],\n\t\t[295,\t\t419,\t\t0\t\t],\n\t\t[295,\t\t420,\t\t0\t\t],\n\t\t[541,\t\t62,\t\t0\t\t],\n\t\t[52,\t\t421,\t\t0\t\t],\n\t\t[60,\t\t160,\t\t0\t\t],\n\t\t[535,\t\t161,\t\t0\t\t],\n\t\t[267,\t\t282,\t\t0\t\t],\n\t\t[52,\t\t365,\t\t0\t\t],\n\t\t[28,\t\t27,\t\t0\t\t],\n\t\t[30,\t\t201,\t\t1\t\t],\n\t\t[422,\t\t81,\t\t0\t\t],\n\t\t[119,\t\t425,\t\t0\t\t],\n\t\t[423,\t\t425,\t\t0\t\t],\n\t\t[424,\t\t425,\t\t0\t\t],\n\t\t[426,\t\t428,\t\t0\t\t],\n\t\t[427,\t\t428,\t\t0\t\t],\n\t\t[19,\t\t428,\t\t1\t\t],\n\t\t[45,\t\t429,\t\t0\t\t],\n\t\t[44,\t\t429,\t\t0\t\t],\n\t\t[505,\t\t429,\t\t0\t\t],\n\t\t[231,\t\t431,\t\t1\t\t],\n\t\t[190,\t\t431,\t\t1\t\t],\n\t\t[430,\t\t431,\t\t0\t\t],\n\t\t[286,\t\t433,\t\t0\t\t],\n\t\t[432,\t\t433,\t\t0\t\t],\n\t\t[506,\t\t433,\t\t0\t\t],\n\t\t[23,\t\t434,\t\t0\t\t],\n\t\t[400,\t\t434,\t\t0\t\t],\n\t\t[500,\t\t434,\t\t0\t\t],\n\t\t[32,\t\t436,\t\t0\t\t],\n\t\t[435,\t\t436,\t\t0\t\t],\n\t\t[78,\t\t436,\t\t1\t\t],\n\t\t[86,\t\t438,\t\t1\t\t],\n\t\t[437,\t\t438,\t\t0\t\t],\n\t\t[221,\t\t438,\t\t0\t\t],\n\t\t[207,\t\t439,\t\t0\t\t],\n\t\t[516,\t\t439,\t\t0\t\t],\n\t\t[513,\t\t439,\t\t0\t\t],\n\t\t[181,\t\t441,\t\t1\t\t],\n\t\t[440,\t\t441,\t\t0\t\t],\n\t\t[504,\t\t441,\t\t1\t\t],\n\t\t[135,\t\t442,\t\t0\t\t],\n\t\t[109,\t\t442,\t\t0\t\t],\n\t\t[112,\t\t442,\t\t0\t\t],\n\t\t[113,\t\t443,\t\t0\t\t],\n\t\t[132,\t\t443,\t\t0\t\t],\n\t\t[107,\t\t443,\t\t0\t\t],\n\t\t[444,\t\t445,\t\t0\t\t],\n\t\t[112,\t\t445,\t\t0\t\t],\n\t\t[109,\t\t445,\t\t0\t\t],\n\t\t[119,\t\t447,\t\t1\t\t],\n\t\t[100,\t\t447,\t\t1\t\t],\n\t\t[446,\t\t447,\t\t0\t\t],\n\t\t[124,\t\t448,\t\t0\t\t],\n\t\t[125,\t\t448,\t\t0\t\t],\n\t\t[131,\t\t448,\t\t0\t\t],\n\t\t[449,\t\t450,\t\t0\t\t],\n\t\t[173,\t\t450,\t\t0\t\t],\n\t\t[184,\t\t450,\t\t0\t\t],\n\t\t[144,\t\t451,\t\t0\t\t],\n\t\t[140,\t\t451,\t\t0\t\t],\n\t\t[514,\t\t451,\t\t0\t\t],\n\t\t[537,\t\t585,\t\t1\t\t],\n\t\t[141,\t\t585,\t\t0\t\t],\n\t\t[584,\t\t585,\t\t0\t\t],\n\t\t[522,\t\t454,\t\t0\t\t],\n\t\t[144,\t\t454,\t\t0\t\t],\n\t\t[453,\t\t454,\t\t0\t\t],\n\t\t[199,\t\t456,\t\t0\t\t],\n\t\t[140,\t\t456,\t\t0\t\t],\n\t\t[455,\t\t456,\t\t0\t\t],\n\t\t[537,\t\t456,\t\t0\t\t],\n\t\t[538,\t\t457,\t\t0\t\t],\n\t\t[153,\t\t457,\t\t0\t\t],\n\t\t[176,\t\t457,\t\t0\t\t],\n\t\t[524,\t\t459,\t\t0\t\t],\n\t\t[458,\t\t459,\t\t0\t\t],\n\t\t[134,\t\t459,\t\t0\t\t],\n\t\t[460,\t\t461,\t\t0\t\t],\n\t\t[150,\t\t461,\t\t0\t\t],\n\t\t[14
9,\t\t461,\t\t0\t\t],\n\t\t[521,\t\t463,\t\t0\t\t],\n\t\t[462,\t\t463,\t\t0\t\t],\n\t\t[538,\t\t463,\t\t0\t\t],\n\t\t[110,\t\t464,\t\t0\t\t],\n\t\t[90,\t\t464,\t\t0\t\t],\n\t\t[165,\t\t464,\t\t0\t\t],\n\t\t[458,\t\t465,\t\t0\t\t],\n\t\t[134,\t\t465,\t\t0\t\t],\n\t\t[524,\t\t465,\t\t0\t\t],\n\t\t[466,\t\t467,\t\t0\t\t],\n\t\t[110,\t\t467,\t\t0\t\t],\n\t\t[165,\t\t467,\t\t0\t\t],\n\t\t[468,\t\t469,\t\t0\t\t],\n\t\t[541,\t\t469,\t\t0\t\t],\n\t\t[490,\t\t469,\t\t0\t\t],\n\t\t[263,\t\t471,\t\t0\t\t],\n\t\t[470,\t\t471,\t\t0\t\t],\n\t\t[534,\t\t471,\t\t0\t\t],\n\t\t[136,\t\t472,\t\t0\t\t],\n\t\t[110,\t\t472,\t\t0\t\t],\n\t\t[251,\t\t472,\t\t0\t\t],\n\t\t[226,\t\t474,\t\t0\t\t],\n\t\t[473,\t\t474,\t\t0\t\t],\n\t\t[257,\t\t474,\t\t0\t\t],\n\t\t[6,\t\t474,\t\t1\t\t],\n\t\t[299,\t\t475,\t\t1\t\t],\n\t\t[3,\t\t475,\t\t0\t\t],\n\t\t[210,\t\t475,\t\t0\t\t],\n\t\t[297,\t\t476,\t\t0\t\t],\n\t\t[296,\t\t476,\t\t0\t\t],\n\t\t[295,\t\t476,\t\t0\t\t],\n\t\t[313,\t\t478,\t\t1\t\t],\n\t\t[477,\t\t478,\t\t0\t\t],\n\t\t[245,\t\t478,\t\t0\t\t],\n\t\t[479,\t\t481,\t\t0\t\t],\n\t\t[565,\t\t481,\t\t0\t\t],\n\t\t[480,\t\t481,\t\t0\t\t],\n\t\t[415,\t\t482,\t\t0\t\t],\n\t\t[56,\t\t482,\t\t0\t\t],\n\t\t[409,\t\t482,\t\t0\t\t],\n\t\t[483,\t\t484,\t\t0\t\t],\n\t\t[3,\t\t484,\t\t0\t\t],\n\t\t[301,\t\t484,\t\t0\t\t],\n\t\t[233,\t\t485,\t\t0\t\t],\n\t\t[392,\t\t485,\t\t0\t\t],\n\t\t[391,\t\t485,\t\t0\t\t],\n\t\t[579,\t\t488,\t\t0\t\t],\n\t\t[486,\t\t488,\t\t0\t\t],\n\t\t[487,\t\t488,\t\t0\t\t],\n\t\t[270,\t\t489,\t\t0\t\t],\n\t\t[331,\t\t489,\t\t0\t\t],\n\t\t[396,\t\t489,\t\t1\t\t],\n\t\t[519,\t\t253,\t\t0\t\t],\n\t\t[382,\t\t349,\t\t1\t\t],\n\t\t[349,\t\t351,\t\t0\t\t],\n\t\t[459,\t\t465,\t\t0\t\t],\n\t\t[549,\t\t550,\t\t0\t\t],\n\t\t[550,\t\t551,\t\t0\t\t],\n\t\t[194,\t\t195,\t\t0\t\t],\n\t\t[247,\t\t248,\t\t0\t\t],\n\t\t[2,\t\t294,\t\t0\t\t],\n\t\t[549,\t\t551,\t\t0\t\t],\n\t\t[54,\t\t365,\t\t0\t\t],\n\t\t[131,\t\t265,\t\t0\t\t],\n\t\t[91,\t\t92,\t\t0\t\t],\n\t\t[247,\t\t249,\t\t0\t\t],\n\t\t[186,\t\t191,\t\t0\t\t],\n\t\t[129,\t\t173,\t\t0\t\t],\n\t\t[96,\t\t202,\t\t0\t\t],\n\t\t[53,\t\t320,\t\t0\t\t],\n\t\t[24,\t\t396,\t\t0\t\t],\n\t\t[133,\t\t156,\t\t0\t\t],\n\t\t[442,\t\t452,\t\t0\t\t],\n\t\t[445,\t\t452,\t\t0\t\t],\n\t\t[247,\t\t250,\t\t0\t\t],\n\t\t[187,\t\t195,\t\t0\t\t],\n\t\t[216,\t\t236,\t\t0\t\t],\n\t\t[244,\t\t389,\t\t0\t\t],\n\t\t[394,\t\t406,\t\t0\t\t],\n\t\t[442,\t\t445,\t\t0\t\t],\n\t\t[442,\t\t444,\t\t0\t\t],\n\t\t[198,\t\t472,\t\t0\t\t],\n\t\t[464,\t\t467,\t\t0\t\t],\n\t\t[198,\t\t251,\t\t0\t\t],\n\t\t[112,\t\t143,\t\t0\t\t],\n\t\t[2,\t\t490,\t\t0\t\t],\n\t\t[5,\t\t491,\t\t0\t\t],\n\t\t[10,\t\t492,\t\t0\t\t],\n\t\t[12,\t\t493,\t\t0\t\t],\n\t\t[13,\t\t494,\t\t0\t\t],\n\t\t[15,\t\t495,\t\t0\t\t],\n\t\t[18,\t\t496,\t\t0\t\t],\n\t\t[20,\t\t497,\t\t0\t\t],\n\t\t[22,\t\t498,\t\t0\t\t],\n\t\t[24,\t\t499,\t\t0\t\t],\n\t\t[26,\t\t500,\t\t0\t\t],\n\t\t[30,\t\t501,\t\t0\t\t],\n\t\t[32,\t\t502,\t\t0\t\t],\n\t\t[37,\t\t503,\t\t0\t\t],\n\t\t[42,\t\t504,\t\t0\t\t],\n\t\t[46,\t\t505,\t\t0\t\t],\n\t\t[52,\t\t506,\t\t0\t\t],\n\t\t[56,\t\t507,\t\t0\t\t],\n\t\t[61,\t\t508,\t\t0\t\t],\n\t\t[68,\t\t509,\t\t0\t\t],\n\t\t[69,\t\t510,\t\t0\t\t],\n\t\t[74,\t\t511,\t\t0\t\t],\n\t\t[78,\t\t512,\t\t0\t\t],\n\t\t[86,\t\t513,\t\t0\t\t],\n\t\t[87,\t\t514,\t\t0\t\t],\n\t\t[94,\t\t515,\t\t0\t\t],\n\t\t[95,\t\t516,\t\t0\t\t],\n\t\t[96,\t\t517,\t\t0\t\t],\n\t\t[99,\t\t518,\t\t0\t\t],\n\t\t[100,\t\t519,\t\t0\t\t],\n\t\t[104,\t\t520,\t\t0\t\t],\n\t\t[105,\t\t521,\t\t0\t\t],\n\t\t[106,\t\t522,\t\t0\t\t],\n\t\t[107,\t\t523,\t\t0\t\t],\n\t\t[117,
\t\t524,\t\t0\t\t],\n\t\t[120,\t\t525,\t\t0\t\t],\n\t\t[123,\t\t526,\t\t0\t\t],\n\t\t[124,\t\t527,\t\t0\t\t],\n\t\t[125,\t\t528,\t\t0\t\t],\n\t\t[128,\t\t529,\t\t0\t\t],\n\t\t[129,\t\t530,\t\t0\t\t],\n\t\t[138,\t\t531,\t\t0\t\t],\n\t\t[143,\t\t532,\t\t0\t\t],\n\t\t[156,\t\t533,\t\t0\t\t],\n\t\t[157,\t\t534,\t\t0\t\t],\n\t\t[159,\t\t535,\t\t0\t\t],\n\t\t[160,\t\t536,\t\t0\t\t],\n\t\t[165,\t\t537,\t\t0\t\t],\n\t\t[184,\t\t538,\t\t0\t\t],\n\t\t[191,\t\t539,\t\t0\t\t],\n\t\t[195,\t\t540,\t\t0\t\t],\n\t\t[201,\t\t541,\t\t0\t\t],\n\t\t[220,\t\t542,\t\t0\t\t],\n\t\t[231,\t\t543,\t\t0\t\t],\n\t\t[232,\t\t544,\t\t0\t\t],\n\t\t[233,\t\t545,\t\t0\t\t],\n\t\t[236,\t\t546,\t\t0\t\t],\n\t\t[245,\t\t547,\t\t0\t\t],\n\t\t[246,\t\t548,\t\t0\t\t],\n\t\t[248,\t\t549,\t\t0\t\t],\n\t\t[249,\t\t550,\t\t0\t\t],\n\t\t[250,\t\t551,\t\t0\t\t],\n\t\t[259,\t\t552,\t\t0\t\t],\n\t\t[261,\t\t553,\t\t0\t\t],\n\t\t[262,\t\t554,\t\t0\t\t],\n\t\t[265,\t\t555,\t\t0\t\t],\n\t\t[270,\t\t556,\t\t0\t\t],\n\t\t[277,\t\t557,\t\t0\t\t],\n\t\t[279,\t\t558,\t\t0\t\t],\n\t\t[280,\t\t559,\t\t0\t\t],\n\t\t[290,\t\t560,\t\t0\t\t],\n\t\t[301,\t\t561,\t\t0\t\t],\n\t\t[305,\t\t562,\t\t0\t\t],\n\t\t[306,\t\t563,\t\t0\t\t],\n\t\t[310,\t\t564,\t\t0\t\t],\n\t\t[313,\t\t565,\t\t0\t\t],\n\t\t[315,\t\t566,\t\t0\t\t],\n\t\t[320,\t\t567,\t\t0\t\t],\n\t\t[330,\t\t568,\t\t0\t\t],\n\t\t[332,\t\t569,\t\t0\t\t],\n\t\t[334,\t\t570,\t\t0\t\t],\n\t\t[336,\t\t571,\t\t0\t\t],\n\t\t[349,\t\t572,\t\t0\t\t],\n\t\t[351,\t\t573,\t\t0\t\t],\n\t\t[358,\t\t574,\t\t0\t\t],\n\t\t[360,\t\t575,\t\t0\t\t],\n\t\t[380,\t\t576,\t\t0\t\t],\n\t\t[382,\t\t577,\t\t0\t\t],\n\t\t[383,\t\t578,\t\t0\t\t],\n\t\t[389,\t\t579,\t\t0\t\t],\n\t\t[401,\t\t580,\t\t0\t\t],\n\t\t[402,\t\t581,\t\t0\t\t],\n\t\t[409,\t\t582,\t\t0\t\t],\n\t\t[415,\t\t583,\t\t0\t\t],\n\t\t[444,\t\t584,\t\t0\t\t],\n\t\t[452,\t\t585,\t\t0\t\t]\n\t])\n\tppc[\"parameters\"] = {\n\t\t\"x_trans_sg\": 0.003, \n\t\t\"x_trans_fm\": 0.001, \n\t\t\"x_trans_fl\": 0.001, \n\t\t\"d_l\": 1e-3, \n\t\t\"d_l_perturb\": 1e-5, \n\t\t\"w_1_ij\": 1, \n\t\t\"w_2_ij\": 1, \n\t\t\"w_3_ij\": 1, \n\t\t\"w_4_ij\": 1, \n\t\t\"b_r\": 238, \n\t\t\"b_c\": 248 }\n\treturn ppc"
] | [
[
"numpy.array"
]
] |
nestauk/la_funding_analysis | [
"bc338583817174f47f2cff2105f4a20a89df4c99"
] | [
"la_funding_analysis/pipeline/cleaning.py"
] | [
"# File: pipeline/cleaning.py\n\"\"\"Functions to clean datasets.\nCalling each function returns a clean version of the associated dataset.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\nfrom la_funding_analysis.getters.local_authority_data import (\n get_epc,\n get_grants,\n get_imd,\n get_old_parties,\n get_parties_models,\n get_fuel_poverty,\n)\nfrom la_funding_analysis.utils.name_cleaners import (\n clean_names,\n model_type,\n strip_and_titlecase,\n)\n\n\ndef get_clean_fuel_poverty():\n \"\"\"Gets and cleans fuel poverty dataset.\"\"\"\n fuel_poverty = get_fuel_poverty()\n #\n fuel_poverty = fuel_poverty.rename(\n columns={\n \"Area Codes\": \"code\",\n \"Area name\": \"region_1\",\n \"Unnamed: 2\": \"region_2\",\n \"Unnamed: 3\": \"region_3\",\n \"Number of households1\": \"total_households\",\n \"Number of households in fuel poverty1\": \"fp_households\",\n \"Proportion of households fuel poor (%)\": \"fp_proportion\",\n }\n )\n #\n # Remove trailing spaces and fix capitalisation in region columns\n fuel_poverty[\"region_1\"] = fuel_poverty[\"region_1\"].apply(strip_and_titlecase)\n fuel_poverty[\"region_2\"] = fuel_poverty[\"region_2\"].apply(strip_and_titlecase)\n fuel_poverty[\"region_3\"] = fuel_poverty[\"region_3\"].apply(strip_and_titlecase)\n #\n # Merge the different 'region' columns into one and apply clean_names -\n # this allows for joining onto data in which local authorities\n # are only referred to by name and not ID\n fuel_poverty[\"clean_name\"] = (\n fuel_poverty[\"region_1\"]\n .fillna(fuel_poverty[\"region_2\"])\n .fillna(fuel_poverty[\"region_3\"])\n .apply(clean_names)\n )\n # Fill in NaN values in region columns so that all region_3 rows\n # have associated region_1 and region_2 data,\n # and all region_2 rows have associated region_1 data.\n # First copy region_1 values into region_2 then forward-fill region_2 -\n # the 'region_1's stop the filling from going too far\n fuel_poverty[\"region_2\"] = (\n fuel_poverty[\"region_2\"].fillna(fuel_poverty[\"region_1\"]).ffill()\n )\n # Set the copied-over values in region_2 back to NaN\n fuel_poverty[\"region_2\"].loc[~fuel_poverty[\"region_1\"].isna()] = np.nan\n # Then forward-fill region_1\n fuel_poverty[\"region_1\"] = fuel_poverty[\"region_1\"].ffill()\n # Filter out all of the region_1 rows - they are not local authorities\n fuel_poverty = fuel_poverty[~fuel_poverty[\"region_2\"].isna()]\n # Additionally remove all Met Counties and Inner/Outer London -\n # these are rows that contain (Met County) or Inner/Outer London in region_2\n # and have NA region_3\n def not_la_condition(string):\n return (\"(Met County)\" in string) | (string in [\"Inner London\", \"Outer London\"])\n\n #\n #\n not_las = [not_la_condition(string) for string in fuel_poverty[\"region_2\"]]\n no_region_3 = list(fuel_poverty.region_3.isna())\n both = [a and b for a, b in zip(not_las, no_region_3)]\n fuel_poverty = fuel_poverty.drop(fuel_poverty[both].index)\n #\n # Append rows for Greater London Authority and\n # Greater Manchester Combined Authority -\n # these are not LAs but some grants went to them\n combined_authorities = pd.DataFrame(\n [\n [\n np.nan,\n \"London\",\n \"Greater London Authority\",\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n \"Greater London Authority\",\n ],\n [\n np.nan,\n \"North West\",\n \"Greater Manchester Combined Authority\",\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n \"Greater Manchester Combined Authority\",\n ],\n ],\n columns=fuel_poverty.columns,\n )\n #\n fuel_poverty = 
fuel_poverty.append(combined_authorities, ignore_index=True)\n #\n return fuel_poverty\n\n\ndef get_clean_parties_models():\n \"\"\"Gets and cleans current LA majority party and model (e.g. county, district) data.\"\"\"\n parties_models = get_parties_models()\n #\n parties_models = parties_models.rename(\n columns={\n \"model (C=county, D=district, 1=all-up, 3=thirds, etc.)\": \"model\",\n }\n )\n # 'Buckinghamshire' row in this dataset is incorrect -\n # it is labelled as a County council but it has become unitary\n # Manually replace with the correct data\n # Source: http://opencouncildata.co.uk/council.php?c=413&y=0\n parties_models.loc[2] = [\"Buckinghamshire\", \"U1\", \"CON\"]\n #\n # Rename models to full names\n parties_models[\"model\"] = parties_models[\"model\"].apply(model_type)\n #\n # Apply clean_names to all names in parties/models data\n parties_models[\"clean_name\"] = parties_models[\"name\"].apply(clean_names)\n parties_models = parties_models.drop(columns=\"name\")\n #\n return parties_models\n\n\ndef get_clean_old_parties():\n \"\"\"Gets and cleans data about political majorities as of August 2020.\"\"\"\n op = get_old_parties()\n op[\"clean_name\"] = op[\"Authority\"].apply(clean_names)\n op[\"old_majority\"] = [string.upper() for string in op[\"Control\"]]\n op = op.drop(columns=[\"Authority\", \"Control\"]).reset_index(drop=True)\n return op\n\n\ndef get_clean_imd():\n \"\"\"Gets and cleans IMD data.\"\"\"\n imd = get_imd()\n imd = imd.rename(\n columns={\n \"Reference area\": \"full_name\",\n \" Local concentration\": \"imd_concentration\",\n }\n )\n #\n imd[\"clean_name\"] = imd[\"full_name\"].apply(clean_names)\n imd = imd.drop(columns=\"full_name\")\n #\n return imd\n\n\ndef get_clean_grants():\n \"\"\"Gets and cleans data on grants received by LAs.\"\"\"\n grants = get_grants()\n grants = grants.rename(\n columns={\n \"Local authority\": \"full_name\",\n \"GHG LADS 1a\": \"GHG_1a_individuals\",\n \"1a Consortium Leads\": \"GHG_1a_leads\",\n \"1a Consortium bodies\": \"GHG_1a_bodies\",\n \"GHG LADS 1b\": \"GHG_1b_individuals\",\n \"1b Consortium leads\": \"GHG_1b_leads\",\n \"1b Consortium bodies\": \"GHG_1b_bodies\",\n \"Social Housing Decarbonisation Fund - Demonstrator \": \"SHDDF\",\n \"Total\": \"total_grants\",\n }\n )\n #\n # Some regions appear twice in the grants data\n duplicate_strings = [\"Greenwich\", \"Lewisham\", \"Redbridge\"]\n regex_exp = \"|\".join(duplicate_strings)\n clean_grants = grants[~grants[\"full_name\"].str.contains(regex_exp, regex=True)]\n #\n for string in duplicate_strings:\n duplicate_df = grants[grants[\"full_name\"].str.contains(string)]\n replacement_row = duplicate_df.iloc[0] + duplicate_df.iloc[1]\n replacement_row[\"full_name\"] = string\n clean_grants = clean_grants.append(replacement_row, ignore_index=True)\n #\n # Babergh and Mid Suffolk are shown in one row in the grants data,\n # but they are actually two different LAs - the stated grants\n # apply to both individually\n babergh_ms = clean_grants[\n [(\"Babergh and Mid Suffolk\" in name) for name in clean_grants[\"full_name\"]]\n ]\n babergh = babergh_ms.copy()\n babergh[\"full_name\"] = \"Babergh\"\n ms = babergh_ms.copy()\n ms[\"full_name\"] = \"Mid Suffolk\"\n clean_grants = (\n clean_grants[\n [\n (\"Babergh and Mid Suffolk\" not in name)\n for name in clean_grants[\"full_name\"]\n ]\n ]\n .append(babergh)\n .append(ms)\n .reset_index(drop=True)\n )\n #\n # As before, apply clean_names in order to join data\n clean_grants[\"clean_name\"] = 
clean_grants[\"full_name\"].apply(clean_names)\n clean_grants = clean_grants.drop(columns=\"full_name\")\n #\n return clean_grants\n\n\ndef get_clean_epc():\n \"\"\"Processes EPC dataset to obtain median EPC for each LA\n and counts/proportions of improvable social housing.\n \"\"\"\n epc = get_epc()\n #\n # Calculate median energy rating for each LA:\n epc_medians = (\n epc.groupby(\"LOCAL_AUTHORITY\")[\"CURRENT_ENERGY_EFFICIENCY\"]\n .apply(np.median)\n .reset_index(name=\"median_energy_efficiency\")\n )\n #\n # Calculate proportions of 'improvable' social housing\n # (socially rented dwellings that are currently EPC D or below,\n # and have the potential to be C or above)\n #\n # There are two different strings signifying socially rented\n # in the TENURE column of the EPC data:\n epc_social = epc.loc[epc[\"TENURE\"].isin([\"rental (social)\", \"Rented (social)\"])]\n #\n epc_social[\"is_improvable\"] = (\n epc_social[\"CURRENT_ENERGY_RATING\"].isin([\"G\", \"F\", \"E\", \"D\"])\n ) & (epc_social[\"POTENTIAL_ENERGY_RATING\"].isin([\"C\", \"B\", \"A\"]))\n #\n # Find the numbers of improvable / not improvable social houses in each LA\n potential_counts = (\n epc_social.groupby([\"LOCAL_AUTHORITY\", \"is_improvable\"])[\n [\"LOCAL_AUTHORITY\", \"is_improvable\"]\n ]\n .size()\n .reset_index(name=\"count\")\n .pivot(index=\"LOCAL_AUTHORITY\", columns=\"is_improvable\", values=\"count\")\n .rename(columns={True: \"total_improvable\", False: \"total_not_improvable\"})\n )\n # Calculate proportions\n potential_counts.columns.name = None\n potential_counts[\"total_social\"] = potential_counts.sum(axis=1)\n potential_counts[\"prop_improvable\"] = (\n potential_counts[\"total_improvable\"] / potential_counts[\"total_social\"]\n )\n potential_counts = potential_counts.reset_index()[\n [\"LOCAL_AUTHORITY\", \"total_improvable\", \"prop_improvable\"]\n ]\n # Join to medians\n clean_epc = epc_medians.merge(potential_counts, on=\"LOCAL_AUTHORITY\").rename(\n columns={\"LOCAL_AUTHORITY\": \"code\"}\n )\n #\n return clean_epc\n"
] | [
[
"pandas.DataFrame"
]
] |
HwangDongJun/Federated_Learning_using_Websockets | [
"87c2873ae9b6a651750d08f4cd0ad5757893ce88"
] | [
"federated_learning_without_transfer_learning/ntf_client_fit_model.py"
] | [
"# Setup library\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nimport numpy as np\nimport PIL.Image as Image\nfrom PIL import ImageFile\nimport tensorflow as tf\nimport tensorflow_hub as hub\nfrom tensorflow.keras import layers\nimport matplotlib.pylab as plt\nimport efficientnet.tfkeras as efn\n\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"\"\n\ngpus = tf.config.experimental.list_physical_devices('GPU')\nif gpus:\n\ttry:\n\t\tfor gpu in gpus:\n\t\t\ttf.config.experimental.set_memory_growth(gpu, True)\n\t\tlogical_gpus = tf.config.experimental.list_logical_devices('GPU')\n\t\tprint(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPUs\")\n\texcept RuntimeError as e:\n\t\tprint(e)\n\n\nclass transfer_learning_fit(object):\n\tdef __init__(self, config, weights):\n\t\tself.weights = weights\n\t\tself.image_shape = (config['image_shape'], config['image_shape'])\n\t\tself.batch_size = config['batch_size']\n\t\tself.learning_rate = config['learning_rate']\n\t\tself.epochs = config['epochs']\n\t\tself.optimizer = config['optimizer']\n\t\tself.model_link = config['model']\n\t\tself.class_names = np.array(['book', 'laptop', 'phone', 'wash', 'water'])\n\n\t\ttf.random.set_seed(2020)\n\n\tdef image_generator(self):\n\t\timage_gen_train = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\trotation_range=15,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\thorizontal_flip=True,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tbrightness_range=[0.7,1.0])\n\t\timage_gen_val = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255)\n\t\treturn image_gen_train, image_gen_val\n\n\tdef gen_train_val_data(self):\n\t\tgen_train, gen_val = self.image_generator()\n\n\t\ttrain_data_dir = os.path.abspath('INPUT YOUR TRANING DATA SET PATH')\n\t\ttrain_data_gen = gen_train.flow_from_directory(directory=str(train_data_dir),\n\t\t\t\t\t\t\t\t\t\t\tbatch_size=self.batch_size,\n\t\t\t\t\t\t\t\t\t\t\tcolor_mode='rgb',\n\t\t\t\t\t\t\t\t\t\t\tshuffle=True,\n\t\t\t\t\t\t\t\t\t\t\ttarget_size=self.image_shape,\n\t\t\t\t\t\t\t\t\t\t\tclasses=list(self.class_names))\n\n\t\treturn train_data_gen\n\n\tdef select_optimizer(self, opti, lr):\n\t\tif opti == 'adam':\n\t\t\treturn tf.keras.optimizers.Adam(learning_rate=lr)\n\n\tdef set_model(self, vector_layer):\n\t\t#efficient_net = efn.EfficientNetB0(\n\t\t#\tweights=None,\n\t\t#\tinput_shape=self.image_shape+(3,),\n\t\t#\tinclude_top=False,\n\t\t#\tpooling='max'\n\t\t#)\n\n\t\t#model = tf.keras.Sequential([\n\t\t#\tefficient_net,\n\t\t#\tlayers.Dense(5, activation='softmax')\n\t\t#])\n\n\t\tmobilenet_v2 = tf.keras.applications.MobileNetV2(\n\t\t\tweights=None,\n\t\t\tinput_shape=self.image_shape+(3,),\n\t\t\tinclude_top=False,\n\t\t\tpooling='max'\n\t\t)\n\n\t\tmodel = tf.keras.Sequential([\n\t\t\tmobilenet_v2,\n\t\t\tlayers.Dense(5, activation='softmax')\n\t\t])\n\n\t\treturn model\n\n\tdef build_model(self):\n\t\tfeature_vector_url = self.model_link\n\t\tfeature_vector_layer = hub.KerasLayer(feature_vector_url,\n\t\t\t\t\t\t\t\t\t\tinput_shape=self.image_shape+(3,))\n\t\t\n\t\tfeature_vector_layer.trainable = True\n\n\t\tmade_model = self.set_model(feature_vector_layer)\n\n\t\tprint(made_model.summary())\n\n\t\tmade_model.compile(\n\t\t\toptimizer=self.select_optimizer(self.optimizer, self.learning_rate),\n\t\t\tloss='categorical_crossentropy',\n\t\t\tmetrics=['acc'])\n\n\t\treturn made_model, feature_vector_layer\n\n\tdef 
train_model_tosave(self, weight):\n\t\tcallback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3)\n\n\t\tif weight == list():\n\t\t\tlocal_model, feature_layer = self.build_model()\n\t\t\tgen_train_data = self.gen_train_val_data()\n\t\t\tlocal_model.fit_generator(gen_train_data, epochs=self.epochs, callbacks=[callback])\n\t\telse:\n\t\t\tlocal_model, feature_layer = self.build_model()\n\t\t\tgen_train_data = self.gen_train_val_data()\n\t\t\tlocal_model.set_weights(weight)\n\t\t\tlocal_model.fit_generator(gen_train_data, epochs=self.epochs, callbacks=[callback])\n\t\t\t\n\t\treturn local_model.get_weights()\n\n\tdef get_weight_finetune_model(self, expath, feature_layer, gtrain_data):\n\t\treloaded_model = tf.keras.models.load_model(expath)\n\t\t\n\t\tfeature_layer.trainable = True\n\n\t\tcallback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3)\n\n\t\treloaded_model.compile(\n\t\t\toptimizer=self.select_optimizer(self.optimizer, self.learning_rate*0.1),\n\t\t\tloss='categorical_crossentropy',\n\t\t\tmetrics=['accuracy'])\n\t\treloaded_model.fit_generator(gtrain_data, epochs=self.epochs+(self.epochs*2),\n\t\t\t\t\t\tinitial_epoch=self.epochs, callbacks=[callback])\n\n\t\treturn reloaded_model.get_weights() # Dense layer weight는 제외하고 반환\n\n\tdef manage_train(self):\n\t\tget_weights = list()\n\t\ttraining_weight = self.train_model_tosave(self.weights)\n\t\t\t\n\t\treturn training_weight\n"
] | [
[
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.applications.MobileNetV2",
"tensorflow.keras.models.load_model",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"tensorflow.config.experimental.list_logical_devices",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.layers.Dense",
"numpy.array",
"tensorflow.config.experimental.list_physical_devices",
"tensorflow.random.set_seed"
]
] |
pedersor/google-research | [
"6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6",
"6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6",
"6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6"
] | [
"representation_batch_rl/representation_batch_rl/cql_pixels.py",
"ml_debiaser/randomized_threshold.py",
"goemotions/analyze_data.py"
] | [
"# coding=utf-8\n# Copyright 2022 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Implementation of DDPG.\"\"\"\n\nimport typing\n\nfrom dm_env import specs as dm_env_specs\nimport numpy as np\nimport tensorflow as tf\nfrom tf_agents.specs.tensor_spec import TensorSpec\n\nfrom representation_batch_rl.batch_rl import critic\nfrom representation_batch_rl.batch_rl.encoders import ConvStack\nfrom representation_batch_rl.batch_rl.encoders import ImageEncoder\nfrom representation_batch_rl.batch_rl.encoders import make_impala_cnn_network\nfrom representation_batch_rl.representation_batch_rl import tf_utils\n\n\nclass CQL(object):\n \"\"\"Class performing CQL training.\"\"\"\n\n def __init__(self,\n observation_spec,\n action_spec,\n actor_lr = 1e-4,\n critic_lr = 3e-4,\n discount = 0.99,\n tau = 0.005,\n target_entropy = 0.0,\n reg = 0.0,\n num_cql_actions = 10,\n bc_pretraining_steps = 40_000,\n min_q_weight = 10.0,\n num_augmentations = 1,\n rep_learn_keywords = 'outer',\n batch_size = 256):\n \"\"\"Creates networks.\n\n Args:\n observation_spec: environment observation spec.\n action_spec: Action spec.\n actor_lr: Actor learning rate.\n critic_lr: Critic learning rate.\n discount: MDP discount.\n tau: Soft target update parameter.\n target_entropy: Target entropy.\n reg: Coefficient for out of distribution regularization.\n num_cql_actions: Number of actions to sample for CQL loss.\n bc_pretraining_steps: Use BC loss instead of CQL loss for N steps.\n min_q_weight: CQL alpha.\n num_augmentations: Num of random crops\n rep_learn_keywords: Representation learning loss to add.\n batch_size: Batch size\n \"\"\"\n self.num_augmentations = num_augmentations\n self.batch_size = batch_size\n self.rep_learn_keywords = rep_learn_keywords.split('__')\n\n critic_kwargs = {}\n\n if observation_spec.shape == (64, 64, 3):\n # IMPALA for Procgen\n def conv_stack():\n return make_impala_cnn_network(\n depths=[16, 32, 32], use_batch_norm=False, dropout_rate=0.)\n\n state_dim = 256\n else:\n # Reduced architecture for DMC\n def conv_stack():\n return ConvStack(observation_spec.shape)\n state_dim = 50\n\n conv_stack_critic = conv_stack()\n conv_target_stack_critic = conv_stack()\n\n if observation_spec.shape == (64, 64, 3):\n conv_stack_critic.output_size = state_dim\n conv_target_stack_critic.output_size = state_dim\n # Combine and stop_grad some of the above conv stacks\n critic_kwargs['encoder'] = ImageEncoder(\n conv_stack_critic, feature_dim=state_dim, bprop_conv_stack=True)\n # Note: the target critic does not share any weights.\n critic_kwargs['encoder_target'] = ImageEncoder(\n conv_target_stack_critic, feature_dim=state_dim, bprop_conv_stack=True)\n\n if self.num_augmentations == 0:\n dummy_state = tf.constant(\n np.zeros(shape=[1] + list(observation_spec.shape)))\n else: # account for padding of +4 everywhere and then cropping out 68\n dummy_state = tf.constant(np.zeros(shape=[1, 68, 68, 3]))\n\n @tf.function\n def init_models():\n 
critic_kwargs['encoder'](dummy_state)\n critic_kwargs['encoder_target'](dummy_state)\n\n init_models()\n\n hidden_dims = (256, 256)\n # self.actor = policies.CategoricalPolicy(state_dim, action_spec,\n # hidden_dims=hidden_dims, encoder=actor_kwargs['encoder'])\n action_dim = action_spec.maximum.item() + 1\n\n self.action_dim = action_dim\n\n self.log_alpha = tf.Variable(tf.math.log(1.0), trainable=True)\n self.log_cql_alpha = self.log_alpha\n self.alpha_optimizer = tf.keras.optimizers.Adam(learning_rate=actor_lr)\n\n self.critic = critic.Critic(\n state_dim,\n action_dim,\n hidden_dims=hidden_dims,\n encoder=critic_kwargs['encoder'],\n discrete_actions=True,\n linear='linear_Q' in self.rep_learn_keywords)\n self.critic_target = critic.Critic(\n state_dim,\n action_dim,\n hidden_dims=hidden_dims,\n encoder=critic_kwargs['encoder_target'],\n discrete_actions=True,\n linear='linear_Q' in self.rep_learn_keywords)\n\n @tf.function\n def init_models2():\n \"\"\"This function initializes all auxiliary networks (state and action encoders) with dummy input (Procgen-specific, 68x68x3, 15 actions).\n \"\"\"\n dummy_state = tf.zeros((1, 68, 68, 3), dtype=tf.float32)\n phi_s = self.critic.encoder(dummy_state)\n phi_a = tf.eye(15, dtype=tf.float32)\n if 'linear_Q' in self.rep_learn_keywords:\n _ = self.critic.critic1.state_encoder(phi_s)\n _ = self.critic.critic2.state_encoder(phi_s)\n _ = self.critic.critic1.action_encoder(phi_a)\n _ = self.critic.critic2.action_encoder(phi_a)\n _ = self.critic_target.critic1.state_encoder(phi_s)\n _ = self.critic_target.critic2.state_encoder(phi_s)\n _ = self.critic_target.critic1.action_encoder(phi_a)\n _ = self.critic_target.critic2.action_encoder(phi_a)\n\n init_models2()\n\n critic.soft_update(self.critic, self.critic_target, tau=1.0)\n self.critic_optimizer = tf.keras.optimizers.Adam(learning_rate=critic_lr)\n self.tau = tau\n\n self.reg = reg\n self.target_entropy = target_entropy\n self.discount = discount\n\n self.num_cql_actions = num_cql_actions\n self.bc_pretraining_steps = bc_pretraining_steps\n self.min_q_weight = min_q_weight\n\n self.bc = None\n\n self.model_dict = {\n 'critic': self.critic,\n 'critic_target': self.critic_target,\n 'critic_optimizer': self.critic_optimizer,\n 'alpha_optimizer': self.alpha_optimizer\n }\n\n @property\n def alpha(self):\n return tf.constant(0.)\n\n @property\n def cql_alpha(self):\n return tf.exp(self.log_cql_alpha)\n\n def fit_critic(self, states, actions,\n next_states, next_actions, rewards,\n discounts):\n \"\"\"Updates critic parameters.\n\n Args:\n states: Batch of states.\n actions: Batch of actions.\n next_states: Batch of next states.\n next_actions: Batch of next actions from training policy.\n rewards: Batch of rewards.\n discounts: Batch of masks indicating the end of the episodes.\n\n Returns:\n Dictionary with information to track.\n \"\"\"\n action_indices = tf.stack(\n [tf.range(tf.shape(actions)[0], dtype=tf.int64), actions], axis=-1)\n next_action_indices = tf.stack(\n [tf.range(tf.shape(next_actions)[0], dtype=tf.int64), next_actions],\n axis=-1)\n\n if self.num_augmentations > 1:\n target_q = 0.\n for i in range(self.num_augmentations):\n next_q1_i, next_q2_i = self.critic_target(next_states[i], actions=None)\n target_q_i = tf.expand_dims(\n rewards, 1) + self.discount * tf.expand_dims(\n discounts, 1) * tf.minimum(next_q1_i, next_q2_i)\n target_q += target_q_i\n target_q /= self.num_augmentations\n elif self.num_augmentations == 1:\n next_q1, next_q2 = self.critic_target(\n next_states[0], 
actions=None, stop_grad_features=False)\n target_q = tf.expand_dims(\n rewards, 1) + self.discount * tf.expand_dims(\n discounts, 1) * tf.minimum(next_q1, next_q2)\n else:\n next_q1, next_q2 = self.critic_target(next_states, actions=None)\n target_q = tf.expand_dims(rewards, 1) + self.discount * tf.expand_dims(\n discounts, 1) * tf.minimum(next_q1, next_q2)\n\n target_q = tf.gather_nd(target_q, indices=next_action_indices)\n\n with tf.GradientTape(watch_accessed_variables=False) as tape:\n tape.watch(self.critic.trainable_variables)\n\n if self.num_augmentations > 1:\n critic_loss = 0.\n q1 = 0.\n q2 = 0.\n for i in range(self.num_augmentations):\n q1_i, q2_i = self.critic(states[i], actions=None)\n critic_loss_i = (\n tf.losses.mean_squared_error(\n target_q, tf.gather_nd(q1_i, indices=action_indices)) +\n tf.losses.mean_squared_error(\n target_q, tf.gather_nd(q2_i, indices=action_indices)))\n q1 += q1_i\n q2 += q2_i\n critic_loss += critic_loss_i\n q1 /= self.num_augmentations\n q2 /= self.num_augmentations\n critic_loss /= self.num_augmentations\n elif self.num_augmentations == 1:\n q1, q2 = self.critic(states[0], actions=None)\n critic_loss = (\n tf.losses.mean_squared_error(\n target_q, tf.gather_nd(q1, indices=action_indices)) +\n tf.losses.mean_squared_error(\n target_q, tf.gather_nd(q2, indices=action_indices)))\n else:\n # Ensure num_augmentations is non-negative\n assert self.num_augmentations == 0\n q1, q2 = self.critic(states, actions=None)\n critic_loss = (\n tf.losses.mean_squared_error(\n target_q, tf.gather_nd(q1, indices=action_indices)) +\n tf.losses.mean_squared_error(\n target_q, tf.gather_nd(q2, indices=action_indices)))\n q = tf.minimum(q1, q2)\n cql_logsumexp = tf.reduce_logsumexp(q, 1)\n cql_loss = tf.reduce_mean(cql_logsumexp -\n tf.gather_nd(q, indices=action_indices))\n\n critic_loss += (self.reg * cql_loss)\n\n critic_grads = tape.gradient(critic_loss, self.critic.trainable_variables)\n\n self.critic_optimizer.apply_gradients(\n zip(critic_grads, self.critic.trainable_variables))\n\n critic.soft_update(self.critic, self.critic_target, tau=self.tau)\n\n return {\n 'q1': tf.reduce_mean(q1),\n 'q2': tf.reduce_mean(q2),\n 'critic_loss': critic_loss,\n 'cql_loss': cql_loss\n }\n\n @tf.function\n def update_step(self,\n replay_buffer_iter,\n train_target='both'):\n \"\"\"Performs a single training step for critic and embedding.\n\n Args:\n replay_buffer_iter: A tensorflow graph iteratable object.\n train_target: string specifying whether update RL and or representation\n\n Returns:\n Dictionary with losses to track.\n \"\"\"\n del train_target\n transition = next(replay_buffer_iter)\n numpy_dataset = isinstance(replay_buffer_iter, np.ndarray)\n # observation: n_batch x n_timesteps x 1 x H*W*3*n_frames x 1 ->\n # n_batch x H x W x 3*n_frames\n if not numpy_dataset:\n states = transition.observation[:, 0]\n next_states = transition.observation[:, 1]\n actions = transition.action[:, 0]\n rewards = transition.reward[:, 0]\n discounts = transition.discount[:, 0]\n\n if transition.observation.dtype == tf.uint8:\n states = tf.cast(states, tf.float32) / 255.\n next_states = tf.cast(next_states, tf.float32) / 255.\n else:\n states, actions, rewards, next_states, discounts = transition\n\n if self.num_augmentations > 0:\n states, next_states = tf_utils.image_aug(\n states,\n next_states,\n img_pad=4,\n num_augmentations=self.num_augmentations,\n obs_dim=64,\n channels=3,\n cropped_shape=[self.batch_size, 68, 68, 3])\n\n next_actions = self.act(next_states, data_aug=True)\n\n 
critic_dict = self.fit_critic(states, actions, next_states, next_actions,\n rewards, discounts)\n\n return critic_dict\n\n @tf.function\n def act(self, states, data_aug=False):\n \"\"\"Act with batch of states.\n\n Args:\n states: tf.tensor n_batch x 64 x 64 x 3\n data_aug: bool, whether to use stochastic data aug (else deterministic)\n\n Returns:\n action: tf.tensor\n \"\"\"\n if data_aug and self.num_augmentations > 0:\n states = states[0]\n if self.num_augmentations > 0:\n # use pad of 2 to bump 64 to 68 with 2 + 64 + 2 on each side\n img_pad = 2\n paddings = tf.constant(\n [[0, 0], [img_pad, img_pad], [img_pad, img_pad], [0, 0]],\n dtype=tf.int32)\n states = tf.cast(\n tf.pad(tf.cast(states * 255., tf.int32), paddings, 'SYMMETRIC'),\n tf.float32) / 255.\n\n q1, q2 = self.critic(states, actions=None)\n q = tf.minimum(q1, q2)\n actions = tf.argmax(q, -1)\n return actions\n",
"# coding=utf-8\n# Copyright 2022 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utils for debiasing ML models.\"\"\"\n\nimport math\n\nimport numpy as np\n\n\nclass RandomizedThreshold:\n \"\"\"Threshold optimizer (RTO) to debias models via postprocessing.\n\n See: https://arxiv.org/abs/2106.12887.\n\n This is a solver to the following optimiation problem:\n minimize gamma/2 ||x||^2 - y^Tx\n s.t. x satisfies DP constraint with tolerance eps and parameter rho.\n\n There are no assumptions about y in this code but, in general, y should be the\n predictions of the original classifier.\n \"\"\"\n\n def __init__(self, gamma=1.0, eps=0.0, rho=None):\n \"\"\"Instantiate object.\n\n Args:\n gamma: The regularization parameter gamma (for randomization). Set this to\n 1 if the goal is to minmize changes to the original scores.\n eps: Tolerance parameter for bias between 0 and 1 inclusive.\n rho: The rho parameter in the post-hoc rule. If None, rho = E[y].\n \"\"\"\n if eps < 0:\n raise ValueError('eps must be non-negative.')\n\n if gamma <= 0:\n raise ValueError('gamma must be a strictly positive number.')\n\n if rho is not None and rho <= 0:\n raise ValueError('rho must be either None or a strictly positive number.')\n\n self.num_groups = 1\n self.gamma = gamma\n self.eps = eps\n self.rho = rho\n self.avrg_y_score = 0\n\n # model paramters (Lagrange dual variables)\n self.lambdas = []\n self.mus = []\n\n def fit(self, y_orig, group_feature, sgd_steps,\n full_gradient_epochs=1_000, verbose=True, batch_size=256,\n ignore_warnings=False):\n \"\"\"Debias predictions w.r.t. the sensitive class in each demographic group.\n\n This procedure takes as input a vector y=y_orig and solves the optimization\n problem subject to the statistical parity constraint.\n minimize_x gamma/2 ||x||^2 - y^Tx\n s.t. x satisfies DP constraints with tolerance eps and parameter rho.\n\n IMPORTANT: If this is used for postprocessing a classifier,\n the scores y_orig need to be rescaled linearly to [-1, +1].\n\n Training proceeds in two rounds. First is SGD. Second is full gradient\n descent. Full gradient descent is recommended when debiasing deep neural\n nets because the scores are concentrated around the extremes\n so high preciseion might be needed. Because the loss is smooth, the lr\n in full gradient method does not need tuning. It can be set to gamma / 2.0.\n\n Args:\n y_orig: A vector of the original probability scores. 
If this is used for\n debiasing binary classifiers, y_orig = 2 * p(y=1) -1.\n group_feature: An array containing the group id of each instance starting\n from group 0 to group K-1.\n sgd_steps: Number of minibatch steps in SGD.\n full_gradient_epochs: Number of epochs in full gradient descent phase.\n verbose: Set to True to display progress.\n batch_size: Size of minibatches in SGD.\n ignore_warnings: Set to True to suppress warnings.\n\n Returns:\n None.\n \"\"\"\n if min(y_orig) >= 0:\n self.yscale = 'positive'\n else:\n self.yscale = 'negative'\n\n y_orig = np.array(y_orig)\n num_groups = len(set(group_feature)) # number of demographic groups\n\n if (min(y_orig) < -1 or max(y_orig) > 1) and not ignore_warnings:\n print('Warning: the scores y_orig are not in the range [-1, +1]. '\n 'To suppress this message, set ignore_warnings=True.')\n\n if self.yscale == 'positive' and not ignore_warnings:\n print('Warning: if this is for postprocessing a binary classifier, '\n 'the scores need to be rescaled to [-1, +1]. To suppress this '\n 'message, set ignore_warnings=True.')\n if min(group_feature) != 0 or (max(group_feature) != num_groups - 1):\n raise ValueError('group_feature should be in {0, 1, .. K-1} where '\n 'K is the number of groups. Some groups are missing.')\n\n self.num_groups = num_groups\n eps0 = self.eps / 2.0\n gamma = self.gamma\n\n # Store group membership ids in a dictionary.\n xk_groups = {}\n for k in range(num_groups):\n xk_groups[k] = []\n for i in range(len(group_feature)):\n xk_groups[group_feature[i]].append(i)\n\n for k in xk_groups:\n assert xk_groups[k] # All groups must be non-empty.\n\n self.avrg_y_score = float(sum(y_orig))/len(y_orig)\n if self.rho is None:\n if self.yscale == 'positive':\n self.rho = self.avrg_y_score\n else:\n self.rho = self.avrg_y_score / 2.0 + 0.5\n\n # The parameters we optimize in the algorithm are lambdas and mus.\n # lambdas_final and mus_final are running averages (final output).\n lambdas = np.zeros((num_groups,))\n mus = np.zeros((num_groups,))\n lambdas_final = np.zeros((num_groups,)) # running averages\n mus_final = np.zeros((num_groups,)) # running averages\n\n # SGD is carried out in each group separately due to decomposition of the\n # optimization problem.\n num_samples_sgd = sgd_steps * batch_size\n lr = gamma * math.sqrt(1.0 / num_samples_sgd)\n\n # Begin the projected SGD phase.\n if verbose:\n print('SGD phase started:')\n for k in range(num_groups):\n if verbose:\n print('Group %d.\\t\\t%02d%%'%(k, int(100*k/num_groups)), end='\\r')\n\n idx = np.array(list(xk_groups[k])) # instance IDs in group k\n group_size = len(idx)\n for _ in range(sgd_steps):\n # Using random.randint is 10x faster than random.choice.\n batch_ids = np.random.randint(0, group_size, batch_size)\n batch_ids = idx[batch_ids]\n\n # The code below is a faster implementation of:\n # xi_arg = y_orig[batch_ids] - (lambdas[k] - mus[k])\n # xi_gradient = xi_arg/gamma\n # xi_gradient = np.maximum(xi_gradient, 0.)\n # xi_gradient = np.minimum(xi_gradient, 1.)\n\n lambda_minus_mu = lambdas[k] - mus[k]\n xi_arg = np.maximum(y_orig[batch_ids], lambda_minus_mu)\n xi_arg = np.minimum(xi_arg, gamma + lambda_minus_mu)\n mean_xi = (np.mean(xi_arg) - lambda_minus_mu) / gamma\n\n lambda_gradient = eps0 + self.rho - mean_xi\n mu_gradient = eps0 - self.rho + mean_xi\n\n # stochastic gradient descent\n if eps0 > 1e-3:\n lambdas[k] = max(0, lambdas[k] - lr * batch_size * lambda_gradient)\n mus[k] = max(0, mus[k] - lr * batch_size * mu_gradient)\n else:\n # If self.eps=0, we can 
drop mus and optimize lambdas only but\n # lambdas will not be constrained to be non-negative in this case.\n lambdas[k] = lambdas[k] - lr * batch_size * lambda_gradient\n\n # lambdas_final and mus_final are running averages.\n lambdas_final[k] += lambdas[k] / sgd_steps\n mus_final[k] += mus[k] / sgd_steps\n\n # Now switch to full gradient descent.\n # Because the objective is smooth, lr=gamma/2 works.\n if verbose and full_gradient_epochs:\n print('\\nFull gradient descent phase started:')\n for k in range(num_groups):\n if verbose:\n print('Group {}.'.format(k))\n\n idx = np.array(list(xk_groups[k]))\n for _ in range(full_gradient_epochs):\n lambda_minus_mu = lambdas_final[k] - mus_final[k]\n xi_arg = np.maximum(y_orig[idx], lambda_minus_mu)\n xi_arg = np.minimum(xi_arg, gamma + lambda_minus_mu)\n mean_xi = (np.mean(xi_arg) - lambda_minus_mu) / gamma\n\n full_grad_lambda = eps0 + self.rho - mean_xi\n full_grad_mu = eps0 - self.rho + mean_xi\n\n if eps0 > 1e-3:\n lambdas_final[k] = max(0,\n lambdas_final[k] - 0.5*gamma*full_grad_lambda)\n mus_final[k] = max(0, mus_final[k] - 0.5*gamma*full_grad_mu)\n else:\n lambdas_final[k] = lambdas_final[k] - 0.5*gamma*full_grad_lambda\n\n self.lambdas = lambdas_final\n self.mus = mus_final\n\n def predict(self, y_orig, group_feature, ignore_warnings=False):\n \"\"\"Debiases the predictions.\n\n Given the original scores y, post-process them according to the learned\n model such that the predictions satisfy the desired fairness criteria.\n\n Args:\n y_orig: Original classifier scores. If this is for postprocessing binary\n classifiers, y_orig = 2 * p(y=1) -1.\n group_feature: An array containing the group id of each instance starting\n from group 0 to group K-1.\n ignore_warnings: Set to True to suppress warnings.\n\n Returns:\n y_new_prob: y_new_prob[i] is the probability of predicting the positive\n class for the instance i.\n \"\"\"\n if (((min(y_orig) >= 0 and self.yscale == 'negative') or\n (min(y_orig) < 0 and self.yscale == 'positive')) and\n not ignore_warnings):\n print('Warning: the scores seem to have a different scale from the '\n 'training data. '\n 'If the data is scaled in [0, 1], e.g. for preprocessing, or '\n 'in [-1, +1], e.g. for postprocessing, make sure the test labels '\n 'are scaled similarly.')\n\n num_examples = len(y_orig) # number of training examples\n gamma = self.gamma\n lambdas = self.lambdas\n mus = self.mus\n\n y_new_prob = np.zeros((num_examples,))\n for i in range(num_examples):\n k = group_feature[i]\n if y_orig[i] < (lambdas[k]-mus[k]):\n y_new_prob[i] = 0\n elif y_orig[i] < (lambdas[k]-mus[k]) + gamma:\n y_new_prob[i] = (1.0/gamma)*(y_orig[i]-(lambdas[k]-mus[k]))\n else:\n y_new_prob[i] = 1.0\n\n return y_new_prob\n",
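Given the fit/predict API above, a minimal usage sketch; the scores, group ids, and iteration counts below are invented for illustration, and the RandomizedThreshold class is assumed to be importable from the module above:

import numpy as np

rng = np.random.default_rng(0)
y_orig = rng.uniform(-1.0, 1.0, size=1000)     # scores pre-scaled to [-1, +1]
group_feature = rng.integers(0, 2, size=1000)  # group ids in {0, 1}

rto = RandomizedThreshold(gamma=1.0, eps=0.05)
rto.fit(y_orig, group_feature, sgd_steps=200,
        full_gradient_epochs=100, verbose=False)
probs = rto.predict(y_orig, group_feature)     # P(predict positive) per instance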
"# coding=utf-8\n# Copyright 2022 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Script for analyzing the annotations of GoEmotions.\n\nThe analysis includes calculating high-level statistics as well as correlation\namong emotion labels.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport json\nimport os\nfrom absl import app\nfrom absl import flags\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom scipy.cluster.hierarchy import dendrogram\nfrom scipy.cluster.hierarchy import linkage\nfrom scipy.spatial.distance import pdist\nimport seaborn as sns\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\"data\", \"data/full_dataset\",\n \"Directory containing full dataset.\")\n\nflags.DEFINE_string(\"plot_dir\", \"plots\",\n \"Directory for saving plots and analyses.\")\n\nflags.DEFINE_string(\"emotion_file\", \"data/emotions.txt\",\n \"File containing list of emotions.\")\nflags.DEFINE_string(\"sentiment_dict\", \"data/sentiment_dict.json\",\n \"Sentiment dictionary.\")\n\n\ndef CheckAgreement(ex, min_agreement, all_emotions, max_agreement=100):\n \"\"\"Return the labels that at least min_agreement raters agree on.\"\"\"\n sum_ratings = ex[all_emotions].sum(axis=0)\n agreement = ((sum_ratings >= min_agreement) & (sum_ratings <= max_agreement))\n return \",\".join(sum_ratings.index[agreement].tolist())\n\n\ndef CountLabels(labels):\n if (not isinstance(labels, float)) and labels:\n return len(labels.split(\",\"))\n return 0\n\n\ndef main(_):\n print(\"Loading data...\")\n dfs = []\n for filename in os.listdir(FLAGS.data):\n if filename.endswith(\".csv\"):\n dfs.append(\n pd.read_csv(os.path.join(FLAGS.data, filename), encoding=\"utf-8\"))\n data = pd.concat(dfs)\n print(\"%d Examples\" % (len(set(data[\"id\"]))))\n print(\"%d Annotations\" % len(data))\n\n if not os.path.isdir(FLAGS.plot_dir):\n os.makedirs(FLAGS.plot_dir)\n\n with open(FLAGS.emotion_file, \"r\") as f:\n all_emotions = f.read().splitlines()\n all_emotions_neutral = all_emotions + [\"neutral\"]\n print(\"%d emotion Categories\" % len(all_emotions))\n\n print(\"%d unique raters\" % len(data[\"rater_id\"].unique()))\n print(\"%.3f marked unclear\" %\n (data[\"example_very_unclear\"].sum() / len(data)))\n\n # Since the ones marked as difficult have no labels, exclude those\n data = 
data[data[all_emotions_neutral].sum(axis=1) != 0]\n\n print(\"Distribution of number of labels per example:\")\n print(data[all_emotions_neutral].sum(axis=1).value_counts() / len(data))\n print(\"%.2f with more than 3 labels\" %\n ((data[all_emotions_neutral].sum(axis=1) > 3).sum() /\n len(data))) # more than 3 labels\n\n print(\"Label distributions:\")\n print((data[all_emotions_neutral].sum(axis=0).sort_values(ascending=False) /\n len(data) * 100).round(2))\n\n print(\"Plotting label correlations...\")\n ratings = data.groupby(\"id\")[all_emotions].mean()\n\n # Compute the correlation matrix\n corr = ratings.corr()\n\n # Generate a mask for the upper triangle\n mask = np.zeros_like(corr, dtype=bool)\n mask[np.triu_indices_from(mask)] = True\n\n # Set up the matplotlib figure\n fig, _ = plt.subplots(figsize=(11, 9))\n\n # Generate a custom diverging colormap\n cmap = sns.diverging_palette(220, 10, as_cmap=True)\n\n # Draw the heatmap with the mask and correct aspect ratio\n sns.heatmap(\n corr,\n mask=mask,\n cmap=cmap,\n vmax=.3,\n center=0,\n square=True,\n linewidths=.5,\n cbar_kws={\"shrink\": .5})\n fig.savefig(\n FLAGS.plot_dir + \"/correlations.pdf\",\n dpi=500,\n format=\"pdf\",\n bbox_inches=\"tight\")\n\n print(\"Plotting hierarchical relations...\")\n z = linkage(\n pdist(ratings.T, metric=\"correlation\"),\n method=\"ward\",\n optimal_ordering=True)\n fig = plt.figure(figsize=(11, 4), dpi=400)\n plt.xlabel(\"\")\n plt.ylabel(\"\")\n dendrogram(\n z,\n labels=ratings.columns,\n leaf_rotation=90., # rotates the x axis labels\n leaf_font_size=12, # font size for the x axis labels\n color_threshold=1.05,\n )\n fig.savefig(\n FLAGS.plot_dir + \"/hierarchical_clustering.pdf\",\n dpi=600,\n format=\"pdf\",\n bbox_inches=\"tight\")\n\n sent_color_map = {\n \"positive\": \"#BEECAF\",\n \"negative\": \"#94bff5\",\n \"ambiguous\": \"#FFFC9E\"\n }\n with open(FLAGS.sentiment_dict) as f:\n sent_dict = json.loads(f.read())\n sent_colors = {}\n for e in all_emotions:\n if e in sent_dict[\"positive\"]:\n sent_colors[e] = sent_color_map[\"positive\"]\n elif e in sent_dict[\"negative\"]:\n sent_colors[e] = sent_color_map[\"negative\"]\n else:\n sent_colors[e] = sent_color_map[\"ambiguous\"]\n\n # Generate a mask for the diagonal\n mask = np.zeros_like(corr, dtype=bool)\n mask[np.diag_indices(mask.shape[0])] = True\n\n # Generate a custom diverging colormap\n cmap = sns.diverging_palette(220, 10, as_cmap=True)\n\n row_colors = pd.Series(\n corr.columns, index=corr.columns, name=\"sentiment\").map(sent_colors)\n\n # Draw the heatmap with the mask and correct aspect ratio\n g = sns.clustermap(\n corr,\n mask=mask,\n cmap=cmap,\n vmax=.3,\n vmin=-0.3,\n center=0,\n row_linkage=z,\n col_linkage=z,\n col_colors=row_colors,\n linewidths=.1,\n cbar_kws={\n \"ticks\": [-.3, -.15, 0, .15, .3],\n \"use_gridspec\": False,\n \"orientation\": \"horizontal\",\n },\n figsize=(10, 10))\n\n g.ax_row_dendrogram.set_visible(False)\n g.cax.set_position([.34, -0.05, .5, .03])\n\n for label in sent_color_map:\n g.ax_col_dendrogram.bar(\n 0, 0, color=sent_color_map[label], label=label, linewidth=0)\n\n g.ax_col_dendrogram.legend(\n title=\"Sentiment\", loc=\"center\", bbox_to_anchor=(1.1, .5))\n\n g.savefig(FLAGS.plot_dir + \"/hierarchical_corr.pdf\", dpi=600, format=\"pdf\")\n\n print(\"Calculating agreements...\")\n unique_labels = data.groupby(\"id\").apply(CheckAgreement, 1,\n all_emotions_neutral).to_dict()\n data[\"unique_labels\"] = data[\"id\"].map(unique_labels)\n agree_dict_2 = 
data.groupby(\"id\").apply(CheckAgreement, 2,\n all_emotions_neutral).to_dict()\n data[\"agree_2\"] = data[\"id\"].map(agree_dict_2)\n agree_dict = data.groupby(\"id\").apply(CheckAgreement, 3,\n all_emotions_neutral).to_dict()\n data[\"agree_3\"] = data[\"id\"].map(agree_dict)\n agree_dict = data.groupby(\"id\").apply(CheckAgreement, 1, all_emotions_neutral,\n 1).to_dict()\n data[\"no_agree\"] = data[\"id\"].map(agree_dict)\n\n filtered_2 = data[data[\"agree_2\"].str.len() > 0]\n print(\n \"%d (%d%%) of the examples have 2+ raters agreeing on at least one emotion label\"\n % (len(filtered_2[\"id\"].unique()), (len(filtered_2) / len(data) * 100)))\n\n filtered_3 = data[data[\"agree_3\"].str.len() > 0]\n print(\n \"%d (%d%%) of the examples have 3+ raters agreeing on at least one emotion label\"\n % (len(filtered_3[\"id\"].unique()), (len(filtered_3) / len(data) * 100)))\n\n print(\"Plotting number of labels...\")\n data[\"num_unique_prefilter\"] = data[\"unique_labels\"].apply(CountLabels)\n data[\"num_unique_postfilter\"] = data[\"agree_2\"].apply(CountLabels)\n unique_ex = data.drop_duplicates(\"id\")\n df = pd.DataFrame({\n \"count\":\n unique_ex[\"num_unique_prefilter\"].tolist() +\n unique_ex[\"num_unique_postfilter\"].tolist(),\n \"type\": [\"pre-filter\"] * len(unique_ex) + [\"post-filter\"] * len(unique_ex)\n })\n\n fig = plt.figure(dpi=600)\n ax = sns.countplot(\n data=df, x=\"count\", hue=\"type\", palette=[\"skyblue\", \"navy\"])\n plt.xlim(-.5, 7.5)\n plt.legend(loc=\"center right\", fontsize=\"x-large\")\n plt.ylabel(\"Number of Examples\", fontsize=\"x-large\")\n plt.xlabel(\"Number of Labels\", fontsize=\"x-large\")\n plt.draw()\n labels = [item.get_text() for item in ax.get_yticklabels()]\n ax.set_yticklabels([\"%dk\" % (int(int(label) / 1000)) for label in labels])\n plt.tight_layout()\n\n fig.savefig(\n FLAGS.plot_dir + \"/number_of_labels.pdf\",\n dpi=600,\n format=\"pdf\",\n bbox_inches=\"tight\")\n\n print(\"Proportion of agreement per label:\")\n print(\n filtered_2[all_emotions_neutral].sum(axis=0).sort_values(ascending=False)\n / len(data))\n\n\nif __name__ == \"__main__\":\n app.run(main)\n"
] | [
[
"tensorflow.reduce_logsumexp",
"tensorflow.keras.optimizers.Adam",
"tensorflow.math.log",
"tensorflow.minimum",
"tensorflow.zeros",
"numpy.zeros",
"tensorflow.gather_nd",
"tensorflow.shape",
"tensorflow.eye",
"tensorflow.reduce_mean",
"tensorflow.expand_dims",
"tensorflow.exp",
"tensorflow.GradientTape",
"tensorflow.cast",
"tensorflow.argmax",
"tensorflow.constant"
],
[
"numpy.zeros",
"numpy.maximum",
"numpy.array",
"numpy.random.randint",
"numpy.mean",
"numpy.minimum"
],
[
"numpy.zeros_like",
"scipy.spatial.distance.pdist",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.draw",
"scipy.cluster.hierarchy.dendrogram",
"numpy.triu_indices_from",
"pandas.Series",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.ylabel",
"pandas.concat",
"numpy.diag_indices",
"matplotlib.pyplot.xlabel"
]
] |
bburan/cochlear | [
"1e7ea32730a794b9f6936440a32e4a82c4bf73e7"
] | [
"cochlear/noise_exposure.py"
] | [
"from __future__ import division\n\nimport logging\nlog = logging.getLogger(__name__)\n\nimport numpy as np\nfrom scipy import signal\n\nfrom traits.api import Instance, Float, Property, Int\nfrom traitsui.api import (View, Item, ToolBar, Action, ActionGroup, VGroup,\n HSplit, MenuBar, Menu, HGroup)\nfrom chaco.api import Plot, ArrayPlotData\nfrom enable.api import Component, ComponentEditor\nfrom pyface.api import ImageResource\n\nfrom experiment import (AbstractParadigm, Expression, AbstractData,\n AbstractController, AbstractExperiment, icon_dir)\nfrom experiment.channel import FileChannel\nfrom experiment.coroutine import blocked, rms\n\nfrom neurogen.block_definitions import (BandlimitedNoise, Cos2Envelope)\nfrom neurogen.calibration import InterpCalibration\nfrom neurogen.calibration.util import (psd, psd_freq, tone_power_conv_nf)\nfrom neurogen.util import db, dbtopa\n\nfrom cochlear.nidaqmx import (DAQmxDefaults, DAQmxChannel,\n ContinuousDAQmxPlayer, DAQmxAttenControl,\n ContinuousDAQmxSource)\n\nDAC_FS = 100e3\nADC_FS = 100e3\n\nclass NoiseExposureData(AbstractData):\n\n noise_channel = Instance('experiment.channel.Channel')\n\n def _noise_channel_default(self):\n return FileChannel(node=self.store_node, name='mic_input',\n expected_duration=60*60*2, dtype=np.float32)\n\n\nclass NoiseExposureParadigm(AbstractParadigm):\n\n kw = dict(context=True, log=True)\n center_frequency = \\\n Expression(6e3, label='Center frequency (Hz)', dtype=np.float, **kw)\n bandwidth = Expression(4e3, label='Bandwidth (Hz)', dtype=np.float, **kw)\n rs = Expression(85, label='Min. atten. in stop band (dB)',\n dtype=np.float, **kw)\n rp = Expression(0.3, label='Max. ripple in pass band (dB)',\n dtype=np.float, **kw)\n order = Expression(7, label='Filter order', dtype=np.float, **kw)\n\n level = Expression(100, label='Level (dB SPL)', dtype=np.float, **kw)\n seed = Expression(1, label='Noise seed', dtype=np.int, **kw)\n duration = Expression(60, label='Exposure duration (sec)',\n dtype=np.float, **kw)\n rise_time = Expression(0, label='Noise rise time (sec)',\n dtype=np.float, **kw)\n\n mic_sens = Float(2.7, label='Mic. sens. (mV/Pa)', dtype=np.float, **kw)\n mic_sens_dbv = Property(depends_on='mic_sens', dtype=np.float,\n label='Mic. sens. dB(V/Pa)', **kw)\n speaker_sens = Float(86.89, label='Speaker sens. (mV/Pa)', dtype=np.float,\n **kw)\n speaker_sens_dbv = Property(depends_on='speaker_sens', dtype=np.float,\n label='Speaker sens. 
dB(V/Pa)', **kw)\n\n def _get_mic_sens_dbv(self):\n return db(self.mic_sens*1e-3)\n\n def _get_speaker_sens_dbv(self):\n return db(self.speaker_sens*1e-3)\n\n traits_view = View(\n VGroup(\n VGroup(\n VGroup(\n 'center_frequency',\n 'bandwidth',\n 'rp',\n 'rs',\n 'order',\n label='Filter settings',\n show_border=True\n ),\n 'level',\n 'seed',\n 'duration',\n 'rise_time',\n label='Stimulus',\n show_border=True\n ),\n HGroup(\n VGroup('mic_sens', 'speaker_sens'),\n VGroup('mic_sens_dbv', 'speaker_sens_dbv', style='readonly'),\n label='Hardware settings',\n show_border=True\n ),\n )\n )\n\n\nclass NoiseExposureController(AbstractController, DAQmxDefaults):\n\n mic_cal = Instance('neurogen.calibration.InterpCalibration')\n poll_rate = 1\n\n def setup_experiment(self, info=None):\n # Set up the speaker output\n token = BandlimitedNoise(name='noise') >> Cos2Envelope(name='envelope')\n channel = DAQmxChannel(calibration=InterpCalibration.as_attenuation(),\n token=token, voltage_min=-10, voltage_max=10)\n iface_dac = ContinuousDAQmxPlayer(fs=DAC_FS, done_callback=self.stop)\n iface_dac.add_channel(channel, name='primary')\n\n # Set up the mic input\n adc_pipeline = blocked(int(ADC_FS*self.poll_rate), -1, self)\n iface_adc = ContinuousDAQmxSource(fs=ADC_FS, pipeline=adc_pipeline,\n callback_samples=25e3,\n input_line='/Dev1/ai1')\n\n # Save the results\n self.channel = channel\n self.iface_adc = iface_adc\n self.iface_dac = iface_dac\n self.token = token\n super(NoiseExposureController, self).setup_experiment(info)\n\n def send(self, data):\n self.model.update_plots(ADC_FS, data)\n self.model.data.noise_channel.send(data)\n\n def start_experiment(self, info=None):\n self.refresh_context(evaluate=True)\n self.iface_adc.start()\n self.iface_dac.play_continuous()\n self.log_trial()\n\n def stop_experiment(self, info=None):\n self.iface_adc.stop()\n self.iface_dac.stop()\n\n def set_duration(self, value):\n self.iface_dac.set_value('primary.envelope.duration', value)\n self.iface_dac.duration = value\n self.model.overall_rms_plot.index_range.high_setting = value\n\n def set_ramp_duration(self, value):\n self.iface_dac.set_value('primary.envelope.rise_time', value)\n self.iface_dac.duration = value\n\n def set_center_frequency(self, value):\n self.iface_dac.set_value('primary.noise.fc', value)\n\n def set_bandwidth(self, value):\n self.iface_dac.set_value('primary.noise.bandwidth', value)\n\n def set_level(self, value):\n self.iface_dac.set_value('primary.noise.level', value)\n\n def set_seed(self, value):\n self.iface_dac.set_value('primary.noise.seed', value)\n\n def set_rise_time(self, value):\n self.iface_dac.set_value('primary.envelope.rise_time', value)\n\n def set_order(self, value):\n self.iface_dac.set_value('primary.noise.order', value)\n\n def set_rs(self, value):\n self.iface_dac.set_value('primary.noise.rs', value)\n\n def set_rp(self, value):\n self.iface_dac.set_value('primary.noise.rp', value)\n\n def set_speaker_sens_dbv(self, value):\n self.channel.calibration = InterpCalibration([0, 100e3], [value, value])\n\n def set_mic_sens(self, value):\n level = self.get_current_value('level')\n max_value = dbtopa(level)*value*1e-3\n max_value_decade = 10**np.ceil(np.log10(max_value*2))*10\n self.iface_adc.expected_range = max_value_decade\n\n\nclass NoiseExposureExperiment(AbstractExperiment):\n\n paradigm = Instance(NoiseExposureParadigm, ())\n data = Instance(AbstractData, ())\n\n rms_data = Instance(ArrayPlotData)\n recent_rms_plot = Instance(Component)\n overall_rms_plot = 
Instance(Component)\n fft_plot = Instance(Component)\n\n current_time = Float(0)\n current_update = Int(0)\n\n current_spl = Float(np.nan, label='Current inst. output (dB SPL)')\n current_spl_average = Float(np.nan, label='Average of last min. (dB SPL)')\n overall_spl_average = Float(np.nan, label='Average output (dB SPL)')\n\n _coefs = None\n _zf = None\n\n def update_plots(self, fs, data):\n self.current_update += 1\n data = signal.detrend(data.ravel())\n\n # Plot RMS\n if self._coefs is None:\n self._coefs = signal.iirfilter(2, (400.0/(fs/2), 40e3/(fs/2)))\n b, a = self._coefs\n self._zf = signal.lfiltic(b, a, data[:len(a)-1], data[:len(b)-1])\n b, a = self._coefs\n\n data, self._zf = signal.lfilter(b, a, data, zi=self._zf)\n rms = np.mean(data**2)**0.5\n db_rms = db(rms)-self.paradigm.mic_sens_dbv-db(20e-6)\n self.append_data(time=self.current_time, rms=db_rms)\n self.current_time += len(data)/fs\n\n self.current_spl = db_rms\n self.current_spl_average = self.rms_data.get_data('rms')[-60:].mean()\n self.overall_spl_average = self.rms_data.get_data('rms').mean()\n\n w_frequency = psd_freq(data, fs)\n w_psd = psd(data, fs, 'hamming')\n w_psd_db = db(w_psd)-self.paradigm.mic_sens_dbv-db(20e-6)\n self.rms_data.update_data(frequency=w_frequency, psd=w_psd_db)\n\n def _rms_data_default(self):\n return ArrayPlotData(time=[], rms=[], frequency=[], psd=[])\n\n def append_data(self, **kwargs):\n for k, v in kwargs.items():\n kwargs[k] = np.append(self.rms_data.get_data(k), v)\n self.rms_data.update_data(**kwargs)\n\n def _overall_rms_plot_default(self):\n plot = Plot(self.rms_data)\n plot.index_range.low_setting = 0\n plot.plot(('time', 'rms'))\n return plot\n\n def _recent_rms_plot_default(self):\n plot = Plot(self.rms_data)\n plot.index_range.high_setting = 'auto'\n plot.index_range.low_setting = 'track'\n plot.index_range.tracking_amount = 30\n plot.value_range.high_setting = 'auto'\n plot.value_range.low_setting = 'track'\n plot.value_range.tracking_amount = 5\n plot.plot(('time', 'rms'))\n return plot\n\n def _fft_plot_default(self):\n plot = Plot(self.rms_data)\n plot.index_range.low_setting = 1e3\n plot.index_range.high_setting = 20e3\n plot.value_range.low_setting = 10\n plot.value_range.high_setting = 80\n plot.plot(('frequency', 'psd'))\n plot.index_scale = 'log'\n return plot\n\n traits_view = View(\n HSplit(\n VGroup(\n VGroup(\n Item('paradigm', style='custom', show_label=False,\n width=200),\n show_border=True,\n label='Settings',\n enabled_when=\"handler.state!='running'\",\n ),\n VGroup(\n 'current_spl',\n 'current_spl_average',\n 'overall_spl_average',\n style='readonly',\n show_border=True,\n label='Output',\n ),\n ),\n VGroup(\n HGroup(\n Item('overall_rms_plot',\n editor=ComponentEditor(width=200, height=200)),\n Item('recent_rms_plot',\n editor=ComponentEditor(width=200, height=200)),\n show_labels=False,\n ),\n Item('fft_plot', show_label=False,\n editor=ComponentEditor(width=200, height=200)),\n ),\n show_labels=False,\n ),\n resizable=True,\n toolbar=ToolBar(\n Action(name='Start', action='start',\n image=ImageResource('1rightarrow', icon_dir),\n enabled_when='handler.state==\"uninitialized\"'),\n Action(name='Stop', action='stop',\n image=ImageResource('stop', icon_dir),\n enabled_when='handler.state==\"running\"'),\n ),\n width=0.5,\n height=0.5,\n id='lbhb.NoiseExposureExperiment',\n )\n\n\ndef configure_logging(filename):\n time_format = '[%(asctime)s] :: %(name)s - %(levelname)s - %(message)s'\n simple_format = '%(name)s - %(message)s'\n\n logging_config = {\n 
'version': 1,\n 'formatters': {\n 'time': {'format': time_format},\n 'simple': {'format': simple_format},\n },\n 'handlers': {\n # This is what gets printed out to the console\n 'console': {\n 'class': 'logging.StreamHandler',\n 'formatter': 'simple',\n 'level': 'DEBUG',\n },\n # This is what gets saved to the file\n 'file': {\n 'class': 'logging.FileHandler',\n 'formatter': 'time',\n 'filename': filename,\n 'level': 'DEBUG',\n }\n },\n 'loggers': {\n '__main__': {'level': 'ERROR'},\n 'cochlear': {'level': 'ERROR'},\n 'cochlear.nidaqmx': {'level': 'ERROR'},\n 'neurogen.block_definitions': {'level': 'DEBUG'},\n },\n 'root': {\n 'handlers': ['console', 'file'],\n },\n }\n logging.config.dictConfig(logging_config)\n\n\nif __name__ == '__main__':\n import logging.config\n import PyDAQmx as pyni\n import warnings\n import tables\n pyni.DAQmxResetDevice('Dev1')\n configure_logging('temp.log')\n log.debug('====================== MAIN =======================')\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n with tables.open_file('temp.hdf5', 'w') as fh:\n data = NoiseExposureData(store_node=fh.root)\n controller = NoiseExposureController()\n NoiseExposureExperiment(data=data) \\\n .configure_traits(handler=controller)\n"
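The dB SPL arithmetic in update_plots above can be sanity-checked by hand. A sketch, assuming db(x) = 20*log10(x), which is the convention the code implies by subtracting the mic sensitivity in dB(V/Pa) and the 20 uPa reference (the measured RMS voltage below is hypothetical):

import numpy as np

def db(x):
    # Assumed 20*log10 convention for neurogen.util.db.
    return 20 * np.log10(x)

mic_sens_dbv = db(2.7e-3)  # 2.7 mV/Pa microphone sensitivity, as in the paradigm
rms_voltage = 0.5          # hypothetical measured RMS in volts
# volts -> pascals (subtract sensitivity in dB), pascals -> dB SPL (re 20 uPa)
spl = db(rms_voltage) - mic_sens_dbv - db(20e-6)
print('%.1f dB SPL' % spl)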
] | [
[
"scipy.signal.lfilter",
"numpy.log10",
"scipy.signal.iirfilter",
"numpy.mean"
]
] |
visinf/deblur-devil | [
"53cc4c72a4ddb9dcede5ee52dc53000c39ff5dab",
"53cc4c72a4ddb9dcede5ee52dc53000c39ff5dab"
] | [
"contrib/cmap.py",
"losses/classification_losses.py"
] | [
"# Author: Jochen Gast <[email protected]>\n\nimport numpy as np\nimport torch\nfrom matplotlib import cm\nfrom torch import nn\n\n# ----------------------------------------------------------------------------------------\n# See https://matplotlib.org/examples/color/colormaps_reference.html\n#\n# Typical choices are: 'gray', jet', 'viridis', 'hot'\n# ----------------------------------------------------------------------------------------\n\nCOLORMAPS = [\n\n # Perceptually Uniform Sequential\n 'viridis', 'plasma', 'inferno', 'magma',\n\n # Sequential\n 'Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds',\n 'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu',\n 'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn',\n\n # Sequential (2)\n 'binary', 'gist_yarg', 'gist_gray', 'gray', 'bone', 'pink',\n 'spring', 'summer', 'autumn', 'winter', 'cool', 'Wistia',\n 'hot', 'afmhot', 'gist_heat', 'copper',\n\n # Diverging\n 'PiYG', 'PRGn', 'BrBG', 'PuOr', 'RdGy', 'RdBu',\n 'RdYlBu', 'RdYlGn', 'Spectral', 'coolwarm', 'bwr', 'seismic',\n\n # Qualitative,\n 'Pastel1', 'Pastel2', 'Paired', 'Accent',\n 'Dark2', 'Set1', 'Set2', 'Set3',\n 'tab10', 'tab20', 'tab20b', 'tab20c',\n\n # Miscellaneous\n 'flag', 'prism', 'ocean', 'gist_earth', 'terrain', 'gist_stern',\n 'gnuplot', 'gnuplot2', 'CMRmap', 'cubehelix', 'brg', 'hsv',\n 'gist_rainbow', 'rainbow', 'jet', 'nipy_spectral', 'gist_ncar'\n\n]\n\n\nclass ColorMap(nn.Module):\n #\n # Note: uint8 inputs are never normalized.\n # float inputs are normalized if normalize_floats=True\n #\n def __init__(self, cmap='jet', normalize_floats=True, output_dtype=torch.uint8):\n super().__init__()\n if cmap not in COLORMAPS:\n raise ValueError('Unknown colormap!')\n self.normalize_floats = normalize_floats\n self.cmap = torch.from_numpy(self.get_cmap_as_float_array(cmap)).view(-1, 3)\n if output_dtype == torch.uint8:\n self.cmap = (255 * self.cmap).byte()\n\n @staticmethod\n def get_cmap_as_float_array(cmap_name):\n raw_cmap = cm.get_cmap(cmap_name, 256)\n cmap_array = raw_cmap(np.arange(256))[:, 0:3] # remove alpha channels\n return cmap_array\n\n @staticmethod\n def min2d(tensor):\n b, c, h, w = tensor.size()\n return tensor.view(b, c, h * w).min(dim=2, keepdim=True)[0].unsqueeze(dim=3)\n\n @staticmethod\n def max2d(tensor):\n b, c, h, w = tensor.size()\n return tensor.view(b, c, h * w).max(dim=2, keepdim=True)[0].unsqueeze(dim=3)\n\n def forward(self, value):\n b, c, h, w = value.size()\n assert c == 1, 'ColorMap expects second dimension of size 1L'\n if not isinstance(value, torch.ByteTensor):\n if self.normalize_floats:\n cmin = self.min2d(value)\n cmax = self.max2d(value)\n normalized = (value - cmin) / torch.max(cmax - cmin, torch.ones_like(value) * 1e-5)\n normalized = (normalized * 255).long()\n else:\n normalized = (value * 255).long()\n else:\n normalized = value.long()\n self.cmap = self.cmap.to(value.device)\n z = torch.index_select(self.cmap, dim=0, index=normalized.view(-1))\n return z.transpose(0, 1).contiguous().view(b, 3, h, w)\n",
"# Author: Jochen Gast <[email protected]>\r\n\r\nimport torch\r\nimport torch.nn as nn\r\n\r\nfrom losses import factory\r\n\r\n\r\nclass ClassificationLoss(nn.Module):\r\n def __init__(self, args, topk=(1, 2, 3), reduction='mean'):\r\n super().__init__()\r\n self.args = args\r\n self.cross_entropy = torch.nn.CrossEntropyLoss(reduction=reduction)\r\n self.topk = topk\r\n\r\n @staticmethod\r\n def accuracy(output, target, topk=(1,)):\r\n maxk = max(topk)\r\n batch_size = target.size(0)\r\n _, pred = output.topk(maxk, 1, True, True)\r\n pred = pred.t()\r\n correct = pred.eq(target.view(1, -1))\r\n res = []\r\n for k in topk:\r\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\r\n res.append(correct_k.mul_(100.0 / batch_size))\r\n return res\r\n\r\n def forward(self, output_dict, target_dict):\r\n output = output_dict[\"output1\"]\r\n target = target_dict[\"target1\"]\r\n # compute actual losses\r\n cross_entropy = self.cross_entropy(output, target)\r\n # create dictonary for losses\r\n loss_dict = {\r\n \"xe\": cross_entropy,\r\n }\r\n acc_k = ClassificationLoss.accuracy(output, target, topk=self.topk)\r\n for acc, k in zip(acc_k, self.topk):\r\n loss_dict[\"top%i\" % k] = acc\r\n return loss_dict\r\n\r\n\r\nfactory.register(\"ClassificationLoss\", ClassificationLoss)\r\n"
] | [
[
"numpy.arange",
"matplotlib.cm.get_cmap",
"torch.ones_like"
],
[
"torch.nn.CrossEntropyLoss"
]
] |
alshedivat/federated | [
"fe9f44a504bc51b603a3ab9a181148da0aa9612f",
"fe9f44a504bc51b603a3ab9a181148da0aa9612f"
] | [
"optimization/main/federated_trainer.py",
"gans/experiments/emnist/preprocessing/filtered_emnist_data_utils.py"
] | [
"# Copyright 2020, Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Runs federated training on various tasks using a generalized form of FedAvg.\n\nSpecifically, we create (according to flags) an iterative processes that allows\nfor client and server learning rate schedules, as well as various client and\nserver optimization methods. For more details on the learning rate scheduling\nand optimization methods, see `shared/optimizer_utils.py`. For details on the\niterative process, see `shared/fed_avg_schedule.py`.\n\"\"\"\n\nimport collections\nimport os.path\nfrom typing import Callable\n\nfrom absl import app\nfrom absl import flags\nimport tensorflow as tf\nimport tensorflow_federated as tff\n\nfrom optimization.cifar100 import federated_cifar100\nfrom optimization.emnist import federated_emnist\nfrom optimization.emnist_ae import federated_emnist_ae\nfrom optimization.shakespeare import federated_shakespeare\nfrom optimization.shared import fed_avg_schedule\nfrom optimization.shared import optimizer_utils\nfrom optimization.shared import training_specs\nfrom optimization.stackoverflow import federated_stackoverflow\nfrom optimization.stackoverflow_lr import federated_stackoverflow_lr\nfrom utils import training_loop\nfrom utils import utils_impl\n\n_SUPPORTED_TASKS = [\n 'cifar100', 'emnist_cr', 'emnist_ae', 'shakespeare', 'stackoverflow_nwp',\n 'stackoverflow_lr'\n]\n\nwith utils_impl.record_hparam_flags() as optimizer_flags:\n # Defining optimizer flags\n optimizer_utils.define_optimizer_flags('client')\n optimizer_utils.define_optimizer_flags('server')\n optimizer_utils.define_lr_schedule_flags('client')\n optimizer_utils.define_lr_schedule_flags('server')\n\nwith utils_impl.record_hparam_flags() as shared_flags:\n # Federated training hyperparameters\n flags.DEFINE_integer('client_epochs_per_round', 1,\n 'Number of epochs in the client to take per round.')\n flags.DEFINE_integer('client_batch_size', 20, 'Batch size on the clients.')\n flags.DEFINE_integer('clients_per_round', 10,\n 'How many clients to sample per round.')\n flags.DEFINE_integer('client_datasets_random_seed', 1,\n 'Random seed for client sampling.')\n\n # Training loop configuration\n flags.DEFINE_string(\n 'experiment_name', None, 'The name of this experiment. 
Will be appended to '\n '--root_output_dir to separate experiment results.')\n flags.mark_flag_as_required('experiment_name')\n flags.DEFINE_string('root_output_dir', '/tmp/fed_opt/',\n 'Root directory for writing experiment output.')\n flags.DEFINE_integer('total_rounds', 200, 'Number of total training rounds.')\n flags.DEFINE_integer(\n 'rounds_per_eval', 1,\n 'How often to evaluate the global model on the validation dataset.')\n flags.DEFINE_integer('rounds_per_checkpoint', 50,\n 'How often to checkpoint the global model.')\n\nwith utils_impl.record_hparam_flags() as task_flags:\n # Task specification\n flags.DEFINE_enum('task', None, _SUPPORTED_TASKS,\n 'Which task to perform federated training on.')\n\nwith utils_impl.record_hparam_flags() as cifar100_flags:\n # CIFAR-100 flags\n flags.DEFINE_integer('cifar100_crop_size', 24, 'The height and width of '\n 'images after preprocessing.')\n flags.DEFINE_bool(\n 'cifar100_distort_train_images', True, 'If set to True, '\n 'train images will be randomly cropped. Otherwise, all '\n 'images will simply be resized.')\n\nwith utils_impl.record_hparam_flags() as emnist_cr_flags:\n # EMNIST CR flags\n flags.DEFINE_enum(\n 'emnist_cr_model', 'cnn', ['cnn', '2nn'], 'Which model to '\n 'use. This can be a convolutional model (cnn) or a two '\n 'hidden-layer densely connected network (2nn).')\n\nwith utils_impl.record_hparam_flags() as shakespeare_flags:\n # Shakespeare flags\n flags.DEFINE_integer(\n 'shakespeare_sequence_length', 80,\n 'Length of character sequences to use for the RNN model.')\n\nwith utils_impl.record_hparam_flags() as so_nwp_flags:\n # Stack Overflow NWP flags\n flags.DEFINE_integer('so_nwp_vocab_size', 10000, 'Size of vocab to use.')\n flags.DEFINE_integer('so_nwp_num_oov_buckets', 1,\n 'Number of out of vocabulary buckets.')\n flags.DEFINE_integer('so_nwp_sequence_length', 20,\n 'Max sequence length to use.')\n flags.DEFINE_integer('so_nwp_max_elements_per_user', 1000, 'Max number of '\n 'training sentences to use per user.')\n flags.DEFINE_integer(\n 'so_nwp_num_validation_examples', 10000, 'Number of examples '\n 'to use from test set for per-round validation.')\n\nwith utils_impl.record_hparam_flags() as so_lr_flags:\n # Stack Overflow LR flags\n flags.DEFINE_integer('so_lr_vocab_tokens_size', 10000,\n 'Vocab tokens size used.')\n flags.DEFINE_integer('so_lr_vocab_tags_size', 500, 'Vocab tags size used.')\n flags.DEFINE_integer(\n 'so_lr_num_validation_examples', 10000, 'Number of examples '\n 'to use from test set for per-round validation.')\n flags.DEFINE_integer('so_lr_max_elements_per_user', 1000,\n 'Max number of training '\n 'sentences to use per user.')\n\nFLAGS = flags.FLAGS\n\nTASK_FLAGS = collections.OrderedDict(\n cifar100=cifar100_flags,\n emnist_cr=emnist_cr_flags,\n shakespeare=shakespeare_flags,\n stackoverflow_nwp=so_nwp_flags,\n stackoverflow_lr=so_lr_flags)\n\n\ndef _write_hparam_flags():\n \"\"\"Creates an ordered dictionary of hyperparameter flags and writes to CSV.\"\"\"\n hparam_dict = utils_impl.lookup_flag_values(shared_flags)\n\n # Update with optimizer flags corresponding to the chosen optimizers.\n opt_flag_dict = utils_impl.lookup_flag_values(optimizer_flags)\n opt_flag_dict = optimizer_utils.remove_unused_flags('client', opt_flag_dict)\n opt_flag_dict = optimizer_utils.remove_unused_flags('server', opt_flag_dict)\n hparam_dict.update(opt_flag_dict)\n\n # Update with task-specific flags.\n task_name = FLAGS.task\n if task_name in TASK_FLAGS:\n task_hparam_dict = 
utils_impl.lookup_flag_values(TASK_FLAGS[task_name])\n hparam_dict.update(task_hparam_dict)\n\n results_dir = os.path.join(FLAGS.root_output_dir, 'results',\n FLAGS.experiment_name)\n utils_impl.create_directory_if_not_exists(results_dir)\n hparam_file = os.path.join(results_dir, 'hparams.csv')\n utils_impl.atomic_write_series_to_csv(hparam_dict, hparam_file)\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError('Expected no command-line arguments, '\n 'got: {}'.format(argv))\n\n client_optimizer_fn = optimizer_utils.create_optimizer_fn_from_flags('client')\n server_optimizer_fn = optimizer_utils.create_optimizer_fn_from_flags('server')\n\n client_lr_schedule = optimizer_utils.create_lr_schedule_from_flags('client')\n server_lr_schedule = optimizer_utils.create_lr_schedule_from_flags('server')\n\n def iterative_process_builder(\n model_fn: Callable[[],\n tff.learning.Model]) -> tff.templates.IterativeProcess:\n \"\"\"Creates an iterative process using a given TFF `model_fn`.\n\n Args:\n model_fn: A no-arg function returning a `tff.learning.Model`.\n\n Returns:\n A `tff.templates.IterativeProcess`.\n \"\"\"\n if FLAGS.task == 'shakespeare' or FLAGS.task == 'stackoverflow_nwp':\n\n def client_weight_fn(local_outputs):\n return tf.cast(tf.squeeze(local_outputs['num_tokens']), tf.float32)\n else:\n client_weight_fn = None\n\n return fed_avg_schedule.build_fed_avg_process(\n model_fn=model_fn,\n client_optimizer_fn=client_optimizer_fn,\n client_lr=client_lr_schedule,\n server_optimizer_fn=server_optimizer_fn,\n server_lr=server_lr_schedule,\n client_weight_fn=client_weight_fn)\n\n task_spec = training_specs.TaskSpec(\n iterative_process_builder=iterative_process_builder,\n client_epochs_per_round=FLAGS.client_epochs_per_round,\n client_batch_size=FLAGS.client_batch_size,\n clients_per_round=FLAGS.clients_per_round,\n client_datasets_random_seed=FLAGS.client_datasets_random_seed)\n\n if FLAGS.task == 'cifar100':\n runner_spec = federated_cifar100.configure_training(\n task_spec,\n crop_size=FLAGS.cifar100_crop_size,\n distort_train_images=FLAGS.cifar100_distort_train_images)\n elif FLAGS.task == 'emnist_cr':\n runner_spec = federated_emnist.configure_training(\n task_spec, model=FLAGS.emnist_cr_model)\n elif FLAGS.task == 'emnist_ae':\n runner_spec = federated_emnist_ae.configure_training(task_spec)\n elif FLAGS.task == 'shakespeare':\n runner_spec = federated_shakespeare.configure_training(\n task_spec, sequence_length=FLAGS.shakespeare_sequence_length)\n elif FLAGS.task == 'stackoverflow_nwp':\n runner_spec = federated_stackoverflow.configure_training(\n task_spec,\n vocab_size=FLAGS.so_nwp_vocab_size,\n num_oov_buckets=FLAGS.so_nwp_num_oov_buckets,\n sequence_length=FLAGS.so_nwp_sequence_length,\n max_elements_per_user=FLAGS.so_nwp_max_elements_per_user,\n num_validation_examples=FLAGS.so_nwp_num_validation_examples)\n elif FLAGS.task == 'stackoverflow_lr':\n runner_spec = federated_stackoverflow_lr.configure_training(\n task_spec,\n vocab_tokens_size=FLAGS.so_lr_vocab_tokens_size,\n vocab_tags_size=FLAGS.so_lr_vocab_tags_size,\n max_elements_per_user=FLAGS.so_lr_max_elements_per_user,\n num_validation_examples=FLAGS.so_lr_num_validation_examples)\n else:\n raise ValueError(\n '--task flag {} is not supported, must be one of {}.'.format(\n FLAGS.task, _SUPPORTED_TASKS))\n\n _write_hparam_flags()\n\n training_loop.run(\n iterative_process=runner_spec.iterative_process,\n client_datasets_fn=runner_spec.client_datasets_fn,\n validation_fn=runner_spec.validation_fn,\n 
test_fn=runner_spec.test_fn,\n total_rounds=FLAGS.total_rounds,\n experiment_name=FLAGS.experiment_name,\n root_output_dir=FLAGS.root_output_dir,\n rounds_per_eval=FLAGS.rounds_per_eval,\n rounds_per_checkpoint=FLAGS.rounds_per_checkpoint)\n\n\nif __name__ == '__main__':\n app.run(main)\n",
"# Copyright 2019, Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Utility for filtering (via class. accuracy) the Federated EMNIST dataset.\"\"\"\n\nimport csv\nimport functools\nimport os.path\n\nimport tensorflow as tf\nimport tensorflow_federated as tff\n\nfrom gans.experiments.emnist import emnist_data_utils\n\nBASE_URL = 'https://storage.googleapis.com/tff-experiments-public/'\nCSVS_BASE_PATH = 'gans/csvs/'\n\n\[email protected]_cache(maxsize=1)\ndef get_unfiltered_client_data_for_training(batch_size):\n r\"\"\"Returns `tff.simulation.datasets.ClientData` of unfiltered Federated EMNIST data.\n\n The data returned will neither be filtered by user nor by example, so training\n can take place with all users and all examples for each user.\n\n Args:\n batch_size: Batch size of output dataset. If None, don't batch.\n\n Returns:\n A tff.simulation.datasets.ClientData` of real images of numbers/letters. The\n data has\n not been filtered.\n \"\"\"\n return get_filtered_client_data_for_training(None, None, batch_size)\n\n\[email protected]_cache(maxsize=1)\ndef get_filtered_by_user_client_data_for_training(invert_imagery_probability,\n accuracy_threshold,\n batch_size,\n cache_dir=None):\n r\"\"\"Returns `tff.simulation.datasets.ClientData` of filtered Federated EMNIST data.\n\n Input data gets filtered on a per-user basis; users get selected via the\n `accuracy_threshold` criterion, and then training can take place with all\n examples from only the selected users.\n\n Args:\n invert_imagery_probability: The probability that a user\\'s image data has\n pixel intensity inverted. E.g., `0p1` corresponds to 0.1, or a 10%\n probability that a user\\'s data is flipped. Note that to save time in\n experiment execution, this is precomputed via the ./filter_users.py\n script, and the selection here controls which file to read from.\n accuracy_threshold: Indicates the classification threshold by which a user\n is included in the training population. E.g., `lt0p882` means any user\n who\\'s data cumulatively classifies with <0.882 accuracy would be used for\n training; `gt0p939` means any user who\\'s data cumulatively classifies\n with >0.939 accuracy would be used for training. To save time in\n experiment execution, this assignment is precomputed via the\n ./filter_users.py script, and the flag selection here is to indicate which\n file to read from.\n batch_size: Batch size of output dataset. If None, don't batch.\n cache_dir: (Optional) base directory to cache the downloaded files. If None,\n caches in Keras' default cache directory.\n\n Returns:\n A tff.simulation.datasets.ClientData` of real images of numbers/letters. 
The\n data has\n been filtered by user classification accuracy as per the input arguments.\n \"\"\"\n path_to_data = os.path.join(CSVS_BASE_PATH,\n 'inv_prob_{}'.format(invert_imagery_probability),\n 'filter_by_user',\n 'acc_{}'.format(accuracy_threshold))\n\n try:\n filename = 'client_ids.csv'\n path_to_read_inversions_csv = tf.keras.utils.get_file(\n fname=filename,\n cache_subdir=path_to_data,\n cache_dir=cache_dir,\n origin=os.path.join(BASE_URL, path_to_data, filename))\n except Exception:\n msg = ('A URL fetch failure was encountered when trying to retrieve '\n 'filter-by-user generated csv file with invert_imagery_probability '\n '`{}` and accuracy_threshold `{}`. Please run the ./filter_users.py '\n 'script to generate the missing data, and use the `cache_dir` '\n 'argument to this method to specify the location of the generated '\n 'data csv file.'.format(invert_imagery_probability,\n accuracy_threshold))\n raise ValueError(msg)\n\n return get_filtered_client_data_for_training(path_to_read_inversions_csv,\n None, batch_size)\n\n\[email protected]_cache(maxsize=1)\ndef get_filtered_by_example_client_data_for_training(invert_imagery_probability,\n min_num_examples,\n example_class_selection,\n batch_size,\n cache_dir=None):\n r\"\"\"Returns `tff.simulation.datasets.ClientData` of filtered Federated EMNIST data.\n\n Input data gets filtered on a per-example basis. Any user meeting the\n `min_num_examples` criterion is included. The examples are limited to those\n that classified according to the `example_class_selection` criterion.\n\n Args:\n invert_imagery_probability: The probability that a user\\'s image data has\n pixel intensity inverted. E.g., `0p1` corresponds to 0.1, or a 10%\n probability that a user\\'s data is flipped. Note that to save time in\n experiment execution, this is precomputed via the ./filter_examples.py\n scripts, and the selection here controls which file to read from.\n min_num_examples: Indicates the minimum number of examples that are either\n correct or incorrect (as set by the `example_class_selection` argument) in\n a client\\'s local dataset for that client to be considered as part of\n training sub-population. To save time in experiment execution, this\n assignment is precomputed via the ./filter_examples.py script, and the\n flag selection here is to indicate which file to read from.\n example_class_selection: Indicates whether to train on a client\\'s correct\n or incorrect examples. To save time in experiment execution, this\n assignment is precomputed via the ./filter_examples.py script, and the\n flag selection here is to indicate which file to read from.\n batch_size: Batch size of output dataset. If None, don't batch.\n cache_dir: (Optional) base directory to cache the downloaded files. 
If None,\n caches in Keras' default cache directory.\n\n Returns:\n A `tff.simulation.datasets.ClientData` of real images of numbers/letters.\n The data\n has been filtered as per the input arguments (either not filtered, filtered\n by user classification accuracy, or filtered by example classification\n correctness).\n \"\"\"\n path_to_data = os.path.join(CSVS_BASE_PATH,\n 'inv_prob_{}'.format(invert_imagery_probability),\n 'filter_by_example',\n 'min_num_examples_{}'.format(min_num_examples),\n '{}'.format(example_class_selection))\n\n try:\n filename = 'client_ids.csv'\n path_to_read_inversions_csv = tf.keras.utils.get_file(\n fname=filename,\n cache_subdir=path_to_data,\n cache_dir=cache_dir,\n origin=os.path.join(BASE_URL, path_to_data, filename))\n\n filename = 'example_indices_map.csv'\n path_to_read_example_indices_csv = tf.keras.utils.get_file(\n fname=filename,\n cache_subdir=path_to_data,\n cache_dir=cache_dir,\n origin=os.path.join(BASE_URL, path_to_data, filename))\n except Exception:\n msg = ('A URL fetch failure was encountered when trying to retrieve '\n 'filter-by-example generated csv files with '\n 'invert_imagery_probability `{}`, min_num_examples `{}`, and '\n 'example_class_selection `{}`. Please run the ./filter_examples.py '\n 'script to generate the missing data, and use the `cache_dir` '\n 'argument to this method to specify the location of the generated '\n 'data csv files.'.format(invert_imagery_probability,\n min_num_examples, example_class_selection))\n raise ValueError(msg)\n\n return get_filtered_client_data_for_training(\n path_to_read_inversions_csv, path_to_read_example_indices_csv, batch_size)\n\n\ndef get_filtered_client_data_for_training(path_to_read_inversions_csv,\n path_to_read_example_indices_csv,\n batch_size):\n \"\"\"Form ClientData using paths to pixel inversion, example selection data.\"\"\"\n\n raw_client_data = emnist_data_utils.create_real_images_tff_client_data(\n 'train')\n client_ids = raw_client_data.client_ids\n\n selected_client_ids_inversion_map = None\n client_ids_example_indices_map = None\n # If filter-by-user or filter-by-example, load the csv data into maps, and\n # update the client IDs to just the users that will be part of training.\n if path_to_read_inversions_csv is not None:\n selected_client_ids_inversion_map, client_ids_example_indices_map = (\n _get_client_ids_inversion_and_example_indices_maps(\n path_to_read_inversions_csv, path_to_read_example_indices_csv))\n client_ids = list(selected_client_ids_inversion_map.keys())\n\n def _get_dataset(client_id):\n \"\"\"Retrieve/preprocess a tf.data.Dataset for a given client_id.\"\"\"\n raw_ds = raw_client_data.create_tf_dataset_for_client(client_id)\n\n invert_imagery = False\n if selected_client_ids_inversion_map:\n invert_imagery = selected_client_ids_inversion_map[client_id]\n\n # If filter-by-example, do it here.\n if client_ids_example_indices_map:\n raw_ds = _filter_by_example(raw_ds, client_ids_example_indices_map,\n client_id)\n\n return emnist_data_utils.preprocess_img_dataset(\n raw_ds,\n invert_imagery=invert_imagery,\n include_label=False,\n batch_size=batch_size,\n shuffle=True,\n repeat=False)\n\n return tff.simulation.datasets.ClientData.from_clients_and_fn(\n client_ids, _get_dataset)\n\n\ndef _filter_by_example(raw_ds, client_ids_example_indices_map, client_id):\n \"\"\"Form a tf.data.Dataset from the examples in the map for the client_id.\"\"\"\n example_indices = client_ids_example_indices_map[client_id]\n # B/c the csv stores the list as a string, we need 
to do some slightly\n # klugey conversion from a string to list. (We strip off the first and\n # last characters in the string, which are [ and ], and then split on\n # commas as delimiters, to recover the original list of ints.)\n example_indices = [int(s) for s in example_indices[1:-1].split(',')]\n\n # Get the elements (OrderedDicts) in the raw data which are at the indices\n # indicated by the list above.\n elements = []\n index = 0\n for element in raw_ds:\n if index in example_indices:\n elements.append(element)\n index += 1\n\n # Bind the elements (via a generator fn) into a new tf.data.Dataset.\n def _generator():\n for element in elements:\n yield element\n\n return tf.data.Dataset.from_generator(_generator, raw_ds.output_types,\n raw_ds.output_shapes)\n\n\ndef _get_client_ids_inversion_and_example_indices_maps(\n path_to_read_inversions_csv, path_to_read_example_indices_csv):\n \"\"\"Return the maps (loaded from csv files) indicating the data to train on.\"\"\"\n if path_to_read_inversions_csv is None:\n raise ValueError(\n 'No path provided to the CSV file that stores map from client ids to '\n 'image inversion data.')\n\n # Load (from CSV file) the specific client IDs that the GAN will train on, and\n # whether or not the images on that client are inverted.\n selected_client_ids_inversion_map = {}\n with tf.io.gfile.GFile(path_to_read_inversions_csv, 'r') as csvfile:\n csvreader = csv.reader(csvfile)\n for [key, val] in csvreader:\n selected_client_ids_inversion_map[key] = (val == 'True')\n\n # If specified (via CSV file), the specific examples on each client ID that\n # the GAN will be trained on.\n client_ids_example_indices_map = None\n if path_to_read_example_indices_csv:\n client_ids_example_indices_map = {}\n with tf.io.gfile.GFile(path_to_read_example_indices_csv, 'r') as csvfile:\n csvreader = csv.reader(csvfile)\n for [key, val] in csvreader:\n client_ids_example_indices_map[key] = val\n\n set_1 = set(client_ids_example_indices_map.keys())\n set_2 = set(selected_client_ids_inversion_map.keys())\n symmetric_diff = set_1 ^ set_2\n if symmetric_diff:\n raise ValueError(\n 'The CSV files at path_to_read_inversions_csv and '\n 'path_to_read_example_indices_csv contain different keys.')\n\n return selected_client_ids_inversion_map, client_ids_example_indices_map\n"
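The bracket-stripping comment in _filter_by_example above describes a small csv round-trip; concretely:

# A Python list serialized into the csv as its repr string...
serialized = "[3, 7, 12]"
# ...is recovered by stripping the brackets and splitting on commas,
# exactly as _filter_by_example does.
example_indices = [int(s) for s in serialized[1:-1].split(',')]
assert example_indices == [3, 7, 12]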
] | [
[
"tensorflow.squeeze"
],
[
"tensorflow.io.gfile.GFile",
"tensorflow.data.Dataset.from_generator"
]
] |
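The row above rebuilds a per-client tf.data.Dataset by collecting the selected elements into a Python list and re-wrapping them with from_generator. As a hedged alternative sketch (not taken from the row; filter_dataset_by_indices is a hypothetical helper), the same index-based selection can stay inside the tf.data pipeline via enumerate()/filter():

    import tensorflow as tf

    def filter_dataset_by_indices(ds, example_indices):
        # `example_indices` is a Python list of ints, e.g. the list parsed
        # from the CSV string '[0, 5, 9]' in _filter_by_example() above.
        index_set = tf.constant(example_indices, dtype=tf.int64)

        def _keep(index, element):
            # Keep the element iff its enumeration index appears in index_set.
            return tf.reduce_any(tf.equal(index, index_set))

        return (ds.enumerate()      # yields (index, element) pairs
                  .filter(_keep)
                  .map(lambda index, element: element))

    # Usage sketch: filtered = filter_dataset_by_indices(raw_ds, [0, 5, 9])

This avoids materializing every element eagerly, at the cost of scanning the whole dataset once per epoch.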
lighthall-lab/NiPype | [
"80d3f05d9aa006fa3055785327892e8a89530a80"
] | [
"nipype/utils/misc.py"
] | [
"# -*- coding: utf-8 -*-\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"Miscellaneous utility functions\n\"\"\"\nfrom __future__ import (print_function, unicode_literals, division,\n absolute_import)\nfrom builtins import next, str\n\nimport sys\nimport re\nfrom collections import Iterator\n\nfrom distutils.version import LooseVersion\n\nimport numpy as np\nfrom future.utils import raise_from\nfrom future import standard_library\ntry:\n from textwrap import indent as textwrap_indent\nexcept ImportError:\n\n def textwrap_indent(text, prefix):\n \"\"\" A textwrap.indent replacement for Python < 3.3 \"\"\"\n if not prefix:\n return text\n splittext = text.splitlines(True)\n return prefix + prefix.join(splittext)\n\n\nstandard_library.install_aliases()\n\n\ndef human_order_sorted(l):\n \"\"\"Sorts string in human order (i.e. 'stat10' will go after 'stat2')\"\"\"\n\n def atoi(text):\n return int(text) if text.isdigit() else text\n\n def natural_keys(text):\n if isinstance(text, tuple):\n text = text[0]\n return [atoi(c) for c in re.split('(\\d+)', text)]\n\n return sorted(l, key=natural_keys)\n\n\ndef trim(docstring, marker=None):\n if isinstance(docstring, bytes):\n docstring = str(docstring, 'utf-8')\n\n if not docstring:\n return ''\n # Convert tabs to spaces (following the normal Python rules)\n # and split into a list of lines:\n lines = docstring.expandtabs().splitlines()\n # Determine minimum indentation (first line doesn't count):\n indent = sys.maxsize\n for line in lines[1:]:\n stripped = line.lstrip()\n if stripped:\n indent = min(indent, len(line) - len(stripped))\n # Remove indentation (first line is special):\n trimmed = [lines[0].strip()]\n if indent < sys.maxsize:\n for line in lines[1:]:\n # replace existing REST marker with doc level marker\n stripped = line.lstrip().strip().rstrip()\n if marker is not None and stripped and \\\n all([s == stripped[0] for s in stripped]) and \\\n stripped[0] not in [':']:\n line = line.replace(stripped[0], marker)\n trimmed.append(line[indent:].rstrip())\n # Strip off trailing and leading blank lines:\n while trimmed and not trimmed[-1]:\n trimmed.pop()\n while trimmed and not trimmed[0]:\n trimmed.pop(0)\n # Return a single string:\n return '\\n'.join(trimmed)\n\n\ndef find_indices(condition):\n \"Return the indices where ravel(condition) is true\"\n res, = np.nonzero(np.ravel(condition))\n return res\n\n\ndef is_container(item):\n \"\"\"Checks if item is a container (list, tuple, dict, set)\n\n Parameters\n ----------\n item : object\n object to check for .__iter__\n\n Returns\n -------\n output : Boolean\n True if container\n False if not (eg string)\n \"\"\"\n if isinstance(item, str):\n return False\n elif hasattr(item, '__iter__'):\n return True\n else:\n return False\n\n\ndef container_to_string(cont):\n \"\"\"Convert a container to a command line string.\n\n Elements of the container are joined with a space between them,\n suitable for a command line parameter.\n\n If the container `cont` is only a sequence, like a string and not a\n container, it is returned unmodified.\n\n Parameters\n ----------\n cont : container\n A container object like a list, tuple, dict, or a set.\n\n Returns\n -------\n cont_str : string\n Container elements joined into a string.\n\n \"\"\"\n if hasattr(cont, '__iter__') and not isinstance(cont, str):\n cont = ' '.join(cont)\n return str(cont)\n\n\n# Dependency checks. 
Copied this from Nipy, with some modificiations\n# (added app as a parameter).\ndef package_check(pkg_name,\n version=None,\n app=None,\n checker=LooseVersion,\n exc_failed_import=ImportError,\n exc_failed_check=RuntimeError):\n \"\"\"Check that the minimal version of the required package is installed.\n\n Parameters\n ----------\n pkg_name : string\n Name of the required package.\n version : string, optional\n Minimal version number for required package.\n app : string, optional\n Application that is performing the check. For instance, the\n name of the tutorial being executed that depends on specific\n packages. Default is *Nipype*.\n checker : object, optional\n The class that will perform the version checking. Default is\n distutils.version.LooseVersion.\n exc_failed_import : Exception, optional\n Class of the exception to be thrown if import failed.\n exc_failed_check : Exception, optional\n Class of the exception to be thrown if version check failed.\n\n Examples\n --------\n package_check('numpy', '1.3')\n package_check('scipy', '0.7', 'tutorial1')\n\n \"\"\"\n\n if app:\n msg = '%s requires %s' % (app, pkg_name)\n else:\n msg = 'Nipype requires %s' % pkg_name\n if version:\n msg += ' with version >= %s' % (version, )\n try:\n mod = __import__(pkg_name)\n except ImportError as e:\n raise_from(exc_failed_import(msg), e)\n if not version:\n return\n try:\n have_version = mod.__version__\n except AttributeError as e:\n raise_from(\n exc_failed_check('Cannot find version for %s' % pkg_name), e)\n if checker(have_version) < checker(version):\n raise exc_failed_check(msg)\n\n\ndef str2bool(v):\n if isinstance(v, bool):\n return v\n lower = v.lower()\n if lower in (\"yes\", \"true\", \"t\", \"1\"):\n return True\n elif lower in (\"no\", \"false\", \"n\", \"f\", \"0\"):\n return False\n else:\n raise ValueError(\"%s cannot be converted to bool\" % v)\n\n\ndef flatten(S):\n if S == []:\n return S\n if isinstance(S[0], list):\n return flatten(S[0]) + flatten(S[1:])\n return S[:1] + flatten(S[1:])\n\n\ndef unflatten(in_list, prev_structure):\n if not isinstance(in_list, Iterator):\n in_list = iter(in_list)\n\n if not isinstance(prev_structure, list):\n return next(in_list)\n\n out = []\n for item in prev_structure:\n out.append(unflatten(in_list, item))\n return out\n\n\ndef normalize_mc_params(params, source):\n \"\"\"\n Normalize a single row of motion parameters to the SPM format.\n\n SPM saves motion parameters as:\n x Right-Left (mm)\n y Anterior-Posterior (mm)\n z Superior-Inferior (mm)\n rx Pitch (rad)\n ry Yaw (rad)\n rz Roll (rad)\n \"\"\"\n if source.upper() == 'FSL':\n params = params[[3, 4, 5, 0, 1, 2]]\n elif source.upper() in ('AFNI', 'FSFAST'):\n params = params[np.asarray([4, 5, 3, 1, 2, 0]) + (len(params) > 6)]\n params[3:] = params[3:] * np.pi / 180.\n elif source.upper() == 'NIPY':\n from nipy.algorithms.registration import to_matrix44, aff2euler\n matrix = to_matrix44(params)\n params = np.zeros(6)\n params[:3] = matrix[:3, 3]\n params[-1:2:-1] = aff2euler(matrix)\n\n return params\n\n\ndef dict_diff(dold, dnew, indent=0):\n \"\"\"Helper to log what actually changed from old to new values of\n dictionaries.\n\n typical use -- log difference for hashed_inputs\n \"\"\"\n # First check inputs, since they usually are lists of tuples\n # and dicts are required.\n if isinstance(dnew, list):\n dnew = dict(dnew)\n if isinstance(dold, list):\n dold = dict(dold)\n\n # Compare against hashed_inputs\n # Keys: should rarely differ\n new_keys = set(dnew.keys())\n old_keys = 
set(dold.keys())\n\n diff = []\n if new_keys - old_keys:\n diff += [\" * keys not previously seen: %s\" % (new_keys - old_keys)]\n\n if old_keys - new_keys:\n diff += [\" * keys not presently seen: %s\" % (old_keys - new_keys)]\n\n # Add topical message\n if diff:\n diff.insert(0, \"Dictionaries had differing keys:\")\n\n diffkeys = len(diff)\n\n # Values in common keys would differ quite often,\n # so we need to join the messages together\n for k in new_keys.intersection(old_keys):\n same = False\n try:\n new, old = dnew[k], dold[k]\n same = new == old\n if not same:\n # Since JSON does not discriminate between lists and\n # tuples, we might need to cast them into the same type\n # as the last resort. And lets try to be more generic\n same = old.__class__(new) == old\n except Exception:\n same = False\n if not same:\n diff += [\" * %s: %r != %r\" % (k, dnew[k], dold[k])]\n\n if len(diff) > diffkeys:\n diff.insert(diffkeys, \"Some dictionary entries had differing values:\")\n\n return textwrap_indent('\\n'.join(diff), ' ' * indent)\n"
] | [
[
"numpy.ravel",
"numpy.asarray",
"numpy.zeros"
]
] |
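nipype/utils/misc.py above defines a flatten()/unflatten() pair in which unflatten() rebuilds a nested list from a flat iterable, using a previous structure as the template. A small usage sketch (the nested values are illustrative only):

    from nipype.utils.misc import flatten, unflatten

    nested = [1, [2, 3], [[4], 5]]
    flat = flatten(nested)             # [1, 2, 3, 4, 5]
    doubled = [x * 2 for x in flat]    # transform the flat values
    rebuilt = unflatten(doubled, nested)
    assert rebuilt == [2, [4, 6], [[8], 10]]

The template argument only contributes shape; its values are never read, which is why a transformed flat list can be poured back into the original nesting.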
drienyov/treadmill | [
"812109e31c503a6eddaee2d3f2e1faf2833b6aaf"
] | [
"lib/python/treadmill/cli/scheduler/__init__.py"
] | [
"\"\"\"Top level command for Treadmill reports.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport json\n\nimport click\nimport pandas as pd\nimport tabulate\n\nfrom six.moves import urllib_parse\n\nfrom treadmill import cli\nfrom treadmill import context\nfrom treadmill import plugin_manager\nfrom treadmill import restclient\n\n\ndef fetch_report(cell_api, report_type, match=None, partition=None):\n \"\"\"Fetch a report of the given type and return it as a DataFrame.\"\"\"\n api_urls = context.GLOBAL.cell_api(cell_api)\n path = '/scheduler/{}'.format(report_type)\n\n query = {}\n if match:\n query['match'] = match\n if partition:\n query['partition'] = partition\n\n if query:\n path += '?' + urllib_parse.urlencode(query)\n\n response = restclient.get(api_urls, path).json()\n return pd.DataFrame(response['data'], columns=response['columns'])\n\n\ndef print_report(frame):\n \"\"\"Pretty-print the report.\"\"\"\n if cli.OUTPUT_FORMAT is None:\n frame.replace(True, ' ', inplace=True)\n frame.replace(False, 'X', inplace=True)\n dict_ = frame.to_dict(orient='split')\n del dict_['index']\n\n cli.out(\n tabulate.tabulate(\n dict_['data'], dict_['columns'], tablefmt='simple'\n )\n )\n cli.echo_green('\\nX: designates the factor that prohibits scheduling '\n 'the instance on the given server')\n elif cli.OUTPUT_FORMAT == 'yaml':\n fmt = plugin_manager.load('treadmill.formatters', 'yaml')\n cli.out(fmt.format(frame.to_dict(orient='records')))\n elif cli.OUTPUT_FORMAT == 'json':\n cli.out(frame.to_json(orient='records'))\n elif cli.OUTPUT_FORMAT == 'csv':\n cli.out(frame.to_csv(index=False))\n else:\n cli.out(tabulate.tabulate(frame, frame.columns, tablefmt='simple'))\n\n\ndef init():\n \"\"\"Return top level command handler.\"\"\"\n\n @click.group(cls=cli.make_commands(__name__))\n @click.option(\n '--cell',\n help='Treadmill cell',\n envvar='TREADMILL_CELL',\n callback=cli.handle_context_opt,\n expose_value=False,\n required=True\n )\n @click.option(\n '--api',\n help='Cell API URL',\n metavar='URL',\n envvar='TREADMILL_CELLAPI'\n )\n @click.pass_context\n def run(ctx, api):\n \"\"\"Report scheduler state.\"\"\"\n if not ctx.obj:\n ctx.obj = {} # Doesn't seem to exist in testing\n ctx.obj['api'] = api\n\n return run\n"
] | [
[
"pandas.DataFrame"
]
] |
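fetch_report() above assumes the cell API returns a JSON object with a 'columns' list and row-oriented 'data'. A minimal sketch with a made-up payload, showing how that shape becomes a DataFrame and what the 'csv' branch of print_report() emits:

    import pandas as pd

    # Hypothetical payload; field names match what fetch_report() reads.
    response = {
        'columns': ['name', 'partition', 'memory'],
        'data': [
            ['server1', 'default', '4G'],
            ['server2', 'gpu', '16G'],
        ],
    }
    frame = pd.DataFrame(response['data'], columns=response['columns'])
    print(frame.to_csv(index=False))  # same output path as OUTPUT_FORMAT == 'csv'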
ruomingp/lingvo | [
"ba59e8c46471be77d5d3c48177f0f0dd8d5d44e9"
] | [
"lingvo/jax/eval.py"
] | [
"# Lint as: python3\n# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Evaluation loop for lingvo Jax model.\"\"\"\n\nimport contextlib\nimport functools\nimport hashlib\nimport os\nimport time\nfrom typing import List, Optional, Sequence\n\nfrom absl import logging\nimport jax\nfrom jax.experimental import maps\nfrom jax.experimental import mesh_utils\nfrom lingvo.jax import base_input\nfrom lingvo.jax import base_layer\nfrom lingvo.jax import base_metrics\nfrom lingvo.jax import base_model_params\nfrom lingvo.jax import base_task\nfrom lingvo.jax import checkpoint_pb2\nfrom lingvo.jax import model_utils\nfrom lingvo.jax import py_utils\nfrom lingvo.jax import pytypes\nfrom lingvo.jax import summary_utils\nfrom lingvo.jax import train_states\nfrom lingvo.jax import trainer_lib\nimport tensorflow.compat.v2 as tf\n\nfrom lingvo.jax import checkpoints\nfrom lingvo.jax import io_utils\n\nBaseModelParamsT = base_model_params.BaseModelParamsT\nCheckpointType = checkpoint_pb2.CheckpointType\nInstantiableParams = py_utils.InstantiableParams\nNestedMap = py_utils.NestedMap\nJTensor = pytypes.JTensor\nNestedJTensor = pytypes.NestedJTensor\nTrainState = train_states.TrainState\nSummaryWriter = tf.summary.SummaryWriter\n\n\ndef maybe_ema(model_states):\n \"\"\"Finds the ema state from optimizer states.\"\"\"\n if not model_states.opt_states:\n return model_states\n for i in range(len(model_states.opt_states[0])):\n if 'ema' in model_states.opt_states[0][i]:\n return TrainState(\n step=model_states.step,\n mdl_vars=model_states.opt_states[0][i].ema,\n opt_states={})\n return model_states\n\n\ndef evaluate(\n model_name: str,\n job_log_dir: Optional[str],\n multi_host_checkpointing: Optional[bool],\n maybe_use_persistence_checkpointing: bool,\n) -> None:\n \"\"\"Runs the evaluation loop on the entire eval data set.\n\n Args:\n model_name: The name of the model from the registry to evaluate.\n job_log_dir: The directory for the job logs.\n multi_host_checkpointing: Whether to use multi-host checkpointing.\n maybe_use_persistence_checkpointing: If set, it will try to use\n persistence-based checkpointing if suitable.\n \"\"\"\n model_config = model_utils.get_model(model_name)()\n task_p = model_config.task()\n model_p = task_p.model\n eval_input_p = [v for v in model_config.datasets() if not v.is_training]\n for inp in eval_input_p:\n inp.num_infeed_hosts = jax.process_count()\n inp.infeed_host_index = jax.process_index()\n\n if model_p.device_mesh is not None:\n checkpoint_type = checkpoints.retrieve_checkpoint_type(\n multi_host_checkpointing, maybe_use_persistence_checkpointing, task_p)\n evaluate_spmd_model(task_p, eval_input_p, job_log_dir, checkpoint_type)\n else:\n evaluate_pmap_model(task_p, eval_input_p, job_log_dir)\n\n\ndef evaluate_pmap_model(\n task_p: InstantiableParams,\n eval_input_p: Sequence[InstantiableParams],\n job_log_dir: 
Optional[str],\n) -> None:\n \"\"\"Runs the evaluation loop on the entire test dataset for PMAP model.\n\n Args:\n task_p: Params for the task encapsulating the data parallel model.\n eval_input_p: List of params for the eval data input pipelines.\n job_log_dir: Directory for the job logs.\n \"\"\"\n logging.info('Using pmap for data parallelism.')\n jax_task = task_p.Instantiate()\n eval_input_pipelines = [input_p.Instantiate() for input_p in eval_input_p]\n # TODO(shafey): Retrieve the seeds from the model definition instead.\n prng_key = jax.random.PRNGKey(1234)\n prng_key, init_key = jax.random.split(prng_key)\n\n checkpoint_dir = os.path.join(job_log_dir, 'checkpoints')\n # Restore flax checkpoints still required bak variables in TrainState\n # TODO(pax): add is_eval=True to initialize_model_state\n model_states = trainer_lib.initialize_model_state(jax_task, init_key)\n # Pmap does not use GDA, and so global_mesh and mesh_axes are None.\n model_states = checkpoints.restore_checkpoint(model_states, checkpoint_dir)\n replicated_model_states = trainer_lib.replicate_model_state(model_states)\n logging.info('replicated_model_states: %s',\n jax.tree_map(lambda x: x.shape, replicated_model_states))\n # From now on, different replicas should use different random seeds.\n # Here, each process will have its unique prng_key.\n # prng_key will be further split so that each core on a host will get\n # different prng_key.\n prng_key = jax.random.fold_in(prng_key, jax.process_index())\n logging.info('root prng_key: %s', prng_key)\n\n def eval_step(mdl_states, prng_key, inputs):\n mdl_states = trainer_lib.train_state_for_eval_step(mdl_states)\n return trainer_lib.eval_step_single_learner(\n jax_task,\n mdl_states,\n prng_key,\n inputs,\n data_parallel_axis_name='batch',\n fprop_dtype=jax_task.model.fprop_dtype)\n\n num_devices = jax.local_device_count()\n prng_key, eval_key = jax.random.split(prng_key)\n eval_prng_seed = jax.random.split(eval_key, num=num_devices)\n logging.info('eval prng_seed: %s', eval_prng_seed)\n\n p_eval_step = jax.pmap(eval_step, axis_name='batch')\n\n logging.info('Evaluation loop starting...')\n summary_base_dir = os.path.join(job_log_dir, 'summaries')\n summary_eval_dirs = [\n os.path.join(summary_base_dir, f'eval_test_{split}')\n for split, _ in enumerate(eval_input_p)\n ]\n\n num_steps = [\n -1 if p.reset_for_eval else p.eval_loop_num_batches for p in eval_input_p\n ]\n last_checkpoint = checkpoints.latest_checkpoint(checkpoint_dir)\n with contextlib.ExitStack() as exit_stack:\n eval_summary_writers = [\n exit_stack.enter_context(summary_utils.get_summary_writer(d))\n for d in summary_eval_dirs\n ]\n\n while True:\n step_i = int(jax.device_get(replicated_model_states.step)[0])\n eval_step = functools.partial(p_eval_step,\n maybe_ema(replicated_model_states),\n eval_prng_seed)\n # Run the eval loop.\n model_utils.run_eval_loop_over_test_splits(\n num_steps,\n eval_step,\n eval_summary_writers,\n step_i,\n eval_input_pipelines,\n reshard_inputs=True)\n # If the last check point evaluated matches max train steps, exit.\n if last_checkpoint is not None:\n last_ckpt_step = checkpoints.get_step_from_checkpoint_asset(\n last_checkpoint)\n exceeded_ckpt = last_ckpt_step + task_p.train.save_interval_steps\n if exceeded_ckpt >= task_p.train.num_train_steps:\n break\n # Release replicated_model_states.\n del replicated_model_states\n new_checkpoint = checkpoints.latest_checkpoint(checkpoint_dir)\n while new_checkpoint == last_checkpoint:\n # Sleep for a minute.\n time.sleep(60)\n 
new_checkpoint = checkpoints.latest_checkpoint(checkpoint_dir)\n # There must be a new checkpoint here.\n logging.info('Found new checkpoint: %s', new_checkpoint)\n model_states = checkpoints.restore_checkpoint(model_states,\n checkpoint_dir)\n replicated_model_states = trainer_lib.replicate_model_state(model_states)\n last_checkpoint = new_checkpoint\n\n\ndef evaluate_spmd_model(\n task_p: InstantiableParams,\n eval_input_p: Sequence[InstantiableParams],\n job_log_dir: Optional[str],\n checkpoint_type: CheckpointType,\n) -> None:\n \"\"\"Runs the evaluation loop on the entire test dataset for SPMD model.\n\n Args:\n task_p: Params of the task encapsulating an SPMD model.\n eval_input_p: List of Params for the eval data pipelines.\n job_log_dir: Directory for the job logs.\n checkpoint_type: Type of model checkpointing method to use.\n \"\"\"\n logging.info('Using SPMD sharding for model parallelism.')\n eval_input_pipelines = [input_p.Instantiate() for input_p in eval_input_p]\n # TODO(bf-jax): Retrieve the seeds from the model definition instead.\n prng_key = jax.random.PRNGKey(1234)\n prng_key, init_key = jax.random.split(prng_key)\n\n checkpoint_dir = os.path.join(job_log_dir, 'checkpoints')\n # Note that GDA checkpoint requires all processes to participate in\n # checkpointing but it does not require a separate checkpoint_dir per process.\n if checkpoint_type == CheckpointType.CHECKPOINT_MULTI_HOST_FLAX:\n checkpoint_task_dir = os.path.join(checkpoint_dir,\n f'{jax.process_index():03d}')\n else:\n checkpoint_task_dir = checkpoint_dir\n\n multi_host_checkpointing = bool(checkpoint_type in {\n CheckpointType.CHECKPOINT_MULTI_HOST_FLAX, CheckpointType.CHECKPOINT_GDA\n })\n\n def get_shape_dtype(x):\n y = jax.ShapeDtypeStruct(x.shape, x.dtype)\n return y\n\n # Do not ues eval_input_pipelines[0] directly.\n sample_model_inputs = eval_input_p[0].Instantiate().get_next()\n inputs_shape = tf.nest.map_structure(get_shape_dtype, sample_model_inputs)\n\n jax_task = task_p.Instantiate()\n model_p = task_p.model\n mesh_shape = model_p.device_mesh.shape\n device_mesh = mesh_utils.create_device_mesh(mesh_shape)\n logging.info('device_mesh: %s', device_mesh)\n global_mesh = maps.Mesh(device_mesh, model_p.mesh_axis_names)\n use_gda_checkpoint = jax.config.jax_parallel_functions_output_gda\n with global_mesh:\n jax_task.model.instantiate_variable_configs()\n # Restore flax checkpoints still required backward variables in TrainState\n # TODO(pax): set is_eval=True for all ckpt types.\n if use_gda_checkpoint:\n partitioned_specs = jax_task.create_train_state_partition_specs(\n jax_task.model.vars, discard_opt_states=True)\n partitioned_train_state = checkpoints.restore_checkpoint(\n None,\n checkpoint_task_dir,\n global_mesh=global_mesh,\n checkpoint_type=checkpoint_type,\n state_specs=partitioned_specs)\n eval_step, inputs_partition_specs = (\n trainer_lib.get_partitioned_spmd_model_step_fn(\n jax_task,\n init_key,\n partitioned_specs,\n inputs_shape,\n is_eval=True))\n else:\n (partitioned_train_state, partitioned_specs, inputs_partition_specs, _,\n eval_step, _) = trainer_lib.partition_spmd_model(task_p, init_key,\n inputs_shape)\n partitioned_train_state = checkpoints.restore_checkpoint(\n partitioned_train_state,\n checkpoint_task_dir,\n global_mesh=global_mesh,\n checkpoint_type=checkpoint_type,\n state_specs=partitioned_specs)\n\n logging.info('partitioned_train_state: %s',\n jax.tree_map(lambda x: x.shape, partitioned_train_state))\n if multi_host_checkpointing:\n 
py_utils.sync_global_devices(f'checkpointer:restored:{checkpoint_dir}')\n\n # We do not fold in jax.process_index in contrast to the pmap version and\n # use a single global key instead to rely on pjit to split for different\n # replicas.\n logging.info('root prng_key: %s', prng_key)\n prng_key, eval_key = jax.random.split(prng_key)\n logging.info('eval prng_key: %s', eval_key)\n\n logging.info('Evaluation loop starting...')\n summary_base_dir = os.path.join(job_log_dir, 'summaries')\n summary_eval_dirs = [\n os.path.join(summary_base_dir, f'eval_{split}')\n for split, _ in enumerate(eval_input_p)\n ]\n\n num_steps = [-1 if p.reset_for_eval else 1 for p in eval_input_p]\n last_checkpoint = checkpoints.latest_checkpoint(checkpoint_dir)\n with contextlib.ExitStack() as exit_stack:\n eval_summary_writers = [\n exit_stack.enter_context(summary_utils.get_summary_writer(d))\n for d in summary_eval_dirs\n ]\n while True:\n step_i = int(jax.device_get(partitioned_train_state.step))\n eval_step_fn = functools.partial(\n eval_step,\n trainer_lib.train_state_for_eval_step(partitioned_train_state),\n eval_key)\n # Run the eval loop.\n model_utils.run_eval_loop_over_test_splits(\n num_steps,\n eval_step_fn,\n eval_summary_writers,\n step_i,\n eval_input_pipelines,\n inputs_partition_specs,\n inputs_shape,\n global_mesh,\n reshard_inputs=False)\n # If the last check point evaluated matches max train steps, exit.\n if last_checkpoint is not None:\n last_ckpt_step = checkpoints.get_step_from_checkpoint_asset(\n last_checkpoint)\n exceeded_ckpt = last_ckpt_step + task_p.train.save_interval_steps\n if exceeded_ckpt >= task_p.train.num_train_steps:\n break\n new_checkpoint = checkpoints.latest_checkpoint(checkpoint_dir)\n while new_checkpoint == last_checkpoint:\n # Sleep for a minute.\n time.sleep(60)\n new_checkpoint = checkpoints.latest_checkpoint(checkpoint_dir)\n # There must be a new checkpoint here.\n logging.info('Found new checkpoint: %s', new_checkpoint)\n partitioned_train_state = checkpoints.restore_checkpoint(\n None if use_gda_checkpoint else partitioned_train_state,\n checkpoint_task_dir,\n global_mesh=global_mesh,\n checkpoint_type=checkpoint_type,\n state_specs=partitioned_specs)\n if multi_host_checkpointing:\n py_utils.sync_global_devices(\n f'checkpointer:restored:{checkpoint_dir}')\n last_checkpoint = new_checkpoint\n\n\ndef decode(\n model_name: str,\n job_log_dir: Optional[str],\n multi_host_checkpointing: Optional[bool],\n maybe_use_persistence_checkpointing: bool,\n restore_checkpoint_dir: Optional[str],\n restore_checkpoint_step: Optional[int],\n continuous_decode: bool,\n) -> None:\n \"\"\"Runs decoding once on the decoder datasets.\n\n Args:\n model_name: The name of the model from the registry to evaluate.\n job_log_dir: The directory for the job logs.\n multi_host_checkpointing: Whether to use multi-host checkpointing.\n maybe_use_persistence_checkpointing: If set, it will try to use\n persistence-based checkpointing if suitable.\n restore_checkpoint_dir: The directory from which to restore checkpoint.\n restore_checkpoint_step: If set, the checkpoint step to restore. 
If unset,\n try to restore from the latest checkpoint if any.\n continuous_decode: whether to continuously decode on the latest ckpt.\n \"\"\"\n logging.info('running decode_once on model %s restored from %s', model_name,\n restore_checkpoint_dir)\n model_config = model_utils.get_model(model_name)()\n task_p = model_config.task()\n model_p = task_p.model\n decoder_inputs = model_config.decoder_datasets()\n if not decoder_inputs:\n return\n for inp in decoder_inputs:\n inp.num_infeed_hosts = jax.process_count()\n inp.infeed_host_index = jax.process_index()\n\n if model_p.device_mesh is not None:\n if continuous_decode:\n raise NotImplementedError('http://b/214589358: not supported')\n checkpoint_type = checkpoints.retrieve_checkpoint_type(\n multi_host_checkpointing, maybe_use_persistence_checkpointing, task_p)\n decode_once_spmd_model(task_p, decoder_inputs, job_log_dir, checkpoint_type,\n restore_checkpoint_dir, restore_checkpoint_step)\n else:\n decode_pmap_model(task_p, decoder_inputs, job_log_dir,\n restore_checkpoint_dir, restore_checkpoint_step,\n continuous_decode)\n\n\ndef _get_dir_names(input_p: Sequence[InstantiableParams]) -> Sequence[str]:\n \"\"\"Returns a list of same length for parent dir names for each dataset.\"\"\"\n uniq_names = set()\n ret = []\n for idx, p in enumerate(input_p):\n name = p.name or f'decode_test_{idx}'\n if p.name and p.name in uniq_names:\n name = f'{p.name}_{idx}'\n if name in uniq_names:\n suffix = hashlib.md5(name.encode()).hexdigest()[-5:]\n name = f'{name}_{suffix}'\n assert name not in uniq_names\n uniq_names.add(name)\n ret.append(name)\n return ret\n\n\ndef _get_step(step: base_layer.JTensorOrPartitionSpec) -> int:\n \"\"\"Returns an int for the current global step.\"\"\"\n if step.ndim == 0:\n return jax.device_get(step)\n if step.ndim == 1:\n return jax.device_get(step[0])\n raise ValueError(\n f'Expecting a replicated 1D global step (got ndim=`{step.ndim}`).')\n\n\ndef _get_filename(step: base_layer.JTensorOrPartitionSpec) -> str:\n \"\"\"Returns a filename for the given step.\"\"\"\n step_num = _get_step(step)\n return f'decoder_out_{step_num}_shard_{jax.process_index()}'\n\n\ndef decode_pmap_model(\n task_p: InstantiableParams,\n input_p: Sequence[InstantiableParams],\n job_log_dir: Optional[str],\n restore_checkpoint_dir: Optional[str],\n restore_checkpoint_step: Optional[int],\n continuous_decode: bool,\n) -> None:\n \"\"\"Runs the decoding on the entire decoder datasets for a PMAP model.\n\n Args:\n task_p: Params of the task encapsulating a the data parallel model.\n input_p: List of input params to be decoded.\n job_log_dir: Directory for the job logs.\n restore_checkpoint_dir: The directory from which to restore checkpoint. If\n None, uses job_log_dir.\n restore_checkpoint_step: If set, the checkpoint step to restore. 
If unset,\n try to restore from the latest checkpoint if any.\n continuous_decode: whether to continuously decode on the latest ckpt.\n \"\"\"\n if continuous_decode and restore_checkpoint_step is not None:\n raise ValueError('Continuous decoding mode requires restore_checkpoint_step'\n '=None, actual restore_checkpoint_step='\n f'{restore_checkpoint_step}')\n restore_checkpoint_dir = restore_checkpoint_dir or os.path.join(\n job_log_dir, 'checkpoints')\n\n # TODO(shafey): Retrieve the seeds from the model definition instead.\n prng_key = jax.random.PRNGKey(1234)\n prng_key, init_key = jax.random.split(prng_key)\n\n # From now on, different replicas should use different random seeds.\n # Here, each process will have its unique prng_key.\n # prng_key will be further split so that each core on a host will get\n # different prng_key.\n prng_key = jax.random.fold_in(prng_key, jax.process_index())\n logging.info('root prng_key: %s', prng_key)\n prng_key, eval_key = jax.random.split(prng_key)\n prng_seed = jax.random.split(eval_key, num=jax.local_device_count())\n logging.info('decoder prng_seed: %s', prng_seed)\n\n inputs = [p.Instantiate() for p in input_p]\n summary_base_dir = os.path.join(job_log_dir, 'summaries')\n dirnames = _get_dir_names(input_p)\n summary_decode_dirs = [\n os.path.join(summary_base_dir, f'decode_test_{dirnames[split]}')\n for split, _ in enumerate(input_p)\n ]\n with contextlib.ExitStack() as exit_stack:\n summary_writers = [\n exit_stack.enter_context(summary_utils.get_summary_writer(d))\n for d in summary_decode_dirs\n ]\n\n jax_task = task_p.Instantiate()\n # Restore flax checkpoints still required bak variables in TrainState\n # TODO(pax): add is_eval=True to initialize_model_state\n model_states = trainer_lib.initialize_model_state(jax_task, init_key)\n model_states = checkpoints.restore_checkpoint(\n model_states, restore_checkpoint_dir, step=restore_checkpoint_step)\n replicated_model_states = trainer_lib.replicate_model_state(model_states)\n logging.info('replicated_model_states: %s',\n jax.tree_map(lambda x: x.shape, replicated_model_states))\n last_checkpoint = checkpoints.latest_checkpoint(restore_checkpoint_dir)\n\n while True:\n _decode_once_pmap_model(jax_task, task_p, inputs, input_p, prng_seed,\n job_log_dir, replicated_model_states,\n summary_writers)\n if not continuous_decode:\n break\n if last_checkpoint is not None:\n last_ckpt_step = int(last_checkpoint.split('_')[-1])\n exceeded_ckpt = last_ckpt_step + task_p.train.save_interval_steps\n if exceeded_ckpt >= task_p.train.num_train_steps:\n break\n # Release replicated_model_states.\n del replicated_model_states\n new_checkpoint = checkpoints.latest_checkpoint(restore_checkpoint_dir)\n while new_checkpoint == last_checkpoint:\n time.sleep(60)\n new_checkpoint = checkpoints.latest_checkpoint(restore_checkpoint_dir)\n logging.info('Found new checkpoint: %s', new_checkpoint)\n model_states = checkpoints.restore_checkpoint(model_states,\n restore_checkpoint_dir)\n replicated_model_states = trainer_lib.replicate_model_state(model_states)\n last_checkpoint = new_checkpoint\n\n\ndef _decode_once_pmap_model(\n jax_task: base_task.SingleTask,\n task_p: InstantiableParams,\n inputs: List[base_input.BaseInput],\n input_p: Sequence[InstantiableParams],\n prng_seed: JTensor,\n job_log_dir: Optional[str],\n replicated_model_states: train_states.TrainState,\n summary_writers: List[SummaryWriter],\n) -> None:\n \"\"\"Runs the decoding on the entire decoder datasets for a PMAP model.\n\n Args:\n jax_task: 
instantiated model from task_p.\n task_p: Params for the task encapsulating a data parallel model.\n inputs: instantiated inputs.\n input_p: List of input params to be decoded.\n prng_seed: The prng seed used for decoding.\n job_log_dir: Directory for the job logs.\n replicated_model_states: A TrainState object.\n summary_writers: The summary writer objects to log summaries.\n \"\"\"\n model = jax_task.model\n model_p = task_p.model\n metrics_p = task_p.metrics\n if not metrics_p:\n metrics_p = base_metrics.MeanMetrics.Params()\n decode_metrics = metrics_p.Instantiate()\n process_decode_metrics = metrics_p.Instantiate()\n\n step_i = _get_step(replicated_model_states.step)\n pmap_axis_name = 'batch'\n\n def decode_step(mdl_states, prng_key, inputs):\n mdl_states = trainer_lib.train_state_for_eval_step(mdl_states)\n metrics, out = trainer_lib.decode_step(model, mdl_states, prng_key, inputs,\n model_p.fprop_dtype)\n metrics = decode_metrics.aggregate(metrics)\n return metrics, out\n\n # As an example, suppose the output leaf from trainer_lib.decoder_step()\n # for each core has shape: [per_core_batch_size, decoding_length].\n # In the all_gather we set tiled=True, so the output chunks are all\n # concatenated into the existing batch axis, so we get shape\n # [num_cores x per_core_batch_size, decoding_length].\n # In the pmap call we set out_axes=None to not have to manually unreplicate,\n # so the output of pmap_decode_step() will have the same shape.\n #\n # Example code snippet showing this:\n # # shape (8, 3, 2)\n # x = jnp.tile(jnp.arange(8)[:, None, None],[1, 3, 2])\n # # shape (24, 2)\n # z = jax.pmap(\n # lambda y: jax.lax.all_gather(y+1, axis_name='i', tiled=True),\n # axis_name='i', out_axes=None)(x)\n #\n # We only aggregate metrics, not `out`, hence the tuple for out_axes.\n pmap_decode_step = jax.pmap(\n decode_step, axis_name=pmap_axis_name, out_axes=(None, 0))\n decode_step_func = functools.partial(pmap_decode_step,\n maybe_ema(replicated_model_states),\n prng_seed)\n\n num_steps = [\n -1 if p.reset_for_eval else p.eval_loop_num_batches for p in input_p\n ]\n decodes = [list() for _ in input_p]\n for split, num_split_steps in enumerate(num_steps):\n logging.info('Start decoding on input %s', input_p[split].name)\n step_num = 0\n while num_split_steps < 0 or step_num < num_split_steps:\n step_num += 1\n try:\n batch = inputs[split].get_next()\n except (tf.errors.OutOfRangeError, StopIteration):\n inputs[split].reset()\n break\n batch = tf.nest.map_structure(py_utils.reshard, batch)\n batch_metrics, out = decode_step_func(batch)\n # we store the metric directly as it has already been aggregated in\n # side decode_step_fun\n decode_metrics.store(batch_metrics)\n logging.info('Finished decoding input batch %d', step_num)\n\n out = tf.nest.map_structure(py_utils.unshard, out)\n process_metrics, processed = model.process_decode_out(inputs[split], out)\n decodes[split].extend(processed)\n logging.info('Finished processing decoded input batch %d', step_num)\n\n # Reshard the metrics for pmap.\n process_decode_metrics.update(process_metrics)\n\n with summary_writers[split].as_default():\n decode_metrics.summarize(step_i, 'decode_metrics')\n process_decode_metrics.summarize(step_i, 'process_decode_metrics')\n\n basedir = os.path.join(job_log_dir, 'decoder_out')\n dirnames = _get_dir_names(input_p)\n filename = _get_filename(replicated_model_states.step)\n for s in dirnames:\n dir_path = os.path.join(basedir, s)\n if not tf.io.gfile.exists(dir_path):\n tf.io.gfile.makedirs(dir_path)\n 
filenames = [os.path.join(basedir, s, filename) for s in dirnames]\n for split, output_file in enumerate(filenames):\n logging.info('Writing decoder output to %s with %d entries', output_file,\n len(decodes[split]))\n io_utils.WriteKeyValuePairs(output_file, decodes[split])\n\n\ndef decode_once_spmd_model(\n task_p: InstantiableParams,\n input_p: Sequence[InstantiableParams],\n job_log_dir: Optional[str],\n checkpoint_type: CheckpointType,\n restore_checkpoint_dir: str,\n restore_checkpoint_step: Optional[int],\n) -> None:\n \"\"\"Runs the decoding once on the entire decoder datasets for SPMD model.\n\n Args:\n task_p: Params for the task that encapsulates an SPMD model.\n input_p: List of input params to be decoded.\n job_log_dir: Directory for the job logs.\n checkpoint_type: Type of model checkpointing method to use.\n restore_checkpoint_dir: The directory from which to restore checkpoint.\n restore_checkpoint_step: If set, the checkpoint step to restore. If unset,\n try to restore from the latest checkpoint if any.\n \"\"\"\n # TODO(bf-jax): Retrieve the seeds from the model definition instead.\n prng_key = jax.random.PRNGKey(1234)\n prng_key, init_key = jax.random.split(prng_key)\n\n if restore_checkpoint_dir:\n restore_checkpoint_parent_dir = restore_checkpoint_dir\n if checkpoint_type == CheckpointType.CHECKPOINT_MULTI_HOST_FLAX:\n # TODO(zhouwk): add sanity check on number of subdirs and number of\n # processes and fail early if unequal.\n restore_checkpoint_dir = os.path.join(restore_checkpoint_dir,\n f'{jax.process_index():03d}')\n\n multi_host_checkpointing = bool(checkpoint_type in {\n CheckpointType.CHECKPOINT_MULTI_HOST_FLAX, CheckpointType.CHECKPOINT_GDA\n })\n\n sample_inputs = input_p[0].Instantiate().get_next()\n inputs_shape = tf.nest.map_structure(py_utils.get_global_input_shape_dtype,\n sample_inputs)\n\n model_p = task_p.model\n # TODO(b/198356509): This is a hack for now as we need to change some\n # annotations for mode='decode'. 
A future cl will move this logic\n # to a more generic model_p.update_sharding_params_v1(mode='decode').\n model_p.lm = model_p.lm.cls.set_sharding_params_v1(\n model_p.lm,\n replica_axis=model_p.lm.mesh_axis_names[0],\n data_axis=model_p.lm.mesh_axis_names[1],\n mdl_axis=model_p.lm.mesh_axis_names[2],\n device_ids_mesh=model_p.lm.device_mesh,\n mesh_axis_names=model_p.lm.mesh_axis_names,\n mode='decode')\n\n mesh_shape = model_p.device_mesh.shape\n device_mesh = mesh_utils.create_device_mesh(mesh_shape)\n logging.info('device_mesh: %s', device_mesh)\n jax_task = task_p.Instantiate()\n global_mesh = maps.Mesh(device_mesh, model_p.mesh_axis_names)\n with global_mesh:\n if restore_checkpoint_dir:\n model = jax_task.model\n model.instantiate_variable_configs()\n # Get the metadata from variables instead of actually instantiating them.\n partitioned_specs = jax_task.create_train_state_partition_specs(\n model.vars, discard_opt_states=True)\n # Instantiate the TrainState directly from the checkpoint.\n partitioned_train_state = checkpoints.restore_checkpoint(\n None,\n restore_checkpoint_dir,\n global_mesh=global_mesh,\n checkpoint_type=checkpoint_type,\n state_specs=partitioned_specs,\n step=restore_checkpoint_step)\n if multi_host_checkpointing:\n py_utils.sync_global_devices(\n f'checkpointer:restored:{restore_checkpoint_parent_dir}')\n decode_step_fn, inputs_partition_spec = (\n trainer_lib.get_partitioned_spmd_model_decode_fn(\n jax_task, init_key, partitioned_specs, inputs_shape))\n else:\n # When restore is not specified, randomly initiate the train_state.\n (partitioned_train_state, inputs_partition_spec, partitioned_specs,\n decode_step_fn) = trainer_lib.partition_spmd_model_decode(\n task_p, init_key, inputs_shape)\n logging.info('partitioned_train_state: %s',\n jax.tree_map(lambda x: x.shape, partitioned_train_state))\n # We do not fold in jax.process_index in contrast to the pmap version and\n # use a single global key instead to rely on pjit to split for different\n # replicas.\n logging.info('root prng_key: %s', prng_key)\n prng_key, decode_key = jax.random.split(prng_key)\n logging.info('eval prng_key: %s', decode_key)\n spmd_decode_step_fn = functools.partial(\n decode_step_fn,\n trainer_lib.train_state_for_eval_step(partitioned_train_state),\n decode_key)\n\n num_steps = [\n -1 if p.reset_for_eval else p.eval_loop_num_batches for p in input_p\n ]\n inputs = [p.Instantiate() for p in input_p]\n decodes = [list() for _ in input_p]\n process_id = jax.process_index()\n\n for split, num_split_steps in enumerate(num_steps):\n logging.info('Start decoding on input %s', input_p[split].name)\n step_num = 0\n while num_split_steps < 0 or step_num < num_split_steps:\n step_num += 1\n try:\n batch = inputs[split].get_next()\n except (tf.errors.OutOfRangeError, StopIteration):\n break\n if jax.config.jax_parallel_functions_output_gda:\n batch = py_utils.create_gda(batch, inputs_shape, global_mesh,\n inputs_partition_spec)\n _, out = spmd_decode_step_fn(batch)\n # Output is fully replicated now, so it's ok to unreplicate it by\n # retrieving from device 0 only.\n out = py_utils.maybe_unreplicate_gda(out)\n global_batch_size = next(iter(out.values())).shape[0]\n logging.info('Finished decoding input batch %d with %d examples',\n step_num, global_batch_size)\n # Manually shard the output per each jax process.\n # We require that all fields in the output is batch major.\n if global_batch_size % jax.process_count() != 0:\n raise ValueError(f'Global batch size {global_batch_size} must divide '\n 
f'jax process count {jax.process_count()}')\n for k, v in out.items():\n if v.shape[0] != global_batch_size:\n raise ValueError('We require that all fields in the decode output '\n 'to have batch size as the first dim, got shape='\n f'{v.shape} with key={k}, expect batch size = '\n f'{global_batch_size}')\n per_process_batch_size = global_batch_size // jax.process_count()\n\n def shard(x, per_process_batch_size=per_process_batch_size):\n return x[(process_id *\n per_process_batch_size):((process_id + 1) *\n per_process_batch_size)]\n\n out = jax.tree_map(shard, out)\n _, processed = jax_task.model.process_decode_out(inputs[split], out)\n decodes[split].extend(processed)\n logging.info('Finished processing decoded input batch %d', step_num)\n\n basedir = os.path.join(job_log_dir, 'decoder_out')\n dirnames = _get_dir_names(input_p)\n filename = _get_filename(\n py_utils.maybe_unreplicate_gda(partitioned_train_state.step))\n for s in dirnames:\n dir_path = os.path.join(basedir, s)\n if not tf.io.gfile.exists(dir_path):\n tf.io.gfile.makedirs(dir_path)\n filenames = [os.path.join(basedir, s, filename) for s in dirnames]\n for split, output_file in enumerate(filenames):\n logging.info('Writing decoder output to %s with %d entries', output_file,\n len(decodes[split]))\n io_utils.WriteKeyValuePairs(output_file, decodes[split])\n"
] | [
[
"tensorflow.compat.v2.io.gfile.exists",
"tensorflow.compat.v2.io.gfile.makedirs",
"tensorflow.compat.v2.nest.map_structure"
]
] |
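Both evaluation loops and the pmap decode loop in eval.py above repeat the same polling idiom: sleep until checkpoints.latest_checkpoint() returns something newer than the checkpoint last evaluated. A distilled sketch of that pattern (wait_for_new_checkpoint is a hypothetical helper; latest_checkpoint stands in for checkpoints.latest_checkpoint):

    import time

    def wait_for_new_checkpoint(latest_checkpoint, checkpoint_dir,
                                last_checkpoint, poll_seconds=60):
        # Block until the directory contains a checkpoint newer than
        # `last_checkpoint`, polling once per `poll_seconds`.
        new_checkpoint = latest_checkpoint(checkpoint_dir)
        while new_checkpoint == last_checkpoint:
            time.sleep(poll_seconds)
            new_checkpoint = latest_checkpoint(checkpoint_dir)
        return new_checkpoint

Factoring the wait out like this would remove the duplicated sleep loops, but the source keeps them inline in each of evaluate_pmap_model, evaluate_spmd_model, and decode_pmap_model.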
pakesson/scaml | [
"c69d422d6839d75a81426c81fd8d570fa421744b"
] | [
"explain.py"
] | [
"#!/usr/bin/env python\n\nimport sys\nimport math\nimport numpy as np\n\nfrom tensorflow.keras.models import load_model\n\nfrom aes import aes_sbox, aes_sbox_inv\n\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\n\ndef get_label(plaintext, key, index):\n return aes_sbox[plaintext[index] ^ key[index]]\n\nnum_classes = 256\nattack_byte = 0\nstart_trace_to_attack = 100\nnumber_of_traces_to_attack = 25\nnumber_of_traces_to_explain = 5\nocclusion_size = 1\n\ndef apply_occlusion(sample, x, occlusion_size=1, occlusion_value=0):\n occluded_sample = np.array(sample, copy=True)\n occluded_sample[x:x+occlusion_size, :] = occlusion_value\n return occluded_sample\n\ndef get_occlusion_sensitivity(samples, model, class_index, occlusion_size=1):\n print(\"Generating occlusion sensitivity maps...\")\n\n confidence_map = np.zeros(math.ceil(samples[0].shape[0] / occlusion_size))\n sensitivity_map = np.zeros(math.ceil(samples[0].shape[0] / occlusion_size))\n\n for idx, sample in enumerate(samples):\n print(f\" Sample {idx}\")\n\n occlusion_value = np.mean(sample)\n\n occlusions = [\n apply_occlusion(sample, x, occlusion_size, occlusion_value)\n for x in range(0, sample.shape[0], occlusion_size)\n ]\n\n predictions = model.predict(np.array(occlusions), batch_size=32)\n target_class_predictions = [\n prediction[class_index[idx]] for prediction in predictions\n ]\n\n for x, confidence in zip(range(sensitivity_map.shape[0]), target_class_predictions):\n confidence_map[x] += confidence\n\n # Mean confidence value\n confidence_map = confidence_map / samples.shape[0]\n sensitivity_map = 1 - confidence_map\n\n # Scale back up\n result = np.zeros(samples[0].shape[0])\n for x in range(result.shape[0]):\n result[x] = sensitivity_map[x // occlusion_size]\n\n return result\n\ndef explain(data, model, class_index, occlusion_size=1):\n # Make sure the data shape is (num_traces, num_points_per_trace, x)\n if len(data.shape) == 2:\n data = data.reshape((1, data.shape[0], data.shape[1]))\n class_index = class_index.reshape((1, class_index.shape[0], class_index.shape[1]))\n elif len(data.shape) != 3:\n raise ValueError(\"unsupported data shape\")\n\n # Generate one map for all samples\n return get_occlusion_sensitivity(data, model, class_index, occlusion_size)\n\nif __name__ == '__main__':\n if len(sys.argv) != 4:\n print(\"Usage:\")\n print(f\" {sys.argv[0]} <model filename> <trace filename> <sensitivity map filename>\")\n exit()\n\n model_filename = sys.argv[1]\n trace_filename = sys.argv[2]\n sensitivity_map_filename = sys.argv[3]\n\n model = load_model(model_filename)\n print(\"Input shape: \" + str(model.input_shape))\n\n traces = np.load(trace_filename)\n\n print(traces.files)\n\n trace_array = traces['trace_array']\n textin_array = traces['textin_array']\n known_keys = traces['known_keys']\n\n trace_array = trace_array.reshape((trace_array.shape[0], trace_array.shape[1], 1))\n\n # Run an initial prediction before we try to explain anything\n result = model.predict(trace_array[start_trace_to_attack:start_trace_to_attack+number_of_traces_to_attack, :, :])\n\n log10_sum_prediction = np.zeros(num_classes)\n for k in range(number_of_traces_to_attack):\n plaintext = textin_array[start_trace_to_attack+k, attack_byte]\n prediction = result[k]\n for l in range(num_classes):\n key_byte_index = (aes_sbox_inv[l] ^ plaintext)\n log10_sum_prediction[key_byte_index] += np.log10(prediction[l] + 1e-22)\n\n print(\"Best key byte guess: \" + str(np.argmax(log10_sum_prediction)))\n print(\"known_keys[0]: \" + 
str(known_keys[0]))\n\n # Run explainer\n data = trace_array[start_trace_to_attack:start_trace_to_attack+number_of_traces_to_explain, :, :]\n key_index = np.argmax(log10_sum_prediction)\n class_index = aes_sbox[textin_array[start_trace_to_attack:start_trace_to_attack+number_of_traces_to_explain, attack_byte] ^ key_index]\n\n sensitivity_map = explain(data, model, class_index, occlusion_size)\n\n # Save results\n np.savez_compressed(sensitivity_map_filename, sensitivity_map=sensitivity_map)\n\n # Visualize the results\n fig = plt.figure()\n plt.title(f\"Occlusion sensitivity for key byte {attack_byte} in trace {start_trace_to_attack}\")\n ax = fig.gca()\n x = np.linspace(0, sensitivity_map.shape[0]-1, sensitivity_map.shape[0])\n for i in range(0, sensitivity_map.shape[0]-1, occlusion_size):\n color = (sensitivity_map[i]-min(sensitivity_map))/np.ptp(sensitivity_map)\n ax.plot(x[i:i+occlusion_size+1], data[0, i:i+occlusion_size+1, 0], color=plt.cm.plasma(color))\n plt.show()\n"
] | [
[
"numpy.load",
"tensorflow.keras.models.load_model",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.argmax",
"matplotlib.pyplot.title",
"matplotlib.pyplot.cm.plasma",
"matplotlib.pyplot.show",
"numpy.log10",
"numpy.ptp",
"matplotlib.use",
"numpy.linspace",
"numpy.array",
"numpy.mean",
"numpy.savez_compressed"
]
] |
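The occlusion-sensitivity map in explain.py above is built by sliding a window over each trace, replacing the window with the trace mean, and recording how much the target-class confidence drops. A compact single-trace restatement of that idea (predict is a stand-in for model.predict; trace is shaped (num_points, 1) as in the script):

    import numpy as np

    def occlusion_sensitivity(trace, predict, class_index, occlusion_size=1):
        # Occlude each window with the mean trace value, then score the
        # occluded copies in one batch.
        occlusion_value = np.mean(trace)
        occluded = []
        for x in range(0, trace.shape[0], occlusion_size):
            sample = np.array(trace, copy=True)
            sample[x:x + occlusion_size, :] = occlusion_value
            occluded.append(sample)
        confidences = predict(np.array(occluded))[:, class_index]
        # High value = occluding this window hurt the prediction most,
        # i.e. the model leaks key-dependent information there.
        return 1.0 - confidences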
diegomrsantos/Python-Baseball | [
"4543df7a4d74e82106a3e8481553149c447d8ab6"
] | [
"stats/attendance.py"
] | [
"import pandas as pd\nimport matplotlib.pyplot as plt\nfrom data import games\n\ninfo_filter = games['type'] == 'info'\nattendance_filter = games['multi2'] == 'attendance'\nattendance = games.loc[info_filter & attendance_filter, ['year', 'multi3']]\n\nattendance.columns = ['year', 'attendance']\n\nattendance.loc[:, 'attendance'] = pd.to_numeric(attendance.loc[:, 'attendance'])\n\nattendance.plot(x='year', y='attendance', figsize=(15, 7), kind='bar')\nplt.xlabel('Year')\nplt.ylabel('Attendance')\nplt.axhline(y=attendance['attendance'].mean(), label='Mean', linestyle='--', color='green')\nplt.show()\n"
] | [
[
"pandas.to_numeric",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
]
] |
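The attendance script above relies on pandas boolean-mask selection with .loc plus a column rename. A toy illustration of the same pattern on made-up rows:

    import pandas as pd

    games = pd.DataFrame({
        'type':   ['info', 'info', 'play'],
        'multi2': ['attendance', 'temp', None],
        'multi3': ['45342', '72', None],
        'year':   [2016, 2016, 2016],
    })
    info_filter = games['type'] == 'info'
    attendance_filter = games['multi2'] == 'attendance'
    attendance = games.loc[info_filter & attendance_filter, ['year', 'multi3']]
    attendance.columns = ['year', 'attendance']
    attendance.loc[:, 'attendance'] = pd.to_numeric(attendance.loc[:, 'attendance'])
    print(attendance)  # one row: year=2016, attendance=45342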
rperi/trustworthy-asv-fairness | [
"15df69a8f3f8ad5262002c9e3d12aa12ea8f1c6f"
] | [
"evaluate/evaluate_FDR.py"
] | [
"import numpy as np\nimport pandas as pd\nimport os\nimport pdb\nfrom scipy.spatial.distance import cosine\nfrom sklearn.metrics import roc_curve, confusion_matrix\nimport sys\nfrom tqdm import tqdm\nfrom sklearn.metrics import auc\nimport argparse\n\nfprs = [0.01,0.02,0.03,0.04,0.05,0.06,0.07,0.08,0.09,0.1,0.2,0.3,0.4,0.5]\ngroups = ['male_male','female_female']\nomegas = [0.0, 0.25, 0.5, 0.75, 1.0]\nemb_map = {}\nxvec_map = {}\n\ndef compute_scores(df_, eer_threshold_overall=0, agnostic_FLAG=False, emb_FLAG=True):\n if emb_FLAG:\n emb_mapping = emb_map\n else:\n emb_mapping = xvec_map\n similarity_scores= []\n labels = []\n for idx, row in tqdm(enumerate(df_.iterrows())):\n enrol = row[1]['audio_1']\n test = row[1]['audio_2']\n label = row[1]['label']\n if not enrol in emb_mapping.keys():\n print(enrol)\n if not test in emb_mapping.keys():\n print(test)\n\n sim = 1 - cosine(emb_mapping[enrol],emb_mapping[test])\n\n similarity_scores.append(sim)\n labels.append(label)\n fpr, tpr, threshold = roc_curve(labels, similarity_scores)\n fnr = 1 - tpr\n eer_threshold = threshold[np.nanargmin(np.absolute((fnr - fpr)))]\n eer1 = fpr[np.nanargmin(np.absolute((fnr - fpr)))]\n eer2 = fnr[np.nanargmin(np.absolute((fnr - fpr)))]\n eer = np.mean((eer1,eer2))\n\n sim = np.array(similarity_scores)\n labels = np.array(labels)\n if not agnostic_FLAG:\n fpr, fnr = compute_fpr_fnr(sim, labels, eer_threshold_overall)\n return sim, labels, eer, fpr, fnr\n else:\n return sim, labels, eer, eer_threshold\n\ndef compute_fpr_fnr(sim,labels_e1, thresh):\n\n preds = np.zeros(labels_e1.shape[0])\n preds[sim > thresh] = 1\n tn, fp, fn, tp = confusion_matrix(labels_e1, preds).ravel()\n fpr = fp/(fp+tn)\n fnr = fn/(fn+tp)\n return fpr, fnr\n\ndef compute_fdr(fprs, fnrs, omega=0.5):\n A = np.absolute(fprs[0]-fprs[1])\n B = np.absolute(fnrs[0]-fnrs[1])\n \n return 1 - (omega*A + (1-omega)*B)\n\ndef compute_auFDR(fpr_ov, tpr_ov, threshold_ov, sim_g0, sim_g1, labels_g0, labels_g1, \n score_dir, emb_FLAG=True, omega=0.5):\n # FDRs at various thersholds\n fdrs = []\n fnrs = []\n for fpr in tqdm(fprs):\n thresh = threshold_ov[np.nanargmin(np.absolute((fpr_ov-fpr)))]\n fnr = 1 - tpr_ov[np.nanargmin(np.absolute((fpr_ov-fpr)))]\n fpr_g0, fnr_g0 = compute_fpr_fnr(sim_g0, labels_g0, thresh)\n fpr_g1, fnr_g1 = compute_fpr_fnr(sim_g1, labels_g1, thresh)\n fdr = compute_fdr((fpr_g0, fpr_g1), (fnr_g0, fnr_g1), float(omega))\n fdrs.append(np.round(fdr*100,2))\n fnrs.append(np.round(fnr*100,2))\n auFDR = auc([x*100 for x in fprs], fdrs)\n auFDR_10 = auc([x*100 for x in fprs[0:10]], fdrs[0:10])\n df = pd.DataFrame(zip(fprs,fdrs, fnrs), columns=['fpr','fdr', 'fnr'])\n if emb_FLAG:\n print(\"Alpha = {} auFDR auFDR_10\".format(omega))\n print(\"Embeddings: {} {}\\n\".format(auFDR, auFDR_10))\n df.to_csv(os.path.join(score_dir, 'fdr_at_fpr_gender_omega_{}.csv'.format(omega)), index=None)\n else:\n print(\"Alpha = {} auFDR auFDR_10\".format(omega))\n print(\"xvectors: {} {}\\n\".format(auFDR, auFDR_10))\n df.to_csv(os.path.join(score_dir, 'fdr_at_fpr_gender_omega_{}.csv'.format(omega)), index=None)\n return auFDR, auFDR_10\n\ndef main(args):\n xvec_FLAG = args.eval_xvector\n\n # Creating necessary trials for gender-specific evaluations \n trial_dir = args.trials_root\n trials = os.path.join(trial_dir, 'Test-Combined.csv')\n df = pd.read_csv(trials)\n df['label'] = pd.to_numeric(df['label'])\n\n df_m = df.loc[df[\"gender_1\"]=='male']\n df_f = df.loc[df[\"gender_1\"]=='female']\n df_m_m = df_m.loc[df_m[\"gender_2\"]=='male']\n df_f_f = 
df_f.loc[df_f[\"gender_2\"]=='female']\n \n if not os.path.exists(os.path.join(trial_dir,'Test-male-all.csv')):\n df_m.to_csv(os.path.join(trial_dir,'Test-male-all.csv'), index=None)\n if not os.path.exists(os.path.join(trial_dir,'Test-female-all.csv')):\n df_f.to_csv(os.path.join(trial_dir,'Test-female-all.csv'), index=None)\n if not os.path.exists(os.path.join(trial_dir,'Test-male-male.csv')):\n df_m_m.to_csv(os.path.join(trial_dir,'Test-male-male.csv'), index=None)\n if not os.path.exists(os.path.join(trial_dir,'Test-female-female.csv')):\n df_f_f.to_csv(os.path.join(trial_dir,'Test-female-female.csv'), index=None)\n\n # Create directories to save ASV scores\n scores_dir_base = args.scores_root\n scores_dir_xvec = os.path.join(scores_dir_base,'baseline')\n scores_dir = os.path.join(scores_dir_base,'{}'.format(args.mode))\n os.makedirs(scores_dir_xvec, exist_ok=True)\n os.makedirs(scores_dir, exist_ok=True)\n\n # Load extracted embeddings and xvectors\n test_utts = np.load(os.path.join(args.data_root,'test_utts.npy'))\n \n pred_dir = args.pred_root\n e1 = np.load(os.path.join(pred_dir,'emb1.npy'))\n for idx, utt in enumerate(test_utts):\n emb_map[utt] = e1[idx,:]\n if xvec_FLAG:\n xvec = np.load(os.path.join(args.data_root,'test_data.npy'))\n for idx, utt in enumerate(test_utts):\n xvec_map[utt] = xvec[idx,:]\n\n\n # Gender-agnostic scoring\n print(\"Computing Gender-agnostic scores\")\n if os.path.exists(os.path.join(scores_dir_xvec, 'sim_xvec_overall.npy')) and os.path.exists(os.path.join(scores_dir, 'sim_e1_overall.npy')) and os.path.exists(os.path.join(scores_dir_xvec, 'labels_overall.npy')):\n sim_e1_ov = np.load(os.path.join(scores_dir, 'sim_e1_overall.npy'))\n labels_ov = np.load(os.path.join(scores_dir_xvec, 'labels_overall.npy'))\n fpr, tpr, threshold = roc_curve(labels_ov, sim_e1_ov)\n fnr = 1 - tpr\n eer_threshold_e1_ov = threshold[np.nanargmin(np.absolute((fnr - fpr)))]\n eer_e1_ov = fpr[np.nanargmin(np.absolute((fnr - fpr))) ]\n\n if xvec_FLAG:\n sim_xvec_ov = np.load(os.path.join(scores_dir_xvec, 'sim_xvec_overall.npy'))\n fpr, tpr, threshold = roc_curve(labels_ov, sim_xvec_ov)\n fnr = 1 - tpr\n eer_threshold_xvec_ov = threshold[np.nanargmin(np.absolute((fnr - fpr)))]\n eer_xvec_ov = fpr[np.nanargmin(np.absolute((fnr - fpr)))]\n print(\"Done scoring Gender-agnostic trials\")\n else:\n sim_e1_ov, labels_ov, eer_e1_ov, eer_threshold_e1_ov = compute_scores(df, agnostic_FLAG=True)\n np.save(os.path.join(scores_dir, 'sim_e1_overall'), sim_e1_ov)\n np.save(os.path.join(scores_dir_xvec, 'labels_overall'), labels_ov)\n if xvec_FLAG:\n sim_xvec_ov, labels_xvec_ov, eer_xvec_ov, eer_threshold_xvec_ov = compute_scores(df, agnostic_FLAG=True, emb_FLAG=False)\n np.save(os.path.join(scores_dir_xvec, 'sim_xvec_overall'), sim_xvec_ov)\n print(\"Done scoring Gender-agnostic trials\")\n\n #Gender-specific scoring\n print(\"Computing Gender-specific scores\")\n if (not os.path.exists(os.path.join(scores_dir, 'sim_e1_male_male.npy'))) or (not os.path.exists(os.path.join(scores_dir, 'sim_e1_female_female.npy'))):\n sim_e1_m, labels_e1_m, eer_e1_m, fpr_e1_m, fnr_e1_m = compute_scores(df_m_m, eer_threshold_e1_ov)\n sim_e1_f, labels_e1_f, eer_e1_f, fpr_e1_f, fnr_e1_f = compute_scores(df_f_f, eer_threshold_e1_ov)\n np.save(os.path.join(scores_dir, 'sim_e1_male_male'), sim_e1_m)\n np.save(os.path.join(scores_dir, 'sim_e1_female_female'), sim_e1_f)\n np.save(os.path.join(scores_dir_xvec, 'labels_male_male'), labels_e1_m)\n np.save(os.path.join(scores_dir_xvec, 'labels_female_female'), labels_e1_f)\n 
\n print(\"EER_all EER_Male EER_Female\")\n print(\"Embeddings: {} {} {}\\n\".format(np.round(eer_e1_ov*100,2), np.round(eer_e1_m*100,2), np.round(eer_e1_f*100,2)))\n \n sim_e1_g0 = sim_e1_m\n sim_e1_g1 = sim_e1_f\n labels_g0 = labels_e1_m\n labels_g1 = labels_e1_f\n print(\"Done scoring Gender-specific trials\")\n else:\n sim_e1 = []\n labels = []\n for group in groups:\n sim_e1.append(np.load(os.path.join(scores_dir, 'sim_e1_{}.npy'.format(group))))\n labels.append(np.load(os.path.join(scores_dir_xvec, 'labels_{}.npy'.format(group))))\n sim_e1_g0 = sim_e1[0]\n sim_e1_g1 = sim_e1[1]\n labels_g0 = labels[0]\n labels_g1 = labels[1]\n print(\"Done scoring Gender-specific trials\")\n if xvec_FLAG:\n if (not os.path.exists(os.path.join(scores_dir_xvec, 'sim_xvec_male_male.npy'))) or (not os.path.exists(os.path.join(scores_dir_xvec, 'sim_xvec_female_female.npy'))):\n print(\"Computing Gender-specific scores for x-vectors\")\n sim_xvec_m, labels_xvec_m, eer_xvec_m, fpr_xvec_m, fnr_xvec_m = compute_scores(df_m_m, eer_threshold_xvec_ov, emb_FLAG=False)\n sim_xvec_f, labels_xvec_f, eer_xvec_f, fpr_xvec_f, fnr_xvec_f = compute_scores(df_f_f, eer_threshold_xvec_ov, emb_FLAG=False)\n np.save(os.path.join(scores_dir_xvec, 'sim_xvec_male_male'), sim_xvec_m)\n np.save(os.path.join(scores_dir_xvec, 'sim_xvec_female_female'), sim_xvec_f)\n sim_xvec_g0 = sim_xvec_m\n sim_xvec_g1 = sim_xvec_f\n print(\"x-vector: {} {} {}\\n\".format(np.round(eer_xvec_ov*100,2), np.round(eer_xvec_m*100,2),np.round(eer_xvec_f*100,2)))\n print(\"Done scoring Gender-specific trials for x-vectors\")\n else:\n sim_xvec = []\n for group in groups:\n sim_xvec.append(np.load(os.path.join(scores_dir_xvec, 'sim_xvec_{}.npy'.format(group))))\n sim_xvec_g0 = sim_xvec[0]\n sim_xvec_g1 = sim_xvec[1]\n print(\"Done scoring Gender-specific trials for x-vectors\")\n\n # Compute area under FDR-FPR curve\n fpr_ov, tpr_ov, threshold_ov = roc_curve(labels_ov, sim_e1_ov)\n aus, au10s = [], []\n for omega in omegas:\n au, au10 = compute_auFDR(fpr_ov, tpr_ov, threshold_ov, sim_e1_g0, sim_e1_g1, labels_g0, labels_g1, scores_dir, emb_FLAG=True, omega=omega)\n aus.append(au)\n au10s.append(au10)\n df = pd.DataFrame(zip(omegas,aus, au10s), columns=['omega','au', 'au10'])\n df.to_csv(os.path.join(scores_dir, 'au_fdrs.csv'), index=None)\n\n if xvec_FLAG:\n fpr_ov, tpr_ov, threshold_ov = roc_curve(labels_ov, sim_xvec_ov)\n aus, aus10 = [],[]\n for omega in omegas:\n compute_auFDR(fpr_ov, tpr_ov, threshold_ov, sim_xvec_g0, sim_xvec_g1, labels_g0, labels_g1, scores_dir_xvec, emb_FLAG=False, omega=omega)\n aus.append(au)\n au10s.append(au10)\n df = pd.DataFrame(zip(omegas,aus, au10s), columns=['omega','au', 'au10'])\n df.to_csv(os.path.join(scores_dir_xvec, 'aufdrs.csv'), index=None)\n pdb.set_trace()\nif __name__=='__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--mode', type=str, required=True)\n parser.add_argument('--trials_root', type=str, required=True,\n help=\"Directory containing Test-Combined.csv\")\n\n parser.add_argument('--data_root', type=str, required=True,\n help=\"Directory containing test_utts.npy\")\n\n parser.add_argument('--pred_root', type=str, required=True,\n help=\"Directory containing Extracted embeddings\")\n\n parser.add_argument('--scores_root', type=str, required=True,\n help=\"Directory to save ASV scores\")\n parser.add_argument('--eval_xvector', default=False, action='store_true')\n args = parser.parse_args()\n main(args)\n\n"
] | [
[
"numpy.zeros",
"sklearn.metrics.roc_curve",
"pandas.read_csv",
"sklearn.metrics.auc",
"pandas.to_numeric",
"sklearn.metrics.confusion_matrix",
"numpy.absolute",
"numpy.array",
"numpy.round",
"scipy.spatial.distance.cosine",
"numpy.mean"
]
] |
creare-com/podpac | [
"7feb5c957513c146ce73ba1c36c630284f513a6e"
] | [
"podpac/core/coordinates/test/test_uniform_coordinates1d.py"
] | [
"from datetime import datetime\nimport json\n\nimport pytest\nimport traitlets as tl\nimport numpy as np\nfrom numpy.testing import assert_equal\n\nimport podpac\nfrom podpac.core.coordinates.utils import make_coord_array\nfrom podpac.core.coordinates.coordinates1d import Coordinates1d\nfrom podpac.core.coordinates.array_coordinates1d import ArrayCoordinates1d\nfrom podpac.core.coordinates.uniform_coordinates1d import UniformCoordinates1d\nfrom podpac.core.coordinates.coordinates import Coordinates\n\n\nclass TestUniformCoordinatesCreation(object):\n def test_numerical(self):\n # ascending\n c = UniformCoordinates1d(0, 50, 10)\n a = np.array([0, 10, 20, 30, 40, 50], dtype=float)\n assert c.start == 0\n assert c.stop == 50\n assert c.step == 10\n assert_equal(c.coordinates, a)\n assert_equal(c.bounds, [0, 50])\n assert c.coordinates[c.argbounds[0]] == c.bounds[0]\n assert c.coordinates[c.argbounds[1]] == c.bounds[1]\n assert c.size == 6\n assert c.dtype == float\n assert c.is_monotonic == True\n assert c.is_descending == False\n assert c.is_uniform == True\n\n # descending\n c = UniformCoordinates1d(50, 0, -10)\n a = np.array([50, 40, 30, 20, 10, 0], dtype=float)\n assert c.start == 50\n assert c.stop == 0\n assert c.step == -10\n assert_equal(c.coordinates, a)\n assert_equal(c.bounds, [0, 50])\n assert c.coordinates[c.argbounds[0]] == c.bounds[0]\n assert c.coordinates[c.argbounds[1]] == c.bounds[1]\n assert c.size == 6\n assert c.dtype == float\n assert c.is_monotonic == True\n assert c.is_descending == True\n assert c.is_uniform == True\n\n def test_numerical_inexact(self):\n # ascending\n c = UniformCoordinates1d(0, 49, 10)\n a = np.array([0, 10, 20, 30, 40], dtype=float)\n assert c.start == 0\n assert c.stop == 49\n assert c.step == 10\n assert_equal(c.coordinates, a)\n assert_equal(c.bounds, [0, 40])\n assert c.coordinates[c.argbounds[0]] == c.bounds[0]\n assert c.coordinates[c.argbounds[1]] == c.bounds[1]\n assert c.size == 5\n assert c.dtype == float\n assert c.is_monotonic == True\n assert c.is_descending == False\n assert c.is_uniform == True\n\n # descending\n c = UniformCoordinates1d(50, 1, -10)\n a = np.array([50, 40, 30, 20, 10], dtype=float)\n assert c.start == 50\n assert c.stop == 1\n assert c.step == -10\n assert_equal(c.coordinates, a)\n assert_equal(c.bounds, [10, 50])\n assert c.coordinates[c.argbounds[0]] == c.bounds[0]\n assert c.coordinates[c.argbounds[1]] == c.bounds[1]\n assert c.dtype == float\n assert c.size == a.size\n assert c.is_monotonic == True\n assert c.is_descending == True\n assert c.is_uniform == True\n\n def test_datetime(self):\n # ascending\n c = UniformCoordinates1d(\"2018-01-01\", \"2018-01-04\", \"1,D\")\n a = np.array([\"2018-01-01\", \"2018-01-02\", \"2018-01-03\", \"2018-01-04\"]).astype(np.datetime64)\n assert c.start == np.datetime64(\"2018-01-01\")\n assert c.stop == np.datetime64(\"2018-01-04\")\n assert c.step == np.timedelta64(1, \"D\")\n assert_equal(c.coordinates, a)\n assert_equal(c.bounds, a[[0, -1]])\n assert c.coordinates[c.argbounds[0]] == c.bounds[0]\n assert c.coordinates[c.argbounds[1]] == c.bounds[1]\n assert c.size == a.size\n assert c.dtype == np.datetime64\n assert c.is_monotonic == True\n assert c.is_descending == False\n assert c.is_uniform == True\n\n # descending\n c = UniformCoordinates1d(\"2018-01-04\", \"2018-01-01\", \"-1,D\")\n a = np.array([\"2018-01-04\", \"2018-01-03\", \"2018-01-02\", \"2018-01-01\"]).astype(np.datetime64)\n assert c.start == np.datetime64(\"2018-01-04\")\n assert c.stop == 
np.datetime64(\"2018-01-01\")\n assert c.step == np.timedelta64(-1, \"D\")\n assert_equal(c.coordinates, a)\n assert_equal(c.bounds, a[[-1, 0]])\n assert c.coordinates[c.argbounds[0]] == c.bounds[0]\n assert c.coordinates[c.argbounds[1]] == c.bounds[1]\n assert c.size == a.size\n assert c.dtype == np.datetime64\n assert c.is_monotonic == True\n assert c.is_descending == True\n assert c.is_uniform == True\n\n def test_datetime_inexact(self):\n # ascending\n c = UniformCoordinates1d(\"2018-01-01\", \"2018-01-06\", \"2,D\")\n a = np.array([\"2018-01-01\", \"2018-01-03\", \"2018-01-05\"]).astype(np.datetime64)\n assert c.start == np.datetime64(\"2018-01-01\")\n assert c.stop == np.datetime64(\"2018-01-06\")\n assert c.step == np.timedelta64(2, \"D\")\n assert_equal(c.coordinates, a)\n assert_equal(c.bounds, a[[0, -1]])\n assert c.coordinates[c.argbounds[0]] == c.bounds[0]\n assert c.coordinates[c.argbounds[1]] == c.bounds[1]\n assert c.size == a.size\n assert c.dtype == np.datetime64\n assert c.is_monotonic == True\n assert c.is_descending == False\n assert c.is_uniform == True\n\n # descending\n c = UniformCoordinates1d(\"2018-01-06\", \"2018-01-01\", \"-2,D\")\n a = np.array([\"2018-01-06\", \"2018-01-04\", \"2018-01-02\"]).astype(np.datetime64)\n assert c.start == np.datetime64(\"2018-01-06\")\n assert c.stop == np.datetime64(\"2018-01-01\")\n assert c.step == np.timedelta64(-2, \"D\")\n assert_equal(c.coordinates, a)\n assert_equal(c.bounds, a[[-1, 0]])\n assert c.coordinates[c.argbounds[0]] == c.bounds[0]\n assert c.coordinates[c.argbounds[1]] == c.bounds[1]\n assert c.size == a.size\n assert c.dtype == np.datetime64\n assert c.is_monotonic == True\n assert c.is_descending == True\n assert c.is_uniform == True\n\n def test_datetime_month_step(self):\n # ascending\n c = UniformCoordinates1d(\"2018-01-01\", \"2018-04-01\", \"1,M\")\n a = np.array([\"2018-01-01\", \"2018-02-01\", \"2018-03-01\", \"2018-04-01\"]).astype(np.datetime64)\n assert c.start == np.datetime64(\"2018-01-01\")\n assert c.stop == np.datetime64(\"2018-04-01\")\n assert c.step == np.timedelta64(1, \"M\")\n assert_equal(c.coordinates, a)\n assert_equal(c.bounds, a[[0, -1]])\n assert c.coordinates[c.argbounds[0]] == c.bounds[0]\n assert c.coordinates[c.argbounds[1]] == c.bounds[1]\n assert c.size == a.size\n assert c.dtype == np.datetime64\n assert c.is_monotonic == True\n assert c.is_descending == False\n assert c.is_uniform == True\n\n # descending\n c = UniformCoordinates1d(\"2018-04-01\", \"2018-01-01\", \"-1,M\")\n a = np.array([\"2018-04-01\", \"2018-03-01\", \"2018-02-01\", \"2018-01-01\"]).astype(np.datetime64)\n assert c.start == np.datetime64(\"2018-04-01\")\n assert c.stop == np.datetime64(\"2018-01-01\")\n assert c.step == np.timedelta64(-1, \"M\")\n assert_equal(c.coordinates, a)\n assert_equal(c.bounds, a[[-1, 0]])\n assert c.coordinates[c.argbounds[0]] == c.bounds[0]\n assert c.coordinates[c.argbounds[1]] == c.bounds[1]\n assert c.size == a.size\n assert c.dtype == np.datetime64\n assert c.is_monotonic == True\n assert c.is_descending == True\n assert c.is_uniform == True\n\n def test_datetime_year_step(self):\n # ascending, exact\n c = UniformCoordinates1d(\"2018-01-01\", \"2021-01-01\", \"1,Y\")\n a = np.array([\"2018-01-01\", \"2019-01-01\", \"2020-01-01\", \"2021-01-01\"]).astype(np.datetime64)\n assert c.start == np.datetime64(\"2018-01-01\")\n assert c.stop == np.datetime64(\"2021-01-01\")\n assert c.step == np.timedelta64(1, \"Y\")\n assert_equal(c.coordinates, a)\n assert_equal(c.bounds, a[[0, -1]])\n 
assert c.coordinates[c.argbounds[0]] == c.bounds[0]\n assert c.coordinates[c.argbounds[1]] == c.bounds[1]\n assert c.size == a.size\n assert c.dtype == np.datetime64\n assert c.is_monotonic == True\n assert c.is_descending == False\n assert c.is_uniform == True\n\n # descending, exact\n c = UniformCoordinates1d(\"2021-01-01\", \"2018-01-01\", \"-1,Y\")\n a = np.array([\"2021-01-01\", \"2020-01-01\", \"2019-01-01\", \"2018-01-01\"]).astype(np.datetime64)\n assert c.start == np.datetime64(\"2021-01-01\")\n assert c.stop == np.datetime64(\"2018-01-01\")\n assert c.step == np.timedelta64(-1, \"Y\")\n assert_equal(c.coordinates, a)\n assert_equal(c.bounds, a[[-1, 0]])\n assert c.coordinates[c.argbounds[0]] == c.bounds[0]\n assert c.coordinates[c.argbounds[1]] == c.bounds[1]\n assert c.size == a.size\n assert c.dtype == np.datetime64\n assert c.is_monotonic == True\n assert c.is_descending == True\n assert c.is_uniform == True\n\n # ascending, inexact (two cases)\n c = UniformCoordinates1d(\"2018-01-01\", \"2021-04-01\", \"1,Y\")\n a = np.array([\"2018-01-01\", \"2019-01-01\", \"2020-01-01\", \"2021-01-01\"]).astype(np.datetime64)\n assert c.start == np.datetime64(\"2018-01-01\")\n assert c.stop == np.datetime64(\"2021-04-01\")\n assert c.step == np.timedelta64(1, \"Y\")\n assert_equal(c.coordinates, a)\n assert_equal(c.bounds, a[[0, -1]])\n assert c.coordinates[c.argbounds[0]] == c.bounds[0]\n assert c.coordinates[c.argbounds[1]] == c.bounds[1]\n assert c.size == a.size\n assert c.dtype == np.datetime64\n assert c.is_monotonic == True\n assert c.is_descending == False\n assert c.is_uniform == True\n\n c = UniformCoordinates1d(\"2018-04-01\", \"2021-01-01\", \"1,Y\")\n a = np.array([\"2018-04-01\", \"2019-04-01\", \"2020-04-01\"]).astype(np.datetime64)\n assert c.start == np.datetime64(\"2018-04-01\")\n assert c.stop == np.datetime64(\"2021-01-01\")\n assert c.step == np.timedelta64(1, \"Y\")\n assert_equal(c.coordinates, a)\n assert_equal(c.bounds, a[[0, -1]])\n assert c.coordinates[c.argbounds[0]] == c.bounds[0]\n assert c.coordinates[c.argbounds[1]] == c.bounds[1]\n assert c.size == a.size\n assert c.dtype == np.datetime64\n assert c.is_monotonic == True\n assert c.is_descending == False\n assert c.is_uniform == True\n\n # descending, inexact (two cases)\n c = UniformCoordinates1d(\"2021-01-01\", \"2018-04-01\", \"-1,Y\")\n a = np.array([\"2021-01-01\", \"2020-01-01\", \"2019-01-01\", \"2018-01-01\"]).astype(np.datetime64)\n assert c.start == np.datetime64(\"2021-01-01\")\n assert c.stop == np.datetime64(\"2018-04-01\")\n assert c.step == np.timedelta64(-1, \"Y\")\n assert_equal(c.coordinates, a)\n assert_equal(c.bounds, a[[-1, 0]])\n assert c.coordinates[c.argbounds[0]] == c.bounds[0]\n assert c.coordinates[c.argbounds[1]] == c.bounds[1]\n assert c.size == a.size\n assert c.dtype == np.datetime64\n assert c.is_monotonic == True\n assert c.is_descending == True\n assert c.is_uniform == True\n\n c = UniformCoordinates1d(\"2021-04-01\", \"2018-01-01\", \"-1,Y\")\n a = np.array([\"2021-04-01\", \"2020-04-01\", \"2019-04-01\", \"2018-04-01\"]).astype(np.datetime64)\n assert c.start == np.datetime64(\"2021-04-01\")\n assert c.stop == np.datetime64(\"2018-01-01\")\n assert c.step == np.timedelta64(-1, \"Y\")\n assert_equal(c.coordinates, a)\n assert_equal(c.bounds, a[[-1, 0]])\n assert c.coordinates[c.argbounds[0]] == c.bounds[0]\n assert c.coordinates[c.argbounds[1]] == c.bounds[1]\n assert c.size == a.size\n assert c.dtype == np.datetime64\n assert c.is_monotonic == True\n assert c.is_descending 
== True\n assert c.is_uniform == True\n\n def test_numerical_size(self):\n # ascending\n c = UniformCoordinates1d(0, 10, size=20)\n assert c.start == 0\n assert c.stop == 10\n assert c.step == 10 / 19.0\n assert_equal(c.coordinates, np.linspace(0, 10, 20))\n assert_equal(c.bounds, [0, 10])\n assert c.coordinates[c.argbounds[0]] == c.bounds[0]\n assert c.coordinates[c.argbounds[1]] == c.bounds[1]\n assert c.size == 20\n assert c.dtype == float\n assert c.is_monotonic == True\n assert c.is_descending == False\n assert c.is_uniform == True\n\n # descending\n c = UniformCoordinates1d(10, 0, size=20)\n assert c.start == 10\n assert c.stop == 0\n assert c.step == -10 / 19.0\n assert_equal(c.coordinates, np.linspace(10, 0, 20))\n assert_equal(c.bounds, [0, 10])\n assert c.coordinates[c.argbounds[0]] == c.bounds[0]\n assert c.coordinates[c.argbounds[1]] == c.bounds[1]\n assert c.size == 20\n assert c.dtype == float\n assert c.is_monotonic == True\n assert c.is_descending == True\n assert c.is_uniform == True\n\n def test_datetime_size(self):\n # ascending\n c = UniformCoordinates1d(\"2018-01-01\", \"2018-01-10\", size=10)\n assert c.start == np.datetime64(\"2018-01-01\")\n assert c.stop == np.datetime64(\"2018-01-10\")\n assert_equal(c.bounds, [np.datetime64(\"2018-01-01\"), np.datetime64(\"2018-01-10\")])\n assert c.coordinates[c.argbounds[0]] == c.bounds[0]\n assert c.coordinates[c.argbounds[1]] == c.bounds[1]\n assert c.size == 10\n assert c.dtype == np.datetime64\n assert c.is_descending == False\n\n # descending\n c = UniformCoordinates1d(\"2018-01-10\", \"2018-01-01\", size=10)\n assert c.start == np.datetime64(\"2018-01-10\")\n assert c.stop == np.datetime64(\"2018-01-01\")\n assert_equal(c.bounds, [np.datetime64(\"2018-01-01\"), np.datetime64(\"2018-01-10\")])\n assert c.coordinates[c.argbounds[0]] == c.bounds[0]\n assert c.coordinates[c.argbounds[1]] == c.bounds[1]\n assert c.size == 10\n assert c.dtype == np.datetime64\n assert c.is_descending == True\n\n # increase resolution\n c = UniformCoordinates1d(\"2018-01-01\", \"2018-01-10\", size=21)\n assert c.start == np.datetime64(\"2018-01-01\")\n assert c.stop == np.datetime64(\"2018-01-10\")\n assert_equal(c.bounds, [np.datetime64(\"2018-01-01\"), np.datetime64(\"2018-01-10\")])\n assert c.coordinates[c.argbounds[0]] == c.bounds[0]\n assert c.coordinates[c.argbounds[1]] == c.bounds[1]\n assert c.size == 21\n assert c.dtype == np.datetime64\n assert c.is_descending == False\n\n def test_datetime_size_invalid(self):\n with pytest.raises(ValueError, match=\"Cannot divide timedelta\"):\n c = UniformCoordinates1d(\"2018-01-01\", \"2018-01-10\", size=20)\n\n def test_numerical_size_floating_point_error(self):\n c = UniformCoordinates1d(50.619, 50.62795, size=30)\n assert c.size == 30\n\n def test_numerical_singleton(self):\n # positive step\n c = UniformCoordinates1d(1, 1, 10)\n a = np.array([1], dtype=float)\n assert c.start == 1\n assert c.stop == 1\n assert c.step == 10\n assert_equal(c.coordinates, a)\n assert_equal(c.bounds, [1, 1])\n assert c.size == 1\n assert c.dtype == float\n assert c.is_monotonic == True\n assert c.is_descending == None\n assert c.is_uniform == True\n\n # negative step\n c = UniformCoordinates1d(1, 1, -10)\n a = np.array([1], dtype=float)\n assert c.start == 1\n assert c.stop == 1\n assert c.step == -10\n assert_equal(c.coordinates, a)\n assert_equal(c.bounds, [1, 1])\n assert c.size == 1\n assert c.dtype == float\n assert c.is_monotonic == True\n assert c.is_descending == None\n assert c.is_uniform == True\n\n def 
test_datetime_singleton(self):\n # positive step\n c = UniformCoordinates1d(\"2018-01-01\", \"2018-01-01\", \"1,D\")\n a = np.array([\"2018-01-01\"]).astype(np.datetime64)\n assert c.start == np.datetime64(\"2018-01-01\")\n assert c.stop == np.datetime64(\"2018-01-01\")\n assert c.step == np.timedelta64(1, \"D\")\n assert_equal(c.coordinates, a)\n assert_equal(c.bounds, a[[0, -1]])\n assert c.size == a.size\n assert c.dtype == np.datetime64\n assert c.is_monotonic == True\n assert c.is_descending == None\n assert c.is_uniform == True\n\n # negative step\n c = UniformCoordinates1d(\"2018-01-01\", \"2018-01-01\", \"-1,D\")\n a = np.array([\"2018-01-01\"]).astype(np.datetime64)\n assert c.start == np.datetime64(\"2018-01-01\")\n assert c.stop == np.datetime64(\"2018-01-01\")\n assert c.step == np.timedelta64(-1, \"D\")\n assert_equal(c.coordinates, a)\n assert_equal(c.bounds, a[[-1, 0]])\n assert c.size == a.size\n assert c.dtype == np.datetime64\n assert c.is_monotonic == True\n assert c.is_descending == None\n assert c.is_uniform == True\n\n def test_from_tuple(self):\n # numerical, step\n c = UniformCoordinates1d.from_tuple((0, 10, 0.5))\n assert c.start == 0.0\n assert c.stop == 10.0\n assert c.step == 0.5\n\n # numerical, size\n c = UniformCoordinates1d.from_tuple((0, 10, 20))\n assert c.start == 0.0\n assert c.stop == 10.0\n assert c.size == 20\n\n # datetime, step\n c = UniformCoordinates1d.from_tuple((\"2018-01-01\", \"2018-01-04\", \"1,D\"))\n assert c.start == np.datetime64(\"2018-01-01\")\n assert c.stop == np.datetime64(\"2018-01-04\")\n assert c.step == np.timedelta64(1, \"D\")\n\n # invalid\n with pytest.raises(ValueError, match=\"UniformCoordinates1d.from_tuple expects a tuple\"):\n UniformCoordinates1d.from_tuple((0, 10))\n\n with pytest.raises(ValueError, match=\"UniformCoordinates1d.from_tuple expects a tuple\"):\n UniformCoordinates1d.from_tuple(np.array([0, 10, 0.5]))\n\n def test_copy(self):\n c = UniformCoordinates1d(0, 10, 50, name=\"lat\")\n c2 = c.copy()\n assert c is not c2\n assert c == c2\n\n def test_invalid_init(self):\n with pytest.raises(ValueError):\n UniformCoordinates1d(0, 0, 0)\n\n with pytest.raises(ValueError):\n UniformCoordinates1d(0, 50, 0)\n\n with pytest.raises(ValueError):\n UniformCoordinates1d(0, 50, -10)\n\n with pytest.raises(ValueError):\n UniformCoordinates1d(50, 0, 10)\n\n with pytest.raises(TypeError):\n UniformCoordinates1d(0, \"2018-01-01\", 10)\n\n with pytest.raises(TypeError):\n UniformCoordinates1d(\"2018-01-01\", 50, 10)\n\n with pytest.raises(TypeError):\n UniformCoordinates1d(\"2018-01-01\", \"2018-01-02\", 10)\n\n with pytest.raises(TypeError):\n UniformCoordinates1d(0.0, \"2018-01-01\", \"1,D\")\n\n with pytest.raises(TypeError):\n UniformCoordinates1d(\"2018-01-01\", 50, \"1,D\")\n\n with pytest.raises(TypeError):\n UniformCoordinates1d(0, 50, \"1,D\")\n\n with pytest.raises(ValueError):\n UniformCoordinates1d(\"a\", 50, 10)\n\n with pytest.raises(ValueError):\n UniformCoordinates1d(0, \"b\", 10)\n\n with pytest.raises(ValueError):\n UniformCoordinates1d(0, 50, \"a\")\n\n with pytest.raises(TypeError):\n UniformCoordinates1d()\n\n with pytest.raises(TypeError):\n UniformCoordinates1d(0)\n\n with pytest.raises(TypeError):\n UniformCoordinates1d(0, 50)\n\n with pytest.raises(TypeError):\n UniformCoordinates1d(0, 50, 10, size=6)\n\n with pytest.raises(TypeError):\n UniformCoordinates1d(0, 10, size=20.0)\n\n with pytest.raises(TypeError):\n UniformCoordinates1d(0, 10, size=\"string\")\n\n with pytest.raises(TypeError):\n 
UniformCoordinates1d(\"2018-01-10\", \"2018-01-01\", size=\"1,D\")\n\n\nclass TestUniformCoordinatesEq(object):\n def test_equal(self):\n c1 = UniformCoordinates1d(0, 50, 10)\n c2 = UniformCoordinates1d(0, 50, 10)\n c3 = UniformCoordinates1d(0, 50, 10)\n c4 = UniformCoordinates1d(5, 50, 10)\n c5 = UniformCoordinates1d(0, 60, 10)\n c6 = UniformCoordinates1d(0, 50, 5)\n c7 = UniformCoordinates1d(50, 0, -10)\n\n assert c1 == c2\n assert c1 == c3\n assert c1 != c4\n assert c1 != c5\n assert c1 != c6\n assert c1 != c7\n\n def test_equal_array_coordinates(self):\n c1 = UniformCoordinates1d(0, 50, 10)\n c2 = ArrayCoordinates1d([0, 10, 20, 30, 40, 50])\n c3 = ArrayCoordinates1d([10, 20, 30, 40, 50, 60])\n\n assert c1 == c2\n assert c1 != c3\n\n\nclass TestUniformCoordinatesSerialization(object):\n def test_definition(self):\n # numerical\n c = UniformCoordinates1d(0, 50, 10, name=\"lat\")\n d = c.definition\n assert isinstance(d, dict)\n assert set(d.keys()) == set([\"start\", \"stop\", \"step\", \"name\"])\n json.dumps(d, cls=podpac.core.utils.JSONEncoder) # test serializable\n c2 = UniformCoordinates1d.from_definition(d) # test from_definition\n assert c2 == c\n\n # datetimes\n c = UniformCoordinates1d(\"2018-01-01\", \"2018-01-03\", \"1,D\")\n d = c.definition\n assert isinstance(d, dict)\n assert set(d.keys()) == set([\"start\", \"stop\", \"step\"])\n json.dumps(d, cls=podpac.core.utils.JSONEncoder) # test serializable\n c2 = UniformCoordinates1d.from_definition(d) # test from_definition\n assert c2 == c\n\n def test_invalid_definition(self):\n # incorrect definition\n d = {\"stop\": 50}\n with pytest.raises(ValueError, match='UniformCoordinates1d definition requires \"start\"'):\n UniformCoordinates1d.from_definition(d)\n\n d = {\"start\": 0}\n with pytest.raises(ValueError, match='UniformCoordinates1d definition requires \"stop\"'):\n UniformCoordinates1d.from_definition(d)\n\n def test_from_definition_size(self):\n # numerical\n d = {\"start\": 0, \"stop\": 50, \"size\": 6}\n c = UniformCoordinates1d.from_definition(d)\n assert_equal(c.coordinates, [0, 10, 20, 30, 40, 50])\n\n # datetime, size\n d = {\"start\": \"2018-01-01\", \"stop\": \"2018-01-03\", \"size\": 3}\n c = UniformCoordinates1d.from_definition(d)\n assert_equal(c.coordinates, np.array([\"2018-01-01\", \"2018-01-02\", \"2018-01-03\"]).astype(np.datetime64))\n\n\nclass TestUniformCoordinatesIndexing(object):\n def test_len(self):\n c = UniformCoordinates1d(0, 50, 10)\n assert len(c) == 6\n\n def test_index(self):\n c = UniformCoordinates1d(0, 50, 10, name=\"lat\")\n\n # int\n c2 = c[2]\n assert isinstance(c2, Coordinates1d)\n assert c2.name == c.name\n assert c2.properties == c.properties\n assert_equal(c2.coordinates, [20])\n\n c2 = c[-2]\n assert isinstance(c2, Coordinates1d)\n assert c2.name == c.name\n assert c2.properties == c.properties\n assert_equal(c2.coordinates, [40])\n\n # slice\n c2 = c[:2]\n assert isinstance(c2, UniformCoordinates1d)\n assert c2.name == c.name\n assert c2.properties == c.properties\n assert c2.start == 0\n assert c2.stop == 10\n assert c2.step == 10\n\n c2 = c[2:]\n assert isinstance(c2, UniformCoordinates1d)\n assert c2.name == c.name\n assert c2.properties == c.properties\n assert c2.start == 20\n assert c2.stop == 50\n assert c2.step == 10\n\n c2 = c[::2]\n assert isinstance(c2, UniformCoordinates1d)\n assert c2.name == c.name\n assert c2.properties == c.properties\n assert c2.start == 0\n assert c2.stop == 50\n assert c2.step == 20\n\n c2 = c[1:-1]\n assert isinstance(c2, 
UniformCoordinates1d)\n assert c2.name == c.name\n assert c2.properties == c.properties\n assert c2.start == 10\n assert c2.stop == 40\n assert c2.step == 10\n\n c2 = c[-3:5]\n assert isinstance(c2, UniformCoordinates1d)\n assert c2.name == c.name\n assert c2.properties == c.properties\n assert c2.start == 30\n assert c2.stop == 40\n assert c2.step == 10\n\n c2 = c[::-1]\n assert isinstance(c2, UniformCoordinates1d)\n assert c2.name == c.name\n assert c2.properties == c.properties\n assert c2.start == 50\n assert c2.stop == 0\n assert c2.step == -10\n\n # index array\n c2 = c[[0, 1, 3]]\n assert isinstance(c2, ArrayCoordinates1d)\n assert c2.name == c.name\n assert c2.properties == c.properties\n assert_equal(c2.coordinates, [0, 10, 30])\n\n c2 = c[[3, 1, 0]]\n assert isinstance(c2, ArrayCoordinates1d)\n assert c2.name == c.name\n assert c2.properties == c.properties\n assert_equal(c2.coordinates, [30, 10, 0])\n\n c2 = c[[0, 3, 1]]\n assert isinstance(c2, ArrayCoordinates1d)\n assert c2.name == c.name\n assert c2.properties == c.properties\n assert_equal(c2.coordinates, [0, 30, 10])\n\n c2 = c[[]]\n assert isinstance(c2, ArrayCoordinates1d)\n assert c2.name == c.name\n assert c2.properties == c.properties\n assert_equal(c2.coordinates, [])\n\n c2 = c[0:0]\n assert isinstance(c2, ArrayCoordinates1d)\n assert c2.name == c.name\n assert c2.properties == c.properties\n assert_equal(c2.coordinates, [])\n\n c2 = c[[]]\n assert isinstance(c2, ArrayCoordinates1d)\n assert c2.name == c.name\n assert c2.properties == c.properties\n assert_equal(c2.coordinates, [])\n\n # boolean array\n c2 = c[[True, True, True, False, True, False]]\n assert isinstance(c2, ArrayCoordinates1d)\n assert c2.name == c.name\n assert c2.properties == c.properties\n assert_equal(c2.coordinates, [0, 10, 20, 40])\n\n # invalid\n with pytest.raises(IndexError):\n c[0.3]\n\n with pytest.raises(IndexError):\n c[10]\n\n def test_index_descending(self):\n c = UniformCoordinates1d(50, 0, -10, name=\"lat\")\n\n # int\n c2 = c[2]\n assert isinstance(c2, Coordinates1d)\n assert c2.name == c.name\n assert c2.properties == c.properties\n assert_equal(c2.coordinates, [30])\n\n c2 = c[-2]\n assert isinstance(c2, Coordinates1d)\n assert c2.name == c.name\n assert c2.properties == c.properties\n assert_equal(c2.coordinates, [10])\n\n # slice\n c2 = c[:2]\n assert isinstance(c2, UniformCoordinates1d)\n assert c2.name == c.name\n assert c2.properties == c.properties\n assert c2.start == 50\n assert c2.stop == 40\n assert c2.step == -10\n\n c2 = c[2:]\n assert isinstance(c2, UniformCoordinates1d)\n assert c2.name == c.name\n assert c2.properties == c.properties\n assert c2.start == 30\n assert c2.stop == 0\n assert c2.step == -10\n\n c2 = c[::2]\n assert isinstance(c2, UniformCoordinates1d)\n assert c2.name == c.name\n assert c2.properties == c.properties\n assert c2.start == 50\n assert c2.stop == 0\n assert c2.step == -20\n\n c2 = c[1:-1]\n assert isinstance(c2, UniformCoordinates1d)\n assert c2.name == c.name\n assert c2.properties == c.properties\n assert c2.start == 40\n assert c2.stop == 10\n assert c2.step == -10\n\n c2 = c[-3:5]\n assert isinstance(c2, UniformCoordinates1d)\n assert c2.name == c.name\n assert c2.properties == c.properties\n assert c2.start == 20\n assert c2.stop == 10\n assert c2.step == -10\n\n c2 = c[::-1]\n assert isinstance(c2, UniformCoordinates1d)\n assert c2.name == c.name\n assert c2.properties == c.properties\n assert c2.start == 0\n assert c2.stop == 50\n assert c2.step == 10\n\n # index array\n c2 = c[[0, 1, 
3]]\n assert isinstance(c2, ArrayCoordinates1d)\n assert c2.name == c.name\n assert c2.properties == c.properties\n assert_equal(c2.coordinates, [50, 40, 20])\n\n c2 = c[[3, 1, 0]]\n assert isinstance(c2, ArrayCoordinates1d)\n assert c2.name == c.name\n assert c2.properties == c.properties\n assert_equal(c2.coordinates, [20, 40, 50])\n\n c2 = c[[0, 3, 1]]\n assert isinstance(c2, ArrayCoordinates1d)\n assert c2.name == c.name\n assert c2.properties == c.properties\n assert_equal(c2.coordinates, [50, 20, 40])\n\n # boolean array\n c2 = c[[True, True, True, False, True, False]]\n assert isinstance(c2, ArrayCoordinates1d)\n assert c2.name == c.name\n assert c2.properties == c.properties\n assert_equal(c2.coordinates, [50, 40, 30, 10])\n\n def test_in(self):\n c = UniformCoordinates1d(0, 50, 10, name=\"lat\")\n assert 0 in c\n assert 10 in c\n assert 50 in c\n assert -10 not in c\n assert 60 not in c\n assert 5 not in c\n assert np.datetime64(\"2018\") not in c\n assert \"a\" not in c\n\n c = UniformCoordinates1d(50, 0, -10, name=\"lat\")\n assert 0 in c\n assert 10 in c\n assert 50 in c\n assert -10 not in c\n assert 60 not in c\n assert 5 not in c\n assert np.datetime64(\"2018\") not in c\n assert \"a\" not in c\n\n c = UniformCoordinates1d(\"2020-01-01\", \"2020-01-09\", \"2,D\", name=\"time\")\n assert np.datetime64(\"2020-01-01\") in c\n assert np.datetime64(\"2020-01-03\") in c\n assert np.datetime64(\"2020-01-09\") in c\n assert np.datetime64(\"2020-01-11\") not in c\n assert np.datetime64(\"2020-01-02\") not in c\n assert 10 not in c\n assert \"a\" not in c\n\n\nclass TestArrayCoordinatesAreaBounds(object):\n def test_get_area_bounds_numerical(self):\n c = UniformCoordinates1d(0, 50, 10)\n\n # point\n area_bounds = c.get_area_bounds(None)\n assert_equal(area_bounds, [0.0, 50.0])\n\n # uniform\n area_bounds = c.get_area_bounds(0.5)\n assert_equal(area_bounds, [-0.5, 50.5])\n\n # segment\n area_bounds = c.get_area_bounds([-0.2, 0.7])\n assert_equal(area_bounds, [-0.2, 50.7])\n\n # polygon (i.e. 
there would be corresponding offsets for another dimension)\n        area_bounds = c.get_area_bounds([-0.2, -0.5, 0.7, 0.5])\n        assert_equal(area_bounds, [-0.5, 50.7])\n\n    def test_get_area_bounds_datetime(self):\n        c = UniformCoordinates1d(\"2018-01-01\", \"2018-01-04\", \"1,D\")\n\n        # point\n        area_bounds = c.get_area_bounds(None)\n        assert_equal(area_bounds, make_coord_array([\"2018-01-01\", \"2018-01-04\"]))\n\n        # uniform\n        area_bounds = c.get_area_bounds(\"1,D\")\n        assert_equal(area_bounds, make_coord_array([\"2017-12-31\", \"2018-01-05\"]))\n\n        area_bounds = c.get_area_bounds(\"1,M\")\n        assert_equal(area_bounds, make_coord_array([\"2017-12-01\", \"2018-02-04\"]))\n\n        area_bounds = c.get_area_bounds(\"1,Y\")\n        assert_equal(area_bounds, make_coord_array([\"2017-01-01\", \"2019-01-04\"]))\n\n        # segment\n        area_bounds = c.get_area_bounds([\"0,h\", \"12,h\"])\n        assert_equal(area_bounds, make_coord_array([\"2018-01-01 00:00\", \"2018-01-04 12:00\"]))\n\n\nclass TestUniformCoordinatesSelection(object):\n    def test_select_all_shortcut(self):\n        c = UniformCoordinates1d(20.0, 70.0, 10.0)\n\n        s = c.select([0, 100])\n        assert s.start == 20.0\n        assert s.stop == 70.0\n        assert s.step == 10.0\n\n        s, I = c.select([0, 100], return_index=True)\n        assert s.start == 20.0\n        assert s.stop == 70.0\n        assert s.step == 10.0\n        assert_equal(c[I], s)\n\n    def test_select_none_shortcut(self):\n        c = UniformCoordinates1d(20.0, 70.0, 10.0)\n\n        # above\n        s = c.select([100, 200])\n        assert isinstance(s, ArrayCoordinates1d)\n        assert_equal(s.coordinates, [])\n\n        s, I = c.select([100, 200], return_index=True)\n        assert isinstance(s, ArrayCoordinates1d)\n        assert_equal(s.coordinates, [])\n        assert c[I] == s\n\n        # below\n        s = c.select([0, 5])\n        assert isinstance(s, ArrayCoordinates1d)\n        assert_equal(s.coordinates, [])\n\n        s, I = c.select([0, 5], return_index=True)\n        assert isinstance(s, ArrayCoordinates1d)\n        assert_equal(s.coordinates, [])\n        assert c[I] == s\n\n    def test_select_ascending(self):\n        c = UniformCoordinates1d(20.0, 70.0, 10.0)\n\n        # inner\n        s = c.select([35.0, 55.0])\n        assert s.start == 40.0\n        assert s.stop == 50.0\n        assert s.step == 10.0\n\n        s, I = c.select([35.0, 55.0], return_index=True)\n        assert s.start == 40.0\n        assert s.stop == 50.0\n        assert s.step == 10.0\n        assert c[I] == s\n\n        # inner with aligned bounds\n        s = c.select([30.0, 60.0])\n        assert s.start == 30.0\n        assert s.stop == 60.0\n        assert s.step == 10.0\n\n        s, I = c.select([30.0, 60.0], return_index=True)\n        assert s.start == 30.0\n        assert s.stop == 60.0\n        assert s.step == 10.0\n        assert c[I] == s\n\n        # above\n        s = c.select([45, 100])\n        assert s.start == 50.0\n        assert s.stop == 70.0\n        assert s.step == 10.0\n\n        s, I = c.select([45, 100], return_index=True)\n        assert s.start == 50.0\n        assert s.stop == 70.0\n        assert s.step == 10.0\n        assert c[I] == s\n\n        # below\n        s = c.select([5, 55])\n        assert s.start == 20.0\n        assert s.stop == 50.0\n        assert s.step == 10.0\n\n        s, I = c.select([5, 55], return_index=True)\n        assert s.start == 20.0\n        assert s.stop == 50.0\n        assert s.step == 10.0\n        assert c[I] == s\n\n        # between coordinates\n        s = c.select([52, 55])\n        assert isinstance(s, ArrayCoordinates1d)\n        assert_equal(s.coordinates, [])\n\n        s, I = c.select([52, 55], return_index=True)\n        assert isinstance(s, ArrayCoordinates1d)\n        assert_equal(s.coordinates, [])\n        assert_equal(c.coordinates[I], [])\n\n        # backwards bounds\n        s = c.select([70, 30])\n        assert isinstance(s, ArrayCoordinates1d)\n        assert_equal(s.coordinates, [])\n\n        s, I = c.select([70, 30], return_index=True)\n        assert isinstance(s, 
ArrayCoordinates1d)\n assert_equal(s.coordinates, [])\n assert_equal(c.coordinates[I], [])\n\n def test_select_descending(self):\n c = UniformCoordinates1d(70.0, 20.0, -10.0)\n\n # inner\n s = c.select([35.0, 55.0])\n assert s.start == 50.0\n assert s.stop == 40.0\n assert s.step == -10.0\n\n s, I = c.select([35.0, 55.0], return_index=True)\n assert s.start == 50.0\n assert s.stop == 40.0\n assert s.step == -10.0\n assert c[I] == s\n\n # inner with aligned bounds\n s = c.select([30.0, 60.0])\n assert s.start == 60.0\n assert s.stop == 30.0\n assert s.step == -10.0\n\n s, I = c.select([30.0, 60.0], return_index=True)\n assert s.start == 60.0\n assert s.stop == 30.0\n assert s.step == -10.0\n assert c[I] == s\n\n # above\n s = c.select([45, 100])\n assert s.start == 70.0\n assert s.stop == 50.0\n assert s.step == -10.0\n\n s, I = c.select([45, 100], return_index=True)\n assert s.start == 70.0\n assert s.stop == 50.0\n assert s.step == -10.0\n assert c[I] == s\n\n # below\n s = c.select([5, 55])\n assert s.start == 50.0\n assert s.stop == 20.0\n assert s.step == -10.0\n\n s, I = c.select([5, 55], return_index=True)\n assert s.start == 50.0\n assert s.stop == 20.0\n assert s.step == -10.0\n assert c[I] == s\n\n # between coordinates\n s = c.select([52, 55])\n assert isinstance(s, ArrayCoordinates1d)\n assert_equal(s.coordinates, [])\n\n s, I = c.select([52, 55], return_index=True)\n assert isinstance(s, ArrayCoordinates1d)\n assert_equal(s.coordinates, [])\n assert_equal(c.coordinates[I], [])\n\n # backwards bounds\n s = c.select([70, 30])\n assert isinstance(s, ArrayCoordinates1d)\n assert_equal(s.coordinates, [])\n\n s, I = c.select([70, 30], return_index=True)\n assert isinstance(s, ArrayCoordinates1d)\n assert_equal(s.coordinates, [])\n assert_equal(c.coordinates[I], [])\n\n def test_select_outer(self):\n c = UniformCoordinates1d(20.0, 70.0, 10.0)\n\n # inner\n s = c.select([35.0, 55.0], outer=True)\n assert s.start == 30.0\n assert s.stop == 60.0\n assert s.step == 10.0\n\n s, I = c.select([35.0, 55.0], outer=True, return_index=True)\n assert s.start == 30.0\n assert s.stop == 60.0\n assert s.step == 10.0\n assert c[I] == s\n\n # inner with aligned bounds\n s = c.select([30.0, 60.0], outer=True)\n assert s.start == 30.0\n assert s.stop == 60.0\n assert s.step == 10.0\n\n s, I = c.select([30.0, 60.0], outer=True, return_index=True)\n assert s.start == 30.0\n assert s.stop == 60.0\n assert s.step == 10.0\n assert c[I] == s\n\n # above\n s = c.select([45, 100], outer=True)\n assert s.start == 40.0\n assert s.stop == 70.0\n assert s.step == 10.0\n\n s, I = c.select([45, 100], outer=True, return_index=True)\n assert s.start == 40.0\n assert s.stop == 70.0\n assert s.step == 10.0\n assert c[I] == s\n\n # below\n s = c.select([5, 55], outer=True)\n assert s.start == 20.0\n assert s.stop == 60.0\n assert s.step == 10.0\n\n s, I = c.select([5, 55], outer=True, return_index=True)\n assert s.start == 20.0\n assert s.stop == 60.0\n assert s.step == 10.0\n assert c[I] == s\n\n # between coordinates\n s = c.select([52, 55], outer=True)\n assert s.start == 50.0\n assert s.stop == 60.0\n assert s.step == 10.0\n\n s, I = c.select([52, 55], outer=True, return_index=True)\n assert s.start == 50.0\n assert s.stop == 60.0\n assert s.step == 10.0\n assert c[I] == s\n\n # backwards bounds\n s = c.select([70, 30], outer=True)\n assert isinstance(s, ArrayCoordinates1d)\n assert_equal(s.coordinates, [])\n\n s, I = c.select([70, 30], outer=True, return_index=True)\n assert isinstance(s, ArrayCoordinates1d)\n 
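# NOTE: backwards bounds select nothing even with outer=True; an empty\n        # ArrayCoordinates1d is returned rather than an error being raised.\n        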
assert_equal(s.coordinates, [])\n        assert_equal(c.coordinates[I], [])\n\n    def test_select_time_variable_precision(self):\n        c = UniformCoordinates1d(\"2012-05-19\", \"2012-05-20\", \"1,D\", name=\"time\")\n        c2 = UniformCoordinates1d(\"2012-05-20T12:00:00\", \"2012-05-21T12:00:00\", \"1,D\", name=\"time\")\n        s = c.select(c2.bounds, outer=True)\n        s1 = c.select(c2.bounds, outer=False)\n        s2 = c2.select(c.bounds)\n        assert s.size == 1\n        assert s1.size == 0\n        assert s2.size == 1\n\n\nclass TestUniformCoordinatesMethods(object):\n    def test_unique(self):\n        c = UniformCoordinates1d(1, 5, step=1)\n        c2 = c.unique()\n        assert c2 == c and c2 is not c\n\n        c2, I = c.unique(return_index=True)\n        assert c2 == c and c2 is not c\n        assert c2 == c[I]\n\n    def test_simplify(self):\n        c = UniformCoordinates1d(1, 5, step=1)\n        c2 = c.simplify()\n        assert c2 == c and c2 is not c\n\n        # reversed, step -2\n        c = UniformCoordinates1d(4, 0, step=-2)\n        c2 = c.simplify()\n        assert c2 == c and c2 is not c\n\n        # time\n        c = UniformCoordinates1d(\"2020-01-01\", \"2020-01-05\", step=\"1,D\")\n        c2 = c.simplify()\n        assert c2 == c and c2 is not c\n\n        # time, reversed, step -3,h\n        c = UniformCoordinates1d(\"2020-01-01T12:00\", \"2020-01-01T08:00\", step=\"-3,h\")\n        c2 = c.simplify()\n        assert c2 == c and c2 is not c\n\n    def test_flatten(self):\n        c = UniformCoordinates1d(1, 5, step=1)\n        c2 = c.flatten()\n        assert c2 == c and c2 is not c\n\n    def test_reshape(self):\n        c = UniformCoordinates1d(1, 6, step=1, name=\"lat\")\n        c2 = c.reshape((2, 3))\n        assert c2 == ArrayCoordinates1d(c.coordinates.reshape((2, 3)), name=\"lat\")\n\n    def test_issubset(self):\n        c1 = UniformCoordinates1d(2, 1, step=-1)\n        c2 = UniformCoordinates1d(1, 3, step=1)\n        c3 = UniformCoordinates1d(0, 2, step=1)\n        c4 = UniformCoordinates1d(1, 4, step=0.5)\n        c5 = UniformCoordinates1d(1.5, 2.5, step=0.5)\n        c6 = UniformCoordinates1d(1.4, 2.4, step=0.5)\n        c7 = UniformCoordinates1d(1.4, 2.4, step=10)\n\n        # self\n        assert c1.issubset(c1)\n\n        # subsets\n        assert c1.issubset(c2)\n        assert c1.issubset(c3)\n        assert c1.issubset(c4)\n        assert c5.issubset(c4)\n        assert c7.issubset(c6)\n\n        # not subsets\n        assert not c2.issubset(c1)\n        assert not c2.issubset(c3)\n        assert not c3.issubset(c1)\n        assert not c3.issubset(c2)\n        assert not c4.issubset(c1)\n        assert not c6.issubset(c4)\n\n    def test_issubset_datetime(self):\n        c1 = UniformCoordinates1d(\"2020-01-01\", \"2020-01-03\", \"1,D\")\n        c2 = UniformCoordinates1d(\"2020-01-01\", \"2020-01-03\", \"2,D\")\n        c3 = UniformCoordinates1d(\"2020-01-01\", \"2020-01-05\", \"1,D\")\n        c4 = UniformCoordinates1d(\"2020-01-05\", \"2020-01-01\", \"-2,D\")\n\n        # self\n        assert c1.issubset(c1)\n\n        # same resolution\n        assert c1.issubset(c3)\n        assert c2.issubset(c1)\n        assert c2.issubset(c4)\n        assert not c1.issubset(c2)\n        assert not c1.issubset(c4)\n        assert not c3.issubset(c1)\n\n        # different resolution\n        c5 = UniformCoordinates1d(\"2020-01-01T00:00\", \"2020-01-03T00:00\", \"1,D\")\n        c6 = UniformCoordinates1d(\"2020-01-01T00:00\", \"2020-01-03T00:00\", \"6,h\")\n        assert c1.issubset(c5)\n        assert c5.issubset(c1)\n        assert c1.issubset(c6)\n        assert not c6.issubset(c1)\n\n    def test_issubset_dtype(self):\n        c1 = UniformCoordinates1d(0, 10, step=1)\n        c2 = UniformCoordinates1d(\"2018\", \"2020\", step=\"1,Y\")\n        assert not c1.issubset(c2)\n        assert not c2.issubset(c1)\n\n    def test_issubset_array_coordinates(self):\n        u = UniformCoordinates1d(start=1, stop=3, step=1)\n        a1 = ArrayCoordinates1d([1, 3, 2])\n        a2 = ArrayCoordinates1d([1, 2, 3])\n        a3 = ArrayCoordinates1d([1, 3, 4])\n        e = ArrayCoordinates1d([])\n\n        # subsets\n        assert u.issubset(a1)\n        assert u.issubset(a2)\n\n        # not subsets\n        assert not u.issubset(a3)\n        assert not u.issubset(e)\n\n    def test_issubset_coordinates(self):\n        u = UniformCoordinates1d(1, 3, 1, name=\"lat\")\n        c1 = Coordinates([[1, 2, 3], [10, 20, 30]], dims=[\"lat\", \"lon\"])\n        c2 = Coordinates([[1, 2, 4], [10, 20, 30]], dims=[\"lat\", \"lon\"])\n        c3 = Coordinates([[10, 20, 30]], dims=[\"alt\"])\n\n        assert u.issubset(c1)\n        assert not u.issubset(c2)\n        assert not u.issubset(c3)\n"
] | [
[
"numpy.timedelta64",
"numpy.testing.assert_equal",
"numpy.datetime64",
"numpy.array",
"numpy.linspace"
]
] |