index (int64, 0-100k) | blob_id (string, length 40) | code (string, length 7-7.27M) | steps (list, length 1-1.25k) | error (bool, 2 classes) |
---|---|---|---|---|
98,500 | d2ba59d3d6ee4043c3a3e20604b23293ed7e394d |
from protocols.forms import forms


class DryForm(forms.VerbForm):

    name = "Dissolve"
    slug = "dissolve"
    has_component = True

    edit_to_what = forms.CharField(required = False, help_text = 'sample, mastermix, tube, etc')
    duration = forms.IntegerField(help_text='this is the minimal time this should take', initial = 'sec')
    using_what = forms.CharField(required = False, help_text = 'rotator, shaker, manual etc')
    conditional_statement = forms.CharField(required = False, help_text ='if X happens, do Y')
|
[
"from protocols.forms import forms\n\n\nclass DryForm(forms.VerbForm):\n\n name = \"Dissolve\"\n slug = \"dissolve\"\n has_component = True\n \n edit_to_what = forms.CharField(required = False, help_text = 'sample, mastermix, tube, etc')\n duration = forms.IntegerField(help_text='this is the minimal time this should take', initial = 'sec')\n using_what = forms.CharField(required = False, help_text = 'rotator, shaker, manual etc')\n conditional_statement = forms.CharField(required = False, help_text ='if X happens, do Y')\n\n",
"from protocols.forms import forms\n\n\nclass DryForm(forms.VerbForm):\n name = 'Dissolve'\n slug = 'dissolve'\n has_component = True\n edit_to_what = forms.CharField(required=False, help_text=\n 'sample, mastermix, tube, etc')\n duration = forms.IntegerField(help_text=\n 'this is the minimal time this should take', initial='sec')\n using_what = forms.CharField(required=False, help_text=\n 'rotator, shaker, manual etc')\n conditional_statement = forms.CharField(required=False, help_text=\n 'if X happens, do Y')\n",
"<import token>\n\n\nclass DryForm(forms.VerbForm):\n name = 'Dissolve'\n slug = 'dissolve'\n has_component = True\n edit_to_what = forms.CharField(required=False, help_text=\n 'sample, mastermix, tube, etc')\n duration = forms.IntegerField(help_text=\n 'this is the minimal time this should take', initial='sec')\n using_what = forms.CharField(required=False, help_text=\n 'rotator, shaker, manual etc')\n conditional_statement = forms.CharField(required=False, help_text=\n 'if X happens, do Y')\n",
"<import token>\n\n\nclass DryForm(forms.VerbForm):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
98,501 | c4616fa31556c3efc6921b6203b0a2c5424d0fef |
'''
Nombre de archivo:
    +recursos.py
Descripción:
    +Librería con recursos varios para la aplicación
'''

#librerías
from IPython.display import Javascript
import sys, os, glob, datetime as dt, numpy as np, random, collections as coll
import pandas as pd, seaborn as sns, matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc, classification_report, confusion_matrix
from pyspark.sql import SparkSession, functions as F, window as W, DataFrame as DF
from pyspark.sql.types import (DateType, IntegerType, FloatType, DoubleType, LongType, StringType, StructField, StructType, TimestampType)
from pyspark.ml import functions as mlF, Pipeline as pipe
from pyspark.ml.stat import Correlation
from pyspark.ml.linalg import Vectors
from pyspark.ml.feature import Imputer, StandardScaler, MinMaxScaler, Normalizer, PCA, StringIndexer, OneHotEncoder, VectorAssembler
from pyspark.ml.regression import LinearRegression
from pyspark.ml.classification import LogisticRegression, DecisionTreeClassifier, DecisionTreeClassificationModel, RandomForestClassifier, GBTClassifier
from pyspark.ml.evaluation import BinaryClassificationEvaluator
from pyspark.mllib.evaluation import BinaryClassificationMetrics, MulticlassMetrics
from pyspark.ml.tuning import CrossValidator, CrossValidatorModel, ParamGridBuilder
from functools import reduce
from difflib import SequenceMatcher as seqmatch
import findspark
findspark.init('/usr/lib/python3.7/site-packages/pyspark')
# !pip install -q handyspark
# from handyspark import *

#variables postgres
# args = sys.argv
# print(args)
#estos parámetros corresponden a la instancia de postgres dentro del ambiente de docker que se adjunta al trabajo
host = '10.7.84.102'
port = '5432'
user = 'postgres'
password = 'testPassword'

#sesión de spark
spark = SparkSession.builder\
    .master("local")\
    .appName("Main")\
    .config('spark.ui.port', '4050')\
    .config("spark.driver.extraClassPath", "postgresql-42.2.14.jar") \
    .config("spark.executor.extraClassPath", "postgresql-42.2.14.jar") \
    .config("spark.jars", "postgresql-42.2.14.jar") \
    .getOrCreate()
spark.sparkContext.setLogLevel("ERROR")

#funciones
#función para almacenar en base de datos
def escribir_df(df, host=host, port=port, user=user, password=password, table='table'):
    try:
        #almacenamiento en base de datos
        # .option("driver", "postgresql-42.2.14.jar") \
        df \
            .write \
            .format("jdbc") \
            .mode('overwrite') \
            .option("url", "jdbc:postgresql://"+host+":"+port+"/postgres") \
            .option("user", user) \
            .option("password", password) \
            .option("dbtable", table) \
            .save()
        return True
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1], exc_tb.tb_lineno, exc_obj)
'''
La ejecución de spark es una ejecución vaga ("lazy"), si se intenta almacenar un dataframe en una tabla la cual es a su vez su propia fuente de datos,
dicha tabla será sobreescrita con valores nulos quedando vacía, por lo tanto en dichos casos se recomienda utilizar una tabla temporal.
'''
#función para cargar de base de datos
def leer_df(host=host, port=port, user=user, password=password, table='table'):
    try:
        #lectura desde base de datos hacia dataframe temporal
        # .option("driver", "postgresql-42.2.14.jar") \
        df = spark \
            .read \
            .format("jdbc") \
            .option("url", "jdbc:postgresql://"+host+":"+port+"/postgres") \
            .option("user", user) \
            .option("password", password) \
            .option("dbtable", table) \
            .load()
        df.count()
        return df
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1], exc_tb.tb_lineno, exc_obj)

#función columnas-vector
def cols2vec(dfin, inputcols=[], outputcol='features', label='label', lab_alias='label', print_=False):
    try:
        assy = VectorAssembler(inputCols=inputcols, outputCol=outputcol, handleInvalid='skip')
        dfout = assy.transform(dfin)
        if lab_alias:
            dfout = dfout.select([outputcol, F.col(label).alias(lab_alias)])
        else:
            dfout = dfout.select([outputcol])
        if print_: dfout.show(10, truncate=False)
        return dfout
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1], exc_tb.tb_lineno, exc_obj)

#función vector-columnas
def vec2cols(dfin, inputcol='features', outputcols=[], label='label', lab_alias='label', print_=False, prediction=None):
    try:
        if lab_alias:
            if prediction:
                dfout = dfin.select(inputcol, label, prediction).withColumn('temp', mlF.vector_to_array(inputcol)) \
                    .select([F.col('temp')[i].alias(outputcols[i]) for i in range(len(outputcols))] + [F.col(label).alias(lab_alias)] + [F.col(prediction)])
            else:
                dfout = dfin.select(inputcol, label).withColumn('temp', mlF.vector_to_array(inputcol)) \
                    .select([F.col('temp')[i].alias(outputcols[i]) for i in range(len(outputcols))] + [F.col(label).alias(lab_alias)])
        else:
            dfout = dfin.select(inputcol, label).withColumn('temp', mlF.vector_to_array(inputcol)) \
                .select([F.col('temp')[i].alias(outputcols[i]) for i in range(len(outputcols))])
        if print_: dfout.show(10, truncate=False)
        return dfout
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1], exc_tb.tb_lineno, exc_obj)

#función de graficación de correlaciones
def plot_corr(df=None, inputcols=[]):
    try:
        sns.set(font_scale=1.5)
        dfvec = cols2vec(df, inputcols=inputcols, outputcol='features')
        dfscaled = StandardScaler(inputCol='features', outputCol='scaled', withStd=True, withMean=True).fit(dfvec).transform(dfvec).select(['scaled', 'label'])
        pearson_matrix = Correlation.corr(dfscaled, column='scaled', method='pearson').collect()[0][0]
        dfcols = vec2cols(dfscaled, inputcol='scaled', outputcols=inputcols)
        print('\nMapa de calor')
        grid_kws = {"height_ratios":(1,.05), "hspace":.2}
        f,(ax,cbar_ax) = plt.subplots(2, gridspec_kw=grid_kws, figsize=(24,8))
        sns.heatmap(pearson_matrix.toArray(), yticklabels=inputcols, xticklabels=inputcols, mask=np.triu(pearson_matrix.toArray()),
                    annot=True, fmt=".2f", linewidths=.5, cmap=sns.diverging_palette(220,20,as_cmap=True), ax=ax, cbar_ax=cbar_ax, cbar_kws={"orientation": "horizontal"})
        plt.show()
        print('\nGráfico de parcela')
        sns.pairplot(dfcols.toPandas(), height=2, aspect=16/9, corner=True, hue='label')
        plt.show()
        return dfscaled
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1], exc_tb.tb_lineno, exc_obj)

#función de graficación ROC
def plot_metrics(dfcoll=None, ver=1, metric=None):
    try:
        sns.set(font_scale=1)
        fpr, tpr, thresholds = roc_curve(np.asarray(list(i[1] for i in dfcoll)), np.asarray(list(i[4][1] for i in dfcoll)))
        roc_auc = auc(fpr, tpr)
        conf_mat = confusion_matrix(list(i[1] for i in dfcoll), list(i[5] for i in dfcoll))
        if ver==1:
            fig,ax = plt.subplots(1,2, figsize=(12,4))
            ax[0].plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
            ax[0].plot([0, 1], [0, 1], 'k--')
            ax[0].set_xlim([-0.05, 1.0]), ax[0].set_ylim([0.0, 1.05])
            ax[0].set_xlabel('Falsos positivos'), ax[0].set_ylabel('Verdaderos positivos')
            ax[0].set_title('Curva ROC'), ax[0].legend(loc="lower right")
            sns.heatmap(conf_mat, annot=True, fmt='.0f', ax=ax[1])
            ax[1].set_title('Matriz de confusión')
            plt.show()
        else:
            fig, axs = plt.subplots(1, 2, figsize=(12,4))
            metric.plot_roc_curve(ax=axs[0])
            metric.plot_pr_curve(ax=axs[1])
            plt.show()
        return (roc_auc, fpr, tpr, thresholds, conf_mat)
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1], exc_tb.tb_lineno, exc_obj)

def plot_bound(trues, falses, n):
    try:
        fig,ax = plt.subplots(figsize=(12,4))
        ax.scatter(list(range(n)), trues[:n], s=10, alpha=0.7, c='r', marker="o", label='1')
        ax.scatter(list(range(n)), falses[:n], s=10, alpha=0.7, c='b', marker="s", label='0')
        plt.axhline(.5, color='green')
        plt.legend(loc='upper right'), ax.set_title('Límite de decisión')
        ax.set_xlabel('Observaciones'), ax.set_ylabel('Predicción de probabilidad')
        plt.show()
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1], exc_tb.tb_lineno, exc_obj)
|
[
"'''\nNombre de archivo:\n +recursos.py\nDescripción: \n +Librería con recursos varios para la aplicación\n'''\n\n#librerías\nfrom IPython.display import Javascript\nimport sys, os, glob, datetime as dt, numpy as np, random, collections as coll\nimport pandas as pd, seaborn as sns, matplotlib.pyplot as plt\nfrom sklearn.metrics import roc_curve, auc, classification_report, confusion_matrix\nfrom pyspark.sql import SparkSession, functions as F, window as W, DataFrame as DF\nfrom pyspark.sql.types import (DateType, IntegerType, FloatType, DoubleType, LongType, StringType, StructField, StructType, TimestampType)\nfrom pyspark.ml import functions as mlF, Pipeline as pipe\nfrom pyspark.ml.stat import Correlation\nfrom pyspark.ml.linalg import Vectors\nfrom pyspark.ml.feature import Imputer, StandardScaler, MinMaxScaler, Normalizer, PCA, StringIndexer, OneHotEncoder, VectorAssembler\nfrom pyspark.ml.regression import LinearRegression\nfrom pyspark.ml.classification import LogisticRegression, DecisionTreeClassifier, DecisionTreeClassificationModel, RandomForestClassifier, GBTClassifier\nfrom pyspark.ml.evaluation import BinaryClassificationEvaluator\nfrom pyspark.mllib.evaluation import BinaryClassificationMetrics, MulticlassMetrics\nfrom pyspark.ml.tuning import CrossValidator, CrossValidatorModel, ParamGridBuilder\nfrom functools import reduce\nfrom difflib import SequenceMatcher as seqmatch\nimport findspark\nfindspark.init('/usr/lib/python3.7/site-packages/pyspark')\n# !pip install -q handyspark\n# from handyspark import *\n\n#variables postgres\n# args = sys.argv\n# print(args)\n#estos parámetros corresponden a la instancia de postgres dentro del ambiente de docker que se adjunta al trabajo\nhost = '10.7.84.102'\nport = '5432'\nuser = 'postgres'\npassword = 'testPassword'\n\n#sesión de spark\nspark = SparkSession.builder\\\n .master(\"local\")\\\n .appName(\"Main\")\\\n .config('spark.ui.port', '4050')\\\n .config(\"spark.driver.extraClassPath\", \"postgresql-42.2.14.jar\") \\\n .config(\"spark.executor.extraClassPath\", \"postgresql-42.2.14.jar\") \\\n .config(\"spark.jars\", \"postgresql-42.2.14.jar\") \\\n .getOrCreate()\nspark.sparkContext.setLogLevel(\"ERROR\")\n\n#funciones\n#función para almacenar en base de datos\ndef escribir_df(df, host=host, port=port, user=user, password=password, table='table'):\n try:\n #almacenamiento en base de datos\n # .option(\"driver\", \"postgresql-42.2.14.jar\") \\\n df \\\n .write \\\n .format(\"jdbc\") \\\n .mode('overwrite') \\\n .option(\"url\", \"jdbc:postgresql://\"+host+\":\"+port+\"/postgres\") \\\n .option(\"user\", user) \\\n .option(\"password\", password) \\\n .option(\"dbtable\", table) \\\n .save()\n return True\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1], exc_tb.tb_lineno, exc_obj)\n'''\nLa ejecución de spark es una ejecución vaga (\"lazy\"), si se intenta almacenar un dataframe en una tabla la cual es a su vez su propia fuente de datos, \ndicha tabla será sobreescrita con valores nulos quedando vacía, por lo tanto en dichos casos se recomienda utilizar una tabla temporal.\n'''\n#función para cargar de base de datos\ndef leer_df(host=host, port=port, user=user, password=password, table='table'):\n try:\n #lectura desde base de datos hacia dataframe temporal\n # .option(\"driver\", \"postgresql-42.2.14.jar\") \\\n df = spark \\\n .read \\\n .format(\"jdbc\") \\\n .option(\"url\", \"jdbc:postgresql://\"+host+\":\"+port+\"/postgres\") \\\n 
.option(\"user\", user) \\\n .option(\"password\", password) \\\n .option(\"dbtable\", table) \\\n .load()\n df.count()\n return df\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1], exc_tb.tb_lineno, exc_obj)\n\n#función columnas-vector\ndef cols2vec(dfin, inputcols=[], outputcol='features', label='label', lab_alias='label', print_=False):\n try:\n assy = VectorAssembler(inputCols=inputcols, outputCol=outputcol, handleInvalid='skip')\n dfout = assy.transform(dfin)\n if lab_alias:\n dfout = dfout.select([outputcol, F.col(label).alias(lab_alias)])\n else:\n dfout = dfout.select([outputcol])\n if print_: dfout.show(10, truncate=False)\n return dfout\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1], exc_tb.tb_lineno, exc_obj)\n\n#función vector-columnas\ndef vec2cols(dfin, inputcol='features', outputcols=[], label='label', lab_alias='label', print_=False, prediction=None):\n try:\n if lab_alias:\n if prediction:\n dfout = dfin.select(inputcol, label, prediction).withColumn('temp', mlF.vector_to_array(inputcol)) \\\n .select([F.col('temp')[i].alias(outputcols[i]) for i in range(len(outputcols))] + [F.col(label).alias(lab_alias)] + [F.col(prediction)])\n else:\n dfout = dfin.select(inputcol, label).withColumn('temp', mlF.vector_to_array(inputcol)) \\\n .select([F.col('temp')[i].alias(outputcols[i]) for i in range(len(outputcols))] + [F.col(label).alias(lab_alias)])\n else:\n dfout = dfin.select(inputcol, label).withColumn('temp', mlF.vector_to_array(inputcol)) \\\n .select([F.col('temp')[i].alias(outputcols[i]) for i in range(len(outputcols))])\n if print_: dfout.show(10, truncate=False)\n return dfout\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1], exc_tb.tb_lineno, exc_obj)\n\n#función de graficación de correlaciones\ndef plot_corr(df=None, inputcols=[]):\n try:\n sns.set(font_scale=1.5)\n dfvec = cols2vec(df, inputcols=inputcols, outputcol='features')\n dfscaled = StandardScaler(inputCol='features', outputCol='scaled', withStd=True, withMean=True).fit(dfvec).transform(dfvec).select(['scaled', 'label'])\n pearson_matrix = Correlation.corr(dfscaled, column='scaled', method='pearson').collect()[0][0]\n dfcols = vec2cols(dfscaled, inputcol='scaled', outputcols=inputcols)\n print('\\nMapa de calor')\n grid_kws = {\"height_ratios\":(1,.05), \"hspace\":.2}\n f,(ax,cbar_ax) = plt.subplots(2, gridspec_kw=grid_kws, figsize=(24,8))\n sns.heatmap(pearson_matrix.toArray(), yticklabels=inputcols, xticklabels=inputcols, mask=np.triu(pearson_matrix.toArray()),\n annot=True, fmt=\".2f\", linewidths=.5, cmap=sns.diverging_palette(220,20,as_cmap=True), ax=ax, cbar_ax=cbar_ax, cbar_kws={\"orientation\": \"horizontal\"})\n plt.show()\n print('\\nGráfico de parcela')\n sns.pairplot(dfcols.toPandas(), height=2, aspect=16/9, corner=True, hue='label')\n plt.show()\n return dfscaled\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1], exc_tb.tb_lineno, exc_obj)\n\n#función de graficación ROC\ndef plot_metrics(dfcoll=None, ver=1, metric=None):\n try:\n sns.set(font_scale=1)\n fpr, tpr, thresholds = roc_curve(np.asarray(list(i[1] for i in dfcoll)), np.asarray(list(i[4][1] for i in dfcoll)))\n roc_auc = auc(fpr, tpr)\n conf_mat = confusion_matrix(list(i[1] for 
i in dfcoll), list(i[5] for i in dfcoll))\n if ver==1:\n fig,ax = plt.subplots(1,2, figsize=(12,4))\n ax[0].plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)\n ax[0].plot([0, 1], [0, 1], 'k--')\n ax[0].set_xlim([-0.05, 1.0]), ax[0].set_ylim([0.0, 1.05])\n ax[0].set_xlabel('Falsos positivos'), ax[0].set_ylabel('Verdaderos positivos')\n ax[0].set_title('Curva ROC'), ax[0].legend(loc=\"lower right\")\n sns.heatmap(conf_mat, annot=True, fmt='.0f', ax=ax[1])\n ax[1].set_title('Matriz de confusión')\n plt.show()\n else:\n fig, axs = plt.subplots(1, 2, figsize=(12,4))\n metric.plot_roc_curve(ax=axs[0])\n metric.plot_pr_curve(ax=axs[1])\n plt.show()\n return (roc_auc, fpr, tpr, thresholds, conf_mat)\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1], exc_tb.tb_lineno, exc_obj)\n\ndef plot_bound(trues, falses, n):\n try:\n fig,ax = plt.subplots(figsize=(12,4))\n ax.scatter(list(range(n)), trues[:n], s=10, alpha=0.7, c='r', marker=\"o\", label='1')\n ax.scatter(list(range(n)), falses[:n], s=10, alpha=0.7, c='b', marker=\"s\", label='0')\n plt.axhline(.5, color='green')\n plt.legend(loc='upper right'), ax.set_title('Límite de decisión')\n ax.set_xlabel('Observaciones'), ax.set_ylabel('Predicción de probabilidad')\n plt.show()\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1], exc_tb.tb_lineno, exc_obj)",
"<docstring token>\nfrom IPython.display import Javascript\nimport sys, os, glob, datetime as dt, numpy as np, random, collections as coll\nimport pandas as pd, seaborn as sns, matplotlib.pyplot as plt\nfrom sklearn.metrics import roc_curve, auc, classification_report, confusion_matrix\nfrom pyspark.sql import SparkSession, functions as F, window as W, DataFrame as DF\nfrom pyspark.sql.types import DateType, IntegerType, FloatType, DoubleType, LongType, StringType, StructField, StructType, TimestampType\nfrom pyspark.ml import functions as mlF, Pipeline as pipe\nfrom pyspark.ml.stat import Correlation\nfrom pyspark.ml.linalg import Vectors\nfrom pyspark.ml.feature import Imputer, StandardScaler, MinMaxScaler, Normalizer, PCA, StringIndexer, OneHotEncoder, VectorAssembler\nfrom pyspark.ml.regression import LinearRegression\nfrom pyspark.ml.classification import LogisticRegression, DecisionTreeClassifier, DecisionTreeClassificationModel, RandomForestClassifier, GBTClassifier\nfrom pyspark.ml.evaluation import BinaryClassificationEvaluator\nfrom pyspark.mllib.evaluation import BinaryClassificationMetrics, MulticlassMetrics\nfrom pyspark.ml.tuning import CrossValidator, CrossValidatorModel, ParamGridBuilder\nfrom functools import reduce\nfrom difflib import SequenceMatcher as seqmatch\nimport findspark\nfindspark.init('/usr/lib/python3.7/site-packages/pyspark')\nhost = '10.7.84.102'\nport = '5432'\nuser = 'postgres'\npassword = 'testPassword'\nspark = SparkSession.builder.master('local').appName('Main').config(\n 'spark.ui.port', '4050').config('spark.driver.extraClassPath',\n 'postgresql-42.2.14.jar').config('spark.executor.extraClassPath',\n 'postgresql-42.2.14.jar').config('spark.jars', 'postgresql-42.2.14.jar'\n ).getOrCreate()\nspark.sparkContext.setLogLevel('ERROR')\n\n\ndef escribir_df(df, host=host, port=port, user=user, password=password,\n table='table'):\n try:\n df.write.format('jdbc').mode('overwrite').option('url', \n 'jdbc:postgresql://' + host + ':' + port + '/postgres').option(\n 'user', user).option('password', password).option('dbtable', table\n ).save()\n return True\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\n<docstring token>\n\n\ndef leer_df(host=host, port=port, user=user, password=password, table='table'):\n try:\n df = spark.read.format('jdbc').option('url', 'jdbc:postgresql://' +\n host + ':' + port + '/postgres').option('user', user).option(\n 'password', password).option('dbtable', table).load()\n df.count()\n return df\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\ndef cols2vec(dfin, inputcols=[], outputcol='features', label='label',\n lab_alias='label', print_=False):\n try:\n assy = VectorAssembler(inputCols=inputcols, outputCol=outputcol,\n handleInvalid='skip')\n dfout = assy.transform(dfin)\n if lab_alias:\n dfout = dfout.select([outputcol, F.col(label).alias(lab_alias)])\n else:\n dfout = dfout.select([outputcol])\n if print_:\n dfout.show(10, truncate=False)\n return dfout\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\ndef vec2cols(dfin, inputcol='features', outputcols=[], label='label',\n lab_alias='label', print_=False, prediction=None):\n try:\n if lab_alias:\n if 
prediction:\n dfout = dfin.select(inputcol, label, prediction).withColumn(\n 'temp', mlF.vector_to_array(inputcol)).select([F.col(\n 'temp')[i].alias(outputcols[i]) for i in range(len(\n outputcols))] + [F.col(label).alias(lab_alias)] + [F.\n col(prediction)])\n else:\n dfout = dfin.select(inputcol, label).withColumn('temp', mlF\n .vector_to_array(inputcol)).select([F.col('temp')[i].\n alias(outputcols[i]) for i in range(len(outputcols))] +\n [F.col(label).alias(lab_alias)])\n else:\n dfout = dfin.select(inputcol, label).withColumn('temp', mlF.\n vector_to_array(inputcol)).select([F.col('temp')[i].alias(\n outputcols[i]) for i in range(len(outputcols))])\n if print_:\n dfout.show(10, truncate=False)\n return dfout\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\ndef plot_corr(df=None, inputcols=[]):\n try:\n sns.set(font_scale=1.5)\n dfvec = cols2vec(df, inputcols=inputcols, outputcol='features')\n dfscaled = StandardScaler(inputCol='features', outputCol='scaled',\n withStd=True, withMean=True).fit(dfvec).transform(dfvec).select([\n 'scaled', 'label'])\n pearson_matrix = Correlation.corr(dfscaled, column='scaled', method\n ='pearson').collect()[0][0]\n dfcols = vec2cols(dfscaled, inputcol='scaled', outputcols=inputcols)\n print('\\nMapa de calor')\n grid_kws = {'height_ratios': (1, 0.05), 'hspace': 0.2}\n f, (ax, cbar_ax) = plt.subplots(2, gridspec_kw=grid_kws, figsize=(\n 24, 8))\n sns.heatmap(pearson_matrix.toArray(), yticklabels=inputcols,\n xticklabels=inputcols, mask=np.triu(pearson_matrix.toArray()),\n annot=True, fmt='.2f', linewidths=0.5, cmap=sns.\n diverging_palette(220, 20, as_cmap=True), ax=ax, cbar_ax=\n cbar_ax, cbar_kws={'orientation': 'horizontal'})\n plt.show()\n print('\\nGráfico de parcela')\n sns.pairplot(dfcols.toPandas(), height=2, aspect=16 / 9, corner=\n True, hue='label')\n plt.show()\n return dfscaled\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\ndef plot_metrics(dfcoll=None, ver=1, metric=None):\n try:\n sns.set(font_scale=1)\n fpr, tpr, thresholds = roc_curve(np.asarray(list(i[1] for i in\n dfcoll)), np.asarray(list(i[4][1] for i in dfcoll)))\n roc_auc = auc(fpr, tpr)\n conf_mat = confusion_matrix(list(i[1] for i in dfcoll), list(i[5] for\n i in dfcoll))\n if ver == 1:\n fig, ax = plt.subplots(1, 2, figsize=(12, 4))\n ax[0].plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)\n ax[0].plot([0, 1], [0, 1], 'k--')\n ax[0].set_xlim([-0.05, 1.0]), ax[0].set_ylim([0.0, 1.05])\n ax[0].set_xlabel('Falsos positivos'), ax[0].set_ylabel(\n 'Verdaderos positivos')\n ax[0].set_title('Curva ROC'), ax[0].legend(loc='lower right')\n sns.heatmap(conf_mat, annot=True, fmt='.0f', ax=ax[1])\n ax[1].set_title('Matriz de confusión')\n plt.show()\n else:\n fig, axs = plt.subplots(1, 2, figsize=(12, 4))\n metric.plot_roc_curve(ax=axs[0])\n metric.plot_pr_curve(ax=axs[1])\n plt.show()\n return roc_auc, fpr, tpr, thresholds, conf_mat\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\ndef plot_bound(trues, falses, n):\n try:\n fig, ax = plt.subplots(figsize=(12, 4))\n ax.scatter(list(range(n)), trues[:n], s=10, alpha=0.7, c='r',\n marker='o', label='1')\n ax.scatter(list(range(n)), falses[:n], s=10, 
alpha=0.7, c='b',\n marker='s', label='0')\n plt.axhline(0.5, color='green')\n plt.legend(loc='upper right'), ax.set_title('Límite de decisión')\n ax.set_xlabel('Observaciones'), ax.set_ylabel(\n 'Predicción de probabilidad')\n plt.show()\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n",
"<docstring token>\n<import token>\nfindspark.init('/usr/lib/python3.7/site-packages/pyspark')\nhost = '10.7.84.102'\nport = '5432'\nuser = 'postgres'\npassword = 'testPassword'\nspark = SparkSession.builder.master('local').appName('Main').config(\n 'spark.ui.port', '4050').config('spark.driver.extraClassPath',\n 'postgresql-42.2.14.jar').config('spark.executor.extraClassPath',\n 'postgresql-42.2.14.jar').config('spark.jars', 'postgresql-42.2.14.jar'\n ).getOrCreate()\nspark.sparkContext.setLogLevel('ERROR')\n\n\ndef escribir_df(df, host=host, port=port, user=user, password=password,\n table='table'):\n try:\n df.write.format('jdbc').mode('overwrite').option('url', \n 'jdbc:postgresql://' + host + ':' + port + '/postgres').option(\n 'user', user).option('password', password).option('dbtable', table\n ).save()\n return True\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\n<docstring token>\n\n\ndef leer_df(host=host, port=port, user=user, password=password, table='table'):\n try:\n df = spark.read.format('jdbc').option('url', 'jdbc:postgresql://' +\n host + ':' + port + '/postgres').option('user', user).option(\n 'password', password).option('dbtable', table).load()\n df.count()\n return df\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\ndef cols2vec(dfin, inputcols=[], outputcol='features', label='label',\n lab_alias='label', print_=False):\n try:\n assy = VectorAssembler(inputCols=inputcols, outputCol=outputcol,\n handleInvalid='skip')\n dfout = assy.transform(dfin)\n if lab_alias:\n dfout = dfout.select([outputcol, F.col(label).alias(lab_alias)])\n else:\n dfout = dfout.select([outputcol])\n if print_:\n dfout.show(10, truncate=False)\n return dfout\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\ndef vec2cols(dfin, inputcol='features', outputcols=[], label='label',\n lab_alias='label', print_=False, prediction=None):\n try:\n if lab_alias:\n if prediction:\n dfout = dfin.select(inputcol, label, prediction).withColumn(\n 'temp', mlF.vector_to_array(inputcol)).select([F.col(\n 'temp')[i].alias(outputcols[i]) for i in range(len(\n outputcols))] + [F.col(label).alias(lab_alias)] + [F.\n col(prediction)])\n else:\n dfout = dfin.select(inputcol, label).withColumn('temp', mlF\n .vector_to_array(inputcol)).select([F.col('temp')[i].\n alias(outputcols[i]) for i in range(len(outputcols))] +\n [F.col(label).alias(lab_alias)])\n else:\n dfout = dfin.select(inputcol, label).withColumn('temp', mlF.\n vector_to_array(inputcol)).select([F.col('temp')[i].alias(\n outputcols[i]) for i in range(len(outputcols))])\n if print_:\n dfout.show(10, truncate=False)\n return dfout\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\ndef plot_corr(df=None, inputcols=[]):\n try:\n sns.set(font_scale=1.5)\n dfvec = cols2vec(df, inputcols=inputcols, outputcol='features')\n dfscaled = StandardScaler(inputCol='features', outputCol='scaled',\n withStd=True, withMean=True).fit(dfvec).transform(dfvec).select([\n 'scaled', 'label'])\n pearson_matrix = Correlation.corr(dfscaled, column='scaled', method\n 
='pearson').collect()[0][0]\n dfcols = vec2cols(dfscaled, inputcol='scaled', outputcols=inputcols)\n print('\\nMapa de calor')\n grid_kws = {'height_ratios': (1, 0.05), 'hspace': 0.2}\n f, (ax, cbar_ax) = plt.subplots(2, gridspec_kw=grid_kws, figsize=(\n 24, 8))\n sns.heatmap(pearson_matrix.toArray(), yticklabels=inputcols,\n xticklabels=inputcols, mask=np.triu(pearson_matrix.toArray()),\n annot=True, fmt='.2f', linewidths=0.5, cmap=sns.\n diverging_palette(220, 20, as_cmap=True), ax=ax, cbar_ax=\n cbar_ax, cbar_kws={'orientation': 'horizontal'})\n plt.show()\n print('\\nGráfico de parcela')\n sns.pairplot(dfcols.toPandas(), height=2, aspect=16 / 9, corner=\n True, hue='label')\n plt.show()\n return dfscaled\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\ndef plot_metrics(dfcoll=None, ver=1, metric=None):\n try:\n sns.set(font_scale=1)\n fpr, tpr, thresholds = roc_curve(np.asarray(list(i[1] for i in\n dfcoll)), np.asarray(list(i[4][1] for i in dfcoll)))\n roc_auc = auc(fpr, tpr)\n conf_mat = confusion_matrix(list(i[1] for i in dfcoll), list(i[5] for\n i in dfcoll))\n if ver == 1:\n fig, ax = plt.subplots(1, 2, figsize=(12, 4))\n ax[0].plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)\n ax[0].plot([0, 1], [0, 1], 'k--')\n ax[0].set_xlim([-0.05, 1.0]), ax[0].set_ylim([0.0, 1.05])\n ax[0].set_xlabel('Falsos positivos'), ax[0].set_ylabel(\n 'Verdaderos positivos')\n ax[0].set_title('Curva ROC'), ax[0].legend(loc='lower right')\n sns.heatmap(conf_mat, annot=True, fmt='.0f', ax=ax[1])\n ax[1].set_title('Matriz de confusión')\n plt.show()\n else:\n fig, axs = plt.subplots(1, 2, figsize=(12, 4))\n metric.plot_roc_curve(ax=axs[0])\n metric.plot_pr_curve(ax=axs[1])\n plt.show()\n return roc_auc, fpr, tpr, thresholds, conf_mat\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\ndef plot_bound(trues, falses, n):\n try:\n fig, ax = plt.subplots(figsize=(12, 4))\n ax.scatter(list(range(n)), trues[:n], s=10, alpha=0.7, c='r',\n marker='o', label='1')\n ax.scatter(list(range(n)), falses[:n], s=10, alpha=0.7, c='b',\n marker='s', label='0')\n plt.axhline(0.5, color='green')\n plt.legend(loc='upper right'), ax.set_title('Límite de decisión')\n ax.set_xlabel('Observaciones'), ax.set_ylabel(\n 'Predicción de probabilidad')\n plt.show()\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n",
"<docstring token>\n<import token>\nfindspark.init('/usr/lib/python3.7/site-packages/pyspark')\n<assignment token>\nspark.sparkContext.setLogLevel('ERROR')\n\n\ndef escribir_df(df, host=host, port=port, user=user, password=password,\n table='table'):\n try:\n df.write.format('jdbc').mode('overwrite').option('url', \n 'jdbc:postgresql://' + host + ':' + port + '/postgres').option(\n 'user', user).option('password', password).option('dbtable', table\n ).save()\n return True\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\n<docstring token>\n\n\ndef leer_df(host=host, port=port, user=user, password=password, table='table'):\n try:\n df = spark.read.format('jdbc').option('url', 'jdbc:postgresql://' +\n host + ':' + port + '/postgres').option('user', user).option(\n 'password', password).option('dbtable', table).load()\n df.count()\n return df\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\ndef cols2vec(dfin, inputcols=[], outputcol='features', label='label',\n lab_alias='label', print_=False):\n try:\n assy = VectorAssembler(inputCols=inputcols, outputCol=outputcol,\n handleInvalid='skip')\n dfout = assy.transform(dfin)\n if lab_alias:\n dfout = dfout.select([outputcol, F.col(label).alias(lab_alias)])\n else:\n dfout = dfout.select([outputcol])\n if print_:\n dfout.show(10, truncate=False)\n return dfout\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\ndef vec2cols(dfin, inputcol='features', outputcols=[], label='label',\n lab_alias='label', print_=False, prediction=None):\n try:\n if lab_alias:\n if prediction:\n dfout = dfin.select(inputcol, label, prediction).withColumn(\n 'temp', mlF.vector_to_array(inputcol)).select([F.col(\n 'temp')[i].alias(outputcols[i]) for i in range(len(\n outputcols))] + [F.col(label).alias(lab_alias)] + [F.\n col(prediction)])\n else:\n dfout = dfin.select(inputcol, label).withColumn('temp', mlF\n .vector_to_array(inputcol)).select([F.col('temp')[i].\n alias(outputcols[i]) for i in range(len(outputcols))] +\n [F.col(label).alias(lab_alias)])\n else:\n dfout = dfin.select(inputcol, label).withColumn('temp', mlF.\n vector_to_array(inputcol)).select([F.col('temp')[i].alias(\n outputcols[i]) for i in range(len(outputcols))])\n if print_:\n dfout.show(10, truncate=False)\n return dfout\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\ndef plot_corr(df=None, inputcols=[]):\n try:\n sns.set(font_scale=1.5)\n dfvec = cols2vec(df, inputcols=inputcols, outputcol='features')\n dfscaled = StandardScaler(inputCol='features', outputCol='scaled',\n withStd=True, withMean=True).fit(dfvec).transform(dfvec).select([\n 'scaled', 'label'])\n pearson_matrix = Correlation.corr(dfscaled, column='scaled', method\n ='pearson').collect()[0][0]\n dfcols = vec2cols(dfscaled, inputcol='scaled', outputcols=inputcols)\n print('\\nMapa de calor')\n grid_kws = {'height_ratios': (1, 0.05), 'hspace': 0.2}\n f, (ax, cbar_ax) = plt.subplots(2, gridspec_kw=grid_kws, figsize=(\n 24, 8))\n sns.heatmap(pearson_matrix.toArray(), yticklabels=inputcols,\n xticklabels=inputcols, 
mask=np.triu(pearson_matrix.toArray()),\n annot=True, fmt='.2f', linewidths=0.5, cmap=sns.\n diverging_palette(220, 20, as_cmap=True), ax=ax, cbar_ax=\n cbar_ax, cbar_kws={'orientation': 'horizontal'})\n plt.show()\n print('\\nGráfico de parcela')\n sns.pairplot(dfcols.toPandas(), height=2, aspect=16 / 9, corner=\n True, hue='label')\n plt.show()\n return dfscaled\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\ndef plot_metrics(dfcoll=None, ver=1, metric=None):\n try:\n sns.set(font_scale=1)\n fpr, tpr, thresholds = roc_curve(np.asarray(list(i[1] for i in\n dfcoll)), np.asarray(list(i[4][1] for i in dfcoll)))\n roc_auc = auc(fpr, tpr)\n conf_mat = confusion_matrix(list(i[1] for i in dfcoll), list(i[5] for\n i in dfcoll))\n if ver == 1:\n fig, ax = plt.subplots(1, 2, figsize=(12, 4))\n ax[0].plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)\n ax[0].plot([0, 1], [0, 1], 'k--')\n ax[0].set_xlim([-0.05, 1.0]), ax[0].set_ylim([0.0, 1.05])\n ax[0].set_xlabel('Falsos positivos'), ax[0].set_ylabel(\n 'Verdaderos positivos')\n ax[0].set_title('Curva ROC'), ax[0].legend(loc='lower right')\n sns.heatmap(conf_mat, annot=True, fmt='.0f', ax=ax[1])\n ax[1].set_title('Matriz de confusión')\n plt.show()\n else:\n fig, axs = plt.subplots(1, 2, figsize=(12, 4))\n metric.plot_roc_curve(ax=axs[0])\n metric.plot_pr_curve(ax=axs[1])\n plt.show()\n return roc_auc, fpr, tpr, thresholds, conf_mat\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\ndef plot_bound(trues, falses, n):\n try:\n fig, ax = plt.subplots(figsize=(12, 4))\n ax.scatter(list(range(n)), trues[:n], s=10, alpha=0.7, c='r',\n marker='o', label='1')\n ax.scatter(list(range(n)), falses[:n], s=10, alpha=0.7, c='b',\n marker='s', label='0')\n plt.axhline(0.5, color='green')\n plt.legend(loc='upper right'), ax.set_title('Límite de decisión')\n ax.set_xlabel('Observaciones'), ax.set_ylabel(\n 'Predicción de probabilidad')\n plt.show()\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef escribir_df(df, host=host, port=port, user=user, password=password,\n table='table'):\n try:\n df.write.format('jdbc').mode('overwrite').option('url', \n 'jdbc:postgresql://' + host + ':' + port + '/postgres').option(\n 'user', user).option('password', password).option('dbtable', table\n ).save()\n return True\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\n<docstring token>\n\n\ndef leer_df(host=host, port=port, user=user, password=password, table='table'):\n try:\n df = spark.read.format('jdbc').option('url', 'jdbc:postgresql://' +\n host + ':' + port + '/postgres').option('user', user).option(\n 'password', password).option('dbtable', table).load()\n df.count()\n return df\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\ndef cols2vec(dfin, inputcols=[], outputcol='features', label='label',\n lab_alias='label', print_=False):\n try:\n assy = VectorAssembler(inputCols=inputcols, outputCol=outputcol,\n handleInvalid='skip')\n dfout = assy.transform(dfin)\n if lab_alias:\n dfout = dfout.select([outputcol, F.col(label).alias(lab_alias)])\n else:\n dfout = dfout.select([outputcol])\n if print_:\n dfout.show(10, truncate=False)\n return dfout\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\ndef vec2cols(dfin, inputcol='features', outputcols=[], label='label',\n lab_alias='label', print_=False, prediction=None):\n try:\n if lab_alias:\n if prediction:\n dfout = dfin.select(inputcol, label, prediction).withColumn(\n 'temp', mlF.vector_to_array(inputcol)).select([F.col(\n 'temp')[i].alias(outputcols[i]) for i in range(len(\n outputcols))] + [F.col(label).alias(lab_alias)] + [F.\n col(prediction)])\n else:\n dfout = dfin.select(inputcol, label).withColumn('temp', mlF\n .vector_to_array(inputcol)).select([F.col('temp')[i].\n alias(outputcols[i]) for i in range(len(outputcols))] +\n [F.col(label).alias(lab_alias)])\n else:\n dfout = dfin.select(inputcol, label).withColumn('temp', mlF.\n vector_to_array(inputcol)).select([F.col('temp')[i].alias(\n outputcols[i]) for i in range(len(outputcols))])\n if print_:\n dfout.show(10, truncate=False)\n return dfout\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\ndef plot_corr(df=None, inputcols=[]):\n try:\n sns.set(font_scale=1.5)\n dfvec = cols2vec(df, inputcols=inputcols, outputcol='features')\n dfscaled = StandardScaler(inputCol='features', outputCol='scaled',\n withStd=True, withMean=True).fit(dfvec).transform(dfvec).select([\n 'scaled', 'label'])\n pearson_matrix = Correlation.corr(dfscaled, column='scaled', method\n ='pearson').collect()[0][0]\n dfcols = vec2cols(dfscaled, inputcol='scaled', outputcols=inputcols)\n print('\\nMapa de calor')\n grid_kws = {'height_ratios': (1, 0.05), 'hspace': 0.2}\n f, (ax, cbar_ax) = plt.subplots(2, gridspec_kw=grid_kws, figsize=(\n 24, 8))\n sns.heatmap(pearson_matrix.toArray(), yticklabels=inputcols,\n xticklabels=inputcols, mask=np.triu(pearson_matrix.toArray()),\n annot=True, fmt='.2f', linewidths=0.5, cmap=sns.\n 
diverging_palette(220, 20, as_cmap=True), ax=ax, cbar_ax=\n cbar_ax, cbar_kws={'orientation': 'horizontal'})\n plt.show()\n print('\\nGráfico de parcela')\n sns.pairplot(dfcols.toPandas(), height=2, aspect=16 / 9, corner=\n True, hue='label')\n plt.show()\n return dfscaled\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\ndef plot_metrics(dfcoll=None, ver=1, metric=None):\n try:\n sns.set(font_scale=1)\n fpr, tpr, thresholds = roc_curve(np.asarray(list(i[1] for i in\n dfcoll)), np.asarray(list(i[4][1] for i in dfcoll)))\n roc_auc = auc(fpr, tpr)\n conf_mat = confusion_matrix(list(i[1] for i in dfcoll), list(i[5] for\n i in dfcoll))\n if ver == 1:\n fig, ax = plt.subplots(1, 2, figsize=(12, 4))\n ax[0].plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)\n ax[0].plot([0, 1], [0, 1], 'k--')\n ax[0].set_xlim([-0.05, 1.0]), ax[0].set_ylim([0.0, 1.05])\n ax[0].set_xlabel('Falsos positivos'), ax[0].set_ylabel(\n 'Verdaderos positivos')\n ax[0].set_title('Curva ROC'), ax[0].legend(loc='lower right')\n sns.heatmap(conf_mat, annot=True, fmt='.0f', ax=ax[1])\n ax[1].set_title('Matriz de confusión')\n plt.show()\n else:\n fig, axs = plt.subplots(1, 2, figsize=(12, 4))\n metric.plot_roc_curve(ax=axs[0])\n metric.plot_pr_curve(ax=axs[1])\n plt.show()\n return roc_auc, fpr, tpr, thresholds, conf_mat\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\ndef plot_bound(trues, falses, n):\n try:\n fig, ax = plt.subplots(figsize=(12, 4))\n ax.scatter(list(range(n)), trues[:n], s=10, alpha=0.7, c='r',\n marker='o', label='1')\n ax.scatter(list(range(n)), falses[:n], s=10, alpha=0.7, c='b',\n marker='s', label='0')\n plt.axhline(0.5, color='green')\n plt.legend(loc='upper right'), ax.set_title('Límite de decisión')\n ax.set_xlabel('Observaciones'), ax.set_ylabel(\n 'Predicción de probabilidad')\n plt.show()\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef escribir_df(df, host=host, port=port, user=user, password=password,\n table='table'):\n try:\n df.write.format('jdbc').mode('overwrite').option('url', \n 'jdbc:postgresql://' + host + ':' + port + '/postgres').option(\n 'user', user).option('password', password).option('dbtable', table\n ).save()\n return True\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\n<docstring token>\n\n\ndef leer_df(host=host, port=port, user=user, password=password, table='table'):\n try:\n df = spark.read.format('jdbc').option('url', 'jdbc:postgresql://' +\n host + ':' + port + '/postgres').option('user', user).option(\n 'password', password).option('dbtable', table).load()\n df.count()\n return df\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\ndef cols2vec(dfin, inputcols=[], outputcol='features', label='label',\n lab_alias='label', print_=False):\n try:\n assy = VectorAssembler(inputCols=inputcols, outputCol=outputcol,\n handleInvalid='skip')\n dfout = assy.transform(dfin)\n if lab_alias:\n dfout = dfout.select([outputcol, F.col(label).alias(lab_alias)])\n else:\n dfout = dfout.select([outputcol])\n if print_:\n dfout.show(10, truncate=False)\n return dfout\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\ndef vec2cols(dfin, inputcol='features', outputcols=[], label='label',\n lab_alias='label', print_=False, prediction=None):\n try:\n if lab_alias:\n if prediction:\n dfout = dfin.select(inputcol, label, prediction).withColumn(\n 'temp', mlF.vector_to_array(inputcol)).select([F.col(\n 'temp')[i].alias(outputcols[i]) for i in range(len(\n outputcols))] + [F.col(label).alias(lab_alias)] + [F.\n col(prediction)])\n else:\n dfout = dfin.select(inputcol, label).withColumn('temp', mlF\n .vector_to_array(inputcol)).select([F.col('temp')[i].\n alias(outputcols[i]) for i in range(len(outputcols))] +\n [F.col(label).alias(lab_alias)])\n else:\n dfout = dfin.select(inputcol, label).withColumn('temp', mlF.\n vector_to_array(inputcol)).select([F.col('temp')[i].alias(\n outputcols[i]) for i in range(len(outputcols))])\n if print_:\n dfout.show(10, truncate=False)\n return dfout\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\n<function token>\n\n\ndef plot_metrics(dfcoll=None, ver=1, metric=None):\n try:\n sns.set(font_scale=1)\n fpr, tpr, thresholds = roc_curve(np.asarray(list(i[1] for i in\n dfcoll)), np.asarray(list(i[4][1] for i in dfcoll)))\n roc_auc = auc(fpr, tpr)\n conf_mat = confusion_matrix(list(i[1] for i in dfcoll), list(i[5] for\n i in dfcoll))\n if ver == 1:\n fig, ax = plt.subplots(1, 2, figsize=(12, 4))\n ax[0].plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)\n ax[0].plot([0, 1], [0, 1], 'k--')\n ax[0].set_xlim([-0.05, 1.0]), ax[0].set_ylim([0.0, 1.05])\n ax[0].set_xlabel('Falsos positivos'), ax[0].set_ylabel(\n 'Verdaderos positivos')\n ax[0].set_title('Curva ROC'), ax[0].legend(loc='lower right')\n sns.heatmap(conf_mat, annot=True, fmt='.0f', ax=ax[1])\n ax[1].set_title('Matriz de confusión')\n 
plt.show()\n else:\n fig, axs = plt.subplots(1, 2, figsize=(12, 4))\n metric.plot_roc_curve(ax=axs[0])\n metric.plot_pr_curve(ax=axs[1])\n plt.show()\n return roc_auc, fpr, tpr, thresholds, conf_mat\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\ndef plot_bound(trues, falses, n):\n try:\n fig, ax = plt.subplots(figsize=(12, 4))\n ax.scatter(list(range(n)), trues[:n], s=10, alpha=0.7, c='r',\n marker='o', label='1')\n ax.scatter(list(range(n)), falses[:n], s=10, alpha=0.7, c='b',\n marker='s', label='0')\n plt.axhline(0.5, color='green')\n plt.legend(loc='upper right'), ax.set_title('Límite de decisión')\n ax.set_xlabel('Observaciones'), ax.set_ylabel(\n 'Predicción de probabilidad')\n plt.show()\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef escribir_df(df, host=host, port=port, user=user, password=password,\n table='table'):\n try:\n df.write.format('jdbc').mode('overwrite').option('url', \n 'jdbc:postgresql://' + host + ':' + port + '/postgres').option(\n 'user', user).option('password', password).option('dbtable', table\n ).save()\n return True\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\n<docstring token>\n\n\ndef leer_df(host=host, port=port, user=user, password=password, table='table'):\n try:\n df = spark.read.format('jdbc').option('url', 'jdbc:postgresql://' +\n host + ':' + port + '/postgres').option('user', user).option(\n 'password', password).option('dbtable', table).load()\n df.count()\n return df\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\n<function token>\n\n\ndef vec2cols(dfin, inputcol='features', outputcols=[], label='label',\n lab_alias='label', print_=False, prediction=None):\n try:\n if lab_alias:\n if prediction:\n dfout = dfin.select(inputcol, label, prediction).withColumn(\n 'temp', mlF.vector_to_array(inputcol)).select([F.col(\n 'temp')[i].alias(outputcols[i]) for i in range(len(\n outputcols))] + [F.col(label).alias(lab_alias)] + [F.\n col(prediction)])\n else:\n dfout = dfin.select(inputcol, label).withColumn('temp', mlF\n .vector_to_array(inputcol)).select([F.col('temp')[i].\n alias(outputcols[i]) for i in range(len(outputcols))] +\n [F.col(label).alias(lab_alias)])\n else:\n dfout = dfin.select(inputcol, label).withColumn('temp', mlF.\n vector_to_array(inputcol)).select([F.col('temp')[i].alias(\n outputcols[i]) for i in range(len(outputcols))])\n if print_:\n dfout.show(10, truncate=False)\n return dfout\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\n<function token>\n\n\ndef plot_metrics(dfcoll=None, ver=1, metric=None):\n try:\n sns.set(font_scale=1)\n fpr, tpr, thresholds = roc_curve(np.asarray(list(i[1] for i in\n dfcoll)), np.asarray(list(i[4][1] for i in dfcoll)))\n roc_auc = auc(fpr, tpr)\n conf_mat = confusion_matrix(list(i[1] for i in dfcoll), list(i[5] for\n i in dfcoll))\n if ver == 1:\n fig, ax = plt.subplots(1, 2, figsize=(12, 4))\n ax[0].plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)\n ax[0].plot([0, 1], [0, 1], 'k--')\n ax[0].set_xlim([-0.05, 1.0]), ax[0].set_ylim([0.0, 1.05])\n ax[0].set_xlabel('Falsos positivos'), ax[0].set_ylabel(\n 'Verdaderos positivos')\n ax[0].set_title('Curva ROC'), ax[0].legend(loc='lower right')\n sns.heatmap(conf_mat, annot=True, fmt='.0f', ax=ax[1])\n ax[1].set_title('Matriz de confusión')\n plt.show()\n else:\n fig, axs = plt.subplots(1, 2, figsize=(12, 4))\n metric.plot_roc_curve(ax=axs[0])\n metric.plot_pr_curve(ax=axs[1])\n plt.show()\n return roc_auc, fpr, tpr, thresholds, conf_mat\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\ndef plot_bound(trues, falses, n):\n try:\n fig, ax = plt.subplots(figsize=(12, 4))\n ax.scatter(list(range(n)), trues[:n], s=10, alpha=0.7, c='r',\n marker='o', label='1')\n ax.scatter(list(range(n)), 
falses[:n], s=10, alpha=0.7, c='b',\n marker='s', label='0')\n plt.axhline(0.5, color='green')\n plt.legend(loc='upper right'), ax.set_title('Límite de decisión')\n ax.set_xlabel('Observaciones'), ax.set_ylabel(\n 'Predicción de probabilidad')\n plt.show()\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef escribir_df(df, host=host, port=port, user=user, password=password,\n table='table'):\n try:\n df.write.format('jdbc').mode('overwrite').option('url', \n 'jdbc:postgresql://' + host + ':' + port + '/postgres').option(\n 'user', user).option('password', password).option('dbtable', table\n ).save()\n return True\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\n<docstring token>\n\n\ndef leer_df(host=host, port=port, user=user, password=password, table='table'):\n try:\n df = spark.read.format('jdbc').option('url', 'jdbc:postgresql://' +\n host + ':' + port + '/postgres').option('user', user).option(\n 'password', password).option('dbtable', table).load()\n df.count()\n return df\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef plot_metrics(dfcoll=None, ver=1, metric=None):\n try:\n sns.set(font_scale=1)\n fpr, tpr, thresholds = roc_curve(np.asarray(list(i[1] for i in\n dfcoll)), np.asarray(list(i[4][1] for i in dfcoll)))\n roc_auc = auc(fpr, tpr)\n conf_mat = confusion_matrix(list(i[1] for i in dfcoll), list(i[5] for\n i in dfcoll))\n if ver == 1:\n fig, ax = plt.subplots(1, 2, figsize=(12, 4))\n ax[0].plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)\n ax[0].plot([0, 1], [0, 1], 'k--')\n ax[0].set_xlim([-0.05, 1.0]), ax[0].set_ylim([0.0, 1.05])\n ax[0].set_xlabel('Falsos positivos'), ax[0].set_ylabel(\n 'Verdaderos positivos')\n ax[0].set_title('Curva ROC'), ax[0].legend(loc='lower right')\n sns.heatmap(conf_mat, annot=True, fmt='.0f', ax=ax[1])\n ax[1].set_title('Matriz de confusión')\n plt.show()\n else:\n fig, axs = plt.subplots(1, 2, figsize=(12, 4))\n metric.plot_roc_curve(ax=axs[0])\n metric.plot_pr_curve(ax=axs[1])\n plt.show()\n return roc_auc, fpr, tpr, thresholds, conf_mat\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\ndef plot_bound(trues, falses, n):\n try:\n fig, ax = plt.subplots(figsize=(12, 4))\n ax.scatter(list(range(n)), trues[:n], s=10, alpha=0.7, c='r',\n marker='o', label='1')\n ax.scatter(list(range(n)), falses[:n], s=10, alpha=0.7, c='b',\n marker='s', label='0')\n plt.axhline(0.5, color='green')\n plt.legend(loc='upper right'), ax.set_title('Límite de decisión')\n ax.set_xlabel('Observaciones'), ax.set_ylabel(\n 'Predicción de probabilidad')\n plt.show()\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef escribir_df(df, host=host, port=port, user=user, password=password,\n table='table'):\n try:\n df.write.format('jdbc').mode('overwrite').option('url', \n 'jdbc:postgresql://' + host + ':' + port + '/postgres').option(\n 'user', user).option('password', password).option('dbtable', table\n ).save()\n return True\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\n<docstring token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef plot_metrics(dfcoll=None, ver=1, metric=None):\n try:\n sns.set(font_scale=1)\n fpr, tpr, thresholds = roc_curve(np.asarray(list(i[1] for i in\n dfcoll)), np.asarray(list(i[4][1] for i in dfcoll)))\n roc_auc = auc(fpr, tpr)\n conf_mat = confusion_matrix(list(i[1] for i in dfcoll), list(i[5] for\n i in dfcoll))\n if ver == 1:\n fig, ax = plt.subplots(1, 2, figsize=(12, 4))\n ax[0].plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)\n ax[0].plot([0, 1], [0, 1], 'k--')\n ax[0].set_xlim([-0.05, 1.0]), ax[0].set_ylim([0.0, 1.05])\n ax[0].set_xlabel('Falsos positivos'), ax[0].set_ylabel(\n 'Verdaderos positivos')\n ax[0].set_title('Curva ROC'), ax[0].legend(loc='lower right')\n sns.heatmap(conf_mat, annot=True, fmt='.0f', ax=ax[1])\n ax[1].set_title('Matriz de confusión')\n plt.show()\n else:\n fig, axs = plt.subplots(1, 2, figsize=(12, 4))\n metric.plot_roc_curve(ax=axs[0])\n metric.plot_pr_curve(ax=axs[1])\n plt.show()\n return roc_auc, fpr, tpr, thresholds, conf_mat\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\ndef plot_bound(trues, falses, n):\n try:\n fig, ax = plt.subplots(figsize=(12, 4))\n ax.scatter(list(range(n)), trues[:n], s=10, alpha=0.7, c='r',\n marker='o', label='1')\n ax.scatter(list(range(n)), falses[:n], s=10, alpha=0.7, c='b',\n marker='s', label='0')\n plt.axhline(0.5, color='green')\n plt.legend(loc='upper right'), ax.set_title('Límite de decisión')\n ax.set_xlabel('Observaciones'), ax.set_ylabel(\n 'Predicción de probabilidad')\n plt.show()\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<docstring token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef plot_metrics(dfcoll=None, ver=1, metric=None):\n try:\n sns.set(font_scale=1)\n fpr, tpr, thresholds = roc_curve(np.asarray(list(i[1] for i in\n dfcoll)), np.asarray(list(i[4][1] for i in dfcoll)))\n roc_auc = auc(fpr, tpr)\n conf_mat = confusion_matrix(list(i[1] for i in dfcoll), list(i[5] for\n i in dfcoll))\n if ver == 1:\n fig, ax = plt.subplots(1, 2, figsize=(12, 4))\n ax[0].plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)\n ax[0].plot([0, 1], [0, 1], 'k--')\n ax[0].set_xlim([-0.05, 1.0]), ax[0].set_ylim([0.0, 1.05])\n ax[0].set_xlabel('Falsos positivos'), ax[0].set_ylabel(\n 'Verdaderos positivos')\n ax[0].set_title('Curva ROC'), ax[0].legend(loc='lower right')\n sns.heatmap(conf_mat, annot=True, fmt='.0f', ax=ax[1])\n ax[1].set_title('Matriz de confusión')\n plt.show()\n else:\n fig, axs = plt.subplots(1, 2, figsize=(12, 4))\n metric.plot_roc_curve(ax=axs[0])\n metric.plot_pr_curve(ax=axs[1])\n plt.show()\n return roc_auc, fpr, tpr, thresholds, conf_mat\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n\n\ndef plot_bound(trues, falses, n):\n try:\n fig, ax = plt.subplots(figsize=(12, 4))\n ax.scatter(list(range(n)), trues[:n], s=10, alpha=0.7, c='r',\n marker='o', label='1')\n ax.scatter(list(range(n)), falses[:n], s=10, alpha=0.7, c='b',\n marker='s', label='0')\n plt.axhline(0.5, color='green')\n plt.legend(loc='upper right'), ax.set_title('Límite de decisión')\n ax.set_xlabel('Observaciones'), ax.set_ylabel(\n 'Predicción de probabilidad')\n plt.show()\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<docstring token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef plot_bound(trues, falses, n):\n try:\n fig, ax = plt.subplots(figsize=(12, 4))\n ax.scatter(list(range(n)), trues[:n], s=10, alpha=0.7, c='r',\n marker='o', label='1')\n ax.scatter(list(range(n)), falses[:n], s=10, alpha=0.7, c='b',\n marker='s', label='0')\n plt.axhline(0.5, color='green')\n plt.legend(loc='upper right'), ax.set_title('Límite de decisión')\n ax.set_xlabel('Observaciones'), ax.set_ylabel(\n 'Predicción de probabilidad')\n plt.show()\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1\n ], exc_tb.tb_lineno, exc_obj)\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<docstring token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
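Side note (not part of the record closed above): its later steps abstract away the bodies of plot_metrics and plot_bound, which take collected (label, probability, prediction) rows and report an ROC curve, AUC, and a confusion matrix. A minimal standalone sketch of that same metric computation, on synthetic labels and scores and assuming numpy and scikit-learn are installed:

# Illustrative sketch, not part of the record above: the metric computation
# performed by plot_metrics, run on synthetic labels and scores.
import numpy as np
from sklearn.metrics import roc_curve, auc, confusion_matrix

rng = np.random.default_rng(0)
y_true = rng.integers(0, 2, size=200)                          # ground-truth 0/1 labels
y_score = np.clip(0.6 * y_true + 0.5 * rng.random(200), 0, 1)  # scores correlated with labels
y_pred = (y_score > 0.5).astype(int)                           # hard predictions at 0.5

fpr, tpr, thresholds = roc_curve(y_true, y_score)
print('AUC:', auc(fpr, tpr))
print('Confusion matrix:')
print(confusion_matrix(y_true, y_pred))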
98,502 |
54ca70d27bc26052c7612e7f97cca1974f7fbeea
|
# fun with operators
i=10
j=3
# addition
print("The addition of two operators")
print(i+j)
print("The subtraction of two operators")
print(i-j)
print("The multiplication of two operators")
print(i*j)
print("The division of two operators")
print(i/j)
print("The remainder of two operators")
print(i%j)
#yet more fun
print("Is i equal to j?")
print(i==j)
print("is I not equal to j?")
print(i!=j)
print("Is i greater than j?")
print(i>j)
print("is i less than j?")
print(i<j)
print("is i gtorequal to j?")
print(i>=j)
print("is i lessorequal to j?")
print(i<=j)
|
[
"# fun with operators\ni=10\nj=3\n\n# addition\nprint(\"The addition of two operators\")\nprint(i+j)\nprint(\"The subtraction of two operators\")\nprint(i-j)\nprint(\"The multiplication of two operators\")\nprint(i*j)\nprint(\"The division of two operators\")\nprint(i/j)\nprint(\"The remainder of two operators\")\nprint(i%j)\n\n#yet more fun\nprint(\"Is i equal to j?\")\nprint(i==j)\nprint(\"is I not equal to j?\")\nprint(i!=j)\nprint(\"Is i greater than j?\")\nprint(i>j)\nprint(\"is i less than j?\")\nprint(i<j)\nprint(\"is i gtorequal to j?\")\nprint(i>=j)\nprint(\"is i lessorequal to j?\")\nprint(i<=j)",
"i = 10\nj = 3\nprint('The addition of two operators')\nprint(i + j)\nprint('The subtraction of two operators')\nprint(i - j)\nprint('The multiplication of two operators')\nprint(i * j)\nprint('The division of two operators')\nprint(i / j)\nprint('The remainder of two operators')\nprint(i % j)\nprint('Is i equal to j?')\nprint(i == j)\nprint('is I not equal to j?')\nprint(i != j)\nprint('Is i greater than j?')\nprint(i > j)\nprint('is i less than j?')\nprint(i < j)\nprint('is i gtorequal to j?')\nprint(i >= j)\nprint('is i lessorequal to j?')\nprint(i <= j)\n",
"<assignment token>\nprint('The addition of two operators')\nprint(i + j)\nprint('The subtraction of two operators')\nprint(i - j)\nprint('The multiplication of two operators')\nprint(i * j)\nprint('The division of two operators')\nprint(i / j)\nprint('The remainder of two operators')\nprint(i % j)\nprint('Is i equal to j?')\nprint(i == j)\nprint('is I not equal to j?')\nprint(i != j)\nprint('Is i greater than j?')\nprint(i > j)\nprint('is i less than j?')\nprint(i < j)\nprint('is i gtorequal to j?')\nprint(i >= j)\nprint('is i lessorequal to j?')\nprint(i <= j)\n",
"<assignment token>\n<code token>\n"
] | false |
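Illustrative aside (not part of record 98,502): the same pair of values also demonstrates floor division and divmod, which complement the / and % operators exercised in that record.

# Illustrative aside, not part of the record above: floor division and divmod.
i = 10
j = 3
print(i // j)        # 3 -> floor (integer) quotient
print(i / j)         # 3.3333333333333335 -> true division always returns a float
print(divmod(i, j))  # (3, 1) -> quotient and remainder in one call
assert divmod(i, j) == (i // j, i % j)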
98,503 |
e497332a9d6e6614287148767f635282e5d8d65f
|
import gm2
from gm2 import plt, np
tmp = gm2.Temperature()
t_time = []
t_t = []
with open("/Users/scorrodi/Downloads/7721177770818dat.txt") as file:
lines = file.readlines()
for line in lines[1:]:
data = line.split(" ")
if not data[25] in ["*","**","***","****","******",""]:
#print(data[2][0:4], data[2][4:6], data[2][6:8], data[2][8:10], data[2][10:12], (float(data[25]) -32) * 5/9)
t_time.append(gm2.util.datetime2ts(int(data[2][0:4]), int(data[2][4:6]), int(data[2][6:8]), int(data[2][8:10]), int(data[2][10:12]), 0))
t_t.append((float(data[25]) -32) * 5/9.)
t_time = np.array(t_time)
t_t = np.array(t_t)
from matplotlib.dates import DateFormatter
formatter = DateFormatter('%m/%d\n%H:%M')
formatter = DateFormatter('%m/%d')
st2017 = (t_time > gm2.util.date2ts(2017,10,1))&(t_time < gm2.util.date2ts(2017,10,28))
st2018 = (t_time > gm2.util.date2ts(2018,10,1))&(t_time < gm2.util.date2ts(2018,10,28))
from matplotlib.backends.backend_pdf import PdfPages
with PdfPages('plots/tmp_oct_2017_2018.pdf') as pdf:
for yoke in np.arange(ord('A'), ord('A')+12):
ax = []
f = plt.figure(figsize=[gm2.plotutil.figsize()[0] * 1.5, gm2.plotutil.figsize()[1] * 1.0])
#for i, pos in enumerate(['Air','Top', 'Back', 'Bottom']):
tmp_time_top, tmp_t_top = tmp.get(chr(yoke), 'Top')
tmp_time_bot, tmp_t_bot = tmp.get(chr(yoke), 'Bottom')
dt = gm2.util.date2ts(2018,10,1) - gm2.util.date2ts(2017,10,1)
s2017_top = (tmp_time_top > gm2.util.date2ts(2017,10,12))&(tmp_time_top < gm2.util.date2ts(2017,10,28))
s2018_top = (tmp_time_top > gm2.util.date2ts(2018,10,12))&(tmp_time_top < gm2.util.date2ts(2018,10,28))
s2017_bot = (tmp_time_bot > gm2.util.date2ts(2017,10,12))&(tmp_time_bot < gm2.util.date2ts(2017,10,28))
s2018_bot = (tmp_time_bot > gm2.util.date2ts(2018,10,12))&(tmp_time_bot < gm2.util.date2ts(2018,10,28))
gm2.plotutil.plot_ts((tmp_time_top[s2017_top]+dt)*1e9, tmp_t_top[s2017_top] - tmp_t_bot[s2017_bot], markersize=2, label="2017")
gm2.plotutil.plot_ts(tmp_time_top[s2018_top]*1e9, tmp_t_top[s2018_top] - tmp_t_bot[s2018_bot], markersize=2, label="2018")
#plt.legend(title=pos)
#if len(ax) == 1:
plt.title("Yoke "+chr(yoke))
#if len(ax) == 1:
plt.ylabel("temperature gradient [$^{\circ}$C]")
# if len(tmp_t[s2018])>0:
# mean = np.nanmean(tmp_t[s2018][tmp_t[s2018]<50.])
# plt.ylim([mean-0.8,mean+0.8])
plt.xlabel("date")
plt.legend()
plt.gca().get_xaxis().set_visible(True)
plt.gca().xaxis.set_major_formatter(formatter)
gm2.despine()
#f.savefig("plots/tmp_oct_2017_2018_yoke"+chr(yoke)+".png")
#pdf.savefig(f)
plt.show()
|
[
"import gm2\nfrom gm2 import plt, np\n\ntmp = gm2.Temperature()\n\n\nt_time = []\nt_t = []\n\nwith open(\"/Users/scorrodi/Downloads/7721177770818dat.txt\") as file:\n lines = file.readlines()\n for line in lines[1:]:\n data = line.split(\" \") \n if not data[25] in [\"*\",\"**\",\"***\",\"****\",\"******\",\"\"]:\n #print(data[2][0:4], data[2][4:6], data[2][6:8], data[2][8:10], data[2][10:12], (float(data[25]) -32) * 5/9)\n t_time.append(gm2.util.datetime2ts(int(data[2][0:4]), int(data[2][4:6]), int(data[2][6:8]), int(data[2][8:10]), int(data[2][10:12]), 0))\n t_t.append((float(data[25]) -32) * 5/9.)\n\nt_time = np.array(t_time)\nt_t = np.array(t_t)\n\nfrom matplotlib.dates import DateFormatter\nformatter = DateFormatter('%m/%d\\n%H:%M')\nformatter = DateFormatter('%m/%d')\n\nst2017 = (t_time > gm2.util.date2ts(2017,10,1))&(t_time < gm2.util.date2ts(2017,10,28)) \nst2018 = (t_time > gm2.util.date2ts(2018,10,1))&(t_time < gm2.util.date2ts(2018,10,28)) \n\nfrom matplotlib.backends.backend_pdf import PdfPages\nwith PdfPages('plots/tmp_oct_2017_2018.pdf') as pdf:\n for yoke in np.arange(ord('A'), ord('A')+12):\n ax = []\n f = plt.figure(figsize=[gm2.plotutil.figsize()[0] * 1.5, gm2.plotutil.figsize()[1] * 1.0])\n #for i, pos in enumerate(['Air','Top', 'Back', 'Bottom']):\n tmp_time_top, tmp_t_top = tmp.get(chr(yoke), 'Top')\n tmp_time_bot, tmp_t_bot = tmp.get(chr(yoke), 'Bottom')\n\n dt = gm2.util.date2ts(2018,10,1) - gm2.util.date2ts(2017,10,1)\n s2017_top = (tmp_time_top > gm2.util.date2ts(2017,10,12))&(tmp_time_top < gm2.util.date2ts(2017,10,28))\n s2018_top = (tmp_time_top > gm2.util.date2ts(2018,10,12))&(tmp_time_top < gm2.util.date2ts(2018,10,28))\n s2017_bot = (tmp_time_bot > gm2.util.date2ts(2017,10,12))&(tmp_time_bot < gm2.util.date2ts(2017,10,28))\n s2018_bot = (tmp_time_bot > gm2.util.date2ts(2018,10,12))&(tmp_time_bot < gm2.util.date2ts(2018,10,28))\n gm2.plotutil.plot_ts((tmp_time_top[s2017_top]+dt)*1e9, tmp_t_top[s2017_top] - tmp_t_bot[s2017_bot], markersize=2, label=\"2017\")\n gm2.plotutil.plot_ts(tmp_time_top[s2018_top]*1e9, tmp_t_top[s2018_top] - tmp_t_bot[s2018_bot], markersize=2, label=\"2018\")\n \n #plt.legend(title=pos)\n #if len(ax) == 1:\n plt.title(\"Yoke \"+chr(yoke))\n #if len(ax) == 1:\n plt.ylabel(\"temperature gradient [$^{\\circ}$C]\")\n # if len(tmp_t[s2018])>0:\n # mean = np.nanmean(tmp_t[s2018][tmp_t[s2018]<50.])\n # plt.ylim([mean-0.8,mean+0.8])\n\n plt.xlabel(\"date\")\n plt.legend()\n\n plt.gca().get_xaxis().set_visible(True)\n plt.gca().xaxis.set_major_formatter(formatter)\n gm2.despine()\n #f.savefig(\"plots/tmp_oct_2017_2018_yoke\"+chr(yoke)+\".png\")\n #pdf.savefig(f)\n plt.show()\n",
"import gm2\nfrom gm2 import plt, np\ntmp = gm2.Temperature()\nt_time = []\nt_t = []\nwith open('/Users/scorrodi/Downloads/7721177770818dat.txt') as file:\n lines = file.readlines()\n for line in lines[1:]:\n data = line.split(' ')\n if not data[25] in ['*', '**', '***', '****', '******', '']:\n t_time.append(gm2.util.datetime2ts(int(data[2][0:4]), int(data[\n 2][4:6]), int(data[2][6:8]), int(data[2][8:10]), int(data[2\n ][10:12]), 0))\n t_t.append((float(data[25]) - 32) * 5 / 9.0)\nt_time = np.array(t_time)\nt_t = np.array(t_t)\nfrom matplotlib.dates import DateFormatter\nformatter = DateFormatter('%m/%d\\n%H:%M')\nformatter = DateFormatter('%m/%d')\nst2017 = (t_time > gm2.util.date2ts(2017, 10, 1)) & (t_time < gm2.util.\n date2ts(2017, 10, 28))\nst2018 = (t_time > gm2.util.date2ts(2018, 10, 1)) & (t_time < gm2.util.\n date2ts(2018, 10, 28))\nfrom matplotlib.backends.backend_pdf import PdfPages\nwith PdfPages('plots/tmp_oct_2017_2018.pdf') as pdf:\n for yoke in np.arange(ord('A'), ord('A') + 12):\n ax = []\n f = plt.figure(figsize=[gm2.plotutil.figsize()[0] * 1.5, gm2.\n plotutil.figsize()[1] * 1.0])\n tmp_time_top, tmp_t_top = tmp.get(chr(yoke), 'Top')\n tmp_time_bot, tmp_t_bot = tmp.get(chr(yoke), 'Bottom')\n dt = gm2.util.date2ts(2018, 10, 1) - gm2.util.date2ts(2017, 10, 1)\n s2017_top = (tmp_time_top > gm2.util.date2ts(2017, 10, 12)) & (\n tmp_time_top < gm2.util.date2ts(2017, 10, 28))\n s2018_top = (tmp_time_top > gm2.util.date2ts(2018, 10, 12)) & (\n tmp_time_top < gm2.util.date2ts(2018, 10, 28))\n s2017_bot = (tmp_time_bot > gm2.util.date2ts(2017, 10, 12)) & (\n tmp_time_bot < gm2.util.date2ts(2017, 10, 28))\n s2018_bot = (tmp_time_bot > gm2.util.date2ts(2018, 10, 12)) & (\n tmp_time_bot < gm2.util.date2ts(2018, 10, 28))\n gm2.plotutil.plot_ts((tmp_time_top[s2017_top] + dt) * 1000000000.0,\n tmp_t_top[s2017_top] - tmp_t_bot[s2017_bot], markersize=2,\n label='2017')\n gm2.plotutil.plot_ts(tmp_time_top[s2018_top] * 1000000000.0, \n tmp_t_top[s2018_top] - tmp_t_bot[s2018_bot], markersize=2,\n label='2018')\n plt.title('Yoke ' + chr(yoke))\n plt.ylabel('temperature gradient [$^{\\\\circ}$C]')\n plt.xlabel('date')\n plt.legend()\n plt.gca().get_xaxis().set_visible(True)\n plt.gca().xaxis.set_major_formatter(formatter)\n gm2.despine()\n plt.show()\n",
"<import token>\ntmp = gm2.Temperature()\nt_time = []\nt_t = []\nwith open('/Users/scorrodi/Downloads/7721177770818dat.txt') as file:\n lines = file.readlines()\n for line in lines[1:]:\n data = line.split(' ')\n if not data[25] in ['*', '**', '***', '****', '******', '']:\n t_time.append(gm2.util.datetime2ts(int(data[2][0:4]), int(data[\n 2][4:6]), int(data[2][6:8]), int(data[2][8:10]), int(data[2\n ][10:12]), 0))\n t_t.append((float(data[25]) - 32) * 5 / 9.0)\nt_time = np.array(t_time)\nt_t = np.array(t_t)\n<import token>\nformatter = DateFormatter('%m/%d\\n%H:%M')\nformatter = DateFormatter('%m/%d')\nst2017 = (t_time > gm2.util.date2ts(2017, 10, 1)) & (t_time < gm2.util.\n date2ts(2017, 10, 28))\nst2018 = (t_time > gm2.util.date2ts(2018, 10, 1)) & (t_time < gm2.util.\n date2ts(2018, 10, 28))\n<import token>\nwith PdfPages('plots/tmp_oct_2017_2018.pdf') as pdf:\n for yoke in np.arange(ord('A'), ord('A') + 12):\n ax = []\n f = plt.figure(figsize=[gm2.plotutil.figsize()[0] * 1.5, gm2.\n plotutil.figsize()[1] * 1.0])\n tmp_time_top, tmp_t_top = tmp.get(chr(yoke), 'Top')\n tmp_time_bot, tmp_t_bot = tmp.get(chr(yoke), 'Bottom')\n dt = gm2.util.date2ts(2018, 10, 1) - gm2.util.date2ts(2017, 10, 1)\n s2017_top = (tmp_time_top > gm2.util.date2ts(2017, 10, 12)) & (\n tmp_time_top < gm2.util.date2ts(2017, 10, 28))\n s2018_top = (tmp_time_top > gm2.util.date2ts(2018, 10, 12)) & (\n tmp_time_top < gm2.util.date2ts(2018, 10, 28))\n s2017_bot = (tmp_time_bot > gm2.util.date2ts(2017, 10, 12)) & (\n tmp_time_bot < gm2.util.date2ts(2017, 10, 28))\n s2018_bot = (tmp_time_bot > gm2.util.date2ts(2018, 10, 12)) & (\n tmp_time_bot < gm2.util.date2ts(2018, 10, 28))\n gm2.plotutil.plot_ts((tmp_time_top[s2017_top] + dt) * 1000000000.0,\n tmp_t_top[s2017_top] - tmp_t_bot[s2017_bot], markersize=2,\n label='2017')\n gm2.plotutil.plot_ts(tmp_time_top[s2018_top] * 1000000000.0, \n tmp_t_top[s2018_top] - tmp_t_bot[s2018_bot], markersize=2,\n label='2018')\n plt.title('Yoke ' + chr(yoke))\n plt.ylabel('temperature gradient [$^{\\\\circ}$C]')\n plt.xlabel('date')\n plt.legend()\n plt.gca().get_xaxis().set_visible(True)\n plt.gca().xaxis.set_major_formatter(formatter)\n gm2.despine()\n plt.show()\n",
"<import token>\n<assignment token>\nwith open('/Users/scorrodi/Downloads/7721177770818dat.txt') as file:\n lines = file.readlines()\n for line in lines[1:]:\n data = line.split(' ')\n if not data[25] in ['*', '**', '***', '****', '******', '']:\n t_time.append(gm2.util.datetime2ts(int(data[2][0:4]), int(data[\n 2][4:6]), int(data[2][6:8]), int(data[2][8:10]), int(data[2\n ][10:12]), 0))\n t_t.append((float(data[25]) - 32) * 5 / 9.0)\n<assignment token>\n<import token>\n<assignment token>\n<import token>\nwith PdfPages('plots/tmp_oct_2017_2018.pdf') as pdf:\n for yoke in np.arange(ord('A'), ord('A') + 12):\n ax = []\n f = plt.figure(figsize=[gm2.plotutil.figsize()[0] * 1.5, gm2.\n plotutil.figsize()[1] * 1.0])\n tmp_time_top, tmp_t_top = tmp.get(chr(yoke), 'Top')\n tmp_time_bot, tmp_t_bot = tmp.get(chr(yoke), 'Bottom')\n dt = gm2.util.date2ts(2018, 10, 1) - gm2.util.date2ts(2017, 10, 1)\n s2017_top = (tmp_time_top > gm2.util.date2ts(2017, 10, 12)) & (\n tmp_time_top < gm2.util.date2ts(2017, 10, 28))\n s2018_top = (tmp_time_top > gm2.util.date2ts(2018, 10, 12)) & (\n tmp_time_top < gm2.util.date2ts(2018, 10, 28))\n s2017_bot = (tmp_time_bot > gm2.util.date2ts(2017, 10, 12)) & (\n tmp_time_bot < gm2.util.date2ts(2017, 10, 28))\n s2018_bot = (tmp_time_bot > gm2.util.date2ts(2018, 10, 12)) & (\n tmp_time_bot < gm2.util.date2ts(2018, 10, 28))\n gm2.plotutil.plot_ts((tmp_time_top[s2017_top] + dt) * 1000000000.0,\n tmp_t_top[s2017_top] - tmp_t_bot[s2017_bot], markersize=2,\n label='2017')\n gm2.plotutil.plot_ts(tmp_time_top[s2018_top] * 1000000000.0, \n tmp_t_top[s2018_top] - tmp_t_bot[s2018_bot], markersize=2,\n label='2018')\n plt.title('Yoke ' + chr(yoke))\n plt.ylabel('temperature gradient [$^{\\\\circ}$C]')\n plt.xlabel('date')\n plt.legend()\n plt.gca().get_xaxis().set_visible(True)\n plt.gca().xaxis.set_major_formatter(formatter)\n gm2.despine()\n plt.show()\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n<import token>\n<code token>\n"
] | false |
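Record 98,503 depends on the private gm2 package and a local data file, so it cannot run outside that environment. The generic matplotlib pieces it uses (DateFormatter tick labels and a PdfPages output) can be exercised on synthetic data; the sketch below is a stand-in for illustration only, not the record's code, and the output file name is made up.

# Standalone sketch, not part of the record above: DateFormatter ticks and a
# PdfPages output applied to a synthetic temperature trace.
import datetime as dt
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter
from matplotlib.backends.backend_pdf import PdfPages

times = [dt.datetime(2018, 10, 1) + dt.timedelta(hours=6 * k) for k in range(100)]
temps = 20.0 + 0.1 * np.cumsum(np.random.randn(100))   # synthetic temperature trace

with PdfPages('example_temperature.pdf') as pdf:        # hypothetical output file
    fig, ax = plt.subplots(figsize=(8, 3))
    ax.plot(times, temps, '.', markersize=2, label='synthetic')
    ax.xaxis.set_major_formatter(DateFormatter('%m/%d'))  # same tick format as the record
    ax.set_xlabel('date')
    ax.set_ylabel('temperature [$^{\\circ}$C]')
    ax.legend()
    pdf.savefig(fig)
    plt.close(fig)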
98,504 |
b1bb2406b83bb02b2aa484905c275befdf1154d0
|
for i in range(6):
print(i)
print(range(6), 'finish')
for i in range(6, 10):
print(i)
print(range(6, 10), 'finish')
for i in range(6, 12, 2):
print(i)
print(range(6, 12, 2), 'finish')
# iterate over the list
a = ['i', 'love', 'coding', 'and', 'free']
for i in range(len(a)):
print(i, a[i])
|
[
"for i in range(6):\n print(i)\nprint(range(6), 'finish')\n\nfor i in range(6, 10):\n print(i)\nprint(range(6, 10), 'finish')\n\nfor i in range(6, 12, 2):\n print(i)\nprint(range(6, 12, 2), 'finish')\n\n# 迭代链表\na = ['i', 'love', 'coding', 'and', 'free']\nfor i in range(len(a)):\n print(i, a[i])\n",
"for i in range(6):\n print(i)\nprint(range(6), 'finish')\nfor i in range(6, 10):\n print(i)\nprint(range(6, 10), 'finish')\nfor i in range(6, 12, 2):\n print(i)\nprint(range(6, 12, 2), 'finish')\na = ['i', 'love', 'coding', 'and', 'free']\nfor i in range(len(a)):\n print(i, a[i])\n",
"for i in range(6):\n print(i)\nprint(range(6), 'finish')\nfor i in range(6, 10):\n print(i)\nprint(range(6, 10), 'finish')\nfor i in range(6, 12, 2):\n print(i)\nprint(range(6, 12, 2), 'finish')\n<assignment token>\nfor i in range(len(a)):\n print(i, a[i])\n",
"<code token>\n<assignment token>\n<code token>\n"
] | false |
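Illustrative footnote to record 98,504 (not part of the record): enumerate yields the same index/value pairs as walking range(len(a)) and indexing.

# Illustrative footnote, not part of the record above.
a = ['i', 'love', 'coding', 'and', 'free']
for i, word in enumerate(a):            # equivalent to range(len(a)) plus a[i]
    print(i, word)
for i, word in enumerate(a, start=1):   # enumerate can also start at an offset
    print(i, word)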
98,505 |
b99d76ed31133ad82a59ba8f180da99e8b66a5fb
|
from DTO.LED import LED
class Room:
def __init__(self, db_id, name, LEDs : [LED]):
self.db_id = db_id
self.name = name
self.LEDs = LEDs
def to_dict(self):
return {
u'name': self.name
}
def __str__(self):
LED_str = [str(led) for led in self.LEDs]
return f"Room {{ name: '{self.name}', # LEDs: {len(self.LEDs)} leds: {LED_str} }}"
|
[
"from DTO.LED import LED\n\nclass Room:\n def __init__(self, db_id, name, LEDs : [LED]):\n self.db_id = db_id\n \n self.name = name\n self.LEDs = LEDs\n\n def to_dict(self):\n return {\n u'name': self.name \n }\n\n def __str__(self):\n LED_str = [str(led) for led in self.LEDs]\n return f\"Room {{ name: '{self.name}', # LEDs: {len(self.LEDs)} leds: {LED_str} }}\" ",
"from DTO.LED import LED\n\n\nclass Room:\n\n def __init__(self, db_id, name, LEDs: [LED]):\n self.db_id = db_id\n self.name = name\n self.LEDs = LEDs\n\n def to_dict(self):\n return {u'name': self.name}\n\n def __str__(self):\n LED_str = [str(led) for led in self.LEDs]\n return (\n f\"Room {{ name: '{self.name}', # LEDs: {len(self.LEDs)} leds: {LED_str} }}\"\n )\n",
"<import token>\n\n\nclass Room:\n\n def __init__(self, db_id, name, LEDs: [LED]):\n self.db_id = db_id\n self.name = name\n self.LEDs = LEDs\n\n def to_dict(self):\n return {u'name': self.name}\n\n def __str__(self):\n LED_str = [str(led) for led in self.LEDs]\n return (\n f\"Room {{ name: '{self.name}', # LEDs: {len(self.LEDs)} leds: {LED_str} }}\"\n )\n",
"<import token>\n\n\nclass Room:\n\n def __init__(self, db_id, name, LEDs: [LED]):\n self.db_id = db_id\n self.name = name\n self.LEDs = LEDs\n\n def to_dict(self):\n return {u'name': self.name}\n <function token>\n",
"<import token>\n\n\nclass Room:\n <function token>\n\n def to_dict(self):\n return {u'name': self.name}\n <function token>\n",
"<import token>\n\n\nclass Room:\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
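Record 98,505 imports LED from a DTO.LED module that is not part of the record, so its Room class cannot be instantiated here as-is. The usage sketch below substitutes a stub LED (an assumption; the real class presumably differs) just to show how __str__ and to_dict behave.

# Usage sketch, not part of the record above; the stub LED is an assumption
# standing in for the unshown DTO.LED class.
class LED:
    def __init__(self, name):
        self.name = name

    def __str__(self):
        return f'LED({self.name})'


class Room:  # same shape as the record's Room, without the DTO import
    def __init__(self, db_id, name, LEDs):
        self.db_id = db_id
        self.name = name
        self.LEDs = LEDs

    def to_dict(self):
        return {u'name': self.name}

    def __str__(self):
        LED_str = [str(led) for led in self.LEDs]
        return f"Room {{ name: '{self.name}', # LEDs: {len(self.LEDs)} leds: {LED_str} }}"


room = Room('room-1', 'Kitchen', [LED('ceiling'), LED('counter')])
print(room)            # Room { name: 'Kitchen', # LEDs: 2 leds: [...] }
print(room.to_dict())  # {'name': 'Kitchen'}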
98,506 |
8f01eb0ada90032d8d72cb69532c520cd87629e7
|
import os
from mappy import config
from . import process
from .album import Album
def all_albums():
return [
Album(aid) for aid in os.listdir(config.Images.ALBUMS_ROOT)
if os.path.isdir(os.path.join(config.Images.ALBUMS_ROOT, aid))
]
|
[
"import os\n\nfrom mappy import config\n\nfrom . import process\nfrom .album import Album\n\n\ndef all_albums():\n return [\n Album(aid) for aid in os.listdir(config.Images.ALBUMS_ROOT)\n if os.path.isdir(os.path.join(config.Images.ALBUMS_ROOT, aid))\n ]\n",
"import os\nfrom mappy import config\nfrom . import process\nfrom .album import Album\n\n\ndef all_albums():\n return [Album(aid) for aid in os.listdir(config.Images.ALBUMS_ROOT) if\n os.path.isdir(os.path.join(config.Images.ALBUMS_ROOT, aid))]\n",
"<import token>\n\n\ndef all_albums():\n return [Album(aid) for aid in os.listdir(config.Images.ALBUMS_ROOT) if\n os.path.isdir(os.path.join(config.Images.ALBUMS_ROOT, aid))]\n",
"<import token>\n<function token>\n"
] | false |
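Record 98,506 filters os.listdir down to the subdirectories of a configured albums root. For comparison only, the same "immediate subdirectories" listing written with pathlib is sketched below; the mappy config and Album class are project-specific, so plain directory names are returned instead (an assumption, not the project's API).

# Comparison sketch, not part of the record above.
from pathlib import Path

def list_album_dirs(albums_root):
    """Return the names of the directories directly under albums_root."""
    return [p.name for p in Path(albums_root).iterdir() if p.is_dir()]

# Example: list_album_dirs('/tmp') returns the directory names under /tmp.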
98,507 |
500b516770bd77404bffb4073c1e8890bef4228e
|
##################################################
# TrafficEstimatorService_services.py
# generated by ZSI.generate.wsdl2python
##################################################
from TrafficEstimatorService_services_types import *
import urlparse, types
from ZSI.TCcompound import ComplexType, Struct
from ZSI import client
import ZSI
# Locator
class TrafficEstimatorServiceLocator:
TrafficEstimatorInterface_address = "https://adwords.google.com:443/api/adwords/v13/TrafficEstimatorService"
def getTrafficEstimatorInterfaceAddress(self):
return TrafficEstimatorServiceLocator.TrafficEstimatorInterface_address
def getTrafficEstimatorInterface(self, url=None, **kw):
return TrafficEstimatorServiceSoapBindingSOAP(url or TrafficEstimatorServiceLocator.TrafficEstimatorInterface_address, **kw)
# Methods
class TrafficEstimatorServiceSoapBindingSOAP:
def __init__(self, url, **kw):
kw.setdefault("readerclass", None)
kw.setdefault("writerclass", None)
# no resource properties
self.binding = client.Binding(url=url, **kw)
# no ws-addressing
# op: checkKeywordTraffic
def checkKeywordTraffic(self, request):
if isinstance(request, checkKeywordTrafficRequest) is False:
raise TypeError, "%s incorrect request type" % (request.__class__)
kw = {}
# no input wsaction
self.binding.Send(None, None, request, soapaction="", **kw)
# no output wsaction
response = self.binding.Receive(checkKeywordTrafficResponse.typecode)
return response
# op: estimateAdGroupList
def estimateAdGroupList(self, request):
if isinstance(request, estimateAdGroupListRequest) is False:
raise TypeError, "%s incorrect request type" % (request.__class__)
kw = {}
# no input wsaction
self.binding.Send(None, None, request, soapaction="", **kw)
# no output wsaction
response = self.binding.Receive(estimateAdGroupListResponse.typecode)
return response
# op: estimateCampaignList
def estimateCampaignList(self, request):
if isinstance(request, estimateCampaignListRequest) is False:
raise TypeError, "%s incorrect request type" % (request.__class__)
kw = {}
# no input wsaction
self.binding.Send(None, None, request, soapaction="", **kw)
# no output wsaction
response = self.binding.Receive(estimateCampaignListResponse.typecode)
return response
# op: estimateKeywordList
def estimateKeywordList(self, request):
if isinstance(request, estimateKeywordListRequest) is False:
raise TypeError, "%s incorrect request type" % (request.__class__)
kw = {}
# no input wsaction
self.binding.Send(None, None, request, soapaction="", **kw)
# no output wsaction
response = self.binding.Receive(estimateKeywordListResponse.typecode)
return response
checkKeywordTrafficRequest = ns0.checkKeywordTraffic_Dec().pyclass
checkKeywordTrafficResponse = ns0.checkKeywordTrafficResponse_Dec().pyclass
estimateAdGroupListRequest = ns0.estimateAdGroupList_Dec().pyclass
estimateAdGroupListResponse = ns0.estimateAdGroupListResponse_Dec().pyclass
estimateCampaignListRequest = ns0.estimateCampaignList_Dec().pyclass
estimateCampaignListResponse = ns0.estimateCampaignListResponse_Dec().pyclass
estimateKeywordListRequest = ns0.estimateKeywordList_Dec().pyclass
estimateKeywordListResponse = ns0.estimateKeywordListResponse_Dec().pyclass
|
[
"################################################## \n# TrafficEstimatorService_services.py \n# generated by ZSI.generate.wsdl2python\n##################################################\n\n\nfrom TrafficEstimatorService_services_types import *\nimport urlparse, types\nfrom ZSI.TCcompound import ComplexType, Struct\nfrom ZSI import client\nimport ZSI\n\n# Locator\nclass TrafficEstimatorServiceLocator:\n TrafficEstimatorInterface_address = \"https://adwords.google.com:443/api/adwords/v13/TrafficEstimatorService\"\n def getTrafficEstimatorInterfaceAddress(self):\n return TrafficEstimatorServiceLocator.TrafficEstimatorInterface_address\n def getTrafficEstimatorInterface(self, url=None, **kw):\n return TrafficEstimatorServiceSoapBindingSOAP(url or TrafficEstimatorServiceLocator.TrafficEstimatorInterface_address, **kw)\n\n# Methods\nclass TrafficEstimatorServiceSoapBindingSOAP:\n def __init__(self, url, **kw):\n kw.setdefault(\"readerclass\", None)\n kw.setdefault(\"writerclass\", None)\n # no resource properties\n self.binding = client.Binding(url=url, **kw)\n # no ws-addressing\n\n # op: checkKeywordTraffic\n def checkKeywordTraffic(self, request):\n if isinstance(request, checkKeywordTrafficRequest) is False:\n raise TypeError, \"%s incorrect request type\" % (request.__class__)\n kw = {}\n # no input wsaction\n self.binding.Send(None, None, request, soapaction=\"\", **kw)\n # no output wsaction\n response = self.binding.Receive(checkKeywordTrafficResponse.typecode)\n return response\n\n # op: estimateAdGroupList\n def estimateAdGroupList(self, request):\n if isinstance(request, estimateAdGroupListRequest) is False:\n raise TypeError, \"%s incorrect request type\" % (request.__class__)\n kw = {}\n # no input wsaction\n self.binding.Send(None, None, request, soapaction=\"\", **kw)\n # no output wsaction\n response = self.binding.Receive(estimateAdGroupListResponse.typecode)\n return response\n\n # op: estimateCampaignList\n def estimateCampaignList(self, request):\n if isinstance(request, estimateCampaignListRequest) is False:\n raise TypeError, \"%s incorrect request type\" % (request.__class__)\n kw = {}\n # no input wsaction\n self.binding.Send(None, None, request, soapaction=\"\", **kw)\n # no output wsaction\n response = self.binding.Receive(estimateCampaignListResponse.typecode)\n return response\n\n # op: estimateKeywordList\n def estimateKeywordList(self, request):\n if isinstance(request, estimateKeywordListRequest) is False:\n raise TypeError, \"%s incorrect request type\" % (request.__class__)\n kw = {}\n # no input wsaction\n self.binding.Send(None, None, request, soapaction=\"\", **kw)\n # no output wsaction\n response = self.binding.Receive(estimateKeywordListResponse.typecode)\n return response\n\ncheckKeywordTrafficRequest = ns0.checkKeywordTraffic_Dec().pyclass\n\ncheckKeywordTrafficResponse = ns0.checkKeywordTrafficResponse_Dec().pyclass\n\nestimateAdGroupListRequest = ns0.estimateAdGroupList_Dec().pyclass\n\nestimateAdGroupListResponse = ns0.estimateAdGroupListResponse_Dec().pyclass\n\nestimateCampaignListRequest = ns0.estimateCampaignList_Dec().pyclass\n\nestimateCampaignListResponse = ns0.estimateCampaignListResponse_Dec().pyclass\n\nestimateKeywordListRequest = ns0.estimateKeywordList_Dec().pyclass\n\nestimateKeywordListResponse = ns0.estimateKeywordListResponse_Dec().pyclass\n"
] | true |
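Record 98,507 is wsdl2python output written for Python 2: the `raise TypeError, "..."` form in each operation does not parse under Python 3, which plausibly explains why the record carries a single step and error = true. For reference only, the Python 3 spelling of that per-operation type check would look like the sketch below; this is an illustration, not a patch to the generated ZSI bindings.

# Python 3 spelling of the type check used in each operation above
# (illustration only, not a change to the generated code).
def check_request_type(request, expected_cls):
    if not isinstance(request, expected_cls):
        raise TypeError('%s incorrect request type' % (request.__class__,))

# e.g. inside checkKeywordTraffic it would read:
#     check_request_type(request, checkKeywordTrafficRequest)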
98,508 |
0b763bd4041ead30aa76b04e9c0f5209eaed136d
|
# suit_mailfactory versioning:
# <suit version updated>.<mailfactory version updated>.<hotfix>
__SUIT_VERSION__ = '0.2.8'
__MAILFACTORY_VERSION__ = '0.11'
__HOTFIX__ = 0
__VERSION__ = u'{}.{}.{}'.format(
__SUIT_VERSION__.replace('.', ''),
__MAILFACTORY_VERSION__.replace('.', ''),
__HOTFIX__
)
|
[
"# suit_mailfactory versioning:\n# <suit version updated>.<mailfactory version updated>.<hotfix>\n__SUIT_VERSION__ = '0.2.8'\n__MAILFACTORY_VERSION__ = '0.11'\n__HOTFIX__ = 0\n\n__VERSION__ = u'{}.{}.{}'.format(\n __SUIT_VERSION__.replace('.', ''),\n __MAILFACTORY_VERSION__.replace('.', ''),\n __HOTFIX__\n)\n",
"__SUIT_VERSION__ = '0.2.8'\n__MAILFACTORY_VERSION__ = '0.11'\n__HOTFIX__ = 0\n__VERSION__ = u'{}.{}.{}'.format(__SUIT_VERSION__.replace('.', ''),\n __MAILFACTORY_VERSION__.replace('.', ''), __HOTFIX__)\n",
"<assignment token>\n"
] | false |
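Worked through by hand for the values in record 98,508 (illustration only): the combined string strips the dots from each component version before joining them with the hotfix number.

# Illustration only: what the record's __VERSION__ evaluates to.
print('0.2.8'.replace('.', ''))   # '028'
print('0.11'.replace('.', ''))    # '011'
print(u'{}.{}.{}'.format('0.2.8'.replace('.', ''), '0.11'.replace('.', ''), 0))  # 028.011.0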
98,509 |
ffbc1ad01b447ac19ed623edd9a2b9b2cce5b36c
|
#!/usr/bin/env python
"""
Evaluate the trajectory output by BLOG against the ground-truth GPS trajectory.
Example usage:
python -m ppaml_car.evaluate eval_data_dir out_dir --plot
"""
import argparse
import csv
import numpy as np
import os
from ppaml_car.lasers import plot_obstacles
from slam_eval.slam_eval import obsticle_compare
def read_csv(path):
"""
Read CSV of real values, discard header, and return np.array.
"""
rows = []
with open(path) as csv_file:
reader = csv.reader(csv_file)
header = reader.next()
if header[0].isdigit():
print "Warning: Discarding header that looks like numbers."
for line in reader:
rows.append(map(float, line))
return np.array(rows)
def compute_error(ground_traj, my_traj):
"""
Compute error between trajectories, in the same way as the evaluator does.
"""
# Times should match.
if ground_traj.shape != my_traj.shape:
raise ValueError("ground_traj and my_traj must have the same shape")
if np.max(np.abs(ground_traj[:, 0] - my_traj[:, 0])) > 1e-2:
raise ValueError("ground_traj and my_traj must have the same times")
d = ground_traj[:, 1:3] - my_traj[:, 1:3]
norm2 = np.sqrt(np.sum(d * d, axis=1))
return np.sum(norm2)
def evaluate_trajectories(args):
# Array with columns (time, lat, lon).
# We get rid of the 4th orientation column.
ground_traj = read_csv(os.path.join(args.eval_data_dir, 'eval_gps.csv'))
ground_traj = ground_traj[:, :3]
# Array with columns (time, lat, lon).
out_traj = read_csv(os.path.join(args.out_dir, 'slam_out_path.csv'))
# Evaluate error between the trajectories.
if len(out_traj) == len(ground_traj):
print "Traj error: {}".format(compute_error(ground_traj, out_traj))
else:
print "Traj not the same length; can't evaluate."
# Optionally plot trajectories.
if args.plot:
plt.plot(
ground_traj[:, 2], ground_traj[:, 1], label='ground', color='g')
plt.plot(
out_traj[:, 2], out_traj[:, 1], label='out', color='r')
def evaluate_obstacles(args):
# Array with columns (x, y).
ground_obst = read_csv(os.path.join(
args.eval_data_dir, 'eval_obstacles.csv'))
# Array with columns (x, y).
out_obst = read_csv(os.path.join(args.out_dir, 'slam_out_landmarks.csv'))
# Use evaluation metric written by Galois.
n_extras, score = obsticle_compare(out_obst, ground_obst)
print "{} ground obstacles; {} out obstacles".format(
len(ground_obst), len(out_obst))
print "Map error: {}".format(score)
# Optionally plot obstacles.
if args.plot:
r = 0.37
ground_obst_3 = [(x, y, r) for x, y in ground_obst]
out_obst_3 = [(x, y, r) for x, y in out_obst]
plot_obstacles(ground_obst_3, plt.gca(), color=(0, 1, 0, 0.6))
plot_obstacles(out_obst_3, plt.gca(), color=(1, 0, 0, 0.6))
if __name__ == "__main__":
# Parse command-line args.
parser = argparse.ArgumentParser(
description='Evaluate trajectory output by BLOG.')
parser.add_argument('eval_data_dir')
parser.add_argument('out_dir')
parser.add_argument('--plot', action='store_true')
args = parser.parse_args()
if args.plot:
import matplotlib.pyplot as plt
plt.figure(figsize=(8, 8))
evaluate_trajectories(args)
evaluate_obstacles(args)
if args.plot:
plt.plot([-7, -7, 7, 7, -7], [-7, 7, 7, -7, -7], 'k')
plt.xlim(-8, 8)
plt.ylim(-8, 8)
plt.legend()
plt.show()
|
[
"#!/usr/bin/env python\n\n\"\"\"\nEvaluate the trajectory output by BLOG against the ground-truth GPS trajectory.\n\nExample usage:\n python -m ppaml_car.evaluate eval_data_dir out_dir --plot\n\"\"\"\n\n\nimport argparse\nimport csv\nimport numpy as np\nimport os\n\nfrom ppaml_car.lasers import plot_obstacles\nfrom slam_eval.slam_eval import obsticle_compare\n\n\ndef read_csv(path):\n \"\"\"\n Read CSV of real values, discard header, and return np.array.\n \"\"\"\n rows = []\n with open(path) as csv_file:\n reader = csv.reader(csv_file)\n header = reader.next()\n if header[0].isdigit():\n print \"Warning: Discarding header that looks like numbers.\"\n for line in reader:\n rows.append(map(float, line))\n return np.array(rows)\n\n\ndef compute_error(ground_traj, my_traj):\n \"\"\"\n Compute error between trajectories, in the same way as the evaluator does.\n \"\"\"\n # Times should match.\n if ground_traj.shape != my_traj.shape:\n raise ValueError(\"ground_traj and my_traj must have the same shape\")\n if np.max(np.abs(ground_traj[:, 0] - my_traj[:, 0])) > 1e-2:\n raise ValueError(\"ground_traj and my_traj must have the same times\")\n\n d = ground_traj[:, 1:3] - my_traj[:, 1:3]\n norm2 = np.sqrt(np.sum(d * d, axis=1))\n return np.sum(norm2)\n\n\ndef evaluate_trajectories(args):\n # Array with columns (time, lat, lon).\n # We get rid of the 4th orientation column.\n ground_traj = read_csv(os.path.join(args.eval_data_dir, 'eval_gps.csv'))\n ground_traj = ground_traj[:, :3]\n\n # Array with columns (time, lat, lon).\n out_traj = read_csv(os.path.join(args.out_dir, 'slam_out_path.csv'))\n\n # Evaluate error between the trajectories.\n if len(out_traj) == len(ground_traj):\n print \"Traj error: {}\".format(compute_error(ground_traj, out_traj))\n else:\n print \"Traj not the same length; can't evaluate.\"\n\n # Optionally plot trajectories.\n if args.plot:\n plt.plot(\n ground_traj[:, 2], ground_traj[:, 1], label='ground', color='g')\n plt.plot(\n out_traj[:, 2], out_traj[:, 1], label='out', color='r')\n\n\ndef evaluate_obstacles(args):\n # Array with columns (x, y).\n ground_obst = read_csv(os.path.join(\n args.eval_data_dir, 'eval_obstacles.csv'))\n\n # Array with columns (x, y).\n out_obst = read_csv(os.path.join(args.out_dir, 'slam_out_landmarks.csv'))\n\n # Use evaluation metric written by Galois.\n n_extras, score = obsticle_compare(out_obst, ground_obst)\n print \"{} ground obstacles; {} out obstacles\".format(\n len(ground_obst), len(out_obst))\n print \"Map error: {}\".format(score)\n\n # Optionally plot obstacles.\n if args.plot:\n r = 0.37\n ground_obst_3 = [(x, y, r) for x, y in ground_obst]\n out_obst_3 = [(x, y, r) for x, y in out_obst]\n plot_obstacles(ground_obst_3, plt.gca(), color=(0, 1, 0, 0.6))\n plot_obstacles(out_obst_3, plt.gca(), color=(1, 0, 0, 0.6))\n\n\nif __name__ == \"__main__\":\n # Parse command-line args.\n parser = argparse.ArgumentParser(\n description='Evaluate trajectory output by BLOG.')\n parser.add_argument('eval_data_dir')\n parser.add_argument('out_dir')\n parser.add_argument('--plot', action='store_true')\n args = parser.parse_args()\n\n if args.plot:\n import matplotlib.pyplot as plt\n plt.figure(figsize=(8, 8))\n\n evaluate_trajectories(args)\n evaluate_obstacles(args)\n\n if args.plot:\n plt.plot([-7, -7, 7, 7, -7], [-7, 7, 7, -7, -7], 'k')\n plt.xlim(-8, 8)\n plt.ylim(-8, 8)\n plt.legend()\n plt.show()\n"
] | true |
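Record 98,509 is a Python 2 script (print statements without parentheses), which presumably is why it carries a single step and error = true; it also needs the PPAML data files to run. Its core trajectory metric is easy to exercise on synthetic arrays: the sketch below restates compute_error in Python 3 with made-up (time, lat, lon) trajectories, purely as an illustration.

# Illustration only: the record's trajectory-error metric restated in Python 3
# and run on synthetic arrays.
import numpy as np

def compute_error(ground_traj, my_traj):
    """Sum of Euclidean distances between matching (lat, lon) rows."""
    if ground_traj.shape != my_traj.shape:
        raise ValueError('ground_traj and my_traj must have the same shape')
    if np.max(np.abs(ground_traj[:, 0] - my_traj[:, 0])) > 1e-2:
        raise ValueError('ground_traj and my_traj must have the same times')
    d = ground_traj[:, 1:3] - my_traj[:, 1:3]
    return np.sum(np.sqrt(np.sum(d * d, axis=1)))

t = np.arange(5.0)
ground = np.column_stack([t, np.zeros(5), np.zeros(5)])        # (time, lat, lon)
mine = np.column_stack([t, np.full(5, 3.0), np.full(5, 4.0)])  # offset by (3, 4)
print(compute_error(ground, mine))  # 5 rows, each 5.0 away -> 25.0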
98,510 |
e515c3dc40e4936c7bac5ffcac52c30090d7c554
|
import numpy as np
from itertools import product
from helper.empirical_distribution import empirical_distribution
from scipy.stats import entropy
def pairwise_entropy(a: np.array):
contingency = empirical_distribution(a)
ps = contingency.flatten()
return entropy(ps)
def entropy_matrix(training):
"""Computes entropy matrix for each pixel tupel - Needs vectorization!"""
shape = training.shape[1]
e = np.zeros((shape, shape))
for i, j in product(*map(range, (shape, shape))):
a = training[:, [i, j]]
e[i, j] = pairwise_entropy(a)
return e
|
[
"import numpy as np\nfrom itertools import product\nfrom helper.empirical_distribution import empirical_distribution\nfrom scipy.stats import entropy\n\n\ndef pairwise_entropy(a: np.array):\n contingency = empirical_distribution(a)\n ps = contingency.flatten()\n return entropy(ps)\n\n\ndef entropy_matrix(training):\n \"\"\"Computes entropy matrix for each pixel tupel - Needs vectorization!\"\"\"\n shape = training.shape[1]\n e = np.zeros((shape, shape))\n for i, j in product(*map(range, (shape, shape))):\n a = training[:, [i, j]]\n e[i, j] = pairwise_entropy(a)\n return e",
"import numpy as np\nfrom itertools import product\nfrom helper.empirical_distribution import empirical_distribution\nfrom scipy.stats import entropy\n\n\ndef pairwise_entropy(a: np.array):\n contingency = empirical_distribution(a)\n ps = contingency.flatten()\n return entropy(ps)\n\n\ndef entropy_matrix(training):\n \"\"\"Computes entropy matrix for each pixel tupel - Needs vectorization!\"\"\"\n shape = training.shape[1]\n e = np.zeros((shape, shape))\n for i, j in product(*map(range, (shape, shape))):\n a = training[:, [i, j]]\n e[i, j] = pairwise_entropy(a)\n return e\n",
"<import token>\n\n\ndef pairwise_entropy(a: np.array):\n contingency = empirical_distribution(a)\n ps = contingency.flatten()\n return entropy(ps)\n\n\ndef entropy_matrix(training):\n \"\"\"Computes entropy matrix for each pixel tupel - Needs vectorization!\"\"\"\n shape = training.shape[1]\n e = np.zeros((shape, shape))\n for i, j in product(*map(range, (shape, shape))):\n a = training[:, [i, j]]\n e[i, j] = pairwise_entropy(a)\n return e\n",
"<import token>\n<function token>\n\n\ndef entropy_matrix(training):\n \"\"\"Computes entropy matrix for each pixel tupel - Needs vectorization!\"\"\"\n shape = training.shape[1]\n e = np.zeros((shape, shape))\n for i, j in product(*map(range, (shape, shape))):\n a = training[:, [i, j]]\n e[i, j] = pairwise_entropy(a)\n return e\n",
"<import token>\n<function token>\n<function token>\n"
] | false |
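Record 98,510 leans on a helper.empirical_distribution module that is not shown, so it cannot run standalone. The sketch below substitutes np.histogram2d for that helper (an assumption about what it does) just to make the pairwise-entropy loop runnable on a small random matrix; it keeps the double loop that the record's own docstring flags as needing vectorization.

# Runnable sketch, not the record's code: np.histogram2d stands in for the
# unshown helper.empirical_distribution, assumed to build the joint
# contingency table of the two selected columns.
import numpy as np
from itertools import product
from scipy.stats import entropy

def pairwise_entropy(a, bins=4):
    contingency, _, _ = np.histogram2d(a[:, 0], a[:, 1], bins=bins)
    return entropy(contingency.flatten())   # scipy normalises the raw counts

def entropy_matrix(training, bins=4):
    shape = training.shape[1]
    e = np.zeros((shape, shape))
    for i, j in product(range(shape), range(shape)):   # still the O(d^2) loop
        e[i, j] = pairwise_entropy(training[:, [i, j]], bins=bins)
    return e

rng = np.random.default_rng(0)
print(entropy_matrix(rng.random((200, 3))))   # 3x3 matrix of joint entropies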
98,511 |
7f57052a3c04063d2cb277e0938c4aeb18485e32
|
import pandas as pd
import numpy as np
import pickle
import re
import math
from data_manager import DataManager
def delete_empty_rows(data):
i = 0
new_data = pd.DataFrame()
for index, row in data.iterrows():
print('To delete or not to delete the question is now:', i)
if len(list(row.iloc[2])) != 0:
new_data = new_data.append(row)
i += 1
print(new_data)
with open("../data/data_features_clean.txt", "wb") as fp: # Pickling
pickle.dump(new_data, fp)
return new_data
def clean_sentences():
sentences = DataManager.get_sentences()
i = 0
new_data = pd.DataFrame()
for index, row in sentences.iterrows():
if i % 1000 == 0:
print('To delete or not to delete the question is now:', i)
if isinstance(row[0], float) and math.isnan(row[0]):
print('Deleting...', i)
else:
new_data = new_data.append(row)
i += 1
new_data.to_csv('../data/sentences_clean.csv')
print(new_data)
def get_embeddings():
data = DataManager.get_data_features_clean()
data = data[['rev_id', 'label', 'words', 'embedding']]
data.set_index('rev_id')
embeddings = data['embedding']
with open("../features/embeddings.txt", "wb") as fp: # Pickling
pickle.dump(embeddings, fp)
def flat_data(data):
embeddings = data['embedding'].tolist()
data = data.drop(['embedding'], axis=1)
data = data.reset_index(drop=True)
names = list(data)
embeddings_names = [str('embedding: ' + str(x + 1)) for x in range(0, 100)]
# names.append(embeddings_names)
names[len(names):len(names) + 1] = embeddings_names
print(names)
new_data = pd.DataFrame()
for index, row in data.iterrows():
print(index)
row = pd.DataFrame(np.append(row.values, embeddings[index])).transpose()
new_data = new_data.append(row)
new_data = new_data.reset_index(drop=True)
new_data.columns = names
print(new_data)
new_data.to_csv('../features/data_features_clean_flat.csv', header=False, mode='a')
def flat_data_calling():
for i in range (0, 115):
print('-----------', i)
data = DataManager.get_data_features_clean()
dt = data.iloc[1000*i:1000*(i+1)]
del data
flat_data(dt)
data = DataManager.get_data_features_clean()
dt = data.iloc[1000*i:]
del data
flat_data(dt)
def separate_features():
data = DataManager.get_data_features()
print(data)
basic = data.iloc[:, 1:3]
data = data.drop(3, axis=1)
# n_grams = data.iloc[:, 1:525]
for i in range(4, 527):
data = data.drop(i, axis=1)
data = data.drop(1, axis=1)
embeddings = data
print('basic', basic)
# print('n_grams', n_grams)
print('embeddings', embeddings)
# n_grams.to_csv('../features/n_grams.csv')
embeddings.to_csv('../features/embeddings.csv')
with open("../features/basic.txt", "wb") as fp: # Pickling
pickle.dump(basic, fp)
def sentences_clean():
comments = DataManager.get_comments()
sentences = DataManager.get_sentences_clean()
sent = comments.merge(sentences, how='left', left_index=True, right_index=True).drop('words', axis=1)
sent.columns = ['rev_id', 'sentence']
sent.to_csv('../features/sentences_clean.csv', index=False)
def relabel():
labels = DataManager.get_labels()
class_0 = labels[labels['label'] == 0].iloc[0:, :]
class_2 = labels[labels['label'] == 2].append(labels[labels['label'] == 1]).iloc[0:, :]
class_2.iloc[:, 2] = 0
class_3 = labels[labels['label'] == 3].append(labels[labels.label == 4]).append(labels[labels.label == 5]).iloc[0:,:]
class_3.iloc[:, 2] = 1
labels = class_0.append(class_2).append(class_3)
labels = labels.sample(frac=1)
labels = labels.reset_index().drop('level_0', axis=1).drop('index', axis=1)
labels.to_csv('../features/labels_2_classes.csv', index=False)
print('-------------Labels done!++++++++++++++')
def clean_whitespaces(comment):
# print('Whitespaces')
comment = re.sub(r'[^\w\s]{2,}', '', comment)
comment = re.sub(r' [^\w\s] ', ' ', comment)
comment = re.sub(r' {2,}', ' ', comment)
return comment
def group_data(labels):
grouped = labels.groupby('rev_id')
i = 0
partial = pd.DataFrame()
print('Total:', len(grouped))
for name, group in grouped:
print(i)
temp = pd.DataFrame(np.zeros(group.shape[0] + 1))
temp.iloc[0] = group.iloc[0, 0]
j = 0
for index, row in group.iterrows():
temp.iloc[j + 1] = row[1]
j += 1
partial = partial.append(temp.transpose())
i += 1
print(partial)
partial.to_csv('../data/partial.csv')
def get_average_scores():
partial = pd.read_csv('../data/partial.csv').drop(['Unnamed: 0'], axis=1)
avg_partial = pd.DataFrame()
length = partial.iloc[0, :].shape[0]
c = 0
for index, row in partial.iterrows():
print(index)
scores = row.iloc[1: length]
# avg = int(round(np.mean(scores)))
avg = np.mean(scores)
av = 0
if avg < 0.05:
av = 0
c += 1
elif avg < 0.1:
av = 1
elif avg < 0.3:
av = 2
elif avg < 0.5:
av = 3
elif avg < 0.7:
av = 4
else:
av = 5
avg_partial = avg_partial.append([np.append(int(row.iloc[0]), av)])
print(c/115863)
# avg_partial.to_csv('../data/partial_avg.csv')
avg_partial.to_csv('../features/labels.csv')
print(avg_partial)
return avg_partial
def read_data():
comments = DataManager.get_comments_clean()
labels = DataManager.get_original_comments()
labels = labels.drop(['worker_id', 'quoting_attack', 'recipient_attack', 'third_party_attack', 'other_attack'], axis=1)
return labels, comments
def merge_data(group, comments):
data = group.merge(comments, left_on='0', right_on=0, how='inner')
data.columns = ['rev_id', 'label', '0', 'words']
data = data.drop(data.columns[2], axis=1)
with open("../data/data.txt", "wb") as fp: # Pickling
pickle.dump(data, fp)
return data
def merge_comments_words():
basic = DataManager.get_basic()
comments = DataManager.get_original_comments()
comments = comments.drop(['year', 'sample', 'split'], axis=1)
basic_comments = basic.merge(comments, left_on=2, right_on='rev_id')
labels = basic_comments.iloc[:, 0:2]
basic_comments = basic_comments.drop(2, axis=1)
basic_comments = basic_comments.drop(1, axis=1)
cl = basic_comments.columns.tolist()
cl[0] = 'comment'
with open("../features/basic_comments.txt", "wb") as fp: # Pickling
pickle.dump(basic_comments, fp)
labels = labels[[2, 1]]
cols = ['rev_id', 'label']
labels.columns = cols
pd.DataFrame(labels).to_csv('../features/labels.csv', header=cols)
def create_basic_data(path):
data = DataManager.get_data()
comments = DataManager.get_original_comments()
basic = data.merge(comments, on='rev_id')
basic = basic.drop('label', axis=1)
basic = basic.drop('sample', axis=1)
basic = basic.drop('split', axis=1)
revs = []
for index, row in basic.iterrows():
if len(row['words']) == 0:
revs.append(row['rev_id'])
bsc = pd.DataFrame()
for index, row in basic.iterrows():
print(index)
if row['rev_id'] not in revs:
bsc = bsc.append(row)
basic = bsc
with open(path, "wb") as fp: # Pickling
pickle.dump(basic, fp)
def clean_new_lines(path):
data = DataManager.get_comments()
data = data.reset_index(drop=True)
for index, row in data.iterrows():
data.iloc[index, 0] = clean_whitespaces(row['comment'])
if index % 1000 == 0:
print(index)
print(row['comment'])
print(data.iloc[index, 0])
if index % 10000 == 0:
with open(path, "wb") as fp: # Pickling
pickle.dump(data, fp)
data = data.reset_index(drop=True)
with open(path, "wb") as fp: # Pickling
pickle.dump(data, fp)
separate_features()
sentences_clean()
relabel()
create_basic_data('../features/basic_comments.txt')
clean_new_lines('../features/basic_comments_clean.txt')
data = DataManager.get_comments()
labels = DataManager.get_labels()
labels.columns = ['rev_id', 'label']
dt = data.merge(labels, how='left', on='rev_id')
dt = dt.drop('comment', axis=1)
dt = dt.reset_index()
dt.to_csv('../features/labels.csv', index=False)
|
[
"import pandas as pd\nimport numpy as np\nimport pickle\nimport re\nimport math\n\nfrom data_manager import DataManager\n\n\ndef delete_empty_rows(data):\n i = 0\n new_data = pd.DataFrame()\n for index, row in data.iterrows():\n print('To delete or not to delete the question is now:', i)\n if len(list(row.iloc[2])) != 0:\n new_data = new_data.append(row)\n i += 1\n\n print(new_data)\n with open(\"../data/data_features_clean.txt\", \"wb\") as fp: # Pickling\n pickle.dump(new_data, fp)\n return new_data\n\n\ndef clean_sentences():\n sentences = DataManager.get_sentences()\n i = 0\n new_data = pd.DataFrame()\n for index, row in sentences.iterrows():\n if i % 1000 == 0:\n print('To delete or not to delete the question is now:', i)\n if isinstance(row[0], float) and math.isnan(row[0]):\n print('Deleting...', i)\n else:\n new_data = new_data.append(row)\n i += 1\n\n new_data.to_csv('../data/sentences_clean.csv')\n print(new_data)\n\n\ndef get_embeddings():\n data = DataManager.get_data_features_clean()\n\n data = data[['rev_id', 'label', 'words', 'embedding']]\n data.set_index('rev_id')\n embeddings = data['embedding']\n with open(\"../features/embeddings.txt\", \"wb\") as fp: # Pickling\n pickle.dump(embeddings, fp)\n\n\ndef flat_data(data):\n embeddings = data['embedding'].tolist()\n data = data.drop(['embedding'], axis=1)\n data = data.reset_index(drop=True)\n names = list(data)\n embeddings_names = [str('embedding: ' + str(x + 1)) for x in range(0, 100)]\n # names.append(embeddings_names)\n names[len(names):len(names) + 1] = embeddings_names\n print(names)\n new_data = pd.DataFrame()\n\n for index, row in data.iterrows():\n print(index)\n row = pd.DataFrame(np.append(row.values, embeddings[index])).transpose()\n new_data = new_data.append(row)\n\n\n new_data = new_data.reset_index(drop=True)\n new_data.columns = names\n\n print(new_data)\n new_data.to_csv('../features/data_features_clean_flat.csv', header=False, mode='a')\n\ndef flat_data_calling():\n for i in range (0, 115):\n print('-----------', i)\n data = DataManager.get_data_features_clean()\n dt = data.iloc[1000*i:1000*(i+1)]\n del data\n flat_data(dt)\n\n data = DataManager.get_data_features_clean()\n dt = data.iloc[1000*i:]\n del data\n flat_data(dt)\n\n\ndef separate_features():\n data = DataManager.get_data_features()\n print(data)\n basic = data.iloc[:, 1:3]\n data = data.drop(3, axis=1)\n # n_grams = data.iloc[:, 1:525]\n for i in range(4, 527):\n data = data.drop(i, axis=1)\n data = data.drop(1, axis=1)\n\n embeddings = data\n print('basic', basic)\n # print('n_grams', n_grams)\n print('embeddings', embeddings)\n # n_grams.to_csv('../features/n_grams.csv')\n embeddings.to_csv('../features/embeddings.csv')\n with open(\"../features/basic.txt\", \"wb\") as fp: # Pickling\n pickle.dump(basic, fp)\n\n\ndef sentences_clean():\n comments = DataManager.get_comments()\n sentences = DataManager.get_sentences_clean()\n sent = comments.merge(sentences, how='left', left_index=True, right_index=True).drop('words', axis=1)\n sent.columns = ['rev_id', 'sentence']\n sent.to_csv('../features/sentences_clean.csv', index=False)\n\n\ndef relabel():\n labels = DataManager.get_labels()\n class_0 = labels[labels['label'] == 0].iloc[0:, :]\n class_2 = labels[labels['label'] == 2].append(labels[labels['label'] == 1]).iloc[0:, :]\n class_2.iloc[:, 2] = 0\n class_3 = labels[labels['label'] == 3].append(labels[labels.label == 4]).append(labels[labels.label == 5]).iloc[0:,:]\n class_3.iloc[:, 2] = 1\n labels = class_0.append(class_2).append(class_3)\n\n 
labels = labels.sample(frac=1)\n labels = labels.reset_index().drop('level_0', axis=1).drop('index', axis=1)\n labels.to_csv('../features/labels_2_classes.csv', index=False)\n print('-------------Labels done!++++++++++++++')\n\ndef clean_whitespaces(comment):\n # print('Whitespaces')\n comment = re.sub(r'[^\\w\\s]{2,}', '', comment)\n comment = re.sub(r' [^\\w\\s] ', ' ', comment)\n comment = re.sub(r' {2,}', ' ', comment)\n\n return comment\n\n\ndef group_data(labels):\n grouped = labels.groupby('rev_id')\n i = 0\n partial = pd.DataFrame()\n print('Total:', len(grouped))\n for name, group in grouped:\n print(i)\n temp = pd.DataFrame(np.zeros(group.shape[0] + 1))\n temp.iloc[0] = group.iloc[0, 0]\n j = 0\n for index, row in group.iterrows():\n temp.iloc[j + 1] = row[1]\n j += 1\n partial = partial.append(temp.transpose())\n i += 1\n\n print(partial)\n partial.to_csv('../data/partial.csv')\n\n\ndef get_average_scores():\n partial = pd.read_csv('../data/partial.csv').drop(['Unnamed: 0'], axis=1)\n avg_partial = pd.DataFrame()\n\n length = partial.iloc[0, :].shape[0]\n c = 0\n for index, row in partial.iterrows():\n print(index)\n scores = row.iloc[1: length]\n # avg = int(round(np.mean(scores)))\n avg = np.mean(scores)\n av = 0\n\n if avg < 0.05:\n av = 0\n c += 1\n elif avg < 0.1:\n av = 1\n elif avg < 0.3:\n av = 2\n elif avg < 0.5:\n av = 3\n elif avg < 0.7:\n av = 4\n else:\n av = 5\n\n avg_partial = avg_partial.append([np.append(int(row.iloc[0]), av)])\n print(c/115863)\n\n # avg_partial.to_csv('../data/partial_avg.csv')\n\n avg_partial.to_csv('../features/labels.csv')\n\n print(avg_partial)\n\n return avg_partial\n\n\ndef read_data():\n comments = DataManager.get_comments_clean()\n labels = DataManager.get_original_comments()\n\n labels = labels.drop(['worker_id', 'quoting_attack', 'recipient_attack', 'third_party_attack', 'other_attack'], axis=1)\n return labels, comments\n\n\ndef merge_data(group, comments):\n data = group.merge(comments, left_on='0', right_on=0, how='inner')\n data.columns = ['rev_id', 'label', '0', 'words']\n data = data.drop(data.columns[2], axis=1)\n\n with open(\"../data/data.txt\", \"wb\") as fp: # Pickling\n pickle.dump(data, fp)\n return data\n\n\ndef merge_comments_words():\n basic = DataManager.get_basic()\n comments = DataManager.get_original_comments()\n comments = comments.drop(['year', 'sample', 'split'], axis=1)\n\n basic_comments = basic.merge(comments, left_on=2, right_on='rev_id')\n labels = basic_comments.iloc[:, 0:2]\n basic_comments = basic_comments.drop(2, axis=1)\n basic_comments = basic_comments.drop(1, axis=1)\n cl = basic_comments.columns.tolist()\n cl[0] = 'comment'\n with open(\"../features/basic_comments.txt\", \"wb\") as fp: # Pickling\n pickle.dump(basic_comments, fp)\n\n labels = labels[[2, 1]]\n cols = ['rev_id', 'label']\n labels.columns = cols\n pd.DataFrame(labels).to_csv('../features/labels.csv', header=cols)\n\n\ndef create_basic_data(path):\n data = DataManager.get_data()\n comments = DataManager.get_original_comments()\n\n basic = data.merge(comments, on='rev_id')\n\n basic = basic.drop('label', axis=1)\n basic = basic.drop('sample', axis=1)\n basic = basic.drop('split', axis=1)\n\n revs = []\n\n for index, row in basic.iterrows():\n if len(row['words']) == 0:\n revs.append(row['rev_id'])\n\n bsc = pd.DataFrame()\n\n for index, row in basic.iterrows():\n print(index)\n if row['rev_id'] not in revs:\n bsc = bsc.append(row)\n\n basic = bsc\n\n with open(path, \"wb\") as fp: # Pickling\n pickle.dump(basic, fp)\n\n\ndef 
clean_new_lines(path):\n data = DataManager.get_comments()\n data = data.reset_index(drop=True)\n for index, row in data.iterrows():\n data.iloc[index, 0] = clean_whitespaces(row['comment'])\n if index % 1000 == 0:\n print(index)\n print(row['comment'])\n print(data.iloc[index, 0])\n\n if index % 10000 == 0:\n with open(path, \"wb\") as fp: # Pickling\n pickle.dump(data, fp)\n\n data = data.reset_index(drop=True)\n with open(path, \"wb\") as fp: # Pickling\n pickle.dump(data, fp)\n\n\nseparate_features()\nsentences_clean()\nrelabel()\ncreate_basic_data('../features/basic_comments.txt')\nclean_new_lines('../features/basic_comments_clean.txt')\n\ndata = DataManager.get_comments()\n\nlabels = DataManager.get_labels()\n\nlabels.columns = ['rev_id', 'label']\ndt = data.merge(labels, how='left', on='rev_id')\ndt = dt.drop('comment', axis=1)\n\ndt = dt.reset_index()\ndt.to_csv('../features/labels.csv', index=False)\n",
"import pandas as pd\nimport numpy as np\nimport pickle\nimport re\nimport math\nfrom data_manager import DataManager\n\n\ndef delete_empty_rows(data):\n i = 0\n new_data = pd.DataFrame()\n for index, row in data.iterrows():\n print('To delete or not to delete the question is now:', i)\n if len(list(row.iloc[2])) != 0:\n new_data = new_data.append(row)\n i += 1\n print(new_data)\n with open('../data/data_features_clean.txt', 'wb') as fp:\n pickle.dump(new_data, fp)\n return new_data\n\n\ndef clean_sentences():\n sentences = DataManager.get_sentences()\n i = 0\n new_data = pd.DataFrame()\n for index, row in sentences.iterrows():\n if i % 1000 == 0:\n print('To delete or not to delete the question is now:', i)\n if isinstance(row[0], float) and math.isnan(row[0]):\n print('Deleting...', i)\n else:\n new_data = new_data.append(row)\n i += 1\n new_data.to_csv('../data/sentences_clean.csv')\n print(new_data)\n\n\ndef get_embeddings():\n data = DataManager.get_data_features_clean()\n data = data[['rev_id', 'label', 'words', 'embedding']]\n data.set_index('rev_id')\n embeddings = data['embedding']\n with open('../features/embeddings.txt', 'wb') as fp:\n pickle.dump(embeddings, fp)\n\n\ndef flat_data(data):\n embeddings = data['embedding'].tolist()\n data = data.drop(['embedding'], axis=1)\n data = data.reset_index(drop=True)\n names = list(data)\n embeddings_names = [str('embedding: ' + str(x + 1)) for x in range(0, 100)]\n names[len(names):len(names) + 1] = embeddings_names\n print(names)\n new_data = pd.DataFrame()\n for index, row in data.iterrows():\n print(index)\n row = pd.DataFrame(np.append(row.values, embeddings[index])).transpose(\n )\n new_data = new_data.append(row)\n new_data = new_data.reset_index(drop=True)\n new_data.columns = names\n print(new_data)\n new_data.to_csv('../features/data_features_clean_flat.csv', header=\n False, mode='a')\n\n\ndef flat_data_calling():\n for i in range(0, 115):\n print('-----------', i)\n data = DataManager.get_data_features_clean()\n dt = data.iloc[1000 * i:1000 * (i + 1)]\n del data\n flat_data(dt)\n data = DataManager.get_data_features_clean()\n dt = data.iloc[1000 * i:]\n del data\n flat_data(dt)\n\n\ndef separate_features():\n data = DataManager.get_data_features()\n print(data)\n basic = data.iloc[:, 1:3]\n data = data.drop(3, axis=1)\n for i in range(4, 527):\n data = data.drop(i, axis=1)\n data = data.drop(1, axis=1)\n embeddings = data\n print('basic', basic)\n print('embeddings', embeddings)\n embeddings.to_csv('../features/embeddings.csv')\n with open('../features/basic.txt', 'wb') as fp:\n pickle.dump(basic, fp)\n\n\ndef sentences_clean():\n comments = DataManager.get_comments()\n sentences = DataManager.get_sentences_clean()\n sent = comments.merge(sentences, how='left', left_index=True,\n right_index=True).drop('words', axis=1)\n sent.columns = ['rev_id', 'sentence']\n sent.to_csv('../features/sentences_clean.csv', index=False)\n\n\ndef relabel():\n labels = DataManager.get_labels()\n class_0 = labels[labels['label'] == 0].iloc[0:, :]\n class_2 = labels[labels['label'] == 2].append(labels[labels['label'] == 1]\n ).iloc[0:, :]\n class_2.iloc[:, 2] = 0\n class_3 = labels[labels['label'] == 3].append(labels[labels.label == 4]\n ).append(labels[labels.label == 5]).iloc[0:, :]\n class_3.iloc[:, 2] = 1\n labels = class_0.append(class_2).append(class_3)\n labels = labels.sample(frac=1)\n labels = labels.reset_index().drop('level_0', axis=1).drop('index', axis=1)\n labels.to_csv('../features/labels_2_classes.csv', index=False)\n 
print('-------------Labels done!++++++++++++++')\n\n\ndef clean_whitespaces(comment):\n comment = re.sub('[^\\\\w\\\\s]{2,}', '', comment)\n comment = re.sub(' [^\\\\w\\\\s] ', ' ', comment)\n comment = re.sub(' {2,}', ' ', comment)\n return comment\n\n\ndef group_data(labels):\n grouped = labels.groupby('rev_id')\n i = 0\n partial = pd.DataFrame()\n print('Total:', len(grouped))\n for name, group in grouped:\n print(i)\n temp = pd.DataFrame(np.zeros(group.shape[0] + 1))\n temp.iloc[0] = group.iloc[0, 0]\n j = 0\n for index, row in group.iterrows():\n temp.iloc[j + 1] = row[1]\n j += 1\n partial = partial.append(temp.transpose())\n i += 1\n print(partial)\n partial.to_csv('../data/partial.csv')\n\n\ndef get_average_scores():\n partial = pd.read_csv('../data/partial.csv').drop(['Unnamed: 0'], axis=1)\n avg_partial = pd.DataFrame()\n length = partial.iloc[0, :].shape[0]\n c = 0\n for index, row in partial.iterrows():\n print(index)\n scores = row.iloc[1:length]\n avg = np.mean(scores)\n av = 0\n if avg < 0.05:\n av = 0\n c += 1\n elif avg < 0.1:\n av = 1\n elif avg < 0.3:\n av = 2\n elif avg < 0.5:\n av = 3\n elif avg < 0.7:\n av = 4\n else:\n av = 5\n avg_partial = avg_partial.append([np.append(int(row.iloc[0]), av)])\n print(c / 115863)\n avg_partial.to_csv('../features/labels.csv')\n print(avg_partial)\n return avg_partial\n\n\ndef read_data():\n comments = DataManager.get_comments_clean()\n labels = DataManager.get_original_comments()\n labels = labels.drop(['worker_id', 'quoting_attack', 'recipient_attack',\n 'third_party_attack', 'other_attack'], axis=1)\n return labels, comments\n\n\ndef merge_data(group, comments):\n data = group.merge(comments, left_on='0', right_on=0, how='inner')\n data.columns = ['rev_id', 'label', '0', 'words']\n data = data.drop(data.columns[2], axis=1)\n with open('../data/data.txt', 'wb') as fp:\n pickle.dump(data, fp)\n return data\n\n\ndef merge_comments_words():\n basic = DataManager.get_basic()\n comments = DataManager.get_original_comments()\n comments = comments.drop(['year', 'sample', 'split'], axis=1)\n basic_comments = basic.merge(comments, left_on=2, right_on='rev_id')\n labels = basic_comments.iloc[:, 0:2]\n basic_comments = basic_comments.drop(2, axis=1)\n basic_comments = basic_comments.drop(1, axis=1)\n cl = basic_comments.columns.tolist()\n cl[0] = 'comment'\n with open('../features/basic_comments.txt', 'wb') as fp:\n pickle.dump(basic_comments, fp)\n labels = labels[[2, 1]]\n cols = ['rev_id', 'label']\n labels.columns = cols\n pd.DataFrame(labels).to_csv('../features/labels.csv', header=cols)\n\n\ndef create_basic_data(path):\n data = DataManager.get_data()\n comments = DataManager.get_original_comments()\n basic = data.merge(comments, on='rev_id')\n basic = basic.drop('label', axis=1)\n basic = basic.drop('sample', axis=1)\n basic = basic.drop('split', axis=1)\n revs = []\n for index, row in basic.iterrows():\n if len(row['words']) == 0:\n revs.append(row['rev_id'])\n bsc = pd.DataFrame()\n for index, row in basic.iterrows():\n print(index)\n if row['rev_id'] not in revs:\n bsc = bsc.append(row)\n basic = bsc\n with open(path, 'wb') as fp:\n pickle.dump(basic, fp)\n\n\ndef clean_new_lines(path):\n data = DataManager.get_comments()\n data = data.reset_index(drop=True)\n for index, row in data.iterrows():\n data.iloc[index, 0] = clean_whitespaces(row['comment'])\n if index % 1000 == 0:\n print(index)\n print(row['comment'])\n print(data.iloc[index, 0])\n if index % 10000 == 0:\n with open(path, 'wb') as fp:\n pickle.dump(data, fp)\n data = 
data.reset_index(drop=True)\n with open(path, 'wb') as fp:\n pickle.dump(data, fp)\n\n\nseparate_features()\nsentences_clean()\nrelabel()\ncreate_basic_data('../features/basic_comments.txt')\nclean_new_lines('../features/basic_comments_clean.txt')\ndata = DataManager.get_comments()\nlabels = DataManager.get_labels()\nlabels.columns = ['rev_id', 'label']\ndt = data.merge(labels, how='left', on='rev_id')\ndt = dt.drop('comment', axis=1)\ndt = dt.reset_index()\ndt.to_csv('../features/labels.csv', index=False)\n",
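`relabel()` in the step above folds the six score buckets into a binary target: buckets 0, 1 and 2 become class 0, buckets 3, 4 and 5 become class 1, and the rows are then shuffled and re-indexed. A vectorised sketch of the same mapping (the `rev_id`/`label` column names follow the code above; the sample values and the fixed random seed are assumptions added for illustration):

```python
import pandas as pd

def relabel_binary(labels: pd.DataFrame) -> pd.DataFrame:
    out = labels.copy()
    # buckets 0, 1, 2 -> 0; buckets 3, 4, 5 -> 1
    out["label"] = (out["label"] >= 3).astype(int)
    # shuffle and rebuild a clean index, mirroring sample(frac=1) + reset_index above
    return out.sample(frac=1, random_state=0).reset_index(drop=True)

if __name__ == "__main__":
    demo = pd.DataFrame({"rev_id": range(6), "label": [0, 1, 2, 3, 4, 5]})
    print(relabel_binary(demo))
```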
"<import token>\n\n\ndef delete_empty_rows(data):\n i = 0\n new_data = pd.DataFrame()\n for index, row in data.iterrows():\n print('To delete or not to delete the question is now:', i)\n if len(list(row.iloc[2])) != 0:\n new_data = new_data.append(row)\n i += 1\n print(new_data)\n with open('../data/data_features_clean.txt', 'wb') as fp:\n pickle.dump(new_data, fp)\n return new_data\n\n\ndef clean_sentences():\n sentences = DataManager.get_sentences()\n i = 0\n new_data = pd.DataFrame()\n for index, row in sentences.iterrows():\n if i % 1000 == 0:\n print('To delete or not to delete the question is now:', i)\n if isinstance(row[0], float) and math.isnan(row[0]):\n print('Deleting...', i)\n else:\n new_data = new_data.append(row)\n i += 1\n new_data.to_csv('../data/sentences_clean.csv')\n print(new_data)\n\n\ndef get_embeddings():\n data = DataManager.get_data_features_clean()\n data = data[['rev_id', 'label', 'words', 'embedding']]\n data.set_index('rev_id')\n embeddings = data['embedding']\n with open('../features/embeddings.txt', 'wb') as fp:\n pickle.dump(embeddings, fp)\n\n\ndef flat_data(data):\n embeddings = data['embedding'].tolist()\n data = data.drop(['embedding'], axis=1)\n data = data.reset_index(drop=True)\n names = list(data)\n embeddings_names = [str('embedding: ' + str(x + 1)) for x in range(0, 100)]\n names[len(names):len(names) + 1] = embeddings_names\n print(names)\n new_data = pd.DataFrame()\n for index, row in data.iterrows():\n print(index)\n row = pd.DataFrame(np.append(row.values, embeddings[index])).transpose(\n )\n new_data = new_data.append(row)\n new_data = new_data.reset_index(drop=True)\n new_data.columns = names\n print(new_data)\n new_data.to_csv('../features/data_features_clean_flat.csv', header=\n False, mode='a')\n\n\ndef flat_data_calling():\n for i in range(0, 115):\n print('-----------', i)\n data = DataManager.get_data_features_clean()\n dt = data.iloc[1000 * i:1000 * (i + 1)]\n del data\n flat_data(dt)\n data = DataManager.get_data_features_clean()\n dt = data.iloc[1000 * i:]\n del data\n flat_data(dt)\n\n\ndef separate_features():\n data = DataManager.get_data_features()\n print(data)\n basic = data.iloc[:, 1:3]\n data = data.drop(3, axis=1)\n for i in range(4, 527):\n data = data.drop(i, axis=1)\n data = data.drop(1, axis=1)\n embeddings = data\n print('basic', basic)\n print('embeddings', embeddings)\n embeddings.to_csv('../features/embeddings.csv')\n with open('../features/basic.txt', 'wb') as fp:\n pickle.dump(basic, fp)\n\n\ndef sentences_clean():\n comments = DataManager.get_comments()\n sentences = DataManager.get_sentences_clean()\n sent = comments.merge(sentences, how='left', left_index=True,\n right_index=True).drop('words', axis=1)\n sent.columns = ['rev_id', 'sentence']\n sent.to_csv('../features/sentences_clean.csv', index=False)\n\n\ndef relabel():\n labels = DataManager.get_labels()\n class_0 = labels[labels['label'] == 0].iloc[0:, :]\n class_2 = labels[labels['label'] == 2].append(labels[labels['label'] == 1]\n ).iloc[0:, :]\n class_2.iloc[:, 2] = 0\n class_3 = labels[labels['label'] == 3].append(labels[labels.label == 4]\n ).append(labels[labels.label == 5]).iloc[0:, :]\n class_3.iloc[:, 2] = 1\n labels = class_0.append(class_2).append(class_3)\n labels = labels.sample(frac=1)\n labels = labels.reset_index().drop('level_0', axis=1).drop('index', axis=1)\n labels.to_csv('../features/labels_2_classes.csv', index=False)\n print('-------------Labels done!++++++++++++++')\n\n\ndef clean_whitespaces(comment):\n comment = 
re.sub('[^\\\\w\\\\s]{2,}', '', comment)\n comment = re.sub(' [^\\\\w\\\\s] ', ' ', comment)\n comment = re.sub(' {2,}', ' ', comment)\n return comment\n\n\ndef group_data(labels):\n grouped = labels.groupby('rev_id')\n i = 0\n partial = pd.DataFrame()\n print('Total:', len(grouped))\n for name, group in grouped:\n print(i)\n temp = pd.DataFrame(np.zeros(group.shape[0] + 1))\n temp.iloc[0] = group.iloc[0, 0]\n j = 0\n for index, row in group.iterrows():\n temp.iloc[j + 1] = row[1]\n j += 1\n partial = partial.append(temp.transpose())\n i += 1\n print(partial)\n partial.to_csv('../data/partial.csv')\n\n\ndef get_average_scores():\n partial = pd.read_csv('../data/partial.csv').drop(['Unnamed: 0'], axis=1)\n avg_partial = pd.DataFrame()\n length = partial.iloc[0, :].shape[0]\n c = 0\n for index, row in partial.iterrows():\n print(index)\n scores = row.iloc[1:length]\n avg = np.mean(scores)\n av = 0\n if avg < 0.05:\n av = 0\n c += 1\n elif avg < 0.1:\n av = 1\n elif avg < 0.3:\n av = 2\n elif avg < 0.5:\n av = 3\n elif avg < 0.7:\n av = 4\n else:\n av = 5\n avg_partial = avg_partial.append([np.append(int(row.iloc[0]), av)])\n print(c / 115863)\n avg_partial.to_csv('../features/labels.csv')\n print(avg_partial)\n return avg_partial\n\n\ndef read_data():\n comments = DataManager.get_comments_clean()\n labels = DataManager.get_original_comments()\n labels = labels.drop(['worker_id', 'quoting_attack', 'recipient_attack',\n 'third_party_attack', 'other_attack'], axis=1)\n return labels, comments\n\n\ndef merge_data(group, comments):\n data = group.merge(comments, left_on='0', right_on=0, how='inner')\n data.columns = ['rev_id', 'label', '0', 'words']\n data = data.drop(data.columns[2], axis=1)\n with open('../data/data.txt', 'wb') as fp:\n pickle.dump(data, fp)\n return data\n\n\ndef merge_comments_words():\n basic = DataManager.get_basic()\n comments = DataManager.get_original_comments()\n comments = comments.drop(['year', 'sample', 'split'], axis=1)\n basic_comments = basic.merge(comments, left_on=2, right_on='rev_id')\n labels = basic_comments.iloc[:, 0:2]\n basic_comments = basic_comments.drop(2, axis=1)\n basic_comments = basic_comments.drop(1, axis=1)\n cl = basic_comments.columns.tolist()\n cl[0] = 'comment'\n with open('../features/basic_comments.txt', 'wb') as fp:\n pickle.dump(basic_comments, fp)\n labels = labels[[2, 1]]\n cols = ['rev_id', 'label']\n labels.columns = cols\n pd.DataFrame(labels).to_csv('../features/labels.csv', header=cols)\n\n\ndef create_basic_data(path):\n data = DataManager.get_data()\n comments = DataManager.get_original_comments()\n basic = data.merge(comments, on='rev_id')\n basic = basic.drop('label', axis=1)\n basic = basic.drop('sample', axis=1)\n basic = basic.drop('split', axis=1)\n revs = []\n for index, row in basic.iterrows():\n if len(row['words']) == 0:\n revs.append(row['rev_id'])\n bsc = pd.DataFrame()\n for index, row in basic.iterrows():\n print(index)\n if row['rev_id'] not in revs:\n bsc = bsc.append(row)\n basic = bsc\n with open(path, 'wb') as fp:\n pickle.dump(basic, fp)\n\n\ndef clean_new_lines(path):\n data = DataManager.get_comments()\n data = data.reset_index(drop=True)\n for index, row in data.iterrows():\n data.iloc[index, 0] = clean_whitespaces(row['comment'])\n if index % 1000 == 0:\n print(index)\n print(row['comment'])\n print(data.iloc[index, 0])\n if index % 10000 == 0:\n with open(path, 'wb') as fp:\n pickle.dump(data, fp)\n data = data.reset_index(drop=True)\n with open(path, 'wb') as fp:\n pickle.dump(data, 
fp)\n\n\nseparate_features()\nsentences_clean()\nrelabel()\ncreate_basic_data('../features/basic_comments.txt')\nclean_new_lines('../features/basic_comments_clean.txt')\ndata = DataManager.get_comments()\nlabels = DataManager.get_labels()\nlabels.columns = ['rev_id', 'label']\ndt = data.merge(labels, how='left', on='rev_id')\ndt = dt.drop('comment', axis=1)\ndt = dt.reset_index()\ndt.to_csv('../features/labels.csv', index=False)\n",
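`get_average_scores()` in the step above averages each revision's annotator scores and maps the mean to a 0–5 bucket with cut points 0.05, 0.1, 0.3, 0.5 and 0.7. The same thresholds can be expressed with `np.digitize`, which reproduces the if/elif chain exactly; the sample scores below are invented:

```python
import numpy as np

CUTS = [0.05, 0.1, 0.3, 0.5, 0.7]

def bucket(mean_score: float) -> int:
    # np.digitize counts how many cut points are <= the value, which matches
    # the original chain: <0.05 -> 0, <0.1 -> 1, ..., >=0.7 -> 5
    return int(np.digitize(mean_score, CUTS))

if __name__ == "__main__":
    for score in [0.0, 0.04, 0.05, 0.2, 0.69, 0.9]:
        print(score, "->", bucket(score))
```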
"<import token>\n\n\ndef delete_empty_rows(data):\n i = 0\n new_data = pd.DataFrame()\n for index, row in data.iterrows():\n print('To delete or not to delete the question is now:', i)\n if len(list(row.iloc[2])) != 0:\n new_data = new_data.append(row)\n i += 1\n print(new_data)\n with open('../data/data_features_clean.txt', 'wb') as fp:\n pickle.dump(new_data, fp)\n return new_data\n\n\ndef clean_sentences():\n sentences = DataManager.get_sentences()\n i = 0\n new_data = pd.DataFrame()\n for index, row in sentences.iterrows():\n if i % 1000 == 0:\n print('To delete or not to delete the question is now:', i)\n if isinstance(row[0], float) and math.isnan(row[0]):\n print('Deleting...', i)\n else:\n new_data = new_data.append(row)\n i += 1\n new_data.to_csv('../data/sentences_clean.csv')\n print(new_data)\n\n\ndef get_embeddings():\n data = DataManager.get_data_features_clean()\n data = data[['rev_id', 'label', 'words', 'embedding']]\n data.set_index('rev_id')\n embeddings = data['embedding']\n with open('../features/embeddings.txt', 'wb') as fp:\n pickle.dump(embeddings, fp)\n\n\ndef flat_data(data):\n embeddings = data['embedding'].tolist()\n data = data.drop(['embedding'], axis=1)\n data = data.reset_index(drop=True)\n names = list(data)\n embeddings_names = [str('embedding: ' + str(x + 1)) for x in range(0, 100)]\n names[len(names):len(names) + 1] = embeddings_names\n print(names)\n new_data = pd.DataFrame()\n for index, row in data.iterrows():\n print(index)\n row = pd.DataFrame(np.append(row.values, embeddings[index])).transpose(\n )\n new_data = new_data.append(row)\n new_data = new_data.reset_index(drop=True)\n new_data.columns = names\n print(new_data)\n new_data.to_csv('../features/data_features_clean_flat.csv', header=\n False, mode='a')\n\n\ndef flat_data_calling():\n for i in range(0, 115):\n print('-----------', i)\n data = DataManager.get_data_features_clean()\n dt = data.iloc[1000 * i:1000 * (i + 1)]\n del data\n flat_data(dt)\n data = DataManager.get_data_features_clean()\n dt = data.iloc[1000 * i:]\n del data\n flat_data(dt)\n\n\ndef separate_features():\n data = DataManager.get_data_features()\n print(data)\n basic = data.iloc[:, 1:3]\n data = data.drop(3, axis=1)\n for i in range(4, 527):\n data = data.drop(i, axis=1)\n data = data.drop(1, axis=1)\n embeddings = data\n print('basic', basic)\n print('embeddings', embeddings)\n embeddings.to_csv('../features/embeddings.csv')\n with open('../features/basic.txt', 'wb') as fp:\n pickle.dump(basic, fp)\n\n\ndef sentences_clean():\n comments = DataManager.get_comments()\n sentences = DataManager.get_sentences_clean()\n sent = comments.merge(sentences, how='left', left_index=True,\n right_index=True).drop('words', axis=1)\n sent.columns = ['rev_id', 'sentence']\n sent.to_csv('../features/sentences_clean.csv', index=False)\n\n\ndef relabel():\n labels = DataManager.get_labels()\n class_0 = labels[labels['label'] == 0].iloc[0:, :]\n class_2 = labels[labels['label'] == 2].append(labels[labels['label'] == 1]\n ).iloc[0:, :]\n class_2.iloc[:, 2] = 0\n class_3 = labels[labels['label'] == 3].append(labels[labels.label == 4]\n ).append(labels[labels.label == 5]).iloc[0:, :]\n class_3.iloc[:, 2] = 1\n labels = class_0.append(class_2).append(class_3)\n labels = labels.sample(frac=1)\n labels = labels.reset_index().drop('level_0', axis=1).drop('index', axis=1)\n labels.to_csv('../features/labels_2_classes.csv', index=False)\n print('-------------Labels done!++++++++++++++')\n\n\ndef clean_whitespaces(comment):\n comment = 
re.sub('[^\\\\w\\\\s]{2,}', '', comment)\n comment = re.sub(' [^\\\\w\\\\s] ', ' ', comment)\n comment = re.sub(' {2,}', ' ', comment)\n return comment\n\n\ndef group_data(labels):\n grouped = labels.groupby('rev_id')\n i = 0\n partial = pd.DataFrame()\n print('Total:', len(grouped))\n for name, group in grouped:\n print(i)\n temp = pd.DataFrame(np.zeros(group.shape[0] + 1))\n temp.iloc[0] = group.iloc[0, 0]\n j = 0\n for index, row in group.iterrows():\n temp.iloc[j + 1] = row[1]\n j += 1\n partial = partial.append(temp.transpose())\n i += 1\n print(partial)\n partial.to_csv('../data/partial.csv')\n\n\ndef get_average_scores():\n partial = pd.read_csv('../data/partial.csv').drop(['Unnamed: 0'], axis=1)\n avg_partial = pd.DataFrame()\n length = partial.iloc[0, :].shape[0]\n c = 0\n for index, row in partial.iterrows():\n print(index)\n scores = row.iloc[1:length]\n avg = np.mean(scores)\n av = 0\n if avg < 0.05:\n av = 0\n c += 1\n elif avg < 0.1:\n av = 1\n elif avg < 0.3:\n av = 2\n elif avg < 0.5:\n av = 3\n elif avg < 0.7:\n av = 4\n else:\n av = 5\n avg_partial = avg_partial.append([np.append(int(row.iloc[0]), av)])\n print(c / 115863)\n avg_partial.to_csv('../features/labels.csv')\n print(avg_partial)\n return avg_partial\n\n\ndef read_data():\n comments = DataManager.get_comments_clean()\n labels = DataManager.get_original_comments()\n labels = labels.drop(['worker_id', 'quoting_attack', 'recipient_attack',\n 'third_party_attack', 'other_attack'], axis=1)\n return labels, comments\n\n\ndef merge_data(group, comments):\n data = group.merge(comments, left_on='0', right_on=0, how='inner')\n data.columns = ['rev_id', 'label', '0', 'words']\n data = data.drop(data.columns[2], axis=1)\n with open('../data/data.txt', 'wb') as fp:\n pickle.dump(data, fp)\n return data\n\n\ndef merge_comments_words():\n basic = DataManager.get_basic()\n comments = DataManager.get_original_comments()\n comments = comments.drop(['year', 'sample', 'split'], axis=1)\n basic_comments = basic.merge(comments, left_on=2, right_on='rev_id')\n labels = basic_comments.iloc[:, 0:2]\n basic_comments = basic_comments.drop(2, axis=1)\n basic_comments = basic_comments.drop(1, axis=1)\n cl = basic_comments.columns.tolist()\n cl[0] = 'comment'\n with open('../features/basic_comments.txt', 'wb') as fp:\n pickle.dump(basic_comments, fp)\n labels = labels[[2, 1]]\n cols = ['rev_id', 'label']\n labels.columns = cols\n pd.DataFrame(labels).to_csv('../features/labels.csv', header=cols)\n\n\ndef create_basic_data(path):\n data = DataManager.get_data()\n comments = DataManager.get_original_comments()\n basic = data.merge(comments, on='rev_id')\n basic = basic.drop('label', axis=1)\n basic = basic.drop('sample', axis=1)\n basic = basic.drop('split', axis=1)\n revs = []\n for index, row in basic.iterrows():\n if len(row['words']) == 0:\n revs.append(row['rev_id'])\n bsc = pd.DataFrame()\n for index, row in basic.iterrows():\n print(index)\n if row['rev_id'] not in revs:\n bsc = bsc.append(row)\n basic = bsc\n with open(path, 'wb') as fp:\n pickle.dump(basic, fp)\n\n\ndef clean_new_lines(path):\n data = DataManager.get_comments()\n data = data.reset_index(drop=True)\n for index, row in data.iterrows():\n data.iloc[index, 0] = clean_whitespaces(row['comment'])\n if index % 1000 == 0:\n print(index)\n print(row['comment'])\n print(data.iloc[index, 0])\n if index % 10000 == 0:\n with open(path, 'wb') as fp:\n pickle.dump(data, fp)\n data = data.reset_index(drop=True)\n with open(path, 'wb') as fp:\n pickle.dump(data, 
fp)\n\n\nseparate_features()\nsentences_clean()\nrelabel()\ncreate_basic_data('../features/basic_comments.txt')\nclean_new_lines('../features/basic_comments_clean.txt')\n<assignment token>\ndt.to_csv('../features/labels.csv', index=False)\n",
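`flat_data()` in the step above expands a column of 100-dimensional embedding lists into 100 numbered `embedding: i` columns by appending one row at a time. The same expansion can be done in a single pass with `pd.concat`; the sketch below uses 4-dimensional vectors and invented ids for brevity, while keeping the original column-naming scheme:

```python
import numpy as np
import pandas as pd

def flatten_embeddings(data: pd.DataFrame, dim: int) -> pd.DataFrame:
    # expand the list-valued 'embedding' column into `dim` numbered columns
    emb = pd.DataFrame(
        data["embedding"].tolist(),
        columns=[f"embedding: {i + 1}" for i in range(dim)],
        index=data.index,
    )
    return pd.concat([data.drop(columns="embedding"), emb], axis=1)

if __name__ == "__main__":
    demo = pd.DataFrame({
        "rev_id": [10, 11],
        "embedding": [list(np.arange(4.0)), list(np.ones(4))],
    })
    print(flatten_embeddings(demo, dim=4))
```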
"<import token>\n\n\ndef delete_empty_rows(data):\n i = 0\n new_data = pd.DataFrame()\n for index, row in data.iterrows():\n print('To delete or not to delete the question is now:', i)\n if len(list(row.iloc[2])) != 0:\n new_data = new_data.append(row)\n i += 1\n print(new_data)\n with open('../data/data_features_clean.txt', 'wb') as fp:\n pickle.dump(new_data, fp)\n return new_data\n\n\ndef clean_sentences():\n sentences = DataManager.get_sentences()\n i = 0\n new_data = pd.DataFrame()\n for index, row in sentences.iterrows():\n if i % 1000 == 0:\n print('To delete or not to delete the question is now:', i)\n if isinstance(row[0], float) and math.isnan(row[0]):\n print('Deleting...', i)\n else:\n new_data = new_data.append(row)\n i += 1\n new_data.to_csv('../data/sentences_clean.csv')\n print(new_data)\n\n\ndef get_embeddings():\n data = DataManager.get_data_features_clean()\n data = data[['rev_id', 'label', 'words', 'embedding']]\n data.set_index('rev_id')\n embeddings = data['embedding']\n with open('../features/embeddings.txt', 'wb') as fp:\n pickle.dump(embeddings, fp)\n\n\ndef flat_data(data):\n embeddings = data['embedding'].tolist()\n data = data.drop(['embedding'], axis=1)\n data = data.reset_index(drop=True)\n names = list(data)\n embeddings_names = [str('embedding: ' + str(x + 1)) for x in range(0, 100)]\n names[len(names):len(names) + 1] = embeddings_names\n print(names)\n new_data = pd.DataFrame()\n for index, row in data.iterrows():\n print(index)\n row = pd.DataFrame(np.append(row.values, embeddings[index])).transpose(\n )\n new_data = new_data.append(row)\n new_data = new_data.reset_index(drop=True)\n new_data.columns = names\n print(new_data)\n new_data.to_csv('../features/data_features_clean_flat.csv', header=\n False, mode='a')\n\n\ndef flat_data_calling():\n for i in range(0, 115):\n print('-----------', i)\n data = DataManager.get_data_features_clean()\n dt = data.iloc[1000 * i:1000 * (i + 1)]\n del data\n flat_data(dt)\n data = DataManager.get_data_features_clean()\n dt = data.iloc[1000 * i:]\n del data\n flat_data(dt)\n\n\ndef separate_features():\n data = DataManager.get_data_features()\n print(data)\n basic = data.iloc[:, 1:3]\n data = data.drop(3, axis=1)\n for i in range(4, 527):\n data = data.drop(i, axis=1)\n data = data.drop(1, axis=1)\n embeddings = data\n print('basic', basic)\n print('embeddings', embeddings)\n embeddings.to_csv('../features/embeddings.csv')\n with open('../features/basic.txt', 'wb') as fp:\n pickle.dump(basic, fp)\n\n\ndef sentences_clean():\n comments = DataManager.get_comments()\n sentences = DataManager.get_sentences_clean()\n sent = comments.merge(sentences, how='left', left_index=True,\n right_index=True).drop('words', axis=1)\n sent.columns = ['rev_id', 'sentence']\n sent.to_csv('../features/sentences_clean.csv', index=False)\n\n\ndef relabel():\n labels = DataManager.get_labels()\n class_0 = labels[labels['label'] == 0].iloc[0:, :]\n class_2 = labels[labels['label'] == 2].append(labels[labels['label'] == 1]\n ).iloc[0:, :]\n class_2.iloc[:, 2] = 0\n class_3 = labels[labels['label'] == 3].append(labels[labels.label == 4]\n ).append(labels[labels.label == 5]).iloc[0:, :]\n class_3.iloc[:, 2] = 1\n labels = class_0.append(class_2).append(class_3)\n labels = labels.sample(frac=1)\n labels = labels.reset_index().drop('level_0', axis=1).drop('index', axis=1)\n labels.to_csv('../features/labels_2_classes.csv', index=False)\n print('-------------Labels done!++++++++++++++')\n\n\ndef clean_whitespaces(comment):\n comment = 
re.sub('[^\\\\w\\\\s]{2,}', '', comment)\n comment = re.sub(' [^\\\\w\\\\s] ', ' ', comment)\n comment = re.sub(' {2,}', ' ', comment)\n return comment\n\n\ndef group_data(labels):\n grouped = labels.groupby('rev_id')\n i = 0\n partial = pd.DataFrame()\n print('Total:', len(grouped))\n for name, group in grouped:\n print(i)\n temp = pd.DataFrame(np.zeros(group.shape[0] + 1))\n temp.iloc[0] = group.iloc[0, 0]\n j = 0\n for index, row in group.iterrows():\n temp.iloc[j + 1] = row[1]\n j += 1\n partial = partial.append(temp.transpose())\n i += 1\n print(partial)\n partial.to_csv('../data/partial.csv')\n\n\ndef get_average_scores():\n partial = pd.read_csv('../data/partial.csv').drop(['Unnamed: 0'], axis=1)\n avg_partial = pd.DataFrame()\n length = partial.iloc[0, :].shape[0]\n c = 0\n for index, row in partial.iterrows():\n print(index)\n scores = row.iloc[1:length]\n avg = np.mean(scores)\n av = 0\n if avg < 0.05:\n av = 0\n c += 1\n elif avg < 0.1:\n av = 1\n elif avg < 0.3:\n av = 2\n elif avg < 0.5:\n av = 3\n elif avg < 0.7:\n av = 4\n else:\n av = 5\n avg_partial = avg_partial.append([np.append(int(row.iloc[0]), av)])\n print(c / 115863)\n avg_partial.to_csv('../features/labels.csv')\n print(avg_partial)\n return avg_partial\n\n\ndef read_data():\n comments = DataManager.get_comments_clean()\n labels = DataManager.get_original_comments()\n labels = labels.drop(['worker_id', 'quoting_attack', 'recipient_attack',\n 'third_party_attack', 'other_attack'], axis=1)\n return labels, comments\n\n\ndef merge_data(group, comments):\n data = group.merge(comments, left_on='0', right_on=0, how='inner')\n data.columns = ['rev_id', 'label', '0', 'words']\n data = data.drop(data.columns[2], axis=1)\n with open('../data/data.txt', 'wb') as fp:\n pickle.dump(data, fp)\n return data\n\n\ndef merge_comments_words():\n basic = DataManager.get_basic()\n comments = DataManager.get_original_comments()\n comments = comments.drop(['year', 'sample', 'split'], axis=1)\n basic_comments = basic.merge(comments, left_on=2, right_on='rev_id')\n labels = basic_comments.iloc[:, 0:2]\n basic_comments = basic_comments.drop(2, axis=1)\n basic_comments = basic_comments.drop(1, axis=1)\n cl = basic_comments.columns.tolist()\n cl[0] = 'comment'\n with open('../features/basic_comments.txt', 'wb') as fp:\n pickle.dump(basic_comments, fp)\n labels = labels[[2, 1]]\n cols = ['rev_id', 'label']\n labels.columns = cols\n pd.DataFrame(labels).to_csv('../features/labels.csv', header=cols)\n\n\ndef create_basic_data(path):\n data = DataManager.get_data()\n comments = DataManager.get_original_comments()\n basic = data.merge(comments, on='rev_id')\n basic = basic.drop('label', axis=1)\n basic = basic.drop('sample', axis=1)\n basic = basic.drop('split', axis=1)\n revs = []\n for index, row in basic.iterrows():\n if len(row['words']) == 0:\n revs.append(row['rev_id'])\n bsc = pd.DataFrame()\n for index, row in basic.iterrows():\n print(index)\n if row['rev_id'] not in revs:\n bsc = bsc.append(row)\n basic = bsc\n with open(path, 'wb') as fp:\n pickle.dump(basic, fp)\n\n\ndef clean_new_lines(path):\n data = DataManager.get_comments()\n data = data.reset_index(drop=True)\n for index, row in data.iterrows():\n data.iloc[index, 0] = clean_whitespaces(row['comment'])\n if index % 1000 == 0:\n print(index)\n print(row['comment'])\n print(data.iloc[index, 0])\n if index % 10000 == 0:\n with open(path, 'wb') as fp:\n pickle.dump(data, fp)\n data = data.reset_index(drop=True)\n with open(path, 'wb') as fp:\n pickle.dump(data, fp)\n\n\n<code 
token>\n<assignment token>\n<code token>\n",
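`group_data()` in the step above walks each `rev_id` group with nested loops to gather that revision's annotator scores into one wide row. A `groupby(...).agg(list)` performs the same collection without explicit loops; the `score` column name and the sample values below are assumptions, since the original refers to the score column only by position:

```python
import pandas as pd

def collect_scores(labels: pd.DataFrame, score_col: str = "score") -> pd.DataFrame:
    # one row per rev_id, with all of its annotator scores gathered into a list
    return (
        labels.groupby("rev_id")[score_col]
        .agg(list)
        .reset_index(name="scores")
    )

if __name__ == "__main__":
    demo = pd.DataFrame({
        "rev_id": [1, 1, 1, 2, 2],
        "score": [0.0, 1.0, 0.0, 1.0, 1.0],
    })
    print(collect_scores(demo))
```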
"<import token>\n\n\ndef delete_empty_rows(data):\n i = 0\n new_data = pd.DataFrame()\n for index, row in data.iterrows():\n print('To delete or not to delete the question is now:', i)\n if len(list(row.iloc[2])) != 0:\n new_data = new_data.append(row)\n i += 1\n print(new_data)\n with open('../data/data_features_clean.txt', 'wb') as fp:\n pickle.dump(new_data, fp)\n return new_data\n\n\ndef clean_sentences():\n sentences = DataManager.get_sentences()\n i = 0\n new_data = pd.DataFrame()\n for index, row in sentences.iterrows():\n if i % 1000 == 0:\n print('To delete or not to delete the question is now:', i)\n if isinstance(row[0], float) and math.isnan(row[0]):\n print('Deleting...', i)\n else:\n new_data = new_data.append(row)\n i += 1\n new_data.to_csv('../data/sentences_clean.csv')\n print(new_data)\n\n\ndef get_embeddings():\n data = DataManager.get_data_features_clean()\n data = data[['rev_id', 'label', 'words', 'embedding']]\n data.set_index('rev_id')\n embeddings = data['embedding']\n with open('../features/embeddings.txt', 'wb') as fp:\n pickle.dump(embeddings, fp)\n\n\n<function token>\n\n\ndef flat_data_calling():\n for i in range(0, 115):\n print('-----------', i)\n data = DataManager.get_data_features_clean()\n dt = data.iloc[1000 * i:1000 * (i + 1)]\n del data\n flat_data(dt)\n data = DataManager.get_data_features_clean()\n dt = data.iloc[1000 * i:]\n del data\n flat_data(dt)\n\n\ndef separate_features():\n data = DataManager.get_data_features()\n print(data)\n basic = data.iloc[:, 1:3]\n data = data.drop(3, axis=1)\n for i in range(4, 527):\n data = data.drop(i, axis=1)\n data = data.drop(1, axis=1)\n embeddings = data\n print('basic', basic)\n print('embeddings', embeddings)\n embeddings.to_csv('../features/embeddings.csv')\n with open('../features/basic.txt', 'wb') as fp:\n pickle.dump(basic, fp)\n\n\ndef sentences_clean():\n comments = DataManager.get_comments()\n sentences = DataManager.get_sentences_clean()\n sent = comments.merge(sentences, how='left', left_index=True,\n right_index=True).drop('words', axis=1)\n sent.columns = ['rev_id', 'sentence']\n sent.to_csv('../features/sentences_clean.csv', index=False)\n\n\ndef relabel():\n labels = DataManager.get_labels()\n class_0 = labels[labels['label'] == 0].iloc[0:, :]\n class_2 = labels[labels['label'] == 2].append(labels[labels['label'] == 1]\n ).iloc[0:, :]\n class_2.iloc[:, 2] = 0\n class_3 = labels[labels['label'] == 3].append(labels[labels.label == 4]\n ).append(labels[labels.label == 5]).iloc[0:, :]\n class_3.iloc[:, 2] = 1\n labels = class_0.append(class_2).append(class_3)\n labels = labels.sample(frac=1)\n labels = labels.reset_index().drop('level_0', axis=1).drop('index', axis=1)\n labels.to_csv('../features/labels_2_classes.csv', index=False)\n print('-------------Labels done!++++++++++++++')\n\n\ndef clean_whitespaces(comment):\n comment = re.sub('[^\\\\w\\\\s]{2,}', '', comment)\n comment = re.sub(' [^\\\\w\\\\s] ', ' ', comment)\n comment = re.sub(' {2,}', ' ', comment)\n return comment\n\n\ndef group_data(labels):\n grouped = labels.groupby('rev_id')\n i = 0\n partial = pd.DataFrame()\n print('Total:', len(grouped))\n for name, group in grouped:\n print(i)\n temp = pd.DataFrame(np.zeros(group.shape[0] + 1))\n temp.iloc[0] = group.iloc[0, 0]\n j = 0\n for index, row in group.iterrows():\n temp.iloc[j + 1] = row[1]\n j += 1\n partial = partial.append(temp.transpose())\n i += 1\n print(partial)\n partial.to_csv('../data/partial.csv')\n\n\ndef get_average_scores():\n partial = 
pd.read_csv('../data/partial.csv').drop(['Unnamed: 0'], axis=1)\n avg_partial = pd.DataFrame()\n length = partial.iloc[0, :].shape[0]\n c = 0\n for index, row in partial.iterrows():\n print(index)\n scores = row.iloc[1:length]\n avg = np.mean(scores)\n av = 0\n if avg < 0.05:\n av = 0\n c += 1\n elif avg < 0.1:\n av = 1\n elif avg < 0.3:\n av = 2\n elif avg < 0.5:\n av = 3\n elif avg < 0.7:\n av = 4\n else:\n av = 5\n avg_partial = avg_partial.append([np.append(int(row.iloc[0]), av)])\n print(c / 115863)\n avg_partial.to_csv('../features/labels.csv')\n print(avg_partial)\n return avg_partial\n\n\ndef read_data():\n comments = DataManager.get_comments_clean()\n labels = DataManager.get_original_comments()\n labels = labels.drop(['worker_id', 'quoting_attack', 'recipient_attack',\n 'third_party_attack', 'other_attack'], axis=1)\n return labels, comments\n\n\ndef merge_data(group, comments):\n data = group.merge(comments, left_on='0', right_on=0, how='inner')\n data.columns = ['rev_id', 'label', '0', 'words']\n data = data.drop(data.columns[2], axis=1)\n with open('../data/data.txt', 'wb') as fp:\n pickle.dump(data, fp)\n return data\n\n\ndef merge_comments_words():\n basic = DataManager.get_basic()\n comments = DataManager.get_original_comments()\n comments = comments.drop(['year', 'sample', 'split'], axis=1)\n basic_comments = basic.merge(comments, left_on=2, right_on='rev_id')\n labels = basic_comments.iloc[:, 0:2]\n basic_comments = basic_comments.drop(2, axis=1)\n basic_comments = basic_comments.drop(1, axis=1)\n cl = basic_comments.columns.tolist()\n cl[0] = 'comment'\n with open('../features/basic_comments.txt', 'wb') as fp:\n pickle.dump(basic_comments, fp)\n labels = labels[[2, 1]]\n cols = ['rev_id', 'label']\n labels.columns = cols\n pd.DataFrame(labels).to_csv('../features/labels.csv', header=cols)\n\n\ndef create_basic_data(path):\n data = DataManager.get_data()\n comments = DataManager.get_original_comments()\n basic = data.merge(comments, on='rev_id')\n basic = basic.drop('label', axis=1)\n basic = basic.drop('sample', axis=1)\n basic = basic.drop('split', axis=1)\n revs = []\n for index, row in basic.iterrows():\n if len(row['words']) == 0:\n revs.append(row['rev_id'])\n bsc = pd.DataFrame()\n for index, row in basic.iterrows():\n print(index)\n if row['rev_id'] not in revs:\n bsc = bsc.append(row)\n basic = bsc\n with open(path, 'wb') as fp:\n pickle.dump(basic, fp)\n\n\ndef clean_new_lines(path):\n data = DataManager.get_comments()\n data = data.reset_index(drop=True)\n for index, row in data.iterrows():\n data.iloc[index, 0] = clean_whitespaces(row['comment'])\n if index % 1000 == 0:\n print(index)\n print(row['comment'])\n print(data.iloc[index, 0])\n if index % 10000 == 0:\n with open(path, 'wb') as fp:\n pickle.dump(data, fp)\n data = data.reset_index(drop=True)\n with open(path, 'wb') as fp:\n pickle.dump(data, fp)\n\n\n<code token>\n<assignment token>\n<code token>\n",
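`clean_whitespaces()` in the step above applies three substitutions: it removes runs of two or more non-word, non-space characters, drops a single such character when it sits between spaces, and collapses repeated spaces. The sketch below reproduces those patterns on an invented sample string so the effect of each one is visible:

```python
import re

def clean_whitespaces(comment: str) -> str:
    comment = re.sub(r"[^\w\s]{2,}", "", comment)  # drop runs of punctuation, e.g. '==' or '!!!'
    comment = re.sub(r" [^\w\s] ", " ", comment)   # drop a lone punctuation char between spaces
    comment = re.sub(r" {2,}", " ", comment)       # collapse repeated spaces
    return comment

if __name__ == "__main__":
    sample = "This == is , a   noisy :: comment !!!"
    print(clean_whitespaces(sample))
```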
"<import token>\n\n\ndef delete_empty_rows(data):\n i = 0\n new_data = pd.DataFrame()\n for index, row in data.iterrows():\n print('To delete or not to delete the question is now:', i)\n if len(list(row.iloc[2])) != 0:\n new_data = new_data.append(row)\n i += 1\n print(new_data)\n with open('../data/data_features_clean.txt', 'wb') as fp:\n pickle.dump(new_data, fp)\n return new_data\n\n\ndef clean_sentences():\n sentences = DataManager.get_sentences()\n i = 0\n new_data = pd.DataFrame()\n for index, row in sentences.iterrows():\n if i % 1000 == 0:\n print('To delete or not to delete the question is now:', i)\n if isinstance(row[0], float) and math.isnan(row[0]):\n print('Deleting...', i)\n else:\n new_data = new_data.append(row)\n i += 1\n new_data.to_csv('../data/sentences_clean.csv')\n print(new_data)\n\n\ndef get_embeddings():\n data = DataManager.get_data_features_clean()\n data = data[['rev_id', 'label', 'words', 'embedding']]\n data.set_index('rev_id')\n embeddings = data['embedding']\n with open('../features/embeddings.txt', 'wb') as fp:\n pickle.dump(embeddings, fp)\n\n\n<function token>\n\n\ndef flat_data_calling():\n for i in range(0, 115):\n print('-----------', i)\n data = DataManager.get_data_features_clean()\n dt = data.iloc[1000 * i:1000 * (i + 1)]\n del data\n flat_data(dt)\n data = DataManager.get_data_features_clean()\n dt = data.iloc[1000 * i:]\n del data\n flat_data(dt)\n\n\ndef separate_features():\n data = DataManager.get_data_features()\n print(data)\n basic = data.iloc[:, 1:3]\n data = data.drop(3, axis=1)\n for i in range(4, 527):\n data = data.drop(i, axis=1)\n data = data.drop(1, axis=1)\n embeddings = data\n print('basic', basic)\n print('embeddings', embeddings)\n embeddings.to_csv('../features/embeddings.csv')\n with open('../features/basic.txt', 'wb') as fp:\n pickle.dump(basic, fp)\n\n\ndef sentences_clean():\n comments = DataManager.get_comments()\n sentences = DataManager.get_sentences_clean()\n sent = comments.merge(sentences, how='left', left_index=True,\n right_index=True).drop('words', axis=1)\n sent.columns = ['rev_id', 'sentence']\n sent.to_csv('../features/sentences_clean.csv', index=False)\n\n\ndef relabel():\n labels = DataManager.get_labels()\n class_0 = labels[labels['label'] == 0].iloc[0:, :]\n class_2 = labels[labels['label'] == 2].append(labels[labels['label'] == 1]\n ).iloc[0:, :]\n class_2.iloc[:, 2] = 0\n class_3 = labels[labels['label'] == 3].append(labels[labels.label == 4]\n ).append(labels[labels.label == 5]).iloc[0:, :]\n class_3.iloc[:, 2] = 1\n labels = class_0.append(class_2).append(class_3)\n labels = labels.sample(frac=1)\n labels = labels.reset_index().drop('level_0', axis=1).drop('index', axis=1)\n labels.to_csv('../features/labels_2_classes.csv', index=False)\n print('-------------Labels done!++++++++++++++')\n\n\ndef clean_whitespaces(comment):\n comment = re.sub('[^\\\\w\\\\s]{2,}', '', comment)\n comment = re.sub(' [^\\\\w\\\\s] ', ' ', comment)\n comment = re.sub(' {2,}', ' ', comment)\n return comment\n\n\ndef group_data(labels):\n grouped = labels.groupby('rev_id')\n i = 0\n partial = pd.DataFrame()\n print('Total:', len(grouped))\n for name, group in grouped:\n print(i)\n temp = pd.DataFrame(np.zeros(group.shape[0] + 1))\n temp.iloc[0] = group.iloc[0, 0]\n j = 0\n for index, row in group.iterrows():\n temp.iloc[j + 1] = row[1]\n j += 1\n partial = partial.append(temp.transpose())\n i += 1\n print(partial)\n partial.to_csv('../data/partial.csv')\n\n\ndef get_average_scores():\n partial = 
pd.read_csv('../data/partial.csv').drop(['Unnamed: 0'], axis=1)\n avg_partial = pd.DataFrame()\n length = partial.iloc[0, :].shape[0]\n c = 0\n for index, row in partial.iterrows():\n print(index)\n scores = row.iloc[1:length]\n avg = np.mean(scores)\n av = 0\n if avg < 0.05:\n av = 0\n c += 1\n elif avg < 0.1:\n av = 1\n elif avg < 0.3:\n av = 2\n elif avg < 0.5:\n av = 3\n elif avg < 0.7:\n av = 4\n else:\n av = 5\n avg_partial = avg_partial.append([np.append(int(row.iloc[0]), av)])\n print(c / 115863)\n avg_partial.to_csv('../features/labels.csv')\n print(avg_partial)\n return avg_partial\n\n\ndef read_data():\n comments = DataManager.get_comments_clean()\n labels = DataManager.get_original_comments()\n labels = labels.drop(['worker_id', 'quoting_attack', 'recipient_attack',\n 'third_party_attack', 'other_attack'], axis=1)\n return labels, comments\n\n\ndef merge_data(group, comments):\n data = group.merge(comments, left_on='0', right_on=0, how='inner')\n data.columns = ['rev_id', 'label', '0', 'words']\n data = data.drop(data.columns[2], axis=1)\n with open('../data/data.txt', 'wb') as fp:\n pickle.dump(data, fp)\n return data\n\n\ndef merge_comments_words():\n basic = DataManager.get_basic()\n comments = DataManager.get_original_comments()\n comments = comments.drop(['year', 'sample', 'split'], axis=1)\n basic_comments = basic.merge(comments, left_on=2, right_on='rev_id')\n labels = basic_comments.iloc[:, 0:2]\n basic_comments = basic_comments.drop(2, axis=1)\n basic_comments = basic_comments.drop(1, axis=1)\n cl = basic_comments.columns.tolist()\n cl[0] = 'comment'\n with open('../features/basic_comments.txt', 'wb') as fp:\n pickle.dump(basic_comments, fp)\n labels = labels[[2, 1]]\n cols = ['rev_id', 'label']\n labels.columns = cols\n pd.DataFrame(labels).to_csv('../features/labels.csv', header=cols)\n\n\n<function token>\n\n\ndef clean_new_lines(path):\n data = DataManager.get_comments()\n data = data.reset_index(drop=True)\n for index, row in data.iterrows():\n data.iloc[index, 0] = clean_whitespaces(row['comment'])\n if index % 1000 == 0:\n print(index)\n print(row['comment'])\n print(data.iloc[index, 0])\n if index % 10000 == 0:\n with open(path, 'wb') as fp:\n pickle.dump(data, fp)\n data = data.reset_index(drop=True)\n with open(path, 'wb') as fp:\n pickle.dump(data, fp)\n\n\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n\n\ndef delete_empty_rows(data):\n i = 0\n new_data = pd.DataFrame()\n for index, row in data.iterrows():\n print('To delete or not to delete the question is now:', i)\n if len(list(row.iloc[2])) != 0:\n new_data = new_data.append(row)\n i += 1\n print(new_data)\n with open('../data/data_features_clean.txt', 'wb') as fp:\n pickle.dump(new_data, fp)\n return new_data\n\n\ndef clean_sentences():\n sentences = DataManager.get_sentences()\n i = 0\n new_data = pd.DataFrame()\n for index, row in sentences.iterrows():\n if i % 1000 == 0:\n print('To delete or not to delete the question is now:', i)\n if isinstance(row[0], float) and math.isnan(row[0]):\n print('Deleting...', i)\n else:\n new_data = new_data.append(row)\n i += 1\n new_data.to_csv('../data/sentences_clean.csv')\n print(new_data)\n\n\ndef get_embeddings():\n data = DataManager.get_data_features_clean()\n data = data[['rev_id', 'label', 'words', 'embedding']]\n data.set_index('rev_id')\n embeddings = data['embedding']\n with open('../features/embeddings.txt', 'wb') as fp:\n pickle.dump(embeddings, fp)\n\n\n<function token>\n\n\ndef flat_data_calling():\n for i in range(0, 115):\n print('-----------', i)\n data = DataManager.get_data_features_clean()\n dt = data.iloc[1000 * i:1000 * (i + 1)]\n del data\n flat_data(dt)\n data = DataManager.get_data_features_clean()\n dt = data.iloc[1000 * i:]\n del data\n flat_data(dt)\n\n\n<function token>\n\n\ndef sentences_clean():\n comments = DataManager.get_comments()\n sentences = DataManager.get_sentences_clean()\n sent = comments.merge(sentences, how='left', left_index=True,\n right_index=True).drop('words', axis=1)\n sent.columns = ['rev_id', 'sentence']\n sent.to_csv('../features/sentences_clean.csv', index=False)\n\n\ndef relabel():\n labels = DataManager.get_labels()\n class_0 = labels[labels['label'] == 0].iloc[0:, :]\n class_2 = labels[labels['label'] == 2].append(labels[labels['label'] == 1]\n ).iloc[0:, :]\n class_2.iloc[:, 2] = 0\n class_3 = labels[labels['label'] == 3].append(labels[labels.label == 4]\n ).append(labels[labels.label == 5]).iloc[0:, :]\n class_3.iloc[:, 2] = 1\n labels = class_0.append(class_2).append(class_3)\n labels = labels.sample(frac=1)\n labels = labels.reset_index().drop('level_0', axis=1).drop('index', axis=1)\n labels.to_csv('../features/labels_2_classes.csv', index=False)\n print('-------------Labels done!++++++++++++++')\n\n\ndef clean_whitespaces(comment):\n comment = re.sub('[^\\\\w\\\\s]{2,}', '', comment)\n comment = re.sub(' [^\\\\w\\\\s] ', ' ', comment)\n comment = re.sub(' {2,}', ' ', comment)\n return comment\n\n\ndef group_data(labels):\n grouped = labels.groupby('rev_id')\n i = 0\n partial = pd.DataFrame()\n print('Total:', len(grouped))\n for name, group in grouped:\n print(i)\n temp = pd.DataFrame(np.zeros(group.shape[0] + 1))\n temp.iloc[0] = group.iloc[0, 0]\n j = 0\n for index, row in group.iterrows():\n temp.iloc[j + 1] = row[1]\n j += 1\n partial = partial.append(temp.transpose())\n i += 1\n print(partial)\n partial.to_csv('../data/partial.csv')\n\n\ndef get_average_scores():\n partial = pd.read_csv('../data/partial.csv').drop(['Unnamed: 0'], axis=1)\n avg_partial = pd.DataFrame()\n length = partial.iloc[0, :].shape[0]\n c = 0\n for index, row in partial.iterrows():\n print(index)\n scores = row.iloc[1:length]\n avg = np.mean(scores)\n av = 0\n if avg < 0.05:\n av = 0\n c += 1\n elif avg < 0.1:\n av = 1\n elif avg < 0.3:\n av = 2\n elif avg < 0.5:\n av = 3\n elif avg < 0.7:\n av = 4\n else:\n av = 5\n avg_partial = 
avg_partial.append([np.append(int(row.iloc[0]), av)])\n print(c / 115863)\n avg_partial.to_csv('../features/labels.csv')\n print(avg_partial)\n return avg_partial\n\n\ndef read_data():\n comments = DataManager.get_comments_clean()\n labels = DataManager.get_original_comments()\n labels = labels.drop(['worker_id', 'quoting_attack', 'recipient_attack',\n 'third_party_attack', 'other_attack'], axis=1)\n return labels, comments\n\n\ndef merge_data(group, comments):\n data = group.merge(comments, left_on='0', right_on=0, how='inner')\n data.columns = ['rev_id', 'label', '0', 'words']\n data = data.drop(data.columns[2], axis=1)\n with open('../data/data.txt', 'wb') as fp:\n pickle.dump(data, fp)\n return data\n\n\ndef merge_comments_words():\n basic = DataManager.get_basic()\n comments = DataManager.get_original_comments()\n comments = comments.drop(['year', 'sample', 'split'], axis=1)\n basic_comments = basic.merge(comments, left_on=2, right_on='rev_id')\n labels = basic_comments.iloc[:, 0:2]\n basic_comments = basic_comments.drop(2, axis=1)\n basic_comments = basic_comments.drop(1, axis=1)\n cl = basic_comments.columns.tolist()\n cl[0] = 'comment'\n with open('../features/basic_comments.txt', 'wb') as fp:\n pickle.dump(basic_comments, fp)\n labels = labels[[2, 1]]\n cols = ['rev_id', 'label']\n labels.columns = cols\n pd.DataFrame(labels).to_csv('../features/labels.csv', header=cols)\n\n\n<function token>\n\n\ndef clean_new_lines(path):\n data = DataManager.get_comments()\n data = data.reset_index(drop=True)\n for index, row in data.iterrows():\n data.iloc[index, 0] = clean_whitespaces(row['comment'])\n if index % 1000 == 0:\n print(index)\n print(row['comment'])\n print(data.iloc[index, 0])\n if index % 10000 == 0:\n with open(path, 'wb') as fp:\n pickle.dump(data, fp)\n data = data.reset_index(drop=True)\n with open(path, 'wb') as fp:\n pickle.dump(data, fp)\n\n\n<code token>\n<assignment token>\n<code token>\n",
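`delete_empty_rows()` in the step above drops rows whose third column holds an empty token list by appending the survivors one at a time, which copies the growing frame on every append and becomes quadratic on large inputs. A boolean mask keeps the same rows in one pass; the `words` column name is borrowed from the rest of the script, and the sample frame is invented:

```python
import pandas as pd

def delete_empty_rows(data: pd.DataFrame) -> pd.DataFrame:
    # keep only rows whose token list is non-empty
    mask = data["words"].map(len) > 0
    return data[mask].reset_index(drop=True)

if __name__ == "__main__":
    demo = pd.DataFrame({
        "rev_id": [1, 2, 3],
        "words": [["hello", "there"], [], ["ok"]],
    })
    print(delete_empty_rows(demo))
```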
"<import token>\n<function token>\n\n\ndef clean_sentences():\n sentences = DataManager.get_sentences()\n i = 0\n new_data = pd.DataFrame()\n for index, row in sentences.iterrows():\n if i % 1000 == 0:\n print('To delete or not to delete the question is now:', i)\n if isinstance(row[0], float) and math.isnan(row[0]):\n print('Deleting...', i)\n else:\n new_data = new_data.append(row)\n i += 1\n new_data.to_csv('../data/sentences_clean.csv')\n print(new_data)\n\n\ndef get_embeddings():\n data = DataManager.get_data_features_clean()\n data = data[['rev_id', 'label', 'words', 'embedding']]\n data.set_index('rev_id')\n embeddings = data['embedding']\n with open('../features/embeddings.txt', 'wb') as fp:\n pickle.dump(embeddings, fp)\n\n\n<function token>\n\n\ndef flat_data_calling():\n for i in range(0, 115):\n print('-----------', i)\n data = DataManager.get_data_features_clean()\n dt = data.iloc[1000 * i:1000 * (i + 1)]\n del data\n flat_data(dt)\n data = DataManager.get_data_features_clean()\n dt = data.iloc[1000 * i:]\n del data\n flat_data(dt)\n\n\n<function token>\n\n\ndef sentences_clean():\n comments = DataManager.get_comments()\n sentences = DataManager.get_sentences_clean()\n sent = comments.merge(sentences, how='left', left_index=True,\n right_index=True).drop('words', axis=1)\n sent.columns = ['rev_id', 'sentence']\n sent.to_csv('../features/sentences_clean.csv', index=False)\n\n\ndef relabel():\n labels = DataManager.get_labels()\n class_0 = labels[labels['label'] == 0].iloc[0:, :]\n class_2 = labels[labels['label'] == 2].append(labels[labels['label'] == 1]\n ).iloc[0:, :]\n class_2.iloc[:, 2] = 0\n class_3 = labels[labels['label'] == 3].append(labels[labels.label == 4]\n ).append(labels[labels.label == 5]).iloc[0:, :]\n class_3.iloc[:, 2] = 1\n labels = class_0.append(class_2).append(class_3)\n labels = labels.sample(frac=1)\n labels = labels.reset_index().drop('level_0', axis=1).drop('index', axis=1)\n labels.to_csv('../features/labels_2_classes.csv', index=False)\n print('-------------Labels done!++++++++++++++')\n\n\ndef clean_whitespaces(comment):\n comment = re.sub('[^\\\\w\\\\s]{2,}', '', comment)\n comment = re.sub(' [^\\\\w\\\\s] ', ' ', comment)\n comment = re.sub(' {2,}', ' ', comment)\n return comment\n\n\ndef group_data(labels):\n grouped = labels.groupby('rev_id')\n i = 0\n partial = pd.DataFrame()\n print('Total:', len(grouped))\n for name, group in grouped:\n print(i)\n temp = pd.DataFrame(np.zeros(group.shape[0] + 1))\n temp.iloc[0] = group.iloc[0, 0]\n j = 0\n for index, row in group.iterrows():\n temp.iloc[j + 1] = row[1]\n j += 1\n partial = partial.append(temp.transpose())\n i += 1\n print(partial)\n partial.to_csv('../data/partial.csv')\n\n\ndef get_average_scores():\n partial = pd.read_csv('../data/partial.csv').drop(['Unnamed: 0'], axis=1)\n avg_partial = pd.DataFrame()\n length = partial.iloc[0, :].shape[0]\n c = 0\n for index, row in partial.iterrows():\n print(index)\n scores = row.iloc[1:length]\n avg = np.mean(scores)\n av = 0\n if avg < 0.05:\n av = 0\n c += 1\n elif avg < 0.1:\n av = 1\n elif avg < 0.3:\n av = 2\n elif avg < 0.5:\n av = 3\n elif avg < 0.7:\n av = 4\n else:\n av = 5\n avg_partial = avg_partial.append([np.append(int(row.iloc[0]), av)])\n print(c / 115863)\n avg_partial.to_csv('../features/labels.csv')\n print(avg_partial)\n return avg_partial\n\n\ndef read_data():\n comments = DataManager.get_comments_clean()\n labels = DataManager.get_original_comments()\n labels = labels.drop(['worker_id', 'quoting_attack', 'recipient_attack',\n 
'third_party_attack', 'other_attack'], axis=1)\n return labels, comments\n\n\ndef merge_data(group, comments):\n data = group.merge(comments, left_on='0', right_on=0, how='inner')\n data.columns = ['rev_id', 'label', '0', 'words']\n data = data.drop(data.columns[2], axis=1)\n with open('../data/data.txt', 'wb') as fp:\n pickle.dump(data, fp)\n return data\n\n\ndef merge_comments_words():\n basic = DataManager.get_basic()\n comments = DataManager.get_original_comments()\n comments = comments.drop(['year', 'sample', 'split'], axis=1)\n basic_comments = basic.merge(comments, left_on=2, right_on='rev_id')\n labels = basic_comments.iloc[:, 0:2]\n basic_comments = basic_comments.drop(2, axis=1)\n basic_comments = basic_comments.drop(1, axis=1)\n cl = basic_comments.columns.tolist()\n cl[0] = 'comment'\n with open('../features/basic_comments.txt', 'wb') as fp:\n pickle.dump(basic_comments, fp)\n labels = labels[[2, 1]]\n cols = ['rev_id', 'label']\n labels.columns = cols\n pd.DataFrame(labels).to_csv('../features/labels.csv', header=cols)\n\n\n<function token>\n\n\ndef clean_new_lines(path):\n data = DataManager.get_comments()\n data = data.reset_index(drop=True)\n for index, row in data.iterrows():\n data.iloc[index, 0] = clean_whitespaces(row['comment'])\n if index % 1000 == 0:\n print(index)\n print(row['comment'])\n print(data.iloc[index, 0])\n if index % 10000 == 0:\n with open(path, 'wb') as fp:\n pickle.dump(data, fp)\n data = data.reset_index(drop=True)\n with open(path, 'wb') as fp:\n pickle.dump(data, fp)\n\n\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n\n\ndef clean_sentences():\n sentences = DataManager.get_sentences()\n i = 0\n new_data = pd.DataFrame()\n for index, row in sentences.iterrows():\n if i % 1000 == 0:\n print('To delete or not to delete the question is now:', i)\n if isinstance(row[0], float) and math.isnan(row[0]):\n print('Deleting...', i)\n else:\n new_data = new_data.append(row)\n i += 1\n new_data.to_csv('../data/sentences_clean.csv')\n print(new_data)\n\n\ndef get_embeddings():\n data = DataManager.get_data_features_clean()\n data = data[['rev_id', 'label', 'words', 'embedding']]\n data.set_index('rev_id')\n embeddings = data['embedding']\n with open('../features/embeddings.txt', 'wb') as fp:\n pickle.dump(embeddings, fp)\n\n\n<function token>\n\n\ndef flat_data_calling():\n for i in range(0, 115):\n print('-----------', i)\n data = DataManager.get_data_features_clean()\n dt = data.iloc[1000 * i:1000 * (i + 1)]\n del data\n flat_data(dt)\n data = DataManager.get_data_features_clean()\n dt = data.iloc[1000 * i:]\n del data\n flat_data(dt)\n\n\n<function token>\n\n\ndef sentences_clean():\n comments = DataManager.get_comments()\n sentences = DataManager.get_sentences_clean()\n sent = comments.merge(sentences, how='left', left_index=True,\n right_index=True).drop('words', axis=1)\n sent.columns = ['rev_id', 'sentence']\n sent.to_csv('../features/sentences_clean.csv', index=False)\n\n\ndef relabel():\n labels = DataManager.get_labels()\n class_0 = labels[labels['label'] == 0].iloc[0:, :]\n class_2 = labels[labels['label'] == 2].append(labels[labels['label'] == 1]\n ).iloc[0:, :]\n class_2.iloc[:, 2] = 0\n class_3 = labels[labels['label'] == 3].append(labels[labels.label == 4]\n ).append(labels[labels.label == 5]).iloc[0:, :]\n class_3.iloc[:, 2] = 1\n labels = class_0.append(class_2).append(class_3)\n labels = labels.sample(frac=1)\n labels = labels.reset_index().drop('level_0', axis=1).drop('index', axis=1)\n labels.to_csv('../features/labels_2_classes.csv', index=False)\n print('-------------Labels done!++++++++++++++')\n\n\ndef clean_whitespaces(comment):\n comment = re.sub('[^\\\\w\\\\s]{2,}', '', comment)\n comment = re.sub(' [^\\\\w\\\\s] ', ' ', comment)\n comment = re.sub(' {2,}', ' ', comment)\n return comment\n\n\ndef group_data(labels):\n grouped = labels.groupby('rev_id')\n i = 0\n partial = pd.DataFrame()\n print('Total:', len(grouped))\n for name, group in grouped:\n print(i)\n temp = pd.DataFrame(np.zeros(group.shape[0] + 1))\n temp.iloc[0] = group.iloc[0, 0]\n j = 0\n for index, row in group.iterrows():\n temp.iloc[j + 1] = row[1]\n j += 1\n partial = partial.append(temp.transpose())\n i += 1\n print(partial)\n partial.to_csv('../data/partial.csv')\n\n\n<function token>\n\n\ndef read_data():\n comments = DataManager.get_comments_clean()\n labels = DataManager.get_original_comments()\n labels = labels.drop(['worker_id', 'quoting_attack', 'recipient_attack',\n 'third_party_attack', 'other_attack'], axis=1)\n return labels, comments\n\n\ndef merge_data(group, comments):\n data = group.merge(comments, left_on='0', right_on=0, how='inner')\n data.columns = ['rev_id', 'label', '0', 'words']\n data = data.drop(data.columns[2], axis=1)\n with open('../data/data.txt', 'wb') as fp:\n pickle.dump(data, fp)\n return data\n\n\ndef merge_comments_words():\n basic = DataManager.get_basic()\n comments = DataManager.get_original_comments()\n comments = comments.drop(['year', 'sample', 'split'], axis=1)\n basic_comments = basic.merge(comments, left_on=2, right_on='rev_id')\n labels = 
basic_comments.iloc[:, 0:2]\n basic_comments = basic_comments.drop(2, axis=1)\n basic_comments = basic_comments.drop(1, axis=1)\n cl = basic_comments.columns.tolist()\n cl[0] = 'comment'\n with open('../features/basic_comments.txt', 'wb') as fp:\n pickle.dump(basic_comments, fp)\n labels = labels[[2, 1]]\n cols = ['rev_id', 'label']\n labels.columns = cols\n pd.DataFrame(labels).to_csv('../features/labels.csv', header=cols)\n\n\n<function token>\n\n\ndef clean_new_lines(path):\n data = DataManager.get_comments()\n data = data.reset_index(drop=True)\n for index, row in data.iterrows():\n data.iloc[index, 0] = clean_whitespaces(row['comment'])\n if index % 1000 == 0:\n print(index)\n print(row['comment'])\n print(data.iloc[index, 0])\n if index % 10000 == 0:\n with open(path, 'wb') as fp:\n pickle.dump(data, fp)\n data = data.reset_index(drop=True)\n with open(path, 'wb') as fp:\n pickle.dump(data, fp)\n\n\n<code token>\n<assignment token>\n<code token>\n",
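`merge_data()` in the step above joins the grouped labels with the tokenised comments and pickles the result. The sketch below shows a compact version of that join; it merges directly on `rev_id` rather than the positional `'0'` columns used above, the output columns follow the original `['rev_id', 'label', 'words']`, and the sample frames are invented:

```python
import pandas as pd

def merge_labels_and_comments(labels: pd.DataFrame, comments: pd.DataFrame) -> pd.DataFrame:
    # inner join keeps only revisions that have both a label and a tokenised comment
    merged = labels.merge(comments, on="rev_id", how="inner")
    return merged[["rev_id", "label", "words"]]

if __name__ == "__main__":
    labels = pd.DataFrame({"rev_id": [1, 2, 3], "label": [0, 1, 0]})
    comments = pd.DataFrame({"rev_id": [1, 3], "words": [["a", "b"], ["c"]]})
    print(merge_labels_and_comments(labels, comments))
```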
"<import token>\n<function token>\n\n\ndef clean_sentences():\n sentences = DataManager.get_sentences()\n i = 0\n new_data = pd.DataFrame()\n for index, row in sentences.iterrows():\n if i % 1000 == 0:\n print('To delete or not to delete the question is now:', i)\n if isinstance(row[0], float) and math.isnan(row[0]):\n print('Deleting...', i)\n else:\n new_data = new_data.append(row)\n i += 1\n new_data.to_csv('../data/sentences_clean.csv')\n print(new_data)\n\n\ndef get_embeddings():\n data = DataManager.get_data_features_clean()\n data = data[['rev_id', 'label', 'words', 'embedding']]\n data.set_index('rev_id')\n embeddings = data['embedding']\n with open('../features/embeddings.txt', 'wb') as fp:\n pickle.dump(embeddings, fp)\n\n\n<function token>\n\n\ndef flat_data_calling():\n for i in range(0, 115):\n print('-----------', i)\n data = DataManager.get_data_features_clean()\n dt = data.iloc[1000 * i:1000 * (i + 1)]\n del data\n flat_data(dt)\n data = DataManager.get_data_features_clean()\n dt = data.iloc[1000 * i:]\n del data\n flat_data(dt)\n\n\n<function token>\n\n\ndef sentences_clean():\n comments = DataManager.get_comments()\n sentences = DataManager.get_sentences_clean()\n sent = comments.merge(sentences, how='left', left_index=True,\n right_index=True).drop('words', axis=1)\n sent.columns = ['rev_id', 'sentence']\n sent.to_csv('../features/sentences_clean.csv', index=False)\n\n\ndef relabel():\n labels = DataManager.get_labels()\n class_0 = labels[labels['label'] == 0].iloc[0:, :]\n class_2 = labels[labels['label'] == 2].append(labels[labels['label'] == 1]\n ).iloc[0:, :]\n class_2.iloc[:, 2] = 0\n class_3 = labels[labels['label'] == 3].append(labels[labels.label == 4]\n ).append(labels[labels.label == 5]).iloc[0:, :]\n class_3.iloc[:, 2] = 1\n labels = class_0.append(class_2).append(class_3)\n labels = labels.sample(frac=1)\n labels = labels.reset_index().drop('level_0', axis=1).drop('index', axis=1)\n labels.to_csv('../features/labels_2_classes.csv', index=False)\n print('-------------Labels done!++++++++++++++')\n\n\ndef clean_whitespaces(comment):\n comment = re.sub('[^\\\\w\\\\s]{2,}', '', comment)\n comment = re.sub(' [^\\\\w\\\\s] ', ' ', comment)\n comment = re.sub(' {2,}', ' ', comment)\n return comment\n\n\ndef group_data(labels):\n grouped = labels.groupby('rev_id')\n i = 0\n partial = pd.DataFrame()\n print('Total:', len(grouped))\n for name, group in grouped:\n print(i)\n temp = pd.DataFrame(np.zeros(group.shape[0] + 1))\n temp.iloc[0] = group.iloc[0, 0]\n j = 0\n for index, row in group.iterrows():\n temp.iloc[j + 1] = row[1]\n j += 1\n partial = partial.append(temp.transpose())\n i += 1\n print(partial)\n partial.to_csv('../data/partial.csv')\n\n\n<function token>\n\n\ndef read_data():\n comments = DataManager.get_comments_clean()\n labels = DataManager.get_original_comments()\n labels = labels.drop(['worker_id', 'quoting_attack', 'recipient_attack',\n 'third_party_attack', 'other_attack'], axis=1)\n return labels, comments\n\n\n<function token>\n\n\ndef merge_comments_words():\n basic = DataManager.get_basic()\n comments = DataManager.get_original_comments()\n comments = comments.drop(['year', 'sample', 'split'], axis=1)\n basic_comments = basic.merge(comments, left_on=2, right_on='rev_id')\n labels = basic_comments.iloc[:, 0:2]\n basic_comments = basic_comments.drop(2, axis=1)\n basic_comments = basic_comments.drop(1, axis=1)\n cl = basic_comments.columns.tolist()\n cl[0] = 'comment'\n with open('../features/basic_comments.txt', 'wb') as fp:\n 
pickle.dump(basic_comments, fp)\n labels = labels[[2, 1]]\n cols = ['rev_id', 'label']\n labels.columns = cols\n pd.DataFrame(labels).to_csv('../features/labels.csv', header=cols)\n\n\n<function token>\n\n\ndef clean_new_lines(path):\n data = DataManager.get_comments()\n data = data.reset_index(drop=True)\n for index, row in data.iterrows():\n data.iloc[index, 0] = clean_whitespaces(row['comment'])\n if index % 1000 == 0:\n print(index)\n print(row['comment'])\n print(data.iloc[index, 0])\n if index % 10000 == 0:\n with open(path, 'wb') as fp:\n pickle.dump(data, fp)\n data = data.reset_index(drop=True)\n with open(path, 'wb') as fp:\n pickle.dump(data, fp)\n\n\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n\n\ndef clean_sentences():\n sentences = DataManager.get_sentences()\n i = 0\n new_data = pd.DataFrame()\n for index, row in sentences.iterrows():\n if i % 1000 == 0:\n print('To delete or not to delete the question is now:', i)\n if isinstance(row[0], float) and math.isnan(row[0]):\n print('Deleting...', i)\n else:\n new_data = new_data.append(row)\n i += 1\n new_data.to_csv('../data/sentences_clean.csv')\n print(new_data)\n\n\ndef get_embeddings():\n data = DataManager.get_data_features_clean()\n data = data[['rev_id', 'label', 'words', 'embedding']]\n data.set_index('rev_id')\n embeddings = data['embedding']\n with open('../features/embeddings.txt', 'wb') as fp:\n pickle.dump(embeddings, fp)\n\n\n<function token>\n\n\ndef flat_data_calling():\n for i in range(0, 115):\n print('-----------', i)\n data = DataManager.get_data_features_clean()\n dt = data.iloc[1000 * i:1000 * (i + 1)]\n del data\n flat_data(dt)\n data = DataManager.get_data_features_clean()\n dt = data.iloc[1000 * i:]\n del data\n flat_data(dt)\n\n\n<function token>\n\n\ndef sentences_clean():\n comments = DataManager.get_comments()\n sentences = DataManager.get_sentences_clean()\n sent = comments.merge(sentences, how='left', left_index=True,\n right_index=True).drop('words', axis=1)\n sent.columns = ['rev_id', 'sentence']\n sent.to_csv('../features/sentences_clean.csv', index=False)\n\n\ndef relabel():\n labels = DataManager.get_labels()\n class_0 = labels[labels['label'] == 0].iloc[0:, :]\n class_2 = labels[labels['label'] == 2].append(labels[labels['label'] == 1]\n ).iloc[0:, :]\n class_2.iloc[:, 2] = 0\n class_3 = labels[labels['label'] == 3].append(labels[labels.label == 4]\n ).append(labels[labels.label == 5]).iloc[0:, :]\n class_3.iloc[:, 2] = 1\n labels = class_0.append(class_2).append(class_3)\n labels = labels.sample(frac=1)\n labels = labels.reset_index().drop('level_0', axis=1).drop('index', axis=1)\n labels.to_csv('../features/labels_2_classes.csv', index=False)\n print('-------------Labels done!++++++++++++++')\n\n\n<function token>\n\n\ndef group_data(labels):\n grouped = labels.groupby('rev_id')\n i = 0\n partial = pd.DataFrame()\n print('Total:', len(grouped))\n for name, group in grouped:\n print(i)\n temp = pd.DataFrame(np.zeros(group.shape[0] + 1))\n temp.iloc[0] = group.iloc[0, 0]\n j = 0\n for index, row in group.iterrows():\n temp.iloc[j + 1] = row[1]\n j += 1\n partial = partial.append(temp.transpose())\n i += 1\n print(partial)\n partial.to_csv('../data/partial.csv')\n\n\n<function token>\n\n\ndef read_data():\n comments = DataManager.get_comments_clean()\n labels = DataManager.get_original_comments()\n labels = labels.drop(['worker_id', 'quoting_attack', 'recipient_attack',\n 'third_party_attack', 'other_attack'], axis=1)\n return labels, comments\n\n\n<function token>\n\n\ndef merge_comments_words():\n basic = DataManager.get_basic()\n comments = DataManager.get_original_comments()\n comments = comments.drop(['year', 'sample', 'split'], axis=1)\n basic_comments = basic.merge(comments, left_on=2, right_on='rev_id')\n labels = basic_comments.iloc[:, 0:2]\n basic_comments = basic_comments.drop(2, axis=1)\n basic_comments = basic_comments.drop(1, axis=1)\n cl = basic_comments.columns.tolist()\n cl[0] = 'comment'\n with open('../features/basic_comments.txt', 'wb') as fp:\n pickle.dump(basic_comments, fp)\n labels = labels[[2, 1]]\n cols = ['rev_id', 'label']\n labels.columns = cols\n pd.DataFrame(labels).to_csv('../features/labels.csv', header=cols)\n\n\n<function 
token>\n\n\ndef clean_new_lines(path):\n data = DataManager.get_comments()\n data = data.reset_index(drop=True)\n for index, row in data.iterrows():\n data.iloc[index, 0] = clean_whitespaces(row['comment'])\n if index % 1000 == 0:\n print(index)\n print(row['comment'])\n print(data.iloc[index, 0])\n if index % 10000 == 0:\n with open(path, 'wb') as fp:\n pickle.dump(data, fp)\n data = data.reset_index(drop=True)\n with open(path, 'wb') as fp:\n pickle.dump(data, fp)\n\n\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n\n\ndef get_embeddings():\n data = DataManager.get_data_features_clean()\n data = data[['rev_id', 'label', 'words', 'embedding']]\n data.set_index('rev_id')\n embeddings = data['embedding']\n with open('../features/embeddings.txt', 'wb') as fp:\n pickle.dump(embeddings, fp)\n\n\n<function token>\n\n\ndef flat_data_calling():\n for i in range(0, 115):\n print('-----------', i)\n data = DataManager.get_data_features_clean()\n dt = data.iloc[1000 * i:1000 * (i + 1)]\n del data\n flat_data(dt)\n data = DataManager.get_data_features_clean()\n dt = data.iloc[1000 * i:]\n del data\n flat_data(dt)\n\n\n<function token>\n\n\ndef sentences_clean():\n comments = DataManager.get_comments()\n sentences = DataManager.get_sentences_clean()\n sent = comments.merge(sentences, how='left', left_index=True,\n right_index=True).drop('words', axis=1)\n sent.columns = ['rev_id', 'sentence']\n sent.to_csv('../features/sentences_clean.csv', index=False)\n\n\ndef relabel():\n labels = DataManager.get_labels()\n class_0 = labels[labels['label'] == 0].iloc[0:, :]\n class_2 = labels[labels['label'] == 2].append(labels[labels['label'] == 1]\n ).iloc[0:, :]\n class_2.iloc[:, 2] = 0\n class_3 = labels[labels['label'] == 3].append(labels[labels.label == 4]\n ).append(labels[labels.label == 5]).iloc[0:, :]\n class_3.iloc[:, 2] = 1\n labels = class_0.append(class_2).append(class_3)\n labels = labels.sample(frac=1)\n labels = labels.reset_index().drop('level_0', axis=1).drop('index', axis=1)\n labels.to_csv('../features/labels_2_classes.csv', index=False)\n print('-------------Labels done!++++++++++++++')\n\n\n<function token>\n\n\ndef group_data(labels):\n grouped = labels.groupby('rev_id')\n i = 0\n partial = pd.DataFrame()\n print('Total:', len(grouped))\n for name, group in grouped:\n print(i)\n temp = pd.DataFrame(np.zeros(group.shape[0] + 1))\n temp.iloc[0] = group.iloc[0, 0]\n j = 0\n for index, row in group.iterrows():\n temp.iloc[j + 1] = row[1]\n j += 1\n partial = partial.append(temp.transpose())\n i += 1\n print(partial)\n partial.to_csv('../data/partial.csv')\n\n\n<function token>\n\n\ndef read_data():\n comments = DataManager.get_comments_clean()\n labels = DataManager.get_original_comments()\n labels = labels.drop(['worker_id', 'quoting_attack', 'recipient_attack',\n 'third_party_attack', 'other_attack'], axis=1)\n return labels, comments\n\n\n<function token>\n\n\ndef merge_comments_words():\n basic = DataManager.get_basic()\n comments = DataManager.get_original_comments()\n comments = comments.drop(['year', 'sample', 'split'], axis=1)\n basic_comments = basic.merge(comments, left_on=2, right_on='rev_id')\n labels = basic_comments.iloc[:, 0:2]\n basic_comments = basic_comments.drop(2, axis=1)\n basic_comments = basic_comments.drop(1, axis=1)\n cl = basic_comments.columns.tolist()\n cl[0] = 'comment'\n with open('../features/basic_comments.txt', 'wb') as fp:\n pickle.dump(basic_comments, fp)\n labels = labels[[2, 1]]\n cols = ['rev_id', 'label']\n labels.columns = cols\n pd.DataFrame(labels).to_csv('../features/labels.csv', header=cols)\n\n\n<function token>\n\n\ndef clean_new_lines(path):\n data = DataManager.get_comments()\n data = data.reset_index(drop=True)\n for index, row in data.iterrows():\n data.iloc[index, 0] = clean_whitespaces(row['comment'])\n if index % 1000 == 0:\n print(index)\n print(row['comment'])\n print(data.iloc[index, 0])\n if index % 10000 == 0:\n with open(path, 'wb') as fp:\n pickle.dump(data, fp)\n data = 
data.reset_index(drop=True)\n with open(path, 'wb') as fp:\n pickle.dump(data, fp)\n\n\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n\n\ndef get_embeddings():\n data = DataManager.get_data_features_clean()\n data = data[['rev_id', 'label', 'words', 'embedding']]\n data.set_index('rev_id')\n embeddings = data['embedding']\n with open('../features/embeddings.txt', 'wb') as fp:\n pickle.dump(embeddings, fp)\n\n\n<function token>\n\n\ndef flat_data_calling():\n for i in range(0, 115):\n print('-----------', i)\n data = DataManager.get_data_features_clean()\n dt = data.iloc[1000 * i:1000 * (i + 1)]\n del data\n flat_data(dt)\n data = DataManager.get_data_features_clean()\n dt = data.iloc[1000 * i:]\n del data\n flat_data(dt)\n\n\n<function token>\n\n\ndef sentences_clean():\n comments = DataManager.get_comments()\n sentences = DataManager.get_sentences_clean()\n sent = comments.merge(sentences, how='left', left_index=True,\n right_index=True).drop('words', axis=1)\n sent.columns = ['rev_id', 'sentence']\n sent.to_csv('../features/sentences_clean.csv', index=False)\n\n\ndef relabel():\n labels = DataManager.get_labels()\n class_0 = labels[labels['label'] == 0].iloc[0:, :]\n class_2 = labels[labels['label'] == 2].append(labels[labels['label'] == 1]\n ).iloc[0:, :]\n class_2.iloc[:, 2] = 0\n class_3 = labels[labels['label'] == 3].append(labels[labels.label == 4]\n ).append(labels[labels.label == 5]).iloc[0:, :]\n class_3.iloc[:, 2] = 1\n labels = class_0.append(class_2).append(class_3)\n labels = labels.sample(frac=1)\n labels = labels.reset_index().drop('level_0', axis=1).drop('index', axis=1)\n labels.to_csv('../features/labels_2_classes.csv', index=False)\n print('-------------Labels done!++++++++++++++')\n\n\n<function token>\n\n\ndef group_data(labels):\n grouped = labels.groupby('rev_id')\n i = 0\n partial = pd.DataFrame()\n print('Total:', len(grouped))\n for name, group in grouped:\n print(i)\n temp = pd.DataFrame(np.zeros(group.shape[0] + 1))\n temp.iloc[0] = group.iloc[0, 0]\n j = 0\n for index, row in group.iterrows():\n temp.iloc[j + 1] = row[1]\n j += 1\n partial = partial.append(temp.transpose())\n i += 1\n print(partial)\n partial.to_csv('../data/partial.csv')\n\n\n<function token>\n\n\ndef read_data():\n comments = DataManager.get_comments_clean()\n labels = DataManager.get_original_comments()\n labels = labels.drop(['worker_id', 'quoting_attack', 'recipient_attack',\n 'third_party_attack', 'other_attack'], axis=1)\n return labels, comments\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef clean_new_lines(path):\n data = DataManager.get_comments()\n data = data.reset_index(drop=True)\n for index, row in data.iterrows():\n data.iloc[index, 0] = clean_whitespaces(row['comment'])\n if index % 1000 == 0:\n print(index)\n print(row['comment'])\n print(data.iloc[index, 0])\n if index % 10000 == 0:\n with open(path, 'wb') as fp:\n pickle.dump(data, fp)\n data = data.reset_index(drop=True)\n with open(path, 'wb') as fp:\n pickle.dump(data, fp)\n\n\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n\n\ndef get_embeddings():\n data = DataManager.get_data_features_clean()\n data = data[['rev_id', 'label', 'words', 'embedding']]\n data.set_index('rev_id')\n embeddings = data['embedding']\n with open('../features/embeddings.txt', 'wb') as fp:\n pickle.dump(embeddings, fp)\n\n\n<function token>\n\n\ndef flat_data_calling():\n for i in range(0, 115):\n print('-----------', i)\n data = DataManager.get_data_features_clean()\n dt = data.iloc[1000 * i:1000 * (i + 1)]\n del data\n flat_data(dt)\n data = DataManager.get_data_features_clean()\n dt = data.iloc[1000 * i:]\n del data\n flat_data(dt)\n\n\n<function token>\n\n\ndef sentences_clean():\n comments = DataManager.get_comments()\n sentences = DataManager.get_sentences_clean()\n sent = comments.merge(sentences, how='left', left_index=True,\n right_index=True).drop('words', axis=1)\n sent.columns = ['rev_id', 'sentence']\n sent.to_csv('../features/sentences_clean.csv', index=False)\n\n\ndef relabel():\n labels = DataManager.get_labels()\n class_0 = labels[labels['label'] == 0].iloc[0:, :]\n class_2 = labels[labels['label'] == 2].append(labels[labels['label'] == 1]\n ).iloc[0:, :]\n class_2.iloc[:, 2] = 0\n class_3 = labels[labels['label'] == 3].append(labels[labels.label == 4]\n ).append(labels[labels.label == 5]).iloc[0:, :]\n class_3.iloc[:, 2] = 1\n labels = class_0.append(class_2).append(class_3)\n labels = labels.sample(frac=1)\n labels = labels.reset_index().drop('level_0', axis=1).drop('index', axis=1)\n labels.to_csv('../features/labels_2_classes.csv', index=False)\n print('-------------Labels done!++++++++++++++')\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef read_data():\n comments = DataManager.get_comments_clean()\n labels = DataManager.get_original_comments()\n labels = labels.drop(['worker_id', 'quoting_attack', 'recipient_attack',\n 'third_party_attack', 'other_attack'], axis=1)\n return labels, comments\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef clean_new_lines(path):\n data = DataManager.get_comments()\n data = data.reset_index(drop=True)\n for index, row in data.iterrows():\n data.iloc[index, 0] = clean_whitespaces(row['comment'])\n if index % 1000 == 0:\n print(index)\n print(row['comment'])\n print(data.iloc[index, 0])\n if index % 10000 == 0:\n with open(path, 'wb') as fp:\n pickle.dump(data, fp)\n data = data.reset_index(drop=True)\n with open(path, 'wb') as fp:\n pickle.dump(data, fp)\n\n\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n\n\ndef get_embeddings():\n data = DataManager.get_data_features_clean()\n data = data[['rev_id', 'label', 'words', 'embedding']]\n data.set_index('rev_id')\n embeddings = data['embedding']\n with open('../features/embeddings.txt', 'wb') as fp:\n pickle.dump(embeddings, fp)\n\n\n<function token>\n\n\ndef flat_data_calling():\n for i in range(0, 115):\n print('-----------', i)\n data = DataManager.get_data_features_clean()\n dt = data.iloc[1000 * i:1000 * (i + 1)]\n del data\n flat_data(dt)\n data = DataManager.get_data_features_clean()\n dt = data.iloc[1000 * i:]\n del data\n flat_data(dt)\n\n\n<function token>\n\n\ndef sentences_clean():\n comments = DataManager.get_comments()\n sentences = DataManager.get_sentences_clean()\n sent = comments.merge(sentences, how='left', left_index=True,\n right_index=True).drop('words', axis=1)\n sent.columns = ['rev_id', 'sentence']\n sent.to_csv('../features/sentences_clean.csv', index=False)\n\n\ndef relabel():\n labels = DataManager.get_labels()\n class_0 = labels[labels['label'] == 0].iloc[0:, :]\n class_2 = labels[labels['label'] == 2].append(labels[labels['label'] == 1]\n ).iloc[0:, :]\n class_2.iloc[:, 2] = 0\n class_3 = labels[labels['label'] == 3].append(labels[labels.label == 4]\n ).append(labels[labels.label == 5]).iloc[0:, :]\n class_3.iloc[:, 2] = 1\n labels = class_0.append(class_2).append(class_3)\n labels = labels.sample(frac=1)\n labels = labels.reset_index().drop('level_0', axis=1).drop('index', axis=1)\n labels.to_csv('../features/labels_2_classes.csv', index=False)\n print('-------------Labels done!++++++++++++++')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef clean_new_lines(path):\n data = DataManager.get_comments()\n data = data.reset_index(drop=True)\n for index, row in data.iterrows():\n data.iloc[index, 0] = clean_whitespaces(row['comment'])\n if index % 1000 == 0:\n print(index)\n print(row['comment'])\n print(data.iloc[index, 0])\n if index % 10000 == 0:\n with open(path, 'wb') as fp:\n pickle.dump(data, fp)\n data = data.reset_index(drop=True)\n with open(path, 'wb') as fp:\n pickle.dump(data, fp)\n\n\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n\n\ndef get_embeddings():\n data = DataManager.get_data_features_clean()\n data = data[['rev_id', 'label', 'words', 'embedding']]\n data.set_index('rev_id')\n embeddings = data['embedding']\n with open('../features/embeddings.txt', 'wb') as fp:\n pickle.dump(embeddings, fp)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef sentences_clean():\n comments = DataManager.get_comments()\n sentences = DataManager.get_sentences_clean()\n sent = comments.merge(sentences, how='left', left_index=True,\n right_index=True).drop('words', axis=1)\n sent.columns = ['rev_id', 'sentence']\n sent.to_csv('../features/sentences_clean.csv', index=False)\n\n\ndef relabel():\n labels = DataManager.get_labels()\n class_0 = labels[labels['label'] == 0].iloc[0:, :]\n class_2 = labels[labels['label'] == 2].append(labels[labels['label'] == 1]\n ).iloc[0:, :]\n class_2.iloc[:, 2] = 0\n class_3 = labels[labels['label'] == 3].append(labels[labels.label == 4]\n ).append(labels[labels.label == 5]).iloc[0:, :]\n class_3.iloc[:, 2] = 1\n labels = class_0.append(class_2).append(class_3)\n labels = labels.sample(frac=1)\n labels = labels.reset_index().drop('level_0', axis=1).drop('index', axis=1)\n labels.to_csv('../features/labels_2_classes.csv', index=False)\n print('-------------Labels done!++++++++++++++')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef clean_new_lines(path):\n data = DataManager.get_comments()\n data = data.reset_index(drop=True)\n for index, row in data.iterrows():\n data.iloc[index, 0] = clean_whitespaces(row['comment'])\n if index % 1000 == 0:\n print(index)\n print(row['comment'])\n print(data.iloc[index, 0])\n if index % 10000 == 0:\n with open(path, 'wb') as fp:\n pickle.dump(data, fp)\n data = data.reset_index(drop=True)\n with open(path, 'wb') as fp:\n pickle.dump(data, fp)\n\n\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n\n\ndef get_embeddings():\n data = DataManager.get_data_features_clean()\n data = data[['rev_id', 'label', 'words', 'embedding']]\n data.set_index('rev_id')\n embeddings = data['embedding']\n with open('../features/embeddings.txt', 'wb') as fp:\n pickle.dump(embeddings, fp)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef relabel():\n labels = DataManager.get_labels()\n class_0 = labels[labels['label'] == 0].iloc[0:, :]\n class_2 = labels[labels['label'] == 2].append(labels[labels['label'] == 1]\n ).iloc[0:, :]\n class_2.iloc[:, 2] = 0\n class_3 = labels[labels['label'] == 3].append(labels[labels.label == 4]\n ).append(labels[labels.label == 5]).iloc[0:, :]\n class_3.iloc[:, 2] = 1\n labels = class_0.append(class_2).append(class_3)\n labels = labels.sample(frac=1)\n labels = labels.reset_index().drop('level_0', axis=1).drop('index', axis=1)\n labels.to_csv('../features/labels_2_classes.csv', index=False)\n print('-------------Labels done!++++++++++++++')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef clean_new_lines(path):\n data = DataManager.get_comments()\n data = data.reset_index(drop=True)\n for index, row in data.iterrows():\n data.iloc[index, 0] = clean_whitespaces(row['comment'])\n if index % 1000 == 0:\n print(index)\n print(row['comment'])\n print(data.iloc[index, 0])\n if index % 10000 == 0:\n with open(path, 'wb') as fp:\n pickle.dump(data, fp)\n data = data.reset_index(drop=True)\n with open(path, 'wb') as fp:\n pickle.dump(data, fp)\n\n\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef relabel():\n labels = DataManager.get_labels()\n class_0 = labels[labels['label'] == 0].iloc[0:, :]\n class_2 = labels[labels['label'] == 2].append(labels[labels['label'] == 1]\n ).iloc[0:, :]\n class_2.iloc[:, 2] = 0\n class_3 = labels[labels['label'] == 3].append(labels[labels.label == 4]\n ).append(labels[labels.label == 5]).iloc[0:, :]\n class_3.iloc[:, 2] = 1\n labels = class_0.append(class_2).append(class_3)\n labels = labels.sample(frac=1)\n labels = labels.reset_index().drop('level_0', axis=1).drop('index', axis=1)\n labels.to_csv('../features/labels_2_classes.csv', index=False)\n print('-------------Labels done!++++++++++++++')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef clean_new_lines(path):\n data = DataManager.get_comments()\n data = data.reset_index(drop=True)\n for index, row in data.iterrows():\n data.iloc[index, 0] = clean_whitespaces(row['comment'])\n if index % 1000 == 0:\n print(index)\n print(row['comment'])\n print(data.iloc[index, 0])\n if index % 10000 == 0:\n with open(path, 'wb') as fp:\n pickle.dump(data, fp)\n data = data.reset_index(drop=True)\n with open(path, 'wb') as fp:\n pickle.dump(data, fp)\n\n\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef relabel():\n labels = DataManager.get_labels()\n class_0 = labels[labels['label'] == 0].iloc[0:, :]\n class_2 = labels[labels['label'] == 2].append(labels[labels['label'] == 1]\n ).iloc[0:, :]\n class_2.iloc[:, 2] = 0\n class_3 = labels[labels['label'] == 3].append(labels[labels.label == 4]\n ).append(labels[labels.label == 5]).iloc[0:, :]\n class_3.iloc[:, 2] = 1\n labels = class_0.append(class_2).append(class_3)\n labels = labels.sample(frac=1)\n labels = labels.reset_index().drop('level_0', axis=1).drop('index', axis=1)\n labels.to_csv('../features/labels_2_classes.csv', index=False)\n print('-------------Labels done!++++++++++++++')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,512 |
46266f6b9e567df665fe37fc1bb3425ae7c9c543
|
# coding=utf-8
from trading_future.config_self import *
import pandas as pd
import numpy as np
import re
import datetime
import pymongo
from jqdatasdk import *
from configDB import *
auth(JOINQUANT_USER, JOINQUANT_PW)
class Future:
"""
全系统Future单例,只需创建一次,合约、品种基础信息类
"""
def __init__(self):
self.products = list()
self.products_base_msg = dict()
self.products_symbol_msg = dict()
self.tradedays_msg = dict()
self.main_contract_msg = dict()
self.__get_product_mongomsg()
self.__get_trading_sessions()
self.__get_tradedays()
self.__get_main_contract()
def get_VolumeMultiple(self, contract_lst=None):
"""
获取合约单位
"""
info_lst = ['VolumeMultiple']
dict_all = dict()
contract_lst = [i.upper() for i in contract_lst]
for contract in contract_lst:
            contract_temp = contract
            if len(contract) > 3:
                # full contract codes such as 'A2105' are reduced to the product code
                contract_temp = contract[:-4]
dict_all[contract] = {i: self.products_base_msg[contract_temp][i] for i in info_lst}
return dict_all
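    # Example (hypothetical codes/values) -- both product and full contract codes are accepted:
    # get_VolumeMultiple(['A', 'a2105']) -> {'A': {'VolumeMultiple': 10}, 'A2105': {'VolumeMultiple': 10}}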
def get_PriceTick(self, contract_lst=None):
"""
获取交易所名称
"""
info_lst = ['PriceTick']
dict_all = dict()
contract_lst = [i.upper() for i in contract_lst]
for contract in contract_lst:
contract_temp = contract
if len(contract) > 3:
contract_temp = contract[:-4]
dict_all[contract] = {i: self.products_base_msg[contract_temp][i] for i in info_lst}
return dict_all
def get_ExchangeID(self, contract_lst=None):
"""
获取交易所名称
"""
info_lst = ['ExchangeID']
dict_all = dict()
contract_lst = [i.upper() for i in contract_lst]
for contract in contract_lst:
dict_all[contract] = {i: self.products_symbol_msg[''.join(re.split(r'[^A-Za-z]', contract))][contract][i]
for i in info_lst}
return dict_all
def get_ExchangeInstID(self, contract_lst=None):
"""
获取交易所名称
"""
info_lst = ['ExchangeInstID']
dict_all = dict()
contract_lst = [i.upper() for i in contract_lst]
for contract in contract_lst:
dict_all[contract] = {i: self.products_symbol_msg[''.join(re.split(r'[^A-Za-z]', contract))][contract][i]
for i in info_lst}
return dict_all
def get_LongMarginRatio(self, contract_lst=None):
"""
获取交易所名称
"""
info_lst = ['LongMarginRatio']
dict_all = dict()
contract_lst = [i.upper() for i in contract_lst]
for contract in contract_lst:
dict_all[contract] = {i: self.products_symbol_msg[''.join(re.split(r'[^A-Za-z]', contract))][contract][i]
for i in info_lst}
return dict_all
def get_ShortMarginRatio(self, contract_lst=None):
"""
获取交易所名称
"""
info_lst = ['ShortMarginRatio']
dict_all = dict()
contract_lst = [i.upper() for i in contract_lst]
for contract in contract_lst:
dict_all[contract] = {i: self.products_symbol_msg[''.join(re.split(r'[^A-Za-z]', contract))][contract][i]
for i in info_lst}
return dict_all
def get_MaxMarketOrderVolume(self, contract_lst=None):
"""
获取交易所名称
"""
info_lst = ['MaxMarketOrderVolume']
dict_all = dict()
contract_lst = [i.upper() for i in contract_lst]
for contract in contract_lst:
dict_all[contract] = {i: self.products_symbol_msg[''.join(re.split(r'[^A-Za-z]', contract))][contract][i]
for i in info_lst}
return dict_all
def get_MaxLimitOrderVolume(self, contract_lst=None):
"""
获取交易所名称
"""
info_lst = ['MaxLimitOrderVolume']
dict_all = dict()
contract_lst = [i.upper() for i in contract_lst]
for contract in contract_lst:
dict_all[contract] = {i: self.products_symbol_msg[''.join(re.split(r'[^A-Za-z]', contract))][contract][i]
for i in info_lst}
return dict_all
def get_contract_info(self, contract_lst=None, info_lst=None):
"""
获取主力合约
:return:
:info_lst:['product', 'symbol', 'ProductID', 'ExchangeID', 'MaxLimitOrderVolume', 'MinLimitOrderVolume',
'MaxMarketOrderVolume', 'MinMarketOrderVolume', 'LongMarginRatio', 'ShortMarginRatio', 'VolumeMultiple',
'ExchangeInstID', 'IsTrading']
"""
dict_all = dict()
contract_lst = [i.upper() for i in contract_lst]
for contract in contract_lst:
dict_all[contract] = {i: self.products_symbol_msg[''.join(re.split(r'[^A-Za-z]', contract))][contract][i] for i in info_lst}
# dict_all[contract]['VolumeMultiple'] = self.products_base_msg[contract[0:-4]][contract]
return dict_all
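    # Example (hypothetical codes and values):
    # future.get_contract_info(contract_lst=['a2105'], info_lst=['ExchangeID', 'VolumeMultiple'])
    # -> {'A2105': {'ExchangeID': 'DCE', 'VolumeMultiple': 10}}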
def __get_product_mongomsg(self):
"""
获取mongo里的product数据
:return:
"""
        print(MONGDB_USER, MONGDB_PW, MONGDB_IP)  # debug: prints the MongoDB connection credentials
with pymongo.MongoClient(f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:
col = m_cl['MARKET']['product']
df_product = pd.DataFrame(col.find({'ProductID': {'$regex': '^[a-zA-Z]{1,2}$'}}))
# del df_product['contract']
self.products = list(df_product['ProductID'])
self.products.remove('OI')
df_product.index = df_product['ProductID']
self.products_base_msg = df_product.T.to_dict()
col = m_cl['MARKET']['instruments']
del df_product
df_symbols = pd.DataFrame(col.find({'symbol': {'$regex': '^[a-zA-Z]+[0-9]+$'}, "ASSET_TYPE": 'Future'}))
df_symbols['product'] = df_symbols['symbol'].str.extract(r'(^[a-zA-Z]+)', expand=False).str.upper()
for product, symbols in df_symbols.groupby('product'):
symbols.index = symbols['symbol']
symbols_dict = symbols.T.to_dict()
self.products_symbol_msg[product] = symbols_dict
def __get_main_contract(self):
"""
获取主力合约
:return:
"""
with pymongo.MongoClient(f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:
db = m_cl['MARKET']
            for mark in ['', '_OI', '_VOL']:
mark = 'main_contract' + mark
col = db[mark]
df = pd.DataFrame(col.find({}, {'_id': 0}))
df = df.set_index('date').sort_index()
df.index = pd.to_datetime(df.index)
self.main_contract_msg[mark] = df
def get_windy_code(self, code):
"""
其他code转windy的code
:param code: 需要转的code
:return:
"""
change_dict = {
"DCE": 'DCE',
"CZCE": 'CZC',
"SHFE": 'SHF',
"INE": 'INE',
"CFFEX": 'CFE'
}
pattern = re.compile(r'^[a-zA-Z]{1,2}')
product = (pattern.match(code)).group(0).upper()
exchange_id = self.products_base_msg[product]['ExchangeID']
        if pd.isna(exchange_id):  # `is np.NaN` fails for NaN values coming out of pandas
            return
windy_code = code + '.' + change_dict[exchange_id]
return windy_code
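    # Example (hypothetical codes): 'rb2105' -> 'rb2105.SHF', 'a2105' -> 'a2105.DCE'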
def get_jq_code(self, code):
"""
其他code转jq的code
:param code: 需要转的code
:return:
"""
change_dict = {
"DCE": 'XDCE',
"CZCE": 'XZCE',
"SHFE": 'XSGE',
"INE": 'XINE',
"CFFEX": 'CCFX'
}
def get_main_symbol(self, product=None, date=None):
"""
        :param product: str or list
:param date:
:return:
"""
if product:
product = product if isinstance(product, list) else [product]
date = pd.to_datetime(date) if date else pd.to_datetime(datetime.date.today())
df = {}
for symbol in product:
print(symbol)
            df[symbol] = get_dominant_future(symbol, date)[:-5]  # strip the '.XDCE'-style exchange suffix
return df
def __get_trading_sessions(self):
"""
获取期货历史交易时间窗口
:return:
"""
with pymongo.MongoClient(f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:
col = m_cl['MARKET']['TradingSessions']
trading_sessions_df = pd.DataFrame(col.find())
del trading_sessions_df['_id']
trading_sessions_df['DateRange_Start'] = trading_sessions_df['DateRange_Start'].fillna('1990/01/01')
trading_sessions_df['DateRange_End'] = trading_sessions_df['DateRange_End'].fillna('2099/01/01')
for product in self.products_base_msg.keys():
product_trading_sessions = trading_sessions_df.loc[trading_sessions_df['Market'] == product]
self.products_base_msg[product]['trading_session'] = product_trading_sessions
def get_product_trading_sessions(self, product, date: str = None):
"""
获取交易的时间窗口
:param product: 品种
:param date: 日期, 默认今日,如果是'all',返回全周期的
:return:
"""
trade_sessions = self.products_base_msg[product]['trading_session']
if date != 'all':
date = pd.to_datetime(date) if date else datetime.date.today()
date = date.strftime('%Y/%m/%d')
trade_sessions = trade_sessions.loc[(trade_sessions['DateRange_Start'] <= date) &
(trade_sessions['DateRange_End'] >= date), ]
return trade_sessions
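    # Example (hypothetical): get_product_trading_sessions('RB') returns today's windows,
    # get_product_trading_sessions('RB', 'all') returns the full history.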
def __get_tradedays(self):
change_dict = {
"DCE": 'DCE',
"CZCE": 'CZC',
"SHFE": 'SHF',
"INE": 'SHF',
"CFFEX": 'CFE'
}
with pymongo.MongoClient(f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:
db = m_cl['Tradedays']
for jz_code, mongo_code in change_dict.items():
col = db[mongo_code]
tradedays_df = pd.DataFrame(col.find({'isTradingday': True}))
tradedays_df = tradedays_df[['Tradedays_str', 'Tradedays']]
tradedays_df['Tradedays'] = pd.to_datetime(tradedays_df['Tradedays'].dt.strftime("%Y-%m-%d %H:%M:%S"))
tradedays_df.drop_duplicates(subset=['Tradedays_str'], inplace=True)
tradedays_df.set_index('Tradedays', inplace=True)
tradedays_df.sort_index(inplace=True)
self.tradedays_msg[jz_code] = tradedays_df
def find_tradeday(self, day: int, date=None, exchange: str = 'DCE'):
"""
根据date查询距此日day天的交易日
:param date: None 默认是今日
:param day: day为0 时为判断今天是否是交易日,返回Bool
:param exchange:
:return: date:str
"""
date = pd.to_datetime(datetime.date.today()) if not date else pd.to_datetime(date).replace(hour=0,
minute=0, second=0)
tradeday_df = self.tradedays_msg[exchange]
if day == 0:
return date.strftime("%Y-%m-%d") in tradeday_df['Tradedays_str'].values
if day > 0:
tradeday_df = tradeday_df.loc[tradeday_df.index > date]
return tradeday_df.iloc[day-1]['Tradedays_str']
if day < 0:
tradeday_df = tradeday_df.loc[tradeday_df.index < date]
return tradeday_df.iloc[day]['Tradedays_str']
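    # Examples: find_tradeday(0) -> bool, is today a trading day;
    # find_tradeday(1) -> next trading day; find_tradeday(-1) -> previous trading day.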
def get_limit_position(self, symbols):
"""
获取合约今日的最大持仓限制
:param symbol: jz格式
:return:
"""
symbols = symbols if isinstance(symbols, list) else [symbols]
data_dict = dict()
for symbol in symbols:
pattern = re.compile(r'^[a-zA-Z]{1,2}')
product = (pattern.match(symbol)).group(0).upper()
expireDate = self.products_symbol_msg[product][symbol]['ExpireDate']
exchange_id = self.products_base_msg[product]['ExchangeID']
today = datetime.date.today().strftime('%Y-%m-%d')
if pd.to_datetime(today).strftime('%Y%m%d') >= expireDate:
data_dict[symbol] = 'expired'
elif product in ['SA', 'CF', 'SR', 'TA', 'OI', 'MA', 'FG', 'RM', 'ZC', 'PM', 'WH', 'RS', 'RI', 'JR', 'LR', 'SF',
'SM', 'CY', 'AP']:
expireDate = pd.to_datetime(expireDate)
lst_lst_month_last_day = (expireDate.replace(day=1) - datetime.timedelta(days=1)
).replace(day=1) - datetime.timedelta(days=1)
                last_change_day = self.find_tradeday(15, date=lst_lst_month_last_day, exchange=exchange_id)  # trading day around the 15th calendar day of the month before the delivery month
change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')
if product == 'AP' and symbol[-1] == '7':
product = product + '7'
if today <= last_change_day:
limit_dict = {
'SA': '单边持仓量<20万:20000手, 单边持仓量≥20万:单边持仓量×10%',
'CF': '单边持仓量<15万:15000手,单边持仓量≥15万:单边持仓量×10%',
'SR': '单边持仓量<25万:25000手,单边持仓量≥25万:单边持仓量×10%',
'TA': '单边持仓量<25万:25000手,单边持仓量≥25万:单边持仓量×10%',
'OI': '单边持仓量<10万:10000手,单边持仓量≥10万:单边持仓量×10%',
'MA': '单边持仓量<10万:10000手,单边持仓量≥10万:单边持仓量×10%',
'FG': '单边持仓量<20万:20000手,单边持仓量≥20万:单边持仓量×10%',
'RM': '单边持仓量<20万:20000手,单边持仓量≥20万:单边持仓量×10%',
'ZC': '单边持仓量<60万:60000手,单边持仓量≥60万:单边持仓量×10%',
'PM': 2000, 'WH': 2500, 'RS': 10000, 'RI': 7500, 'JR': 20000,
'LR': 20000, 'SF': 8000, 'SM': 30000, 'CY': 5000, 'AP': 500, 'AP7': 100
}
elif change_day > today > last_change_day:
limit_dict = {'SA': 4000,
'CF': 4000, 'SR': 5000, 'TA': 10000, 'OI': 3000, 'MA': 2000, 'FG': 5000,
'RM': 2000, 'ZC': 20000, 'PM': 600, 'WH': 1000, 'RS': 1000, 'RI': 2000, 'JR': 3000,
'LR': 3000, 'SF': 2000, 'SM': 10000, 'CY': 500, 'AP': 100, 'AP7': 20
}
else:
limit_dict = {'SA': 800,
'CF': 800, 'SR': 1000, 'TA': 5000, 'OI': 1000, 'MA': 1000, 'FG': 1000,
'RM': 1000, 'ZC': 4000, 'PM': 200, 'WH': 300, 'RS': 500, 'RI': 400, 'JR': 500,
'LR': 500, 'SF': 500, 'SM': 2000, 'CY': 100, 'AP': 10, 'AP7': 6
}
data_dict[symbol] = limit_dict[product]
elif product in ['CJ']:
expireDate = pd.to_datetime(expireDate)
lst_lst_month_last_day = (expireDate.replace(day=1) - datetime.timedelta(days=1)
).replace(day=1) - datetime.timedelta(days=1)
lst_month_fst_day = (expireDate.replace(day=1) - datetime.timedelta(days=1)).replace(day=1)
lst_lst_change_day = self.find_tradeday(-1, date=lst_month_fst_day, exchange=exchange_id)
last_change_day = self.find_tradeday(15, date=lst_lst_month_last_day, exchange=exchange_id)
change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')
if today <= lst_lst_change_day:
limit_dict = {
'CJ': 300
}
elif last_change_day >= today > lst_lst_change_day:
limit_dict = {
'CJ': 60
}
elif change_day > today > last_change_day:
limit_dict = {
'CJ': 20
}
else:
limit_dict = {
'CJ': 6
}
data_dict[symbol] = limit_dict[product]
elif product in ['A', 'V', 'PP', 'C', 'B', 'L', 'P', 'J', 'JM', 'I', 'FB', 'BB', 'CS', 'Y', 'M', 'EG']:
expireDate = pd.to_datetime(expireDate)
lst_lst_month_last_day = (expireDate.replace(day=1) - datetime.timedelta(days=1)
).replace(day=1) - datetime.timedelta(days=1)
                last_change_day = self.find_tradeday(14, date=lst_lst_month_last_day,
                                                     exchange=exchange_id)  # trading day around the 15th calendar day of the month before the delivery month
change_day = self.find_tradeday(-1, date=expireDate.replace(day=1), exchange=exchange_id)
if today <= last_change_day:
limit_dict = {
'A': '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%',
'V': '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%',
'PP': '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%',
'C': '单边持仓量<=40万:40000手,单边持仓量>40万:单边持仓量×10%',
'B': '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%',
'L': '单边持仓量<=10万:10000手,单边持仓量>10万:单边持仓量×10%',
'P': '单边持仓量<=10万:10000手,单边持仓量>10万:单边持仓量×10%',
'J': '单边持仓量<=5万:5000手,单边持仓量>5万:单边持仓量×10%',
'JM': '单边持仓量<=8万:8000手,单边持仓量>8万:单边持仓量×10%',
'I': '单边持仓量<=40万:40000手,单边持仓量>40万:单边持仓量×10%',
'FB': '单边持仓量<=16万:16000手,单边持仓量>16万:单边持仓量×10%',
'BB': '单边持仓量<=6万:6000手,单边持仓量>6万:单边持仓量×10%',
'CS': '单边持仓量<=15万:15000手,单边持仓量>15万:单边持仓量×10%',
'Y': '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%',
'M': '单边持仓量<=40万:40000手,单边持仓量>40万:单边持仓量×10%',
'EG': '单边持仓量<=8万:8000手,单边持仓量>8万:单边持仓量×10%,'
' 单边持仓>12万:3000手, 提高保证金合约价值×10%',
}
elif change_day >= today > last_change_day:
limit_dict = {
'A': 5000, 'V': 5000, 'PP': 5000, 'C': 15000, 'B': 4500, 'L': 3000, 'P': 1500, 'J': 900,
'JM': 1500, 'I': 6000, 'FB': 400, 'BB': 80, 'CS': 4500, 'Y': 3000, 'M': 7500,
'EG': '3000手,单边持仓>8万:1000手, 提高保证金合约价值×20% '
}
else:
limit_dict = {
'A': 2500, 'V': 2500, 'PP': 2500, 'C': 5000, 'B': 1500, 'L': 1000, 'P': 500, 'J': 300,
'JM': 500, 'I': 2000, 'FB': 100, 'BB': 20, 'CS': 1500, 'Y': 1000, 'M': 2500,
'EG': '1000手,单边持仓>8万:1000手, 提高保证金合约价值×20% '
}
data_dict[symbol] = limit_dict[product]
elif product in ['JD']:
expireDate = pd.to_datetime(expireDate)
lst_lst_month_last_day = (expireDate.replace(day=1) - datetime.timedelta(days=1)
).replace(day=1) - datetime.timedelta(days=1)
lst_lst_change_day = self.find_tradeday(1, date=lst_lst_month_last_day, exchange=exchange_id)
last_change_day = self.find_tradeday(10, date=lst_lst_month_last_day, exchange=exchange_id)
change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')
if today < lst_lst_change_day:
limit_dict = {
'JD': 1200
}
elif last_change_day > today >= lst_lst_change_day:
limit_dict = {
'JD': 400
}
elif change_day > today >= last_change_day:
limit_dict = {
'JD': 120
}
else:
limit_dict = {
'JD': 20
}
data_dict[symbol] = limit_dict[product]
elif product in ['CU', 'AL', 'ZN', 'PB', 'NI', 'SN', 'RB', 'WR', 'HC', 'RU', 'BU', 'AU', 'AG', 'SP']:
expireDate = pd.to_datetime(expireDate)
lst_month_fst_day = (expireDate.replace(day=1) - datetime.timedelta(days=1)).replace(day=1)
                last_change_day = self.find_tradeday(-1, date=lst_month_fst_day, exchange=exchange_id)  # from listing through the last trading day of the second month before the delivery month
change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')
if today <= last_change_day:
limit_dict = {
'CU': '单边持仓量<16万:8000, 单边持仓量>=16万:单边持仓量×10%',
'AL': '单边持仓量<20万:10000, 单边持仓量>=20万:单边持仓量×10%',
'ZN': '单边持仓量<12万:6000, 单边持仓量>=12万:单边持仓量×10%',
'PB': '单边持仓量<10万:5000, 单边持仓量>=10万:单边持仓量×10%',
'NI': '单边持仓量<12万:6000, 单边持仓量>=12万:单边持仓量×10%',
'SN': '单边持仓量<3万:1500, 单边持仓量>=3万:单边持仓量×10%',
'RB': '单边持仓量<180万:90000, 单边持仓量>=180万:单边持仓量×10%',
'WR': '单边持仓量<45万:22500, 单边持仓量>=45万:单边持仓量×10%',
'HC': '单边持仓量<240万:120000, 单边持仓量>=240万:单边持仓量×10%',
'RU': 500, 'BU': 8000, 'AU': '非期货公司会员:18000, 客户:9000',
'AG': '非期货公司会员:18000手, 客户:9000手', 'SP': 4500
}
elif change_day > today > last_change_day:
limit_dict = {
'CU': 3000, 'AL': 3000, 'ZN': 2400, 'PB': 1800, 'NI': 1800, 'SN': 600, 'RB': 4500, 'WR': 1800,
'HC': 9000, 'RU': 150, 'BU': 1500, 'AU': '非期货公司会员:5400, 客户:2700',
'AG': '非期货公司会员:5400手, 客户:2700手', 'SP': 900
}
else:
limit_dict = {
'CU': 1000, 'AL': 1000, 'ZN': 800, 'PB': 600, 'NI': 600, 'SN': 200, 'RB': 900, 'WR': 360,
'HC': 1800, 'RU': 50, 'BU': 500, 'AU': '非期货公司会员:1800手, 客户:900手',
'AG': '非期货公司会员:1800手, 客户:900手', 'SP': 300
}
data_dict[symbol] = limit_dict[product]
elif product in ['FU', 'SC']:
expireDate = pd.to_datetime(expireDate)
last_change_day = ((expireDate.replace(day=1) -
datetime.timedelta(days=1)).replace(day=1)
- datetime.timedelta(days=1)).replace(day=1).strftime('%Y-%m-%d')
change_day = (expireDate.replace(day=1) - datetime.timedelta(days=1)).replace(day=1).strftime('%Y-%m-%d')
if today < last_change_day:
limit_dict = {
'FU': 7500, 'SC': 3000
}
elif change_day > today >= last_change_day:
limit_dict = {
'FU': 1500, 'SC': 1500
}
else:
limit_dict = {
'FU': 500, 'SC': 500
}
data_dict[symbol] = limit_dict[product]
elif product in ['TF', 'TS', 'T']:
expireDate = pd.to_datetime(expireDate)
change_day = self.find_tradeday(-1, date=expireDate.replace(day=1), exchange=exchange_id)
if today < change_day:
limit_dict = {
'TF': 2000, 'TS': 2000, 'T': 2000
}
else:
limit_dict = {
'TF': 600, 'TS': 600, 'T': 600
}
data_dict[symbol] = limit_dict[product]
return data_dict
# future = Future()
# print(future.get_main_symbol(product=['A', 'B']))
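# Usage sketch (assumes MongoDB and JoinQuant are reachable via the imported configs;
# the contract codes below are only illustrative):
# future = Future()
# print(future.get_contract_info(contract_lst=['rb2105'], info_lst=['ExchangeID', 'VolumeMultiple']))
# print(future.get_limit_position(['rb2105', 'a2105']))
# print(future.find_tradeday(-1))         # previous trading day
# print(future.get_windy_code('rb2105'))  # -> 'rb2105.SHF'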
|
[
"# coding=utf-8\nfrom trading_future.config_self import *\nimport pandas as pd\nimport numpy as np\nimport re\nimport datetime\nimport pymongo\nfrom jqdatasdk import *\nfrom configDB import *\nauth(JOINQUANT_USER, JOINQUANT_PW)\n\n\nclass Future:\n \"\"\"\n 全系统Future单例,只需创建一次,合约、品种基础信息类\n \"\"\"\n def __init__(self):\n self.products = list()\n self.products_base_msg = dict()\n self.products_symbol_msg = dict()\n self.tradedays_msg = dict()\n self.main_contract_msg = dict()\n self.__get_product_mongomsg()\n self.__get_trading_sessions()\n self.__get_tradedays()\n self.__get_main_contract()\n\n def get_VolumeMultiple(self, contract_lst=None):\n \"\"\"\n 获取合约单位\n \"\"\"\n info_lst = ['VolumeMultiple']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n contract_temp = contract\n if len(contract) > 3:\n contract_temp = contract[:-4]\n dict_all[contract] = {i: self.products_base_msg[contract_temp][i] for i in info_lst}\n return dict_all\n\n def get_PriceTick(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['PriceTick']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n contract_temp = contract\n if len(contract) > 3:\n contract_temp = contract[:-4]\n dict_all[contract] = {i: self.products_base_msg[contract_temp][i] for i in info_lst}\n\n return dict_all\n\n def get_ExchangeID(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ExchangeID']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.split(r'[^A-Za-z]', contract))][contract][i]\n for i in info_lst}\n return dict_all\n\n def get_ExchangeInstID(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ExchangeInstID']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.split(r'[^A-Za-z]', contract))][contract][i]\n for i in info_lst}\n return dict_all\n\n def get_LongMarginRatio(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['LongMarginRatio']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.split(r'[^A-Za-z]', contract))][contract][i]\n for i in info_lst}\n return dict_all\n\n def get_ShortMarginRatio(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ShortMarginRatio']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.split(r'[^A-Za-z]', contract))][contract][i]\n for i in info_lst}\n return dict_all\n\n def get_MaxMarketOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxMarketOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.split(r'[^A-Za-z]', contract))][contract][i]\n for i in info_lst}\n return dict_all\n\n\n\n def get_MaxLimitOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxLimitOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.split(r'[^A-Za-z]', 
contract))][contract][i]\n for i in info_lst}\n return dict_all\n\n def get_contract_info(self, contract_lst=None, info_lst=None):\n \"\"\"\n 获取主力合约\n :return:\n :info_lst:['product', 'symbol', 'ProductID', 'ExchangeID', 'MaxLimitOrderVolume', 'MinLimitOrderVolume',\n 'MaxMarketOrderVolume', 'MinMarketOrderVolume', 'LongMarginRatio', 'ShortMarginRatio', 'VolumeMultiple',\n 'ExchangeInstID', 'IsTrading']\n \"\"\"\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.split(r'[^A-Za-z]', contract))][contract][i] for i in info_lst}\n # dict_all[contract]['VolumeMultiple'] = self.products_base_msg[contract[0:-4]][contract]\n return dict_all\n\n def __get_product_mongomsg(self):\n \"\"\"\n 获取mongo里的product数据\n :return:\n \"\"\"\n print(MONGDB_USER,MONGDB_PW,MONGDB_IP)\n with pymongo.MongoClient(f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n col = m_cl['MARKET']['product']\n df_product = pd.DataFrame(col.find({'ProductID': {'$regex': '^[a-zA-Z]{1,2}$'}}))\n # del df_product['contract']\n self.products = list(df_product['ProductID'])\n self.products.remove('OI')\n df_product.index = df_product['ProductID']\n self.products_base_msg = df_product.T.to_dict()\n col = m_cl['MARKET']['instruments']\n del df_product\n df_symbols = pd.DataFrame(col.find({'symbol': {'$regex': '^[a-zA-Z]+[0-9]+$'}, \"ASSET_TYPE\": 'Future'}))\n df_symbols['product'] = df_symbols['symbol'].str.extract(r'(^[a-zA-Z]+)', expand=False).str.upper()\n for product, symbols in df_symbols.groupby('product'):\n symbols.index = symbols['symbol']\n symbols_dict = symbols.T.to_dict()\n self.products_symbol_msg[product] = symbols_dict\n\n def __get_main_contract(self):\n \"\"\"\n 获取主力合约\n :return:\n \"\"\"\n with pymongo.MongoClient(f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n db = m_cl['MARKET']\n for mark in['', '_OI', '_VOL']:\n mark = 'main_contract' + mark\n col = db[mark]\n df = pd.DataFrame(col.find({}, {'_id': 0}))\n df = df.set_index('date').sort_index()\n df.index = pd.to_datetime(df.index)\n self.main_contract_msg[mark] = df\n\n def get_windy_code(self, code):\n \"\"\"\n 其他code转windy的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {\n \"DCE\": 'DCE',\n \"CZCE\": 'CZC',\n \"SHFE\": 'SHF',\n \"INE\": 'INE',\n \"CFFEX\": 'CFE'\n }\n pattern = re.compile(r'^[a-zA-Z]{1,2}')\n product = (pattern.match(code)).group(0).upper()\n exchange_id = self.products_base_msg[product]['ExchangeID']\n if exchange_id is np.NaN:\n return\n windy_code = code + '.' 
+ change_dict[exchange_id]\n return windy_code\n\n def get_jq_code(self, code):\n \"\"\"\n 其他code转jq的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {\n \"DCE\": 'XDCE',\n \"CZCE\": 'XZCE',\n \"SHFE\": 'XSGE',\n \"INE\": 'XINE',\n \"CFFEX\": 'CCFX'\n }\n\n def get_main_symbol(self, product=None, date=None):\n \"\"\"\n :param product: str 或者list\n :param date:\n :return:\n \"\"\"\n if product:\n product = product if isinstance(product, list) else [product]\n date = pd.to_datetime(date) if date else pd.to_datetime(datetime.date.today())\n\n df = {}\n for symbol in product:\n print(symbol)\n df[symbol] = get_dominant_future(symbol, date)[:-5]\n\n return df\n\n def __get_trading_sessions(self):\n \"\"\"\n 获取期货历史交易时间窗口\n :return:\n \"\"\"\n with pymongo.MongoClient(f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n col = m_cl['MARKET']['TradingSessions']\n trading_sessions_df = pd.DataFrame(col.find())\n del trading_sessions_df['_id']\n trading_sessions_df['DateRange_Start'] = trading_sessions_df['DateRange_Start'].fillna('1990/01/01')\n trading_sessions_df['DateRange_End'] = trading_sessions_df['DateRange_End'].fillna('2099/01/01')\n for product in self.products_base_msg.keys():\n product_trading_sessions = trading_sessions_df.loc[trading_sessions_df['Market'] == product]\n self.products_base_msg[product]['trading_session'] = product_trading_sessions\n\n def get_product_trading_sessions(self, product, date: str = None):\n \"\"\"\n 获取交易的时间窗口\n :param product: 品种\n :param date: 日期, 默认今日,如果是'all',返回全周期的\n :return:\n \"\"\"\n trade_sessions = self.products_base_msg[product]['trading_session']\n if date != 'all':\n date = pd.to_datetime(date) if date else datetime.date.today()\n date = date.strftime('%Y/%m/%d')\n\n trade_sessions = trade_sessions.loc[(trade_sessions['DateRange_Start'] <= date) &\n (trade_sessions['DateRange_End'] >= date), ]\n trade_sessions = trade_sessions\n return trade_sessions\n\n def __get_tradedays(self):\n change_dict = {\n \"DCE\": 'DCE',\n \"CZCE\": 'CZC',\n \"SHFE\": 'SHF',\n \"INE\": 'SHF',\n \"CFFEX\": 'CFE'\n }\n\n with pymongo.MongoClient(f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n db = m_cl['Tradedays']\n for jz_code, mongo_code in change_dict.items():\n col = db[mongo_code]\n tradedays_df = pd.DataFrame(col.find({'isTradingday': True}))\n tradedays_df = tradedays_df[['Tradedays_str', 'Tradedays']]\n tradedays_df['Tradedays'] = pd.to_datetime(tradedays_df['Tradedays'].dt.strftime(\"%Y-%m-%d %H:%M:%S\"))\n tradedays_df.drop_duplicates(subset=['Tradedays_str'], inplace=True)\n tradedays_df.set_index('Tradedays', inplace=True)\n tradedays_df.sort_index(inplace=True)\n self.tradedays_msg[jz_code] = tradedays_df\n\n def find_tradeday(self, day: int, date=None, exchange: str = 'DCE'):\n \"\"\"\n 根据date查询距此日day天的交易日\n :param date: None 默认是今日\n :param day: day为0 时为判断今天是否是交易日,返回Bool\n :param exchange:\n :return: date:str\n \"\"\"\n\n date = pd.to_datetime(datetime.date.today()) if not date else pd.to_datetime(date).replace(hour=0,\n minute=0, second=0)\n tradeday_df = self.tradedays_msg[exchange]\n if day == 0:\n return date.strftime(\"%Y-%m-%d\") in tradeday_df['Tradedays_str'].values\n if day > 0:\n tradeday_df = tradeday_df.loc[tradeday_df.index > date]\n return tradeday_df.iloc[day-1]['Tradedays_str']\n if day < 0:\n tradeday_df = tradeday_df.loc[tradeday_df.index < date]\n return tradeday_df.iloc[day]['Tradedays_str']\n\n def get_limit_position(self, symbols):\n \"\"\"\n 获取合约今日的最大持仓限制\n :param symbol: jz格式\n 
:return:\n \"\"\"\n symbols = symbols if isinstance(symbols, list) else [symbols]\n data_dict = dict()\n for symbol in symbols:\n pattern = re.compile(r'^[a-zA-Z]{1,2}')\n product = (pattern.match(symbol)).group(0).upper()\n expireDate = self.products_symbol_msg[product][symbol]['ExpireDate']\n exchange_id = self.products_base_msg[product]['ExchangeID']\n today = datetime.date.today().strftime('%Y-%m-%d')\n if pd.to_datetime(today).strftime('%Y%m%d') >= expireDate:\n data_dict[symbol] = 'expired'\n elif product in ['SA', 'CF', 'SR', 'TA', 'OI', 'MA', 'FG', 'RM', 'ZC', 'PM', 'WH', 'RS', 'RI', 'JR', 'LR', 'SF',\n 'SM', 'CY', 'AP']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) - datetime.timedelta(days=1)\n ).replace(day=1) - datetime.timedelta(days=1)\n last_change_day = self.find_tradeday(15, date=lst_lst_month_last_day, exchange=exchange_id) # 交割月前一个月第15个日历日期间的交易日\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if product == 'AP' and symbol[-1] == '7':\n product = product + '7'\n if today <= last_change_day:\n limit_dict = {\n 'SA': '单边持仓量<20万:20000手, 单边持仓量≥20万:单边持仓量×10%',\n 'CF': '单边持仓量<15万:15000手,单边持仓量≥15万:单边持仓量×10%',\n 'SR': '单边持仓量<25万:25000手,单边持仓量≥25万:单边持仓量×10%',\n 'TA': '单边持仓量<25万:25000手,单边持仓量≥25万:单边持仓量×10%',\n 'OI': '单边持仓量<10万:10000手,单边持仓量≥10万:单边持仓量×10%',\n 'MA': '单边持仓量<10万:10000手,单边持仓量≥10万:单边持仓量×10%',\n 'FG': '单边持仓量<20万:20000手,单边持仓量≥20万:单边持仓量×10%',\n 'RM': '单边持仓量<20万:20000手,单边持仓量≥20万:单边持仓量×10%',\n 'ZC': '单边持仓量<60万:60000手,单边持仓量≥60万:单边持仓量×10%',\n 'PM': 2000, 'WH': 2500, 'RS': 10000, 'RI': 7500, 'JR': 20000,\n 'LR': 20000, 'SF': 8000, 'SM': 30000, 'CY': 5000, 'AP': 500, 'AP7': 100\n }\n\n elif change_day > today > last_change_day:\n limit_dict = {'SA': 4000,\n 'CF': 4000, 'SR': 5000, 'TA': 10000, 'OI': 3000, 'MA': 2000, 'FG': 5000,\n 'RM': 2000, 'ZC': 20000, 'PM': 600, 'WH': 1000, 'RS': 1000, 'RI': 2000, 'JR': 3000,\n 'LR': 3000, 'SF': 2000, 'SM': 10000, 'CY': 500, 'AP': 100, 'AP7': 20\n }\n else:\n limit_dict = {'SA': 800,\n 'CF': 800, 'SR': 1000, 'TA': 5000, 'OI': 1000, 'MA': 1000, 'FG': 1000,\n 'RM': 1000, 'ZC': 4000, 'PM': 200, 'WH': 300, 'RS': 500, 'RI': 400, 'JR': 500,\n 'LR': 500, 'SF': 500, 'SM': 2000, 'CY': 100, 'AP': 10, 'AP7': 6\n }\n data_dict[symbol] = limit_dict[product]\n elif product in ['CJ']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) - datetime.timedelta(days=1)\n ).replace(day=1) - datetime.timedelta(days=1)\n lst_month_fst_day = (expireDate.replace(day=1) - datetime.timedelta(days=1)).replace(day=1)\n lst_lst_change_day = self.find_tradeday(-1, date=lst_month_fst_day, exchange=exchange_id)\n last_change_day = self.find_tradeday(15, date=lst_lst_month_last_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if today <= lst_lst_change_day:\n limit_dict = {\n 'CJ': 300\n }\n\n elif last_change_day >= today > lst_lst_change_day:\n limit_dict = {\n 'CJ': 60\n }\n elif change_day > today > last_change_day:\n limit_dict = {\n 'CJ': 20\n }\n else:\n limit_dict = {\n 'CJ': 6\n }\n data_dict[symbol] = limit_dict[product]\n elif product in ['A', 'V', 'PP', 'C', 'B', 'L', 'P', 'J', 'JM', 'I', 'FB', 'BB', 'CS', 'Y', 'M', 'EG']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) - datetime.timedelta(days=1)\n ).replace(day=1) - datetime.timedelta(days=1)\n last_change_day = self.find_tradeday(14, date=lst_lst_month_last_day,\n exchange=exchange_id) # 交割月前一个月第15个日历日期间的交易日\n 
change_day = self.find_tradeday(-1, date=expireDate.replace(day=1), exchange=exchange_id)\n if today <= last_change_day:\n limit_dict = {\n 'A': '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%',\n 'V': '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%',\n 'PP': '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%',\n 'C': '单边持仓量<=40万:40000手,单边持仓量>40万:单边持仓量×10%',\n 'B': '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%',\n 'L': '单边持仓量<=10万:10000手,单边持仓量>10万:单边持仓量×10%',\n 'P': '单边持仓量<=10万:10000手,单边持仓量>10万:单边持仓量×10%',\n 'J': '单边持仓量<=5万:5000手,单边持仓量>5万:单边持仓量×10%',\n 'JM': '单边持仓量<=8万:8000手,单边持仓量>8万:单边持仓量×10%',\n 'I': '单边持仓量<=40万:40000手,单边持仓量>40万:单边持仓量×10%',\n 'FB': '单边持仓量<=16万:16000手,单边持仓量>16万:单边持仓量×10%',\n 'BB': '单边持仓量<=6万:6000手,单边持仓量>6万:单边持仓量×10%',\n 'CS': '单边持仓量<=15万:15000手,单边持仓量>15万:单边持仓量×10%',\n 'Y': '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%',\n 'M': '单边持仓量<=40万:40000手,单边持仓量>40万:单边持仓量×10%',\n 'EG': '单边持仓量<=8万:8000手,单边持仓量>8万:单边持仓量×10%,'\n ' 单边持仓>12万:3000手, 提高保证金合约价值×10%',\n }\n\n elif change_day >= today > last_change_day:\n limit_dict = {\n 'A': 5000, 'V': 5000, 'PP': 5000, 'C': 15000, 'B': 4500, 'L': 3000, 'P': 1500, 'J': 900,\n 'JM': 1500, 'I': 6000, 'FB': 400, 'BB': 80, 'CS': 4500, 'Y': 3000, 'M': 7500,\n 'EG': '3000手,单边持仓>8万:1000手, 提高保证金合约价值×20% '\n }\n else:\n limit_dict = {\n 'A': 2500, 'V': 2500, 'PP': 2500, 'C': 5000, 'B': 1500, 'L': 1000, 'P': 500, 'J': 300,\n 'JM': 500, 'I': 2000, 'FB': 100, 'BB': 20, 'CS': 1500, 'Y': 1000, 'M': 2500,\n 'EG': '1000手,单边持仓>8万:1000手, 提高保证金合约价值×20% '\n }\n data_dict[symbol] = limit_dict[product]\n elif product in ['JD']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) - datetime.timedelta(days=1)\n ).replace(day=1) - datetime.timedelta(days=1)\n lst_lst_change_day = self.find_tradeday(1, date=lst_lst_month_last_day, exchange=exchange_id)\n last_change_day = self.find_tradeday(10, date=lst_lst_month_last_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if today < lst_lst_change_day:\n limit_dict = {\n 'JD': 1200\n }\n\n elif last_change_day > today >= lst_lst_change_day:\n limit_dict = {\n 'JD': 400\n }\n elif change_day > today >= last_change_day:\n limit_dict = {\n 'JD': 120\n }\n else:\n limit_dict = {\n 'JD': 20\n }\n data_dict[symbol] = limit_dict[product]\n elif product in ['CU', 'AL', 'ZN', 'PB', 'NI', 'SN', 'RB', 'WR', 'HC', 'RU', 'BU', 'AU', 'AG', 'SP']:\n expireDate = pd.to_datetime(expireDate)\n lst_month_fst_day = (expireDate.replace(day=1) - datetime.timedelta(days=1)).replace(day=1)\n last_change_day = self.find_tradeday(-1, date=lst_month_fst_day, exchange=exchange_id) # 合约挂牌至交割月前第二月的最后一个交易日\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if today <= last_change_day:\n limit_dict = {\n 'CU': '单边持仓量<16万:8000, 单边持仓量>=16万:单边持仓量×10%',\n 'AL': '单边持仓量<20万:10000, 单边持仓量>=20万:单边持仓量×10%',\n 'ZN': '单边持仓量<12万:6000, 单边持仓量>=12万:单边持仓量×10%',\n 'PB': '单边持仓量<10万:5000, 单边持仓量>=10万:单边持仓量×10%',\n 'NI': '单边持仓量<12万:6000, 单边持仓量>=12万:单边持仓量×10%',\n 'SN': '单边持仓量<3万:1500, 单边持仓量>=3万:单边持仓量×10%',\n 'RB': '单边持仓量<180万:90000, 单边持仓量>=180万:单边持仓量×10%',\n 'WR': '单边持仓量<45万:22500, 单边持仓量>=45万:单边持仓量×10%',\n 'HC': '单边持仓量<240万:120000, 单边持仓量>=240万:单边持仓量×10%',\n 'RU': 500, 'BU': 8000, 'AU': '非期货公司会员:18000, 客户:9000',\n 'AG': '非期货公司会员:18000手, 客户:9000手', 'SP': 4500\n }\n\n elif change_day > today > last_change_day:\n limit_dict = {\n 'CU': 3000, 'AL': 3000, 'ZN': 2400, 'PB': 1800, 'NI': 1800, 'SN': 600, 'RB': 4500, 'WR': 1800,\n 'HC': 9000, 'RU': 150, 'BU': 1500, 'AU': '非期货公司会员:5400, 客户:2700',\n 'AG': '非期货公司会员:5400手, 客户:2700手', 
'SP': 900\n }\n else:\n limit_dict = {\n 'CU': 1000, 'AL': 1000, 'ZN': 800, 'PB': 600, 'NI': 600, 'SN': 200, 'RB': 900, 'WR': 360,\n 'HC': 1800, 'RU': 50, 'BU': 500, 'AU': '非期货公司会员:1800手, 客户:900手',\n 'AG': '非期货公司会员:1800手, 客户:900手', 'SP': 300\n }\n data_dict[symbol] = limit_dict[product]\n elif product in ['FU', 'SC']:\n expireDate = pd.to_datetime(expireDate)\n\n last_change_day = ((expireDate.replace(day=1) -\n datetime.timedelta(days=1)).replace(day=1)\n - datetime.timedelta(days=1)).replace(day=1).strftime('%Y-%m-%d')\n change_day = (expireDate.replace(day=1) - datetime.timedelta(days=1)).replace(day=1).strftime('%Y-%m-%d')\n\n if today < last_change_day:\n limit_dict = {\n 'FU': 7500, 'SC': 3000\n }\n\n elif change_day > today >= last_change_day:\n limit_dict = {\n 'FU': 1500, 'SC': 1500\n }\n else:\n limit_dict = {\n 'FU': 500, 'SC': 500\n }\n data_dict[symbol] = limit_dict[product]\n elif product in ['TF', 'TS', 'T']:\n expireDate = pd.to_datetime(expireDate)\n change_day = self.find_tradeday(-1, date=expireDate.replace(day=1), exchange=exchange_id)\n if today < change_day:\n limit_dict = {\n 'TF': 2000, 'TS': 2000, 'T': 2000\n }\n else:\n limit_dict = {\n 'TF': 600, 'TS': 600, 'T': 600\n }\n data_dict[symbol] = limit_dict[product]\n return data_dict\n\n# future = Future()\n# print(future.get_main_symbol(product=['A', 'B']))",
"from trading_future.config_self import *\nimport pandas as pd\nimport numpy as np\nimport re\nimport datetime\nimport pymongo\nfrom jqdatasdk import *\nfrom configDB import *\nauth(JOINQUANT_USER, JOINQUANT_PW)\n\n\nclass Future:\n \"\"\"\n 全系统Future单例,只需创建一次,合约、品种基础信息类\n \"\"\"\n\n def __init__(self):\n self.products = list()\n self.products_base_msg = dict()\n self.products_symbol_msg = dict()\n self.tradedays_msg = dict()\n self.main_contract_msg = dict()\n self.__get_product_mongomsg()\n self.__get_trading_sessions()\n self.__get_tradedays()\n self.__get_main_contract()\n\n def get_VolumeMultiple(self, contract_lst=None):\n \"\"\"\n 获取合约单位\n \"\"\"\n info_lst = ['VolumeMultiple']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n contract_temp = contract\n if len(contract) > 3:\n contract_temp = contract[:-4]\n dict_all[contract] = {i: self.products_base_msg[contract_temp][\n i] for i in info_lst}\n return dict_all\n\n def get_PriceTick(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['PriceTick']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n contract_temp = contract\n if len(contract) > 3:\n contract_temp = contract[:-4]\n dict_all[contract] = {i: self.products_base_msg[contract_temp][\n i] for i in info_lst}\n return dict_all\n\n def get_ExchangeID(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ExchangeID']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_ExchangeInstID(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ExchangeInstID']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_LongMarginRatio(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['LongMarginRatio']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_ShortMarginRatio(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ShortMarginRatio']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_MaxMarketOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxMarketOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_MaxLimitOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxLimitOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in 
info_lst}\n return dict_all\n\n def get_contract_info(self, contract_lst=None, info_lst=None):\n \"\"\"\n 获取主力合约\n :return:\n :info_lst:['product', 'symbol', 'ProductID', 'ExchangeID', 'MaxLimitOrderVolume', 'MinLimitOrderVolume',\n 'MaxMarketOrderVolume', 'MinMarketOrderVolume', 'LongMarginRatio', 'ShortMarginRatio', 'VolumeMultiple',\n 'ExchangeInstID', 'IsTrading']\n \"\"\"\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def __get_product_mongomsg(self):\n \"\"\"\n 获取mongo里的product数据\n :return:\n \"\"\"\n print(MONGDB_USER, MONGDB_PW, MONGDB_IP)\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n col = m_cl['MARKET']['product']\n df_product = pd.DataFrame(col.find({'ProductID': {'$regex':\n '^[a-zA-Z]{1,2}$'}}))\n self.products = list(df_product['ProductID'])\n self.products.remove('OI')\n df_product.index = df_product['ProductID']\n self.products_base_msg = df_product.T.to_dict()\n col = m_cl['MARKET']['instruments']\n del df_product\n df_symbols = pd.DataFrame(col.find({'symbol': {'$regex':\n '^[a-zA-Z]+[0-9]+$'}, 'ASSET_TYPE': 'Future'}))\n df_symbols['product'] = df_symbols['symbol'].str.extract(\n '(^[a-zA-Z]+)', expand=False).str.upper()\n for product, symbols in df_symbols.groupby('product'):\n symbols.index = symbols['symbol']\n symbols_dict = symbols.T.to_dict()\n self.products_symbol_msg[product] = symbols_dict\n\n def __get_main_contract(self):\n \"\"\"\n 获取主力合约\n :return:\n \"\"\"\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n db = m_cl['MARKET']\n for mark in ['', '_OI', '_VOL']:\n mark = 'main_contract' + mark\n col = db[mark]\n df = pd.DataFrame(col.find({}, {'_id': 0}))\n df = df.set_index('date').sort_index()\n df.index = pd.to_datetime(df.index)\n self.main_contract_msg[mark] = df\n\n def get_windy_code(self, code):\n \"\"\"\n 其他code转windy的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'INE', 'CFFEX': 'CFE'}\n pattern = re.compile('^[a-zA-Z]{1,2}')\n product = pattern.match(code).group(0).upper()\n exchange_id = self.products_base_msg[product]['ExchangeID']\n if exchange_id is np.NaN:\n return\n windy_code = code + '.' 
+ change_dict[exchange_id]\n return windy_code\n\n def get_jq_code(self, code):\n \"\"\"\n 其他code转jq的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'XDCE', 'CZCE': 'XZCE', 'SHFE': 'XSGE', 'INE':\n 'XINE', 'CFFEX': 'CCFX'}\n\n def get_main_symbol(self, product=None, date=None):\n \"\"\"\n :param product: str 或者list\n :param date:\n :return:\n \"\"\"\n if product:\n product = product if isinstance(product, list) else [product]\n date = pd.to_datetime(date) if date else pd.to_datetime(datetime.\n date.today())\n df = {}\n for symbol in product:\n print(symbol)\n df[symbol] = get_dominant_future(symbol, date)[:-5]\n return df\n\n def __get_trading_sessions(self):\n \"\"\"\n 获取期货历史交易时间窗口\n :return:\n \"\"\"\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n col = m_cl['MARKET']['TradingSessions']\n trading_sessions_df = pd.DataFrame(col.find())\n del trading_sessions_df['_id']\n trading_sessions_df['DateRange_Start'] = trading_sessions_df[\n 'DateRange_Start'].fillna('1990/01/01')\n trading_sessions_df['DateRange_End'] = trading_sessions_df[\n 'DateRange_End'].fillna('2099/01/01')\n for product in self.products_base_msg.keys():\n product_trading_sessions = trading_sessions_df.loc[\n trading_sessions_df['Market'] == product]\n self.products_base_msg[product]['trading_session'\n ] = product_trading_sessions\n\n def get_product_trading_sessions(self, product, date: str=None):\n \"\"\"\n 获取交易的时间窗口\n :param product: 品种\n :param date: 日期, 默认今日,如果是'all',返回全周期的\n :return:\n \"\"\"\n trade_sessions = self.products_base_msg[product]['trading_session']\n if date != 'all':\n date = pd.to_datetime(date) if date else datetime.date.today()\n date = date.strftime('%Y/%m/%d')\n trade_sessions = trade_sessions.loc[(trade_sessions[\n 'DateRange_Start'] <= date) & (trade_sessions[\n 'DateRange_End'] >= date),]\n trade_sessions = trade_sessions\n return trade_sessions\n\n def __get_tradedays(self):\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'SHF', 'CFFEX': 'CFE'}\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n db = m_cl['Tradedays']\n for jz_code, mongo_code in change_dict.items():\n col = db[mongo_code]\n tradedays_df = pd.DataFrame(col.find({'isTradingday': True}))\n tradedays_df = tradedays_df[['Tradedays_str', 'Tradedays']]\n tradedays_df['Tradedays'] = pd.to_datetime(tradedays_df[\n 'Tradedays'].dt.strftime('%Y-%m-%d %H:%M:%S'))\n tradedays_df.drop_duplicates(subset=['Tradedays_str'],\n inplace=True)\n tradedays_df.set_index('Tradedays', inplace=True)\n tradedays_df.sort_index(inplace=True)\n self.tradedays_msg[jz_code] = tradedays_df\n\n def find_tradeday(self, day: int, date=None, exchange: str='DCE'):\n \"\"\"\n 根据date查询距此日day天的交易日\n :param date: None 默认是今日\n :param day: day为0 时为判断今天是否是交易日,返回Bool\n :param exchange:\n :return: date:str\n \"\"\"\n date = pd.to_datetime(datetime.date.today()\n ) if not date else pd.to_datetime(date).replace(hour=0, minute=\n 0, second=0)\n tradeday_df = self.tradedays_msg[exchange]\n if day == 0:\n return date.strftime('%Y-%m-%d') in tradeday_df['Tradedays_str'\n ].values\n if day > 0:\n tradeday_df = tradeday_df.loc[tradeday_df.index > date]\n return tradeday_df.iloc[day - 1]['Tradedays_str']\n if day < 0:\n tradeday_df = tradeday_df.loc[tradeday_df.index < date]\n return tradeday_df.iloc[day]['Tradedays_str']\n\n def get_limit_position(self, symbols):\n \"\"\"\n 获取合约今日的最大持仓限制\n :param symbol: jz格式\n :return:\n \"\"\"\n 
symbols = symbols if isinstance(symbols, list) else [symbols]\n data_dict = dict()\n for symbol in symbols:\n pattern = re.compile('^[a-zA-Z]{1,2}')\n product = pattern.match(symbol).group(0).upper()\n expireDate = self.products_symbol_msg[product][symbol]['ExpireDate'\n ]\n exchange_id = self.products_base_msg[product]['ExchangeID']\n today = datetime.date.today().strftime('%Y-%m-%d')\n if pd.to_datetime(today).strftime('%Y%m%d') >= expireDate:\n data_dict[symbol] = 'expired'\n elif product in ['SA', 'CF', 'SR', 'TA', 'OI', 'MA', 'FG', 'RM',\n 'ZC', 'PM', 'WH', 'RS', 'RI', 'JR', 'LR', 'SF', 'SM', 'CY',\n 'AP']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) -\n datetime.timedelta(days=1)).replace(day=1\n ) - datetime.timedelta(days=1)\n last_change_day = self.find_tradeday(15, date=\n lst_lst_month_last_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if product == 'AP' and symbol[-1] == '7':\n product = product + '7'\n if today <= last_change_day:\n limit_dict = {'SA':\n '单边持仓量<20万:20000手, 单边持仓量≥20万:单边持仓量×10%', 'CF':\n '单边持仓量<15万:15000手,单边持仓量≥15万:单边持仓量×10%', 'SR':\n '单边持仓量<25万:25000手,单边持仓量≥25万:单边持仓量×10%', 'TA':\n '单边持仓量<25万:25000手,单边持仓量≥25万:单边持仓量×10%', 'OI':\n '单边持仓量<10万:10000手,单边持仓量≥10万:单边持仓量×10%', 'MA':\n '单边持仓量<10万:10000手,单边持仓量≥10万:单边持仓量×10%', 'FG':\n '单边持仓量<20万:20000手,单边持仓量≥20万:单边持仓量×10%', 'RM':\n '单边持仓量<20万:20000手,单边持仓量≥20万:单边持仓量×10%', 'ZC':\n '单边持仓量<60万:60000手,单边持仓量≥60万:单边持仓量×10%', 'PM': 2000,\n 'WH': 2500, 'RS': 10000, 'RI': 7500, 'JR': 20000,\n 'LR': 20000, 'SF': 8000, 'SM': 30000, 'CY': 5000,\n 'AP': 500, 'AP7': 100}\n elif change_day > today > last_change_day:\n limit_dict = {'SA': 4000, 'CF': 4000, 'SR': 5000, 'TA':\n 10000, 'OI': 3000, 'MA': 2000, 'FG': 5000, 'RM': \n 2000, 'ZC': 20000, 'PM': 600, 'WH': 1000, 'RS': \n 1000, 'RI': 2000, 'JR': 3000, 'LR': 3000, 'SF': \n 2000, 'SM': 10000, 'CY': 500, 'AP': 100, 'AP7': 20}\n else:\n limit_dict = {'SA': 800, 'CF': 800, 'SR': 1000, 'TA': \n 5000, 'OI': 1000, 'MA': 1000, 'FG': 1000, 'RM': \n 1000, 'ZC': 4000, 'PM': 200, 'WH': 300, 'RS': 500,\n 'RI': 400, 'JR': 500, 'LR': 500, 'SF': 500, 'SM': \n 2000, 'CY': 100, 'AP': 10, 'AP7': 6}\n data_dict[symbol] = limit_dict[product]\n elif product in ['CJ']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) -\n datetime.timedelta(days=1)).replace(day=1\n ) - datetime.timedelta(days=1)\n lst_month_fst_day = (expireDate.replace(day=1) - datetime.\n timedelta(days=1)).replace(day=1)\n lst_lst_change_day = self.find_tradeday(-1, date=\n lst_month_fst_day, exchange=exchange_id)\n last_change_day = self.find_tradeday(15, date=\n lst_lst_month_last_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if today <= lst_lst_change_day:\n limit_dict = {'CJ': 300}\n elif last_change_day >= today > lst_lst_change_day:\n limit_dict = {'CJ': 60}\n elif change_day > today > last_change_day:\n limit_dict = {'CJ': 20}\n else:\n limit_dict = {'CJ': 6}\n data_dict[symbol] = limit_dict[product]\n elif product in ['A', 'V', 'PP', 'C', 'B', 'L', 'P', 'J', 'JM',\n 'I', 'FB', 'BB', 'CS', 'Y', 'M', 'EG']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) -\n datetime.timedelta(days=1)).replace(day=1\n ) - datetime.timedelta(days=1)\n last_change_day = self.find_tradeday(14, date=\n lst_lst_month_last_day, exchange=exchange_id)\n change_day = self.find_tradeday(-1, date=expireDate.replace\n (day=1), 
exchange=exchange_id)\n if today <= last_change_day:\n limit_dict = {'A':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'V':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'PP':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'C':\n '单边持仓量<=40万:40000手,单边持仓量>40万:单边持仓量×10%', 'B':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'L':\n '单边持仓量<=10万:10000手,单边持仓量>10万:单边持仓量×10%', 'P':\n '单边持仓量<=10万:10000手,单边持仓量>10万:单边持仓量×10%', 'J':\n '单边持仓量<=5万:5000手,单边持仓量>5万:单边持仓量×10%', 'JM':\n '单边持仓量<=8万:8000手,单边持仓量>8万:单边持仓量×10%', 'I':\n '单边持仓量<=40万:40000手,单边持仓量>40万:单边持仓量×10%', 'FB':\n '单边持仓量<=16万:16000手,单边持仓量>16万:单边持仓量×10%', 'BB':\n '单边持仓量<=6万:6000手,单边持仓量>6万:单边持仓量×10%', 'CS':\n '单边持仓量<=15万:15000手,单边持仓量>15万:单边持仓量×10%', 'Y':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'M':\n '单边持仓量<=40万:40000手,单边持仓量>40万:单边持仓量×10%', 'EG':\n '单边持仓量<=8万:8000手,单边持仓量>8万:单边持仓量×10%, 单边持仓>12万:3000手, 提高保证金合约价值×10%'\n }\n elif change_day >= today > last_change_day:\n limit_dict = {'A': 5000, 'V': 5000, 'PP': 5000, 'C': \n 15000, 'B': 4500, 'L': 3000, 'P': 1500, 'J': 900,\n 'JM': 1500, 'I': 6000, 'FB': 400, 'BB': 80, 'CS': \n 4500, 'Y': 3000, 'M': 7500, 'EG':\n '3000手,单边持仓>8万:1000手, 提高保证金合约价值×20% '}\n else:\n limit_dict = {'A': 2500, 'V': 2500, 'PP': 2500, 'C': \n 5000, 'B': 1500, 'L': 1000, 'P': 500, 'J': 300,\n 'JM': 500, 'I': 2000, 'FB': 100, 'BB': 20, 'CS': \n 1500, 'Y': 1000, 'M': 2500, 'EG':\n '1000手,单边持仓>8万:1000手, 提高保证金合约价值×20% '}\n data_dict[symbol] = limit_dict[product]\n elif product in ['JD']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) -\n datetime.timedelta(days=1)).replace(day=1\n ) - datetime.timedelta(days=1)\n lst_lst_change_day = self.find_tradeday(1, date=\n lst_lst_month_last_day, exchange=exchange_id)\n last_change_day = self.find_tradeday(10, date=\n lst_lst_month_last_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if today < lst_lst_change_day:\n limit_dict = {'JD': 1200}\n elif last_change_day > today >= lst_lst_change_day:\n limit_dict = {'JD': 400}\n elif change_day > today >= last_change_day:\n limit_dict = {'JD': 120}\n else:\n limit_dict = {'JD': 20}\n data_dict[symbol] = limit_dict[product]\n elif product in ['CU', 'AL', 'ZN', 'PB', 'NI', 'SN', 'RB', 'WR',\n 'HC', 'RU', 'BU', 'AU', 'AG', 'SP']:\n expireDate = pd.to_datetime(expireDate)\n lst_month_fst_day = (expireDate.replace(day=1) - datetime.\n timedelta(days=1)).replace(day=1)\n last_change_day = self.find_tradeday(-1, date=\n lst_month_fst_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if today <= last_change_day:\n limit_dict = {'CU':\n '单边持仓量<16万:8000, 单边持仓量>=16万:单边持仓量×10%', 'AL':\n '单边持仓量<20万:10000, 单边持仓量>=20万:单边持仓量×10%', 'ZN':\n '单边持仓量<12万:6000, 单边持仓量>=12万:单边持仓量×10%', 'PB':\n '单边持仓量<10万:5000, 单边持仓量>=10万:单边持仓量×10%', 'NI':\n '单边持仓量<12万:6000, 单边持仓量>=12万:单边持仓量×10%', 'SN':\n '单边持仓量<3万:1500, 单边持仓量>=3万:单边持仓量×10%', 'RB':\n '单边持仓量<180万:90000, 单边持仓量>=180万:单边持仓量×10%', 'WR':\n '单边持仓量<45万:22500, 单边持仓量>=45万:单边持仓量×10%', 'HC':\n '单边持仓量<240万:120000, 单边持仓量>=240万:单边持仓量×10%', 'RU': \n 500, 'BU': 8000, 'AU': '非期货公司会员:18000, 客户:9000',\n 'AG': '非期货公司会员:18000手, 客户:9000手', 'SP': 4500}\n elif change_day > today > last_change_day:\n limit_dict = {'CU': 3000, 'AL': 3000, 'ZN': 2400, 'PB':\n 1800, 'NI': 1800, 'SN': 600, 'RB': 4500, 'WR': 1800,\n 'HC': 9000, 'RU': 150, 'BU': 1500, 'AU':\n '非期货公司会员:5400, 客户:2700', 'AG':\n '非期货公司会员:5400手, 客户:2700手', 'SP': 900}\n else:\n limit_dict = {'CU': 1000, 'AL': 1000, 'ZN': 800, 'PB': \n 600, 'NI': 600, 'SN': 200, 'RB': 900, 
'WR': 360,\n 'HC': 1800, 'RU': 50, 'BU': 500, 'AU':\n '非期货公司会员:1800手, 客户:900手', 'AG':\n '非期货公司会员:1800手, 客户:900手', 'SP': 300}\n data_dict[symbol] = limit_dict[product]\n elif product in ['FU', 'SC']:\n expireDate = pd.to_datetime(expireDate)\n last_change_day = ((expireDate.replace(day=1) - datetime.\n timedelta(days=1)).replace(day=1) - datetime.timedelta(\n days=1)).replace(day=1).strftime('%Y-%m-%d')\n change_day = (expireDate.replace(day=1) - datetime.\n timedelta(days=1)).replace(day=1).strftime('%Y-%m-%d')\n if today < last_change_day:\n limit_dict = {'FU': 7500, 'SC': 3000}\n elif change_day > today >= last_change_day:\n limit_dict = {'FU': 1500, 'SC': 1500}\n else:\n limit_dict = {'FU': 500, 'SC': 500}\n data_dict[symbol] = limit_dict[product]\n elif product in ['TF', 'TS', 'T']:\n expireDate = pd.to_datetime(expireDate)\n change_day = self.find_tradeday(-1, date=expireDate.replace\n (day=1), exchange=exchange_id)\n if today < change_day:\n limit_dict = {'TF': 2000, 'TS': 2000, 'T': 2000}\n else:\n limit_dict = {'TF': 600, 'TS': 600, 'T': 600}\n data_dict[symbol] = limit_dict[product]\n return data_dict\n",
"<import token>\nauth(JOINQUANT_USER, JOINQUANT_PW)\n\n\nclass Future:\n \"\"\"\n 全系统Future单例,只需创建一次,合约、品种基础信息类\n \"\"\"\n\n def __init__(self):\n self.products = list()\n self.products_base_msg = dict()\n self.products_symbol_msg = dict()\n self.tradedays_msg = dict()\n self.main_contract_msg = dict()\n self.__get_product_mongomsg()\n self.__get_trading_sessions()\n self.__get_tradedays()\n self.__get_main_contract()\n\n def get_VolumeMultiple(self, contract_lst=None):\n \"\"\"\n 获取合约单位\n \"\"\"\n info_lst = ['VolumeMultiple']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n contract_temp = contract\n if len(contract) > 3:\n contract_temp = contract[:-4]\n dict_all[contract] = {i: self.products_base_msg[contract_temp][\n i] for i in info_lst}\n return dict_all\n\n def get_PriceTick(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['PriceTick']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n contract_temp = contract\n if len(contract) > 3:\n contract_temp = contract[:-4]\n dict_all[contract] = {i: self.products_base_msg[contract_temp][\n i] for i in info_lst}\n return dict_all\n\n def get_ExchangeID(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ExchangeID']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_ExchangeInstID(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ExchangeInstID']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_LongMarginRatio(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['LongMarginRatio']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_ShortMarginRatio(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ShortMarginRatio']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_MaxMarketOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxMarketOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_MaxLimitOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxLimitOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_contract_info(self, contract_lst=None, info_lst=None):\n \"\"\"\n 获取主力合约\n :return:\n :info_lst:['product', 'symbol', 
'ProductID', 'ExchangeID', 'MaxLimitOrderVolume', 'MinLimitOrderVolume',\n 'MaxMarketOrderVolume', 'MinMarketOrderVolume', 'LongMarginRatio', 'ShortMarginRatio', 'VolumeMultiple',\n 'ExchangeInstID', 'IsTrading']\n \"\"\"\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def __get_product_mongomsg(self):\n \"\"\"\n 获取mongo里的product数据\n :return:\n \"\"\"\n print(MONGDB_USER, MONGDB_PW, MONGDB_IP)\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n col = m_cl['MARKET']['product']\n df_product = pd.DataFrame(col.find({'ProductID': {'$regex':\n '^[a-zA-Z]{1,2}$'}}))\n self.products = list(df_product['ProductID'])\n self.products.remove('OI')\n df_product.index = df_product['ProductID']\n self.products_base_msg = df_product.T.to_dict()\n col = m_cl['MARKET']['instruments']\n del df_product\n df_symbols = pd.DataFrame(col.find({'symbol': {'$regex':\n '^[a-zA-Z]+[0-9]+$'}, 'ASSET_TYPE': 'Future'}))\n df_symbols['product'] = df_symbols['symbol'].str.extract(\n '(^[a-zA-Z]+)', expand=False).str.upper()\n for product, symbols in df_symbols.groupby('product'):\n symbols.index = symbols['symbol']\n symbols_dict = symbols.T.to_dict()\n self.products_symbol_msg[product] = symbols_dict\n\n def __get_main_contract(self):\n \"\"\"\n 获取主力合约\n :return:\n \"\"\"\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n db = m_cl['MARKET']\n for mark in ['', '_OI', '_VOL']:\n mark = 'main_contract' + mark\n col = db[mark]\n df = pd.DataFrame(col.find({}, {'_id': 0}))\n df = df.set_index('date').sort_index()\n df.index = pd.to_datetime(df.index)\n self.main_contract_msg[mark] = df\n\n def get_windy_code(self, code):\n \"\"\"\n 其他code转windy的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'INE', 'CFFEX': 'CFE'}\n pattern = re.compile('^[a-zA-Z]{1,2}')\n product = pattern.match(code).group(0).upper()\n exchange_id = self.products_base_msg[product]['ExchangeID']\n if exchange_id is np.NaN:\n return\n windy_code = code + '.' 
+ change_dict[exchange_id]\n return windy_code\n\n def get_jq_code(self, code):\n \"\"\"\n 其他code转jq的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'XDCE', 'CZCE': 'XZCE', 'SHFE': 'XSGE', 'INE':\n 'XINE', 'CFFEX': 'CCFX'}\n\n def get_main_symbol(self, product=None, date=None):\n \"\"\"\n :param product: str 或者list\n :param date:\n :return:\n \"\"\"\n if product:\n product = product if isinstance(product, list) else [product]\n date = pd.to_datetime(date) if date else pd.to_datetime(datetime.\n date.today())\n df = {}\n for symbol in product:\n print(symbol)\n df[symbol] = get_dominant_future(symbol, date)[:-5]\n return df\n\n def __get_trading_sessions(self):\n \"\"\"\n 获取期货历史交易时间窗口\n :return:\n \"\"\"\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n col = m_cl['MARKET']['TradingSessions']\n trading_sessions_df = pd.DataFrame(col.find())\n del trading_sessions_df['_id']\n trading_sessions_df['DateRange_Start'] = trading_sessions_df[\n 'DateRange_Start'].fillna('1990/01/01')\n trading_sessions_df['DateRange_End'] = trading_sessions_df[\n 'DateRange_End'].fillna('2099/01/01')\n for product in self.products_base_msg.keys():\n product_trading_sessions = trading_sessions_df.loc[\n trading_sessions_df['Market'] == product]\n self.products_base_msg[product]['trading_session'\n ] = product_trading_sessions\n\n def get_product_trading_sessions(self, product, date: str=None):\n \"\"\"\n 获取交易的时间窗口\n :param product: 品种\n :param date: 日期, 默认今日,如果是'all',返回全周期的\n :return:\n \"\"\"\n trade_sessions = self.products_base_msg[product]['trading_session']\n if date != 'all':\n date = pd.to_datetime(date) if date else datetime.date.today()\n date = date.strftime('%Y/%m/%d')\n trade_sessions = trade_sessions.loc[(trade_sessions[\n 'DateRange_Start'] <= date) & (trade_sessions[\n 'DateRange_End'] >= date),]\n trade_sessions = trade_sessions\n return trade_sessions\n\n def __get_tradedays(self):\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'SHF', 'CFFEX': 'CFE'}\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n db = m_cl['Tradedays']\n for jz_code, mongo_code in change_dict.items():\n col = db[mongo_code]\n tradedays_df = pd.DataFrame(col.find({'isTradingday': True}))\n tradedays_df = tradedays_df[['Tradedays_str', 'Tradedays']]\n tradedays_df['Tradedays'] = pd.to_datetime(tradedays_df[\n 'Tradedays'].dt.strftime('%Y-%m-%d %H:%M:%S'))\n tradedays_df.drop_duplicates(subset=['Tradedays_str'],\n inplace=True)\n tradedays_df.set_index('Tradedays', inplace=True)\n tradedays_df.sort_index(inplace=True)\n self.tradedays_msg[jz_code] = tradedays_df\n\n def find_tradeday(self, day: int, date=None, exchange: str='DCE'):\n \"\"\"\n 根据date查询距此日day天的交易日\n :param date: None 默认是今日\n :param day: day为0 时为判断今天是否是交易日,返回Bool\n :param exchange:\n :return: date:str\n \"\"\"\n date = pd.to_datetime(datetime.date.today()\n ) if not date else pd.to_datetime(date).replace(hour=0, minute=\n 0, second=0)\n tradeday_df = self.tradedays_msg[exchange]\n if day == 0:\n return date.strftime('%Y-%m-%d') in tradeday_df['Tradedays_str'\n ].values\n if day > 0:\n tradeday_df = tradeday_df.loc[tradeday_df.index > date]\n return tradeday_df.iloc[day - 1]['Tradedays_str']\n if day < 0:\n tradeday_df = tradeday_df.loc[tradeday_df.index < date]\n return tradeday_df.iloc[day]['Tradedays_str']\n\n def get_limit_position(self, symbols):\n \"\"\"\n 获取合约今日的最大持仓限制\n :param symbol: jz格式\n :return:\n \"\"\"\n 
symbols = symbols if isinstance(symbols, list) else [symbols]\n data_dict = dict()\n for symbol in symbols:\n pattern = re.compile('^[a-zA-Z]{1,2}')\n product = pattern.match(symbol).group(0).upper()\n expireDate = self.products_symbol_msg[product][symbol]['ExpireDate'\n ]\n exchange_id = self.products_base_msg[product]['ExchangeID']\n today = datetime.date.today().strftime('%Y-%m-%d')\n if pd.to_datetime(today).strftime('%Y%m%d') >= expireDate:\n data_dict[symbol] = 'expired'\n elif product in ['SA', 'CF', 'SR', 'TA', 'OI', 'MA', 'FG', 'RM',\n 'ZC', 'PM', 'WH', 'RS', 'RI', 'JR', 'LR', 'SF', 'SM', 'CY',\n 'AP']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) -\n datetime.timedelta(days=1)).replace(day=1\n ) - datetime.timedelta(days=1)\n last_change_day = self.find_tradeday(15, date=\n lst_lst_month_last_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if product == 'AP' and symbol[-1] == '7':\n product = product + '7'\n if today <= last_change_day:\n limit_dict = {'SA':\n '单边持仓量<20万:20000手, 单边持仓量≥20万:单边持仓量×10%', 'CF':\n '单边持仓量<15万:15000手,单边持仓量≥15万:单边持仓量×10%', 'SR':\n '单边持仓量<25万:25000手,单边持仓量≥25万:单边持仓量×10%', 'TA':\n '单边持仓量<25万:25000手,单边持仓量≥25万:单边持仓量×10%', 'OI':\n '单边持仓量<10万:10000手,单边持仓量≥10万:单边持仓量×10%', 'MA':\n '单边持仓量<10万:10000手,单边持仓量≥10万:单边持仓量×10%', 'FG':\n '单边持仓量<20万:20000手,单边持仓量≥20万:单边持仓量×10%', 'RM':\n '单边持仓量<20万:20000手,单边持仓量≥20万:单边持仓量×10%', 'ZC':\n '单边持仓量<60万:60000手,单边持仓量≥60万:单边持仓量×10%', 'PM': 2000,\n 'WH': 2500, 'RS': 10000, 'RI': 7500, 'JR': 20000,\n 'LR': 20000, 'SF': 8000, 'SM': 30000, 'CY': 5000,\n 'AP': 500, 'AP7': 100}\n elif change_day > today > last_change_day:\n limit_dict = {'SA': 4000, 'CF': 4000, 'SR': 5000, 'TA':\n 10000, 'OI': 3000, 'MA': 2000, 'FG': 5000, 'RM': \n 2000, 'ZC': 20000, 'PM': 600, 'WH': 1000, 'RS': \n 1000, 'RI': 2000, 'JR': 3000, 'LR': 3000, 'SF': \n 2000, 'SM': 10000, 'CY': 500, 'AP': 100, 'AP7': 20}\n else:\n limit_dict = {'SA': 800, 'CF': 800, 'SR': 1000, 'TA': \n 5000, 'OI': 1000, 'MA': 1000, 'FG': 1000, 'RM': \n 1000, 'ZC': 4000, 'PM': 200, 'WH': 300, 'RS': 500,\n 'RI': 400, 'JR': 500, 'LR': 500, 'SF': 500, 'SM': \n 2000, 'CY': 100, 'AP': 10, 'AP7': 6}\n data_dict[symbol] = limit_dict[product]\n elif product in ['CJ']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) -\n datetime.timedelta(days=1)).replace(day=1\n ) - datetime.timedelta(days=1)\n lst_month_fst_day = (expireDate.replace(day=1) - datetime.\n timedelta(days=1)).replace(day=1)\n lst_lst_change_day = self.find_tradeday(-1, date=\n lst_month_fst_day, exchange=exchange_id)\n last_change_day = self.find_tradeday(15, date=\n lst_lst_month_last_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if today <= lst_lst_change_day:\n limit_dict = {'CJ': 300}\n elif last_change_day >= today > lst_lst_change_day:\n limit_dict = {'CJ': 60}\n elif change_day > today > last_change_day:\n limit_dict = {'CJ': 20}\n else:\n limit_dict = {'CJ': 6}\n data_dict[symbol] = limit_dict[product]\n elif product in ['A', 'V', 'PP', 'C', 'B', 'L', 'P', 'J', 'JM',\n 'I', 'FB', 'BB', 'CS', 'Y', 'M', 'EG']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) -\n datetime.timedelta(days=1)).replace(day=1\n ) - datetime.timedelta(days=1)\n last_change_day = self.find_tradeday(14, date=\n lst_lst_month_last_day, exchange=exchange_id)\n change_day = self.find_tradeday(-1, date=expireDate.replace\n (day=1), 
exchange=exchange_id)\n if today <= last_change_day:\n limit_dict = {'A':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'V':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'PP':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'C':\n '单边持仓量<=40万:40000手,单边持仓量>40万:单边持仓量×10%', 'B':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'L':\n '单边持仓量<=10万:10000手,单边持仓量>10万:单边持仓量×10%', 'P':\n '单边持仓量<=10万:10000手,单边持仓量>10万:单边持仓量×10%', 'J':\n '单边持仓量<=5万:5000手,单边持仓量>5万:单边持仓量×10%', 'JM':\n '单边持仓量<=8万:8000手,单边持仓量>8万:单边持仓量×10%', 'I':\n '单边持仓量<=40万:40000手,单边持仓量>40万:单边持仓量×10%', 'FB':\n '单边持仓量<=16万:16000手,单边持仓量>16万:单边持仓量×10%', 'BB':\n '单边持仓量<=6万:6000手,单边持仓量>6万:单边持仓量×10%', 'CS':\n '单边持仓量<=15万:15000手,单边持仓量>15万:单边持仓量×10%', 'Y':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'M':\n '单边持仓量<=40万:40000手,单边持仓量>40万:单边持仓量×10%', 'EG':\n '单边持仓量<=8万:8000手,单边持仓量>8万:单边持仓量×10%, 单边持仓>12万:3000手, 提高保证金合约价值×10%'\n }\n elif change_day >= today > last_change_day:\n limit_dict = {'A': 5000, 'V': 5000, 'PP': 5000, 'C': \n 15000, 'B': 4500, 'L': 3000, 'P': 1500, 'J': 900,\n 'JM': 1500, 'I': 6000, 'FB': 400, 'BB': 80, 'CS': \n 4500, 'Y': 3000, 'M': 7500, 'EG':\n '3000手,单边持仓>8万:1000手, 提高保证金合约价值×20% '}\n else:\n limit_dict = {'A': 2500, 'V': 2500, 'PP': 2500, 'C': \n 5000, 'B': 1500, 'L': 1000, 'P': 500, 'J': 300,\n 'JM': 500, 'I': 2000, 'FB': 100, 'BB': 20, 'CS': \n 1500, 'Y': 1000, 'M': 2500, 'EG':\n '1000手,单边持仓>8万:1000手, 提高保证金合约价值×20% '}\n data_dict[symbol] = limit_dict[product]\n elif product in ['JD']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) -\n datetime.timedelta(days=1)).replace(day=1\n ) - datetime.timedelta(days=1)\n lst_lst_change_day = self.find_tradeday(1, date=\n lst_lst_month_last_day, exchange=exchange_id)\n last_change_day = self.find_tradeday(10, date=\n lst_lst_month_last_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if today < lst_lst_change_day:\n limit_dict = {'JD': 1200}\n elif last_change_day > today >= lst_lst_change_day:\n limit_dict = {'JD': 400}\n elif change_day > today >= last_change_day:\n limit_dict = {'JD': 120}\n else:\n limit_dict = {'JD': 20}\n data_dict[symbol] = limit_dict[product]\n elif product in ['CU', 'AL', 'ZN', 'PB', 'NI', 'SN', 'RB', 'WR',\n 'HC', 'RU', 'BU', 'AU', 'AG', 'SP']:\n expireDate = pd.to_datetime(expireDate)\n lst_month_fst_day = (expireDate.replace(day=1) - datetime.\n timedelta(days=1)).replace(day=1)\n last_change_day = self.find_tradeday(-1, date=\n lst_month_fst_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if today <= last_change_day:\n limit_dict = {'CU':\n '单边持仓量<16万:8000, 单边持仓量>=16万:单边持仓量×10%', 'AL':\n '单边持仓量<20万:10000, 单边持仓量>=20万:单边持仓量×10%', 'ZN':\n '单边持仓量<12万:6000, 单边持仓量>=12万:单边持仓量×10%', 'PB':\n '单边持仓量<10万:5000, 单边持仓量>=10万:单边持仓量×10%', 'NI':\n '单边持仓量<12万:6000, 单边持仓量>=12万:单边持仓量×10%', 'SN':\n '单边持仓量<3万:1500, 单边持仓量>=3万:单边持仓量×10%', 'RB':\n '单边持仓量<180万:90000, 单边持仓量>=180万:单边持仓量×10%', 'WR':\n '单边持仓量<45万:22500, 单边持仓量>=45万:单边持仓量×10%', 'HC':\n '单边持仓量<240万:120000, 单边持仓量>=240万:单边持仓量×10%', 'RU': \n 500, 'BU': 8000, 'AU': '非期货公司会员:18000, 客户:9000',\n 'AG': '非期货公司会员:18000手, 客户:9000手', 'SP': 4500}\n elif change_day > today > last_change_day:\n limit_dict = {'CU': 3000, 'AL': 3000, 'ZN': 2400, 'PB':\n 1800, 'NI': 1800, 'SN': 600, 'RB': 4500, 'WR': 1800,\n 'HC': 9000, 'RU': 150, 'BU': 1500, 'AU':\n '非期货公司会员:5400, 客户:2700', 'AG':\n '非期货公司会员:5400手, 客户:2700手', 'SP': 900}\n else:\n limit_dict = {'CU': 1000, 'AL': 1000, 'ZN': 800, 'PB': \n 600, 'NI': 600, 'SN': 200, 'RB': 900, 
'WR': 360,\n 'HC': 1800, 'RU': 50, 'BU': 500, 'AU':\n '非期货公司会员:1800手, 客户:900手', 'AG':\n '非期货公司会员:1800手, 客户:900手', 'SP': 300}\n data_dict[symbol] = limit_dict[product]\n elif product in ['FU', 'SC']:\n expireDate = pd.to_datetime(expireDate)\n last_change_day = ((expireDate.replace(day=1) - datetime.\n timedelta(days=1)).replace(day=1) - datetime.timedelta(\n days=1)).replace(day=1).strftime('%Y-%m-%d')\n change_day = (expireDate.replace(day=1) - datetime.\n timedelta(days=1)).replace(day=1).strftime('%Y-%m-%d')\n if today < last_change_day:\n limit_dict = {'FU': 7500, 'SC': 3000}\n elif change_day > today >= last_change_day:\n limit_dict = {'FU': 1500, 'SC': 1500}\n else:\n limit_dict = {'FU': 500, 'SC': 500}\n data_dict[symbol] = limit_dict[product]\n elif product in ['TF', 'TS', 'T']:\n expireDate = pd.to_datetime(expireDate)\n change_day = self.find_tradeday(-1, date=expireDate.replace\n (day=1), exchange=exchange_id)\n if today < change_day:\n limit_dict = {'TF': 2000, 'TS': 2000, 'T': 2000}\n else:\n limit_dict = {'TF': 600, 'TS': 600, 'T': 600}\n data_dict[symbol] = limit_dict[product]\n return data_dict\n",
"<import token>\n<code token>\n\n\nclass Future:\n \"\"\"\n 全系统Future单例,只需创建一次,合约、品种基础信息类\n \"\"\"\n\n def __init__(self):\n self.products = list()\n self.products_base_msg = dict()\n self.products_symbol_msg = dict()\n self.tradedays_msg = dict()\n self.main_contract_msg = dict()\n self.__get_product_mongomsg()\n self.__get_trading_sessions()\n self.__get_tradedays()\n self.__get_main_contract()\n\n def get_VolumeMultiple(self, contract_lst=None):\n \"\"\"\n 获取合约单位\n \"\"\"\n info_lst = ['VolumeMultiple']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n contract_temp = contract\n if len(contract) > 3:\n contract_temp = contract[:-4]\n dict_all[contract] = {i: self.products_base_msg[contract_temp][\n i] for i in info_lst}\n return dict_all\n\n def get_PriceTick(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['PriceTick']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n contract_temp = contract\n if len(contract) > 3:\n contract_temp = contract[:-4]\n dict_all[contract] = {i: self.products_base_msg[contract_temp][\n i] for i in info_lst}\n return dict_all\n\n def get_ExchangeID(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ExchangeID']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_ExchangeInstID(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ExchangeInstID']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_LongMarginRatio(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['LongMarginRatio']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_ShortMarginRatio(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ShortMarginRatio']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_MaxMarketOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxMarketOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_MaxLimitOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxLimitOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_contract_info(self, contract_lst=None, info_lst=None):\n \"\"\"\n 获取主力合约\n :return:\n :info_lst:['product', 'symbol', 'ProductID', 
'ExchangeID', 'MaxLimitOrderVolume', 'MinLimitOrderVolume',\n 'MaxMarketOrderVolume', 'MinMarketOrderVolume', 'LongMarginRatio', 'ShortMarginRatio', 'VolumeMultiple',\n 'ExchangeInstID', 'IsTrading']\n \"\"\"\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def __get_product_mongomsg(self):\n \"\"\"\n 获取mongo里的product数据\n :return:\n \"\"\"\n print(MONGDB_USER, MONGDB_PW, MONGDB_IP)\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n col = m_cl['MARKET']['product']\n df_product = pd.DataFrame(col.find({'ProductID': {'$regex':\n '^[a-zA-Z]{1,2}$'}}))\n self.products = list(df_product['ProductID'])\n self.products.remove('OI')\n df_product.index = df_product['ProductID']\n self.products_base_msg = df_product.T.to_dict()\n col = m_cl['MARKET']['instruments']\n del df_product\n df_symbols = pd.DataFrame(col.find({'symbol': {'$regex':\n '^[a-zA-Z]+[0-9]+$'}, 'ASSET_TYPE': 'Future'}))\n df_symbols['product'] = df_symbols['symbol'].str.extract(\n '(^[a-zA-Z]+)', expand=False).str.upper()\n for product, symbols in df_symbols.groupby('product'):\n symbols.index = symbols['symbol']\n symbols_dict = symbols.T.to_dict()\n self.products_symbol_msg[product] = symbols_dict\n\n def __get_main_contract(self):\n \"\"\"\n 获取主力合约\n :return:\n \"\"\"\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n db = m_cl['MARKET']\n for mark in ['', '_OI', '_VOL']:\n mark = 'main_contract' + mark\n col = db[mark]\n df = pd.DataFrame(col.find({}, {'_id': 0}))\n df = df.set_index('date').sort_index()\n df.index = pd.to_datetime(df.index)\n self.main_contract_msg[mark] = df\n\n def get_windy_code(self, code):\n \"\"\"\n 其他code转windy的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'INE', 'CFFEX': 'CFE'}\n pattern = re.compile('^[a-zA-Z]{1,2}')\n product = pattern.match(code).group(0).upper()\n exchange_id = self.products_base_msg[product]['ExchangeID']\n if exchange_id is np.NaN:\n return\n windy_code = code + '.' 
+ change_dict[exchange_id]\n return windy_code\n\n def get_jq_code(self, code):\n \"\"\"\n 其他code转jq的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'XDCE', 'CZCE': 'XZCE', 'SHFE': 'XSGE', 'INE':\n 'XINE', 'CFFEX': 'CCFX'}\n\n def get_main_symbol(self, product=None, date=None):\n \"\"\"\n :param product: str 或者list\n :param date:\n :return:\n \"\"\"\n if product:\n product = product if isinstance(product, list) else [product]\n date = pd.to_datetime(date) if date else pd.to_datetime(datetime.\n date.today())\n df = {}\n for symbol in product:\n print(symbol)\n df[symbol] = get_dominant_future(symbol, date)[:-5]\n return df\n\n def __get_trading_sessions(self):\n \"\"\"\n 获取期货历史交易时间窗口\n :return:\n \"\"\"\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n col = m_cl['MARKET']['TradingSessions']\n trading_sessions_df = pd.DataFrame(col.find())\n del trading_sessions_df['_id']\n trading_sessions_df['DateRange_Start'] = trading_sessions_df[\n 'DateRange_Start'].fillna('1990/01/01')\n trading_sessions_df['DateRange_End'] = trading_sessions_df[\n 'DateRange_End'].fillna('2099/01/01')\n for product in self.products_base_msg.keys():\n product_trading_sessions = trading_sessions_df.loc[\n trading_sessions_df['Market'] == product]\n self.products_base_msg[product]['trading_session'\n ] = product_trading_sessions\n\n def get_product_trading_sessions(self, product, date: str=None):\n \"\"\"\n 获取交易的时间窗口\n :param product: 品种\n :param date: 日期, 默认今日,如果是'all',返回全周期的\n :return:\n \"\"\"\n trade_sessions = self.products_base_msg[product]['trading_session']\n if date != 'all':\n date = pd.to_datetime(date) if date else datetime.date.today()\n date = date.strftime('%Y/%m/%d')\n trade_sessions = trade_sessions.loc[(trade_sessions[\n 'DateRange_Start'] <= date) & (trade_sessions[\n 'DateRange_End'] >= date),]\n trade_sessions = trade_sessions\n return trade_sessions\n\n def __get_tradedays(self):\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'SHF', 'CFFEX': 'CFE'}\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n db = m_cl['Tradedays']\n for jz_code, mongo_code in change_dict.items():\n col = db[mongo_code]\n tradedays_df = pd.DataFrame(col.find({'isTradingday': True}))\n tradedays_df = tradedays_df[['Tradedays_str', 'Tradedays']]\n tradedays_df['Tradedays'] = pd.to_datetime(tradedays_df[\n 'Tradedays'].dt.strftime('%Y-%m-%d %H:%M:%S'))\n tradedays_df.drop_duplicates(subset=['Tradedays_str'],\n inplace=True)\n tradedays_df.set_index('Tradedays', inplace=True)\n tradedays_df.sort_index(inplace=True)\n self.tradedays_msg[jz_code] = tradedays_df\n\n def find_tradeday(self, day: int, date=None, exchange: str='DCE'):\n \"\"\"\n 根据date查询距此日day天的交易日\n :param date: None 默认是今日\n :param day: day为0 时为判断今天是否是交易日,返回Bool\n :param exchange:\n :return: date:str\n \"\"\"\n date = pd.to_datetime(datetime.date.today()\n ) if not date else pd.to_datetime(date).replace(hour=0, minute=\n 0, second=0)\n tradeday_df = self.tradedays_msg[exchange]\n if day == 0:\n return date.strftime('%Y-%m-%d') in tradeday_df['Tradedays_str'\n ].values\n if day > 0:\n tradeday_df = tradeday_df.loc[tradeday_df.index > date]\n return tradeday_df.iloc[day - 1]['Tradedays_str']\n if day < 0:\n tradeday_df = tradeday_df.loc[tradeday_df.index < date]\n return tradeday_df.iloc[day]['Tradedays_str']\n\n def get_limit_position(self, symbols):\n \"\"\"\n 获取合约今日的最大持仓限制\n :param symbol: jz格式\n :return:\n \"\"\"\n 
symbols = symbols if isinstance(symbols, list) else [symbols]\n data_dict = dict()\n for symbol in symbols:\n pattern = re.compile('^[a-zA-Z]{1,2}')\n product = pattern.match(symbol).group(0).upper()\n expireDate = self.products_symbol_msg[product][symbol]['ExpireDate'\n ]\n exchange_id = self.products_base_msg[product]['ExchangeID']\n today = datetime.date.today().strftime('%Y-%m-%d')\n if pd.to_datetime(today).strftime('%Y%m%d') >= expireDate:\n data_dict[symbol] = 'expired'\n elif product in ['SA', 'CF', 'SR', 'TA', 'OI', 'MA', 'FG', 'RM',\n 'ZC', 'PM', 'WH', 'RS', 'RI', 'JR', 'LR', 'SF', 'SM', 'CY',\n 'AP']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) -\n datetime.timedelta(days=1)).replace(day=1\n ) - datetime.timedelta(days=1)\n last_change_day = self.find_tradeday(15, date=\n lst_lst_month_last_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if product == 'AP' and symbol[-1] == '7':\n product = product + '7'\n if today <= last_change_day:\n limit_dict = {'SA':\n '单边持仓量<20万:20000手, 单边持仓量≥20万:单边持仓量×10%', 'CF':\n '单边持仓量<15万:15000手,单边持仓量≥15万:单边持仓量×10%', 'SR':\n '单边持仓量<25万:25000手,单边持仓量≥25万:单边持仓量×10%', 'TA':\n '单边持仓量<25万:25000手,单边持仓量≥25万:单边持仓量×10%', 'OI':\n '单边持仓量<10万:10000手,单边持仓量≥10万:单边持仓量×10%', 'MA':\n '单边持仓量<10万:10000手,单边持仓量≥10万:单边持仓量×10%', 'FG':\n '单边持仓量<20万:20000手,单边持仓量≥20万:单边持仓量×10%', 'RM':\n '单边持仓量<20万:20000手,单边持仓量≥20万:单边持仓量×10%', 'ZC':\n '单边持仓量<60万:60000手,单边持仓量≥60万:单边持仓量×10%', 'PM': 2000,\n 'WH': 2500, 'RS': 10000, 'RI': 7500, 'JR': 20000,\n 'LR': 20000, 'SF': 8000, 'SM': 30000, 'CY': 5000,\n 'AP': 500, 'AP7': 100}\n elif change_day > today > last_change_day:\n limit_dict = {'SA': 4000, 'CF': 4000, 'SR': 5000, 'TA':\n 10000, 'OI': 3000, 'MA': 2000, 'FG': 5000, 'RM': \n 2000, 'ZC': 20000, 'PM': 600, 'WH': 1000, 'RS': \n 1000, 'RI': 2000, 'JR': 3000, 'LR': 3000, 'SF': \n 2000, 'SM': 10000, 'CY': 500, 'AP': 100, 'AP7': 20}\n else:\n limit_dict = {'SA': 800, 'CF': 800, 'SR': 1000, 'TA': \n 5000, 'OI': 1000, 'MA': 1000, 'FG': 1000, 'RM': \n 1000, 'ZC': 4000, 'PM': 200, 'WH': 300, 'RS': 500,\n 'RI': 400, 'JR': 500, 'LR': 500, 'SF': 500, 'SM': \n 2000, 'CY': 100, 'AP': 10, 'AP7': 6}\n data_dict[symbol] = limit_dict[product]\n elif product in ['CJ']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) -\n datetime.timedelta(days=1)).replace(day=1\n ) - datetime.timedelta(days=1)\n lst_month_fst_day = (expireDate.replace(day=1) - datetime.\n timedelta(days=1)).replace(day=1)\n lst_lst_change_day = self.find_tradeday(-1, date=\n lst_month_fst_day, exchange=exchange_id)\n last_change_day = self.find_tradeday(15, date=\n lst_lst_month_last_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if today <= lst_lst_change_day:\n limit_dict = {'CJ': 300}\n elif last_change_day >= today > lst_lst_change_day:\n limit_dict = {'CJ': 60}\n elif change_day > today > last_change_day:\n limit_dict = {'CJ': 20}\n else:\n limit_dict = {'CJ': 6}\n data_dict[symbol] = limit_dict[product]\n elif product in ['A', 'V', 'PP', 'C', 'B', 'L', 'P', 'J', 'JM',\n 'I', 'FB', 'BB', 'CS', 'Y', 'M', 'EG']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) -\n datetime.timedelta(days=1)).replace(day=1\n ) - datetime.timedelta(days=1)\n last_change_day = self.find_tradeday(14, date=\n lst_lst_month_last_day, exchange=exchange_id)\n change_day = self.find_tradeday(-1, date=expireDate.replace\n (day=1), 
exchange=exchange_id)\n if today <= last_change_day:\n limit_dict = {'A':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'V':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'PP':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'C':\n '单边持仓量<=40万:40000手,单边持仓量>40万:单边持仓量×10%', 'B':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'L':\n '单边持仓量<=10万:10000手,单边持仓量>10万:单边持仓量×10%', 'P':\n '单边持仓量<=10万:10000手,单边持仓量>10万:单边持仓量×10%', 'J':\n '单边持仓量<=5万:5000手,单边持仓量>5万:单边持仓量×10%', 'JM':\n '单边持仓量<=8万:8000手,单边持仓量>8万:单边持仓量×10%', 'I':\n '单边持仓量<=40万:40000手,单边持仓量>40万:单边持仓量×10%', 'FB':\n '单边持仓量<=16万:16000手,单边持仓量>16万:单边持仓量×10%', 'BB':\n '单边持仓量<=6万:6000手,单边持仓量>6万:单边持仓量×10%', 'CS':\n '单边持仓量<=15万:15000手,单边持仓量>15万:单边持仓量×10%', 'Y':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'M':\n '单边持仓量<=40万:40000手,单边持仓量>40万:单边持仓量×10%', 'EG':\n '单边持仓量<=8万:8000手,单边持仓量>8万:单边持仓量×10%, 单边持仓>12万:3000手, 提高保证金合约价值×10%'\n }\n elif change_day >= today > last_change_day:\n limit_dict = {'A': 5000, 'V': 5000, 'PP': 5000, 'C': \n 15000, 'B': 4500, 'L': 3000, 'P': 1500, 'J': 900,\n 'JM': 1500, 'I': 6000, 'FB': 400, 'BB': 80, 'CS': \n 4500, 'Y': 3000, 'M': 7500, 'EG':\n '3000手,单边持仓>8万:1000手, 提高保证金合约价值×20% '}\n else:\n limit_dict = {'A': 2500, 'V': 2500, 'PP': 2500, 'C': \n 5000, 'B': 1500, 'L': 1000, 'P': 500, 'J': 300,\n 'JM': 500, 'I': 2000, 'FB': 100, 'BB': 20, 'CS': \n 1500, 'Y': 1000, 'M': 2500, 'EG':\n '1000手,单边持仓>8万:1000手, 提高保证金合约价值×20% '}\n data_dict[symbol] = limit_dict[product]\n elif product in ['JD']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) -\n datetime.timedelta(days=1)).replace(day=1\n ) - datetime.timedelta(days=1)\n lst_lst_change_day = self.find_tradeday(1, date=\n lst_lst_month_last_day, exchange=exchange_id)\n last_change_day = self.find_tradeday(10, date=\n lst_lst_month_last_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if today < lst_lst_change_day:\n limit_dict = {'JD': 1200}\n elif last_change_day > today >= lst_lst_change_day:\n limit_dict = {'JD': 400}\n elif change_day > today >= last_change_day:\n limit_dict = {'JD': 120}\n else:\n limit_dict = {'JD': 20}\n data_dict[symbol] = limit_dict[product]\n elif product in ['CU', 'AL', 'ZN', 'PB', 'NI', 'SN', 'RB', 'WR',\n 'HC', 'RU', 'BU', 'AU', 'AG', 'SP']:\n expireDate = pd.to_datetime(expireDate)\n lst_month_fst_day = (expireDate.replace(day=1) - datetime.\n timedelta(days=1)).replace(day=1)\n last_change_day = self.find_tradeday(-1, date=\n lst_month_fst_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if today <= last_change_day:\n limit_dict = {'CU':\n '单边持仓量<16万:8000, 单边持仓量>=16万:单边持仓量×10%', 'AL':\n '单边持仓量<20万:10000, 单边持仓量>=20万:单边持仓量×10%', 'ZN':\n '单边持仓量<12万:6000, 单边持仓量>=12万:单边持仓量×10%', 'PB':\n '单边持仓量<10万:5000, 单边持仓量>=10万:单边持仓量×10%', 'NI':\n '单边持仓量<12万:6000, 单边持仓量>=12万:单边持仓量×10%', 'SN':\n '单边持仓量<3万:1500, 单边持仓量>=3万:单边持仓量×10%', 'RB':\n '单边持仓量<180万:90000, 单边持仓量>=180万:单边持仓量×10%', 'WR':\n '单边持仓量<45万:22500, 单边持仓量>=45万:单边持仓量×10%', 'HC':\n '单边持仓量<240万:120000, 单边持仓量>=240万:单边持仓量×10%', 'RU': \n 500, 'BU': 8000, 'AU': '非期货公司会员:18000, 客户:9000',\n 'AG': '非期货公司会员:18000手, 客户:9000手', 'SP': 4500}\n elif change_day > today > last_change_day:\n limit_dict = {'CU': 3000, 'AL': 3000, 'ZN': 2400, 'PB':\n 1800, 'NI': 1800, 'SN': 600, 'RB': 4500, 'WR': 1800,\n 'HC': 9000, 'RU': 150, 'BU': 1500, 'AU':\n '非期货公司会员:5400, 客户:2700', 'AG':\n '非期货公司会员:5400手, 客户:2700手', 'SP': 900}\n else:\n limit_dict = {'CU': 1000, 'AL': 1000, 'ZN': 800, 'PB': \n 600, 'NI': 600, 'SN': 200, 'RB': 900, 
'WR': 360,\n 'HC': 1800, 'RU': 50, 'BU': 500, 'AU':\n '非期货公司会员:1800手, 客户:900手', 'AG':\n '非期货公司会员:1800手, 客户:900手', 'SP': 300}\n data_dict[symbol] = limit_dict[product]\n elif product in ['FU', 'SC']:\n expireDate = pd.to_datetime(expireDate)\n last_change_day = ((expireDate.replace(day=1) - datetime.\n timedelta(days=1)).replace(day=1) - datetime.timedelta(\n days=1)).replace(day=1).strftime('%Y-%m-%d')\n change_day = (expireDate.replace(day=1) - datetime.\n timedelta(days=1)).replace(day=1).strftime('%Y-%m-%d')\n if today < last_change_day:\n limit_dict = {'FU': 7500, 'SC': 3000}\n elif change_day > today >= last_change_day:\n limit_dict = {'FU': 1500, 'SC': 1500}\n else:\n limit_dict = {'FU': 500, 'SC': 500}\n data_dict[symbol] = limit_dict[product]\n elif product in ['TF', 'TS', 'T']:\n expireDate = pd.to_datetime(expireDate)\n change_day = self.find_tradeday(-1, date=expireDate.replace\n (day=1), exchange=exchange_id)\n if today < change_day:\n limit_dict = {'TF': 2000, 'TS': 2000, 'T': 2000}\n else:\n limit_dict = {'TF': 600, 'TS': 600, 'T': 600}\n data_dict[symbol] = limit_dict[product]\n return data_dict\n",
"<import token>\n<code token>\n\n\nclass Future:\n <docstring token>\n\n def __init__(self):\n self.products = list()\n self.products_base_msg = dict()\n self.products_symbol_msg = dict()\n self.tradedays_msg = dict()\n self.main_contract_msg = dict()\n self.__get_product_mongomsg()\n self.__get_trading_sessions()\n self.__get_tradedays()\n self.__get_main_contract()\n\n def get_VolumeMultiple(self, contract_lst=None):\n \"\"\"\n 获取合约单位\n \"\"\"\n info_lst = ['VolumeMultiple']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n contract_temp = contract\n if len(contract) > 3:\n contract_temp = contract[:-4]\n dict_all[contract] = {i: self.products_base_msg[contract_temp][\n i] for i in info_lst}\n return dict_all\n\n def get_PriceTick(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['PriceTick']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n contract_temp = contract\n if len(contract) > 3:\n contract_temp = contract[:-4]\n dict_all[contract] = {i: self.products_base_msg[contract_temp][\n i] for i in info_lst}\n return dict_all\n\n def get_ExchangeID(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ExchangeID']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_ExchangeInstID(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ExchangeInstID']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_LongMarginRatio(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['LongMarginRatio']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_ShortMarginRatio(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ShortMarginRatio']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_MaxMarketOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxMarketOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_MaxLimitOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxLimitOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_contract_info(self, contract_lst=None, info_lst=None):\n \"\"\"\n 获取主力合约\n :return:\n :info_lst:['product', 'symbol', 'ProductID', 'ExchangeID', 'MaxLimitOrderVolume', 
'MinLimitOrderVolume',\n 'MaxMarketOrderVolume', 'MinMarketOrderVolume', 'LongMarginRatio', 'ShortMarginRatio', 'VolumeMultiple',\n 'ExchangeInstID', 'IsTrading']\n \"\"\"\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def __get_product_mongomsg(self):\n \"\"\"\n 获取mongo里的product数据\n :return:\n \"\"\"\n print(MONGDB_USER, MONGDB_PW, MONGDB_IP)\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n col = m_cl['MARKET']['product']\n df_product = pd.DataFrame(col.find({'ProductID': {'$regex':\n '^[a-zA-Z]{1,2}$'}}))\n self.products = list(df_product['ProductID'])\n self.products.remove('OI')\n df_product.index = df_product['ProductID']\n self.products_base_msg = df_product.T.to_dict()\n col = m_cl['MARKET']['instruments']\n del df_product\n df_symbols = pd.DataFrame(col.find({'symbol': {'$regex':\n '^[a-zA-Z]+[0-9]+$'}, 'ASSET_TYPE': 'Future'}))\n df_symbols['product'] = df_symbols['symbol'].str.extract(\n '(^[a-zA-Z]+)', expand=False).str.upper()\n for product, symbols in df_symbols.groupby('product'):\n symbols.index = symbols['symbol']\n symbols_dict = symbols.T.to_dict()\n self.products_symbol_msg[product] = symbols_dict\n\n def __get_main_contract(self):\n \"\"\"\n 获取主力合约\n :return:\n \"\"\"\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n db = m_cl['MARKET']\n for mark in ['', '_OI', '_VOL']:\n mark = 'main_contract' + mark\n col = db[mark]\n df = pd.DataFrame(col.find({}, {'_id': 0}))\n df = df.set_index('date').sort_index()\n df.index = pd.to_datetime(df.index)\n self.main_contract_msg[mark] = df\n\n def get_windy_code(self, code):\n \"\"\"\n 其他code转windy的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'INE', 'CFFEX': 'CFE'}\n pattern = re.compile('^[a-zA-Z]{1,2}')\n product = pattern.match(code).group(0).upper()\n exchange_id = self.products_base_msg[product]['ExchangeID']\n if exchange_id is np.NaN:\n return\n windy_code = code + '.' 
+ change_dict[exchange_id]\n return windy_code\n\n def get_jq_code(self, code):\n \"\"\"\n 其他code转jq的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'XDCE', 'CZCE': 'XZCE', 'SHFE': 'XSGE', 'INE':\n 'XINE', 'CFFEX': 'CCFX'}\n\n def get_main_symbol(self, product=None, date=None):\n \"\"\"\n :param product: str 或者list\n :param date:\n :return:\n \"\"\"\n if product:\n product = product if isinstance(product, list) else [product]\n date = pd.to_datetime(date) if date else pd.to_datetime(datetime.\n date.today())\n df = {}\n for symbol in product:\n print(symbol)\n df[symbol] = get_dominant_future(symbol, date)[:-5]\n return df\n\n def __get_trading_sessions(self):\n \"\"\"\n 获取期货历史交易时间窗口\n :return:\n \"\"\"\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n col = m_cl['MARKET']['TradingSessions']\n trading_sessions_df = pd.DataFrame(col.find())\n del trading_sessions_df['_id']\n trading_sessions_df['DateRange_Start'] = trading_sessions_df[\n 'DateRange_Start'].fillna('1990/01/01')\n trading_sessions_df['DateRange_End'] = trading_sessions_df[\n 'DateRange_End'].fillna('2099/01/01')\n for product in self.products_base_msg.keys():\n product_trading_sessions = trading_sessions_df.loc[\n trading_sessions_df['Market'] == product]\n self.products_base_msg[product]['trading_session'\n ] = product_trading_sessions\n\n def get_product_trading_sessions(self, product, date: str=None):\n \"\"\"\n 获取交易的时间窗口\n :param product: 品种\n :param date: 日期, 默认今日,如果是'all',返回全周期的\n :return:\n \"\"\"\n trade_sessions = self.products_base_msg[product]['trading_session']\n if date != 'all':\n date = pd.to_datetime(date) if date else datetime.date.today()\n date = date.strftime('%Y/%m/%d')\n trade_sessions = trade_sessions.loc[(trade_sessions[\n 'DateRange_Start'] <= date) & (trade_sessions[\n 'DateRange_End'] >= date),]\n trade_sessions = trade_sessions\n return trade_sessions\n\n def __get_tradedays(self):\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'SHF', 'CFFEX': 'CFE'}\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n db = m_cl['Tradedays']\n for jz_code, mongo_code in change_dict.items():\n col = db[mongo_code]\n tradedays_df = pd.DataFrame(col.find({'isTradingday': True}))\n tradedays_df = tradedays_df[['Tradedays_str', 'Tradedays']]\n tradedays_df['Tradedays'] = pd.to_datetime(tradedays_df[\n 'Tradedays'].dt.strftime('%Y-%m-%d %H:%M:%S'))\n tradedays_df.drop_duplicates(subset=['Tradedays_str'],\n inplace=True)\n tradedays_df.set_index('Tradedays', inplace=True)\n tradedays_df.sort_index(inplace=True)\n self.tradedays_msg[jz_code] = tradedays_df\n\n def find_tradeday(self, day: int, date=None, exchange: str='DCE'):\n \"\"\"\n 根据date查询距此日day天的交易日\n :param date: None 默认是今日\n :param day: day为0 时为判断今天是否是交易日,返回Bool\n :param exchange:\n :return: date:str\n \"\"\"\n date = pd.to_datetime(datetime.date.today()\n ) if not date else pd.to_datetime(date).replace(hour=0, minute=\n 0, second=0)\n tradeday_df = self.tradedays_msg[exchange]\n if day == 0:\n return date.strftime('%Y-%m-%d') in tradeday_df['Tradedays_str'\n ].values\n if day > 0:\n tradeday_df = tradeday_df.loc[tradeday_df.index > date]\n return tradeday_df.iloc[day - 1]['Tradedays_str']\n if day < 0:\n tradeday_df = tradeday_df.loc[tradeday_df.index < date]\n return tradeday_df.iloc[day]['Tradedays_str']\n\n def get_limit_position(self, symbols):\n \"\"\"\n 获取合约今日的最大持仓限制\n :param symbol: jz格式\n :return:\n \"\"\"\n 
symbols = symbols if isinstance(symbols, list) else [symbols]\n data_dict = dict()\n for symbol in symbols:\n pattern = re.compile('^[a-zA-Z]{1,2}')\n product = pattern.match(symbol).group(0).upper()\n expireDate = self.products_symbol_msg[product][symbol]['ExpireDate'\n ]\n exchange_id = self.products_base_msg[product]['ExchangeID']\n today = datetime.date.today().strftime('%Y-%m-%d')\n if pd.to_datetime(today).strftime('%Y%m%d') >= expireDate:\n data_dict[symbol] = 'expired'\n elif product in ['SA', 'CF', 'SR', 'TA', 'OI', 'MA', 'FG', 'RM',\n 'ZC', 'PM', 'WH', 'RS', 'RI', 'JR', 'LR', 'SF', 'SM', 'CY',\n 'AP']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) -\n datetime.timedelta(days=1)).replace(day=1\n ) - datetime.timedelta(days=1)\n last_change_day = self.find_tradeday(15, date=\n lst_lst_month_last_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if product == 'AP' and symbol[-1] == '7':\n product = product + '7'\n if today <= last_change_day:\n limit_dict = {'SA':\n '单边持仓量<20万:20000手, 单边持仓量≥20万:单边持仓量×10%', 'CF':\n '单边持仓量<15万:15000手,单边持仓量≥15万:单边持仓量×10%', 'SR':\n '单边持仓量<25万:25000手,单边持仓量≥25万:单边持仓量×10%', 'TA':\n '单边持仓量<25万:25000手,单边持仓量≥25万:单边持仓量×10%', 'OI':\n '单边持仓量<10万:10000手,单边持仓量≥10万:单边持仓量×10%', 'MA':\n '单边持仓量<10万:10000手,单边持仓量≥10万:单边持仓量×10%', 'FG':\n '单边持仓量<20万:20000手,单边持仓量≥20万:单边持仓量×10%', 'RM':\n '单边持仓量<20万:20000手,单边持仓量≥20万:单边持仓量×10%', 'ZC':\n '单边持仓量<60万:60000手,单边持仓量≥60万:单边持仓量×10%', 'PM': 2000,\n 'WH': 2500, 'RS': 10000, 'RI': 7500, 'JR': 20000,\n 'LR': 20000, 'SF': 8000, 'SM': 30000, 'CY': 5000,\n 'AP': 500, 'AP7': 100}\n elif change_day > today > last_change_day:\n limit_dict = {'SA': 4000, 'CF': 4000, 'SR': 5000, 'TA':\n 10000, 'OI': 3000, 'MA': 2000, 'FG': 5000, 'RM': \n 2000, 'ZC': 20000, 'PM': 600, 'WH': 1000, 'RS': \n 1000, 'RI': 2000, 'JR': 3000, 'LR': 3000, 'SF': \n 2000, 'SM': 10000, 'CY': 500, 'AP': 100, 'AP7': 20}\n else:\n limit_dict = {'SA': 800, 'CF': 800, 'SR': 1000, 'TA': \n 5000, 'OI': 1000, 'MA': 1000, 'FG': 1000, 'RM': \n 1000, 'ZC': 4000, 'PM': 200, 'WH': 300, 'RS': 500,\n 'RI': 400, 'JR': 500, 'LR': 500, 'SF': 500, 'SM': \n 2000, 'CY': 100, 'AP': 10, 'AP7': 6}\n data_dict[symbol] = limit_dict[product]\n elif product in ['CJ']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) -\n datetime.timedelta(days=1)).replace(day=1\n ) - datetime.timedelta(days=1)\n lst_month_fst_day = (expireDate.replace(day=1) - datetime.\n timedelta(days=1)).replace(day=1)\n lst_lst_change_day = self.find_tradeday(-1, date=\n lst_month_fst_day, exchange=exchange_id)\n last_change_day = self.find_tradeday(15, date=\n lst_lst_month_last_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if today <= lst_lst_change_day:\n limit_dict = {'CJ': 300}\n elif last_change_day >= today > lst_lst_change_day:\n limit_dict = {'CJ': 60}\n elif change_day > today > last_change_day:\n limit_dict = {'CJ': 20}\n else:\n limit_dict = {'CJ': 6}\n data_dict[symbol] = limit_dict[product]\n elif product in ['A', 'V', 'PP', 'C', 'B', 'L', 'P', 'J', 'JM',\n 'I', 'FB', 'BB', 'CS', 'Y', 'M', 'EG']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) -\n datetime.timedelta(days=1)).replace(day=1\n ) - datetime.timedelta(days=1)\n last_change_day = self.find_tradeday(14, date=\n lst_lst_month_last_day, exchange=exchange_id)\n change_day = self.find_tradeday(-1, date=expireDate.replace\n (day=1), 
exchange=exchange_id)\n if today <= last_change_day:\n limit_dict = {'A':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'V':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'PP':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'C':\n '单边持仓量<=40万:40000手,单边持仓量>40万:单边持仓量×10%', 'B':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'L':\n '单边持仓量<=10万:10000手,单边持仓量>10万:单边持仓量×10%', 'P':\n '单边持仓量<=10万:10000手,单边持仓量>10万:单边持仓量×10%', 'J':\n '单边持仓量<=5万:5000手,单边持仓量>5万:单边持仓量×10%', 'JM':\n '单边持仓量<=8万:8000手,单边持仓量>8万:单边持仓量×10%', 'I':\n '单边持仓量<=40万:40000手,单边持仓量>40万:单边持仓量×10%', 'FB':\n '单边持仓量<=16万:16000手,单边持仓量>16万:单边持仓量×10%', 'BB':\n '单边持仓量<=6万:6000手,单边持仓量>6万:单边持仓量×10%', 'CS':\n '单边持仓量<=15万:15000手,单边持仓量>15万:单边持仓量×10%', 'Y':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'M':\n '单边持仓量<=40万:40000手,单边持仓量>40万:单边持仓量×10%', 'EG':\n '单边持仓量<=8万:8000手,单边持仓量>8万:单边持仓量×10%, 单边持仓>12万:3000手, 提高保证金合约价值×10%'\n }\n elif change_day >= today > last_change_day:\n limit_dict = {'A': 5000, 'V': 5000, 'PP': 5000, 'C': \n 15000, 'B': 4500, 'L': 3000, 'P': 1500, 'J': 900,\n 'JM': 1500, 'I': 6000, 'FB': 400, 'BB': 80, 'CS': \n 4500, 'Y': 3000, 'M': 7500, 'EG':\n '3000手,单边持仓>8万:1000手, 提高保证金合约价值×20% '}\n else:\n limit_dict = {'A': 2500, 'V': 2500, 'PP': 2500, 'C': \n 5000, 'B': 1500, 'L': 1000, 'P': 500, 'J': 300,\n 'JM': 500, 'I': 2000, 'FB': 100, 'BB': 20, 'CS': \n 1500, 'Y': 1000, 'M': 2500, 'EG':\n '1000手,单边持仓>8万:1000手, 提高保证金合约价值×20% '}\n data_dict[symbol] = limit_dict[product]\n elif product in ['JD']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) -\n datetime.timedelta(days=1)).replace(day=1\n ) - datetime.timedelta(days=1)\n lst_lst_change_day = self.find_tradeday(1, date=\n lst_lst_month_last_day, exchange=exchange_id)\n last_change_day = self.find_tradeday(10, date=\n lst_lst_month_last_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if today < lst_lst_change_day:\n limit_dict = {'JD': 1200}\n elif last_change_day > today >= lst_lst_change_day:\n limit_dict = {'JD': 400}\n elif change_day > today >= last_change_day:\n limit_dict = {'JD': 120}\n else:\n limit_dict = {'JD': 20}\n data_dict[symbol] = limit_dict[product]\n elif product in ['CU', 'AL', 'ZN', 'PB', 'NI', 'SN', 'RB', 'WR',\n 'HC', 'RU', 'BU', 'AU', 'AG', 'SP']:\n expireDate = pd.to_datetime(expireDate)\n lst_month_fst_day = (expireDate.replace(day=1) - datetime.\n timedelta(days=1)).replace(day=1)\n last_change_day = self.find_tradeday(-1, date=\n lst_month_fst_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if today <= last_change_day:\n limit_dict = {'CU':\n '单边持仓量<16万:8000, 单边持仓量>=16万:单边持仓量×10%', 'AL':\n '单边持仓量<20万:10000, 单边持仓量>=20万:单边持仓量×10%', 'ZN':\n '单边持仓量<12万:6000, 单边持仓量>=12万:单边持仓量×10%', 'PB':\n '单边持仓量<10万:5000, 单边持仓量>=10万:单边持仓量×10%', 'NI':\n '单边持仓量<12万:6000, 单边持仓量>=12万:单边持仓量×10%', 'SN':\n '单边持仓量<3万:1500, 单边持仓量>=3万:单边持仓量×10%', 'RB':\n '单边持仓量<180万:90000, 单边持仓量>=180万:单边持仓量×10%', 'WR':\n '单边持仓量<45万:22500, 单边持仓量>=45万:单边持仓量×10%', 'HC':\n '单边持仓量<240万:120000, 单边持仓量>=240万:单边持仓量×10%', 'RU': \n 500, 'BU': 8000, 'AU': '非期货公司会员:18000, 客户:9000',\n 'AG': '非期货公司会员:18000手, 客户:9000手', 'SP': 4500}\n elif change_day > today > last_change_day:\n limit_dict = {'CU': 3000, 'AL': 3000, 'ZN': 2400, 'PB':\n 1800, 'NI': 1800, 'SN': 600, 'RB': 4500, 'WR': 1800,\n 'HC': 9000, 'RU': 150, 'BU': 1500, 'AU':\n '非期货公司会员:5400, 客户:2700', 'AG':\n '非期货公司会员:5400手, 客户:2700手', 'SP': 900}\n else:\n limit_dict = {'CU': 1000, 'AL': 1000, 'ZN': 800, 'PB': \n 600, 'NI': 600, 'SN': 200, 'RB': 900, 
'WR': 360,\n 'HC': 1800, 'RU': 50, 'BU': 500, 'AU':\n '非期货公司会员:1800手, 客户:900手', 'AG':\n '非期货公司会员:1800手, 客户:900手', 'SP': 300}\n data_dict[symbol] = limit_dict[product]\n elif product in ['FU', 'SC']:\n expireDate = pd.to_datetime(expireDate)\n last_change_day = ((expireDate.replace(day=1) - datetime.\n timedelta(days=1)).replace(day=1) - datetime.timedelta(\n days=1)).replace(day=1).strftime('%Y-%m-%d')\n change_day = (expireDate.replace(day=1) - datetime.\n timedelta(days=1)).replace(day=1).strftime('%Y-%m-%d')\n if today < last_change_day:\n limit_dict = {'FU': 7500, 'SC': 3000}\n elif change_day > today >= last_change_day:\n limit_dict = {'FU': 1500, 'SC': 1500}\n else:\n limit_dict = {'FU': 500, 'SC': 500}\n data_dict[symbol] = limit_dict[product]\n elif product in ['TF', 'TS', 'T']:\n expireDate = pd.to_datetime(expireDate)\n change_day = self.find_tradeday(-1, date=expireDate.replace\n (day=1), exchange=exchange_id)\n if today < change_day:\n limit_dict = {'TF': 2000, 'TS': 2000, 'T': 2000}\n else:\n limit_dict = {'TF': 600, 'TS': 600, 'T': 600}\n data_dict[symbol] = limit_dict[product]\n return data_dict\n",
"<import token>\n<code token>\n\n\nclass Future:\n <docstring token>\n\n def __init__(self):\n self.products = list()\n self.products_base_msg = dict()\n self.products_symbol_msg = dict()\n self.tradedays_msg = dict()\n self.main_contract_msg = dict()\n self.__get_product_mongomsg()\n self.__get_trading_sessions()\n self.__get_tradedays()\n self.__get_main_contract()\n\n def get_VolumeMultiple(self, contract_lst=None):\n \"\"\"\n 获取合约单位\n \"\"\"\n info_lst = ['VolumeMultiple']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n contract_temp = contract\n if len(contract) > 3:\n contract_temp = contract[:-4]\n dict_all[contract] = {i: self.products_base_msg[contract_temp][\n i] for i in info_lst}\n return dict_all\n\n def get_PriceTick(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['PriceTick']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n contract_temp = contract\n if len(contract) > 3:\n contract_temp = contract[:-4]\n dict_all[contract] = {i: self.products_base_msg[contract_temp][\n i] for i in info_lst}\n return dict_all\n\n def get_ExchangeID(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ExchangeID']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_ExchangeInstID(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ExchangeInstID']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_LongMarginRatio(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['LongMarginRatio']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_ShortMarginRatio(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ShortMarginRatio']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_MaxMarketOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxMarketOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_MaxLimitOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxLimitOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_contract_info(self, contract_lst=None, info_lst=None):\n \"\"\"\n 获取主力合约\n :return:\n :info_lst:['product', 'symbol', 'ProductID', 'ExchangeID', 'MaxLimitOrderVolume', 
'MinLimitOrderVolume',\n 'MaxMarketOrderVolume', 'MinMarketOrderVolume', 'LongMarginRatio', 'ShortMarginRatio', 'VolumeMultiple',\n 'ExchangeInstID', 'IsTrading']\n \"\"\"\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def __get_product_mongomsg(self):\n \"\"\"\n 获取mongo里的product数据\n :return:\n \"\"\"\n print(MONGDB_USER, MONGDB_PW, MONGDB_IP)\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n col = m_cl['MARKET']['product']\n df_product = pd.DataFrame(col.find({'ProductID': {'$regex':\n '^[a-zA-Z]{1,2}$'}}))\n self.products = list(df_product['ProductID'])\n self.products.remove('OI')\n df_product.index = df_product['ProductID']\n self.products_base_msg = df_product.T.to_dict()\n col = m_cl['MARKET']['instruments']\n del df_product\n df_symbols = pd.DataFrame(col.find({'symbol': {'$regex':\n '^[a-zA-Z]+[0-9]+$'}, 'ASSET_TYPE': 'Future'}))\n df_symbols['product'] = df_symbols['symbol'].str.extract(\n '(^[a-zA-Z]+)', expand=False).str.upper()\n for product, symbols in df_symbols.groupby('product'):\n symbols.index = symbols['symbol']\n symbols_dict = symbols.T.to_dict()\n self.products_symbol_msg[product] = symbols_dict\n\n def __get_main_contract(self):\n \"\"\"\n 获取主力合约\n :return:\n \"\"\"\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n db = m_cl['MARKET']\n for mark in ['', '_OI', '_VOL']:\n mark = 'main_contract' + mark\n col = db[mark]\n df = pd.DataFrame(col.find({}, {'_id': 0}))\n df = df.set_index('date').sort_index()\n df.index = pd.to_datetime(df.index)\n self.main_contract_msg[mark] = df\n\n def get_windy_code(self, code):\n \"\"\"\n 其他code转windy的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'INE', 'CFFEX': 'CFE'}\n pattern = re.compile('^[a-zA-Z]{1,2}')\n product = pattern.match(code).group(0).upper()\n exchange_id = self.products_base_msg[product]['ExchangeID']\n if exchange_id is np.NaN:\n return\n windy_code = code + '.' 
+ change_dict[exchange_id]\n return windy_code\n\n def get_jq_code(self, code):\n \"\"\"\n 其他code转jq的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'XDCE', 'CZCE': 'XZCE', 'SHFE': 'XSGE', 'INE':\n 'XINE', 'CFFEX': 'CCFX'}\n\n def get_main_symbol(self, product=None, date=None):\n \"\"\"\n :param product: str 或者list\n :param date:\n :return:\n \"\"\"\n if product:\n product = product if isinstance(product, list) else [product]\n date = pd.to_datetime(date) if date else pd.to_datetime(datetime.\n date.today())\n df = {}\n for symbol in product:\n print(symbol)\n df[symbol] = get_dominant_future(symbol, date)[:-5]\n return df\n <function token>\n\n def get_product_trading_sessions(self, product, date: str=None):\n \"\"\"\n 获取交易的时间窗口\n :param product: 品种\n :param date: 日期, 默认今日,如果是'all',返回全周期的\n :return:\n \"\"\"\n trade_sessions = self.products_base_msg[product]['trading_session']\n if date != 'all':\n date = pd.to_datetime(date) if date else datetime.date.today()\n date = date.strftime('%Y/%m/%d')\n trade_sessions = trade_sessions.loc[(trade_sessions[\n 'DateRange_Start'] <= date) & (trade_sessions[\n 'DateRange_End'] >= date),]\n trade_sessions = trade_sessions\n return trade_sessions\n\n def __get_tradedays(self):\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'SHF', 'CFFEX': 'CFE'}\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n db = m_cl['Tradedays']\n for jz_code, mongo_code in change_dict.items():\n col = db[mongo_code]\n tradedays_df = pd.DataFrame(col.find({'isTradingday': True}))\n tradedays_df = tradedays_df[['Tradedays_str', 'Tradedays']]\n tradedays_df['Tradedays'] = pd.to_datetime(tradedays_df[\n 'Tradedays'].dt.strftime('%Y-%m-%d %H:%M:%S'))\n tradedays_df.drop_duplicates(subset=['Tradedays_str'],\n inplace=True)\n tradedays_df.set_index('Tradedays', inplace=True)\n tradedays_df.sort_index(inplace=True)\n self.tradedays_msg[jz_code] = tradedays_df\n\n def find_tradeday(self, day: int, date=None, exchange: str='DCE'):\n \"\"\"\n 根据date查询距此日day天的交易日\n :param date: None 默认是今日\n :param day: day为0 时为判断今天是否是交易日,返回Bool\n :param exchange:\n :return: date:str\n \"\"\"\n date = pd.to_datetime(datetime.date.today()\n ) if not date else pd.to_datetime(date).replace(hour=0, minute=\n 0, second=0)\n tradeday_df = self.tradedays_msg[exchange]\n if day == 0:\n return date.strftime('%Y-%m-%d') in tradeday_df['Tradedays_str'\n ].values\n if day > 0:\n tradeday_df = tradeday_df.loc[tradeday_df.index > date]\n return tradeday_df.iloc[day - 1]['Tradedays_str']\n if day < 0:\n tradeday_df = tradeday_df.loc[tradeday_df.index < date]\n return tradeday_df.iloc[day]['Tradedays_str']\n\n def get_limit_position(self, symbols):\n \"\"\"\n 获取合约今日的最大持仓限制\n :param symbol: jz格式\n :return:\n \"\"\"\n symbols = symbols if isinstance(symbols, list) else [symbols]\n data_dict = dict()\n for symbol in symbols:\n pattern = re.compile('^[a-zA-Z]{1,2}')\n product = pattern.match(symbol).group(0).upper()\n expireDate = self.products_symbol_msg[product][symbol]['ExpireDate'\n ]\n exchange_id = self.products_base_msg[product]['ExchangeID']\n today = datetime.date.today().strftime('%Y-%m-%d')\n if pd.to_datetime(today).strftime('%Y%m%d') >= expireDate:\n data_dict[symbol] = 'expired'\n elif product in ['SA', 'CF', 'SR', 'TA', 'OI', 'MA', 'FG', 'RM',\n 'ZC', 'PM', 'WH', 'RS', 'RI', 'JR', 'LR', 'SF', 'SM', 'CY',\n 'AP']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) -\n 
datetime.timedelta(days=1)).replace(day=1\n ) - datetime.timedelta(days=1)\n last_change_day = self.find_tradeday(15, date=\n lst_lst_month_last_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if product == 'AP' and symbol[-1] == '7':\n product = product + '7'\n if today <= last_change_day:\n limit_dict = {'SA':\n '单边持仓量<20万:20000手, 单边持仓量≥20万:单边持仓量×10%', 'CF':\n '单边持仓量<15万:15000手,单边持仓量≥15万:单边持仓量×10%', 'SR':\n '单边持仓量<25万:25000手,单边持仓量≥25万:单边持仓量×10%', 'TA':\n '单边持仓量<25万:25000手,单边持仓量≥25万:单边持仓量×10%', 'OI':\n '单边持仓量<10万:10000手,单边持仓量≥10万:单边持仓量×10%', 'MA':\n '单边持仓量<10万:10000手,单边持仓量≥10万:单边持仓量×10%', 'FG':\n '单边持仓量<20万:20000手,单边持仓量≥20万:单边持仓量×10%', 'RM':\n '单边持仓量<20万:20000手,单边持仓量≥20万:单边持仓量×10%', 'ZC':\n '单边持仓量<60万:60000手,单边持仓量≥60万:单边持仓量×10%', 'PM': 2000,\n 'WH': 2500, 'RS': 10000, 'RI': 7500, 'JR': 20000,\n 'LR': 20000, 'SF': 8000, 'SM': 30000, 'CY': 5000,\n 'AP': 500, 'AP7': 100}\n elif change_day > today > last_change_day:\n limit_dict = {'SA': 4000, 'CF': 4000, 'SR': 5000, 'TA':\n 10000, 'OI': 3000, 'MA': 2000, 'FG': 5000, 'RM': \n 2000, 'ZC': 20000, 'PM': 600, 'WH': 1000, 'RS': \n 1000, 'RI': 2000, 'JR': 3000, 'LR': 3000, 'SF': \n 2000, 'SM': 10000, 'CY': 500, 'AP': 100, 'AP7': 20}\n else:\n limit_dict = {'SA': 800, 'CF': 800, 'SR': 1000, 'TA': \n 5000, 'OI': 1000, 'MA': 1000, 'FG': 1000, 'RM': \n 1000, 'ZC': 4000, 'PM': 200, 'WH': 300, 'RS': 500,\n 'RI': 400, 'JR': 500, 'LR': 500, 'SF': 500, 'SM': \n 2000, 'CY': 100, 'AP': 10, 'AP7': 6}\n data_dict[symbol] = limit_dict[product]\n elif product in ['CJ']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) -\n datetime.timedelta(days=1)).replace(day=1\n ) - datetime.timedelta(days=1)\n lst_month_fst_day = (expireDate.replace(day=1) - datetime.\n timedelta(days=1)).replace(day=1)\n lst_lst_change_day = self.find_tradeday(-1, date=\n lst_month_fst_day, exchange=exchange_id)\n last_change_day = self.find_tradeday(15, date=\n lst_lst_month_last_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if today <= lst_lst_change_day:\n limit_dict = {'CJ': 300}\n elif last_change_day >= today > lst_lst_change_day:\n limit_dict = {'CJ': 60}\n elif change_day > today > last_change_day:\n limit_dict = {'CJ': 20}\n else:\n limit_dict = {'CJ': 6}\n data_dict[symbol] = limit_dict[product]\n elif product in ['A', 'V', 'PP', 'C', 'B', 'L', 'P', 'J', 'JM',\n 'I', 'FB', 'BB', 'CS', 'Y', 'M', 'EG']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) -\n datetime.timedelta(days=1)).replace(day=1\n ) - datetime.timedelta(days=1)\n last_change_day = self.find_tradeday(14, date=\n lst_lst_month_last_day, exchange=exchange_id)\n change_day = self.find_tradeday(-1, date=expireDate.replace\n (day=1), exchange=exchange_id)\n if today <= last_change_day:\n limit_dict = {'A':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'V':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'PP':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'C':\n '单边持仓量<=40万:40000手,单边持仓量>40万:单边持仓量×10%', 'B':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'L':\n '单边持仓量<=10万:10000手,单边持仓量>10万:单边持仓量×10%', 'P':\n '单边持仓量<=10万:10000手,单边持仓量>10万:单边持仓量×10%', 'J':\n '单边持仓量<=5万:5000手,单边持仓量>5万:单边持仓量×10%', 'JM':\n '单边持仓量<=8万:8000手,单边持仓量>8万:单边持仓量×10%', 'I':\n '单边持仓量<=40万:40000手,单边持仓量>40万:单边持仓量×10%', 'FB':\n '单边持仓量<=16万:16000手,单边持仓量>16万:单边持仓量×10%', 'BB':\n '单边持仓量<=6万:6000手,单边持仓量>6万:单边持仓量×10%', 'CS':\n '单边持仓量<=15万:15000手,单边持仓量>15万:单边持仓量×10%', 'Y':\n 
'单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'M':\n '单边持仓量<=40万:40000手,单边持仓量>40万:单边持仓量×10%', 'EG':\n '单边持仓量<=8万:8000手,单边持仓量>8万:单边持仓量×10%, 单边持仓>12万:3000手, 提高保证金合约价值×10%'\n }\n elif change_day >= today > last_change_day:\n limit_dict = {'A': 5000, 'V': 5000, 'PP': 5000, 'C': \n 15000, 'B': 4500, 'L': 3000, 'P': 1500, 'J': 900,\n 'JM': 1500, 'I': 6000, 'FB': 400, 'BB': 80, 'CS': \n 4500, 'Y': 3000, 'M': 7500, 'EG':\n '3000手,单边持仓>8万:1000手, 提高保证金合约价值×20% '}\n else:\n limit_dict = {'A': 2500, 'V': 2500, 'PP': 2500, 'C': \n 5000, 'B': 1500, 'L': 1000, 'P': 500, 'J': 300,\n 'JM': 500, 'I': 2000, 'FB': 100, 'BB': 20, 'CS': \n 1500, 'Y': 1000, 'M': 2500, 'EG':\n '1000手,单边持仓>8万:1000手, 提高保证金合约价值×20% '}\n data_dict[symbol] = limit_dict[product]\n elif product in ['JD']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) -\n datetime.timedelta(days=1)).replace(day=1\n ) - datetime.timedelta(days=1)\n lst_lst_change_day = self.find_tradeday(1, date=\n lst_lst_month_last_day, exchange=exchange_id)\n last_change_day = self.find_tradeday(10, date=\n lst_lst_month_last_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if today < lst_lst_change_day:\n limit_dict = {'JD': 1200}\n elif last_change_day > today >= lst_lst_change_day:\n limit_dict = {'JD': 400}\n elif change_day > today >= last_change_day:\n limit_dict = {'JD': 120}\n else:\n limit_dict = {'JD': 20}\n data_dict[symbol] = limit_dict[product]\n elif product in ['CU', 'AL', 'ZN', 'PB', 'NI', 'SN', 'RB', 'WR',\n 'HC', 'RU', 'BU', 'AU', 'AG', 'SP']:\n expireDate = pd.to_datetime(expireDate)\n lst_month_fst_day = (expireDate.replace(day=1) - datetime.\n timedelta(days=1)).replace(day=1)\n last_change_day = self.find_tradeday(-1, date=\n lst_month_fst_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if today <= last_change_day:\n limit_dict = {'CU':\n '单边持仓量<16万:8000, 单边持仓量>=16万:单边持仓量×10%', 'AL':\n '单边持仓量<20万:10000, 单边持仓量>=20万:单边持仓量×10%', 'ZN':\n '单边持仓量<12万:6000, 单边持仓量>=12万:单边持仓量×10%', 'PB':\n '单边持仓量<10万:5000, 单边持仓量>=10万:单边持仓量×10%', 'NI':\n '单边持仓量<12万:6000, 单边持仓量>=12万:单边持仓量×10%', 'SN':\n '单边持仓量<3万:1500, 单边持仓量>=3万:单边持仓量×10%', 'RB':\n '单边持仓量<180万:90000, 单边持仓量>=180万:单边持仓量×10%', 'WR':\n '单边持仓量<45万:22500, 单边持仓量>=45万:单边持仓量×10%', 'HC':\n '单边持仓量<240万:120000, 单边持仓量>=240万:单边持仓量×10%', 'RU': \n 500, 'BU': 8000, 'AU': '非期货公司会员:18000, 客户:9000',\n 'AG': '非期货公司会员:18000手, 客户:9000手', 'SP': 4500}\n elif change_day > today > last_change_day:\n limit_dict = {'CU': 3000, 'AL': 3000, 'ZN': 2400, 'PB':\n 1800, 'NI': 1800, 'SN': 600, 'RB': 4500, 'WR': 1800,\n 'HC': 9000, 'RU': 150, 'BU': 1500, 'AU':\n '非期货公司会员:5400, 客户:2700', 'AG':\n '非期货公司会员:5400手, 客户:2700手', 'SP': 900}\n else:\n limit_dict = {'CU': 1000, 'AL': 1000, 'ZN': 800, 'PB': \n 600, 'NI': 600, 'SN': 200, 'RB': 900, 'WR': 360,\n 'HC': 1800, 'RU': 50, 'BU': 500, 'AU':\n '非期货公司会员:1800手, 客户:900手', 'AG':\n '非期货公司会员:1800手, 客户:900手', 'SP': 300}\n data_dict[symbol] = limit_dict[product]\n elif product in ['FU', 'SC']:\n expireDate = pd.to_datetime(expireDate)\n last_change_day = ((expireDate.replace(day=1) - datetime.\n timedelta(days=1)).replace(day=1) - datetime.timedelta(\n days=1)).replace(day=1).strftime('%Y-%m-%d')\n change_day = (expireDate.replace(day=1) - datetime.\n timedelta(days=1)).replace(day=1).strftime('%Y-%m-%d')\n if today < last_change_day:\n limit_dict = {'FU': 7500, 'SC': 3000}\n elif change_day > today >= last_change_day:\n limit_dict = {'FU': 1500, 'SC': 1500}\n else:\n limit_dict = 
{'FU': 500, 'SC': 500}\n data_dict[symbol] = limit_dict[product]\n elif product in ['TF', 'TS', 'T']:\n expireDate = pd.to_datetime(expireDate)\n change_day = self.find_tradeday(-1, date=expireDate.replace\n (day=1), exchange=exchange_id)\n if today < change_day:\n limit_dict = {'TF': 2000, 'TS': 2000, 'T': 2000}\n else:\n limit_dict = {'TF': 600, 'TS': 600, 'T': 600}\n data_dict[symbol] = limit_dict[product]\n return data_dict\n",
"<import token>\n<code token>\n\n\nclass Future:\n <docstring token>\n\n def __init__(self):\n self.products = list()\n self.products_base_msg = dict()\n self.products_symbol_msg = dict()\n self.tradedays_msg = dict()\n self.main_contract_msg = dict()\n self.__get_product_mongomsg()\n self.__get_trading_sessions()\n self.__get_tradedays()\n self.__get_main_contract()\n\n def get_VolumeMultiple(self, contract_lst=None):\n \"\"\"\n 获取合约单位\n \"\"\"\n info_lst = ['VolumeMultiple']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n contract_temp = contract\n if len(contract) > 3:\n contract_temp = contract[:-4]\n dict_all[contract] = {i: self.products_base_msg[contract_temp][\n i] for i in info_lst}\n return dict_all\n\n def get_PriceTick(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['PriceTick']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n contract_temp = contract\n if len(contract) > 3:\n contract_temp = contract[:-4]\n dict_all[contract] = {i: self.products_base_msg[contract_temp][\n i] for i in info_lst}\n return dict_all\n\n def get_ExchangeID(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ExchangeID']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n <function token>\n\n def get_LongMarginRatio(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['LongMarginRatio']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_ShortMarginRatio(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ShortMarginRatio']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_MaxMarketOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxMarketOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_MaxLimitOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxLimitOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_contract_info(self, contract_lst=None, info_lst=None):\n \"\"\"\n 获取主力合约\n :return:\n :info_lst:['product', 'symbol', 'ProductID', 'ExchangeID', 'MaxLimitOrderVolume', 'MinLimitOrderVolume',\n 'MaxMarketOrderVolume', 'MinMarketOrderVolume', 'LongMarginRatio', 'ShortMarginRatio', 'VolumeMultiple',\n 'ExchangeInstID', 'IsTrading']\n \"\"\"\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n 
split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def __get_product_mongomsg(self):\n \"\"\"\n 获取mongo里的product数据\n :return:\n \"\"\"\n print(MONGDB_USER, MONGDB_PW, MONGDB_IP)\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n col = m_cl['MARKET']['product']\n df_product = pd.DataFrame(col.find({'ProductID': {'$regex':\n '^[a-zA-Z]{1,2}$'}}))\n self.products = list(df_product['ProductID'])\n self.products.remove('OI')\n df_product.index = df_product['ProductID']\n self.products_base_msg = df_product.T.to_dict()\n col = m_cl['MARKET']['instruments']\n del df_product\n df_symbols = pd.DataFrame(col.find({'symbol': {'$regex':\n '^[a-zA-Z]+[0-9]+$'}, 'ASSET_TYPE': 'Future'}))\n df_symbols['product'] = df_symbols['symbol'].str.extract(\n '(^[a-zA-Z]+)', expand=False).str.upper()\n for product, symbols in df_symbols.groupby('product'):\n symbols.index = symbols['symbol']\n symbols_dict = symbols.T.to_dict()\n self.products_symbol_msg[product] = symbols_dict\n\n def __get_main_contract(self):\n \"\"\"\n 获取主力合约\n :return:\n \"\"\"\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n db = m_cl['MARKET']\n for mark in ['', '_OI', '_VOL']:\n mark = 'main_contract' + mark\n col = db[mark]\n df = pd.DataFrame(col.find({}, {'_id': 0}))\n df = df.set_index('date').sort_index()\n df.index = pd.to_datetime(df.index)\n self.main_contract_msg[mark] = df\n\n def get_windy_code(self, code):\n \"\"\"\n 其他code转windy的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'INE', 'CFFEX': 'CFE'}\n pattern = re.compile('^[a-zA-Z]{1,2}')\n product = pattern.match(code).group(0).upper()\n exchange_id = self.products_base_msg[product]['ExchangeID']\n if exchange_id is np.NaN:\n return\n windy_code = code + '.' 
+ change_dict[exchange_id]\n return windy_code\n\n def get_jq_code(self, code):\n \"\"\"\n 其他code转jq的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'XDCE', 'CZCE': 'XZCE', 'SHFE': 'XSGE', 'INE':\n 'XINE', 'CFFEX': 'CCFX'}\n\n def get_main_symbol(self, product=None, date=None):\n \"\"\"\n :param product: str 或者list\n :param date:\n :return:\n \"\"\"\n if product:\n product = product if isinstance(product, list) else [product]\n date = pd.to_datetime(date) if date else pd.to_datetime(datetime.\n date.today())\n df = {}\n for symbol in product:\n print(symbol)\n df[symbol] = get_dominant_future(symbol, date)[:-5]\n return df\n <function token>\n\n def get_product_trading_sessions(self, product, date: str=None):\n \"\"\"\n 获取交易的时间窗口\n :param product: 品种\n :param date: 日期, 默认今日,如果是'all',返回全周期的\n :return:\n \"\"\"\n trade_sessions = self.products_base_msg[product]['trading_session']\n if date != 'all':\n date = pd.to_datetime(date) if date else datetime.date.today()\n date = date.strftime('%Y/%m/%d')\n trade_sessions = trade_sessions.loc[(trade_sessions[\n 'DateRange_Start'] <= date) & (trade_sessions[\n 'DateRange_End'] >= date),]\n trade_sessions = trade_sessions\n return trade_sessions\n\n def __get_tradedays(self):\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'SHF', 'CFFEX': 'CFE'}\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n db = m_cl['Tradedays']\n for jz_code, mongo_code in change_dict.items():\n col = db[mongo_code]\n tradedays_df = pd.DataFrame(col.find({'isTradingday': True}))\n tradedays_df = tradedays_df[['Tradedays_str', 'Tradedays']]\n tradedays_df['Tradedays'] = pd.to_datetime(tradedays_df[\n 'Tradedays'].dt.strftime('%Y-%m-%d %H:%M:%S'))\n tradedays_df.drop_duplicates(subset=['Tradedays_str'],\n inplace=True)\n tradedays_df.set_index('Tradedays', inplace=True)\n tradedays_df.sort_index(inplace=True)\n self.tradedays_msg[jz_code] = tradedays_df\n\n def find_tradeday(self, day: int, date=None, exchange: str='DCE'):\n \"\"\"\n 根据date查询距此日day天的交易日\n :param date: None 默认是今日\n :param day: day为0 时为判断今天是否是交易日,返回Bool\n :param exchange:\n :return: date:str\n \"\"\"\n date = pd.to_datetime(datetime.date.today()\n ) if not date else pd.to_datetime(date).replace(hour=0, minute=\n 0, second=0)\n tradeday_df = self.tradedays_msg[exchange]\n if day == 0:\n return date.strftime('%Y-%m-%d') in tradeday_df['Tradedays_str'\n ].values\n if day > 0:\n tradeday_df = tradeday_df.loc[tradeday_df.index > date]\n return tradeday_df.iloc[day - 1]['Tradedays_str']\n if day < 0:\n tradeday_df = tradeday_df.loc[tradeday_df.index < date]\n return tradeday_df.iloc[day]['Tradedays_str']\n\n def get_limit_position(self, symbols):\n \"\"\"\n 获取合约今日的最大持仓限制\n :param symbol: jz格式\n :return:\n \"\"\"\n symbols = symbols if isinstance(symbols, list) else [symbols]\n data_dict = dict()\n for symbol in symbols:\n pattern = re.compile('^[a-zA-Z]{1,2}')\n product = pattern.match(symbol).group(0).upper()\n expireDate = self.products_symbol_msg[product][symbol]['ExpireDate'\n ]\n exchange_id = self.products_base_msg[product]['ExchangeID']\n today = datetime.date.today().strftime('%Y-%m-%d')\n if pd.to_datetime(today).strftime('%Y%m%d') >= expireDate:\n data_dict[symbol] = 'expired'\n elif product in ['SA', 'CF', 'SR', 'TA', 'OI', 'MA', 'FG', 'RM',\n 'ZC', 'PM', 'WH', 'RS', 'RI', 'JR', 'LR', 'SF', 'SM', 'CY',\n 'AP']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) -\n 
datetime.timedelta(days=1)).replace(day=1\n ) - datetime.timedelta(days=1)\n last_change_day = self.find_tradeday(15, date=\n lst_lst_month_last_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if product == 'AP' and symbol[-1] == '7':\n product = product + '7'\n if today <= last_change_day:\n limit_dict = {'SA':\n '单边持仓量<20万:20000手, 单边持仓量≥20万:单边持仓量×10%', 'CF':\n '单边持仓量<15万:15000手,单边持仓量≥15万:单边持仓量×10%', 'SR':\n '单边持仓量<25万:25000手,单边持仓量≥25万:单边持仓量×10%', 'TA':\n '单边持仓量<25万:25000手,单边持仓量≥25万:单边持仓量×10%', 'OI':\n '单边持仓量<10万:10000手,单边持仓量≥10万:单边持仓量×10%', 'MA':\n '单边持仓量<10万:10000手,单边持仓量≥10万:单边持仓量×10%', 'FG':\n '单边持仓量<20万:20000手,单边持仓量≥20万:单边持仓量×10%', 'RM':\n '单边持仓量<20万:20000手,单边持仓量≥20万:单边持仓量×10%', 'ZC':\n '单边持仓量<60万:60000手,单边持仓量≥60万:单边持仓量×10%', 'PM': 2000,\n 'WH': 2500, 'RS': 10000, 'RI': 7500, 'JR': 20000,\n 'LR': 20000, 'SF': 8000, 'SM': 30000, 'CY': 5000,\n 'AP': 500, 'AP7': 100}\n elif change_day > today > last_change_day:\n limit_dict = {'SA': 4000, 'CF': 4000, 'SR': 5000, 'TA':\n 10000, 'OI': 3000, 'MA': 2000, 'FG': 5000, 'RM': \n 2000, 'ZC': 20000, 'PM': 600, 'WH': 1000, 'RS': \n 1000, 'RI': 2000, 'JR': 3000, 'LR': 3000, 'SF': \n 2000, 'SM': 10000, 'CY': 500, 'AP': 100, 'AP7': 20}\n else:\n limit_dict = {'SA': 800, 'CF': 800, 'SR': 1000, 'TA': \n 5000, 'OI': 1000, 'MA': 1000, 'FG': 1000, 'RM': \n 1000, 'ZC': 4000, 'PM': 200, 'WH': 300, 'RS': 500,\n 'RI': 400, 'JR': 500, 'LR': 500, 'SF': 500, 'SM': \n 2000, 'CY': 100, 'AP': 10, 'AP7': 6}\n data_dict[symbol] = limit_dict[product]\n elif product in ['CJ']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) -\n datetime.timedelta(days=1)).replace(day=1\n ) - datetime.timedelta(days=1)\n lst_month_fst_day = (expireDate.replace(day=1) - datetime.\n timedelta(days=1)).replace(day=1)\n lst_lst_change_day = self.find_tradeday(-1, date=\n lst_month_fst_day, exchange=exchange_id)\n last_change_day = self.find_tradeday(15, date=\n lst_lst_month_last_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if today <= lst_lst_change_day:\n limit_dict = {'CJ': 300}\n elif last_change_day >= today > lst_lst_change_day:\n limit_dict = {'CJ': 60}\n elif change_day > today > last_change_day:\n limit_dict = {'CJ': 20}\n else:\n limit_dict = {'CJ': 6}\n data_dict[symbol] = limit_dict[product]\n elif product in ['A', 'V', 'PP', 'C', 'B', 'L', 'P', 'J', 'JM',\n 'I', 'FB', 'BB', 'CS', 'Y', 'M', 'EG']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) -\n datetime.timedelta(days=1)).replace(day=1\n ) - datetime.timedelta(days=1)\n last_change_day = self.find_tradeday(14, date=\n lst_lst_month_last_day, exchange=exchange_id)\n change_day = self.find_tradeday(-1, date=expireDate.replace\n (day=1), exchange=exchange_id)\n if today <= last_change_day:\n limit_dict = {'A':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'V':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'PP':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'C':\n '单边持仓量<=40万:40000手,单边持仓量>40万:单边持仓量×10%', 'B':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'L':\n '单边持仓量<=10万:10000手,单边持仓量>10万:单边持仓量×10%', 'P':\n '单边持仓量<=10万:10000手,单边持仓量>10万:单边持仓量×10%', 'J':\n '单边持仓量<=5万:5000手,单边持仓量>5万:单边持仓量×10%', 'JM':\n '单边持仓量<=8万:8000手,单边持仓量>8万:单边持仓量×10%', 'I':\n '单边持仓量<=40万:40000手,单边持仓量>40万:单边持仓量×10%', 'FB':\n '单边持仓量<=16万:16000手,单边持仓量>16万:单边持仓量×10%', 'BB':\n '单边持仓量<=6万:6000手,单边持仓量>6万:单边持仓量×10%', 'CS':\n '单边持仓量<=15万:15000手,单边持仓量>15万:单边持仓量×10%', 'Y':\n 
'单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'M':\n '单边持仓量<=40万:40000手,单边持仓量>40万:单边持仓量×10%', 'EG':\n '单边持仓量<=8万:8000手,单边持仓量>8万:单边持仓量×10%, 单边持仓>12万:3000手, 提高保证金合约价值×10%'\n }\n elif change_day >= today > last_change_day:\n limit_dict = {'A': 5000, 'V': 5000, 'PP': 5000, 'C': \n 15000, 'B': 4500, 'L': 3000, 'P': 1500, 'J': 900,\n 'JM': 1500, 'I': 6000, 'FB': 400, 'BB': 80, 'CS': \n 4500, 'Y': 3000, 'M': 7500, 'EG':\n '3000手,单边持仓>8万:1000手, 提高保证金合约价值×20% '}\n else:\n limit_dict = {'A': 2500, 'V': 2500, 'PP': 2500, 'C': \n 5000, 'B': 1500, 'L': 1000, 'P': 500, 'J': 300,\n 'JM': 500, 'I': 2000, 'FB': 100, 'BB': 20, 'CS': \n 1500, 'Y': 1000, 'M': 2500, 'EG':\n '1000手,单边持仓>8万:1000手, 提高保证金合约价值×20% '}\n data_dict[symbol] = limit_dict[product]\n elif product in ['JD']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) -\n datetime.timedelta(days=1)).replace(day=1\n ) - datetime.timedelta(days=1)\n lst_lst_change_day = self.find_tradeday(1, date=\n lst_lst_month_last_day, exchange=exchange_id)\n last_change_day = self.find_tradeday(10, date=\n lst_lst_month_last_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if today < lst_lst_change_day:\n limit_dict = {'JD': 1200}\n elif last_change_day > today >= lst_lst_change_day:\n limit_dict = {'JD': 400}\n elif change_day > today >= last_change_day:\n limit_dict = {'JD': 120}\n else:\n limit_dict = {'JD': 20}\n data_dict[symbol] = limit_dict[product]\n elif product in ['CU', 'AL', 'ZN', 'PB', 'NI', 'SN', 'RB', 'WR',\n 'HC', 'RU', 'BU', 'AU', 'AG', 'SP']:\n expireDate = pd.to_datetime(expireDate)\n lst_month_fst_day = (expireDate.replace(day=1) - datetime.\n timedelta(days=1)).replace(day=1)\n last_change_day = self.find_tradeday(-1, date=\n lst_month_fst_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if today <= last_change_day:\n limit_dict = {'CU':\n '单边持仓量<16万:8000, 单边持仓量>=16万:单边持仓量×10%', 'AL':\n '单边持仓量<20万:10000, 单边持仓量>=20万:单边持仓量×10%', 'ZN':\n '单边持仓量<12万:6000, 单边持仓量>=12万:单边持仓量×10%', 'PB':\n '单边持仓量<10万:5000, 单边持仓量>=10万:单边持仓量×10%', 'NI':\n '单边持仓量<12万:6000, 单边持仓量>=12万:单边持仓量×10%', 'SN':\n '单边持仓量<3万:1500, 单边持仓量>=3万:单边持仓量×10%', 'RB':\n '单边持仓量<180万:90000, 单边持仓量>=180万:单边持仓量×10%', 'WR':\n '单边持仓量<45万:22500, 单边持仓量>=45万:单边持仓量×10%', 'HC':\n '单边持仓量<240万:120000, 单边持仓量>=240万:单边持仓量×10%', 'RU': \n 500, 'BU': 8000, 'AU': '非期货公司会员:18000, 客户:9000',\n 'AG': '非期货公司会员:18000手, 客户:9000手', 'SP': 4500}\n elif change_day > today > last_change_day:\n limit_dict = {'CU': 3000, 'AL': 3000, 'ZN': 2400, 'PB':\n 1800, 'NI': 1800, 'SN': 600, 'RB': 4500, 'WR': 1800,\n 'HC': 9000, 'RU': 150, 'BU': 1500, 'AU':\n '非期货公司会员:5400, 客户:2700', 'AG':\n '非期货公司会员:5400手, 客户:2700手', 'SP': 900}\n else:\n limit_dict = {'CU': 1000, 'AL': 1000, 'ZN': 800, 'PB': \n 600, 'NI': 600, 'SN': 200, 'RB': 900, 'WR': 360,\n 'HC': 1800, 'RU': 50, 'BU': 500, 'AU':\n '非期货公司会员:1800手, 客户:900手', 'AG':\n '非期货公司会员:1800手, 客户:900手', 'SP': 300}\n data_dict[symbol] = limit_dict[product]\n elif product in ['FU', 'SC']:\n expireDate = pd.to_datetime(expireDate)\n last_change_day = ((expireDate.replace(day=1) - datetime.\n timedelta(days=1)).replace(day=1) - datetime.timedelta(\n days=1)).replace(day=1).strftime('%Y-%m-%d')\n change_day = (expireDate.replace(day=1) - datetime.\n timedelta(days=1)).replace(day=1).strftime('%Y-%m-%d')\n if today < last_change_day:\n limit_dict = {'FU': 7500, 'SC': 3000}\n elif change_day > today >= last_change_day:\n limit_dict = {'FU': 1500, 'SC': 1500}\n else:\n limit_dict = 
{'FU': 500, 'SC': 500}\n data_dict[symbol] = limit_dict[product]\n elif product in ['TF', 'TS', 'T']:\n expireDate = pd.to_datetime(expireDate)\n change_day = self.find_tradeday(-1, date=expireDate.replace\n (day=1), exchange=exchange_id)\n if today < change_day:\n limit_dict = {'TF': 2000, 'TS': 2000, 'T': 2000}\n else:\n limit_dict = {'TF': 600, 'TS': 600, 'T': 600}\n data_dict[symbol] = limit_dict[product]\n return data_dict\n",
"<import token>\n<code token>\n\n\nclass Future:\n <docstring token>\n <function token>\n\n def get_VolumeMultiple(self, contract_lst=None):\n \"\"\"\n 获取合约单位\n \"\"\"\n info_lst = ['VolumeMultiple']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n contract_temp = contract\n if len(contract) > 3:\n contract_temp = contract[:-4]\n dict_all[contract] = {i: self.products_base_msg[contract_temp][\n i] for i in info_lst}\n return dict_all\n\n def get_PriceTick(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['PriceTick']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n contract_temp = contract\n if len(contract) > 3:\n contract_temp = contract[:-4]\n dict_all[contract] = {i: self.products_base_msg[contract_temp][\n i] for i in info_lst}\n return dict_all\n\n def get_ExchangeID(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ExchangeID']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n <function token>\n\n def get_LongMarginRatio(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['LongMarginRatio']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_ShortMarginRatio(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ShortMarginRatio']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_MaxMarketOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxMarketOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_MaxLimitOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxLimitOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_contract_info(self, contract_lst=None, info_lst=None):\n \"\"\"\n 获取主力合约\n :return:\n :info_lst:['product', 'symbol', 'ProductID', 'ExchangeID', 'MaxLimitOrderVolume', 'MinLimitOrderVolume',\n 'MaxMarketOrderVolume', 'MinMarketOrderVolume', 'LongMarginRatio', 'ShortMarginRatio', 'VolumeMultiple',\n 'ExchangeInstID', 'IsTrading']\n \"\"\"\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def __get_product_mongomsg(self):\n \"\"\"\n 获取mongo里的product数据\n :return:\n \"\"\"\n print(MONGDB_USER, MONGDB_PW, MONGDB_IP)\n with pymongo.MongoClient(\n 
f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n col = m_cl['MARKET']['product']\n df_product = pd.DataFrame(col.find({'ProductID': {'$regex':\n '^[a-zA-Z]{1,2}$'}}))\n self.products = list(df_product['ProductID'])\n self.products.remove('OI')\n df_product.index = df_product['ProductID']\n self.products_base_msg = df_product.T.to_dict()\n col = m_cl['MARKET']['instruments']\n del df_product\n df_symbols = pd.DataFrame(col.find({'symbol': {'$regex':\n '^[a-zA-Z]+[0-9]+$'}, 'ASSET_TYPE': 'Future'}))\n df_symbols['product'] = df_symbols['symbol'].str.extract(\n '(^[a-zA-Z]+)', expand=False).str.upper()\n for product, symbols in df_symbols.groupby('product'):\n symbols.index = symbols['symbol']\n symbols_dict = symbols.T.to_dict()\n self.products_symbol_msg[product] = symbols_dict\n\n def __get_main_contract(self):\n \"\"\"\n 获取主力合约\n :return:\n \"\"\"\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n db = m_cl['MARKET']\n for mark in ['', '_OI', '_VOL']:\n mark = 'main_contract' + mark\n col = db[mark]\n df = pd.DataFrame(col.find({}, {'_id': 0}))\n df = df.set_index('date').sort_index()\n df.index = pd.to_datetime(df.index)\n self.main_contract_msg[mark] = df\n\n def get_windy_code(self, code):\n \"\"\"\n 其他code转windy的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'INE', 'CFFEX': 'CFE'}\n pattern = re.compile('^[a-zA-Z]{1,2}')\n product = pattern.match(code).group(0).upper()\n exchange_id = self.products_base_msg[product]['ExchangeID']\n if exchange_id is np.NaN:\n return\n windy_code = code + '.' + change_dict[exchange_id]\n return windy_code\n\n def get_jq_code(self, code):\n \"\"\"\n 其他code转jq的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'XDCE', 'CZCE': 'XZCE', 'SHFE': 'XSGE', 'INE':\n 'XINE', 'CFFEX': 'CCFX'}\n\n def get_main_symbol(self, product=None, date=None):\n \"\"\"\n :param product: str 或者list\n :param date:\n :return:\n \"\"\"\n if product:\n product = product if isinstance(product, list) else [product]\n date = pd.to_datetime(date) if date else pd.to_datetime(datetime.\n date.today())\n df = {}\n for symbol in product:\n print(symbol)\n df[symbol] = get_dominant_future(symbol, date)[:-5]\n return df\n <function token>\n\n def get_product_trading_sessions(self, product, date: str=None):\n \"\"\"\n 获取交易的时间窗口\n :param product: 品种\n :param date: 日期, 默认今日,如果是'all',返回全周期的\n :return:\n \"\"\"\n trade_sessions = self.products_base_msg[product]['trading_session']\n if date != 'all':\n date = pd.to_datetime(date) if date else datetime.date.today()\n date = date.strftime('%Y/%m/%d')\n trade_sessions = trade_sessions.loc[(trade_sessions[\n 'DateRange_Start'] <= date) & (trade_sessions[\n 'DateRange_End'] >= date),]\n trade_sessions = trade_sessions\n return trade_sessions\n\n def __get_tradedays(self):\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'SHF', 'CFFEX': 'CFE'}\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n db = m_cl['Tradedays']\n for jz_code, mongo_code in change_dict.items():\n col = db[mongo_code]\n tradedays_df = pd.DataFrame(col.find({'isTradingday': True}))\n tradedays_df = tradedays_df[['Tradedays_str', 'Tradedays']]\n tradedays_df['Tradedays'] = pd.to_datetime(tradedays_df[\n 'Tradedays'].dt.strftime('%Y-%m-%d %H:%M:%S'))\n tradedays_df.drop_duplicates(subset=['Tradedays_str'],\n inplace=True)\n 
tradedays_df.set_index('Tradedays', inplace=True)\n tradedays_df.sort_index(inplace=True)\n self.tradedays_msg[jz_code] = tradedays_df\n\n def find_tradeday(self, day: int, date=None, exchange: str='DCE'):\n \"\"\"\n 根据date查询距此日day天的交易日\n :param date: None 默认是今日\n :param day: day为0 时为判断今天是否是交易日,返回Bool\n :param exchange:\n :return: date:str\n \"\"\"\n date = pd.to_datetime(datetime.date.today()\n ) if not date else pd.to_datetime(date).replace(hour=0, minute=\n 0, second=0)\n tradeday_df = self.tradedays_msg[exchange]\n if day == 0:\n return date.strftime('%Y-%m-%d') in tradeday_df['Tradedays_str'\n ].values\n if day > 0:\n tradeday_df = tradeday_df.loc[tradeday_df.index > date]\n return tradeday_df.iloc[day - 1]['Tradedays_str']\n if day < 0:\n tradeday_df = tradeday_df.loc[tradeday_df.index < date]\n return tradeday_df.iloc[day]['Tradedays_str']\n\n def get_limit_position(self, symbols):\n \"\"\"\n 获取合约今日的最大持仓限制\n :param symbol: jz格式\n :return:\n \"\"\"\n symbols = symbols if isinstance(symbols, list) else [symbols]\n data_dict = dict()\n for symbol in symbols:\n pattern = re.compile('^[a-zA-Z]{1,2}')\n product = pattern.match(symbol).group(0).upper()\n expireDate = self.products_symbol_msg[product][symbol]['ExpireDate'\n ]\n exchange_id = self.products_base_msg[product]['ExchangeID']\n today = datetime.date.today().strftime('%Y-%m-%d')\n if pd.to_datetime(today).strftime('%Y%m%d') >= expireDate:\n data_dict[symbol] = 'expired'\n elif product in ['SA', 'CF', 'SR', 'TA', 'OI', 'MA', 'FG', 'RM',\n 'ZC', 'PM', 'WH', 'RS', 'RI', 'JR', 'LR', 'SF', 'SM', 'CY',\n 'AP']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) -\n datetime.timedelta(days=1)).replace(day=1\n ) - datetime.timedelta(days=1)\n last_change_day = self.find_tradeday(15, date=\n lst_lst_month_last_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if product == 'AP' and symbol[-1] == '7':\n product = product + '7'\n if today <= last_change_day:\n limit_dict = {'SA':\n '单边持仓量<20万:20000手, 单边持仓量≥20万:单边持仓量×10%', 'CF':\n '单边持仓量<15万:15000手,单边持仓量≥15万:单边持仓量×10%', 'SR':\n '单边持仓量<25万:25000手,单边持仓量≥25万:单边持仓量×10%', 'TA':\n '单边持仓量<25万:25000手,单边持仓量≥25万:单边持仓量×10%', 'OI':\n '单边持仓量<10万:10000手,单边持仓量≥10万:单边持仓量×10%', 'MA':\n '单边持仓量<10万:10000手,单边持仓量≥10万:单边持仓量×10%', 'FG':\n '单边持仓量<20万:20000手,单边持仓量≥20万:单边持仓量×10%', 'RM':\n '单边持仓量<20万:20000手,单边持仓量≥20万:单边持仓量×10%', 'ZC':\n '单边持仓量<60万:60000手,单边持仓量≥60万:单边持仓量×10%', 'PM': 2000,\n 'WH': 2500, 'RS': 10000, 'RI': 7500, 'JR': 20000,\n 'LR': 20000, 'SF': 8000, 'SM': 30000, 'CY': 5000,\n 'AP': 500, 'AP7': 100}\n elif change_day > today > last_change_day:\n limit_dict = {'SA': 4000, 'CF': 4000, 'SR': 5000, 'TA':\n 10000, 'OI': 3000, 'MA': 2000, 'FG': 5000, 'RM': \n 2000, 'ZC': 20000, 'PM': 600, 'WH': 1000, 'RS': \n 1000, 'RI': 2000, 'JR': 3000, 'LR': 3000, 'SF': \n 2000, 'SM': 10000, 'CY': 500, 'AP': 100, 'AP7': 20}\n else:\n limit_dict = {'SA': 800, 'CF': 800, 'SR': 1000, 'TA': \n 5000, 'OI': 1000, 'MA': 1000, 'FG': 1000, 'RM': \n 1000, 'ZC': 4000, 'PM': 200, 'WH': 300, 'RS': 500,\n 'RI': 400, 'JR': 500, 'LR': 500, 'SF': 500, 'SM': \n 2000, 'CY': 100, 'AP': 10, 'AP7': 6}\n data_dict[symbol] = limit_dict[product]\n elif product in ['CJ']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) -\n datetime.timedelta(days=1)).replace(day=1\n ) - datetime.timedelta(days=1)\n lst_month_fst_day = (expireDate.replace(day=1) - datetime.\n timedelta(days=1)).replace(day=1)\n lst_lst_change_day = 
self.find_tradeday(-1, date=\n lst_month_fst_day, exchange=exchange_id)\n last_change_day = self.find_tradeday(15, date=\n lst_lst_month_last_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if today <= lst_lst_change_day:\n limit_dict = {'CJ': 300}\n elif last_change_day >= today > lst_lst_change_day:\n limit_dict = {'CJ': 60}\n elif change_day > today > last_change_day:\n limit_dict = {'CJ': 20}\n else:\n limit_dict = {'CJ': 6}\n data_dict[symbol] = limit_dict[product]\n elif product in ['A', 'V', 'PP', 'C', 'B', 'L', 'P', 'J', 'JM',\n 'I', 'FB', 'BB', 'CS', 'Y', 'M', 'EG']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) -\n datetime.timedelta(days=1)).replace(day=1\n ) - datetime.timedelta(days=1)\n last_change_day = self.find_tradeday(14, date=\n lst_lst_month_last_day, exchange=exchange_id)\n change_day = self.find_tradeday(-1, date=expireDate.replace\n (day=1), exchange=exchange_id)\n if today <= last_change_day:\n limit_dict = {'A':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'V':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'PP':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'C':\n '单边持仓量<=40万:40000手,单边持仓量>40万:单边持仓量×10%', 'B':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'L':\n '单边持仓量<=10万:10000手,单边持仓量>10万:单边持仓量×10%', 'P':\n '单边持仓量<=10万:10000手,单边持仓量>10万:单边持仓量×10%', 'J':\n '单边持仓量<=5万:5000手,单边持仓量>5万:单边持仓量×10%', 'JM':\n '单边持仓量<=8万:8000手,单边持仓量>8万:单边持仓量×10%', 'I':\n '单边持仓量<=40万:40000手,单边持仓量>40万:单边持仓量×10%', 'FB':\n '单边持仓量<=16万:16000手,单边持仓量>16万:单边持仓量×10%', 'BB':\n '单边持仓量<=6万:6000手,单边持仓量>6万:单边持仓量×10%', 'CS':\n '单边持仓量<=15万:15000手,单边持仓量>15万:单边持仓量×10%', 'Y':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'M':\n '单边持仓量<=40万:40000手,单边持仓量>40万:单边持仓量×10%', 'EG':\n '单边持仓量<=8万:8000手,单边持仓量>8万:单边持仓量×10%, 单边持仓>12万:3000手, 提高保证金合约价值×10%'\n }\n elif change_day >= today > last_change_day:\n limit_dict = {'A': 5000, 'V': 5000, 'PP': 5000, 'C': \n 15000, 'B': 4500, 'L': 3000, 'P': 1500, 'J': 900,\n 'JM': 1500, 'I': 6000, 'FB': 400, 'BB': 80, 'CS': \n 4500, 'Y': 3000, 'M': 7500, 'EG':\n '3000手,单边持仓>8万:1000手, 提高保证金合约价值×20% '}\n else:\n limit_dict = {'A': 2500, 'V': 2500, 'PP': 2500, 'C': \n 5000, 'B': 1500, 'L': 1000, 'P': 500, 'J': 300,\n 'JM': 500, 'I': 2000, 'FB': 100, 'BB': 20, 'CS': \n 1500, 'Y': 1000, 'M': 2500, 'EG':\n '1000手,单边持仓>8万:1000手, 提高保证金合约价值×20% '}\n data_dict[symbol] = limit_dict[product]\n elif product in ['JD']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) -\n datetime.timedelta(days=1)).replace(day=1\n ) - datetime.timedelta(days=1)\n lst_lst_change_day = self.find_tradeday(1, date=\n lst_lst_month_last_day, exchange=exchange_id)\n last_change_day = self.find_tradeday(10, date=\n lst_lst_month_last_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if today < lst_lst_change_day:\n limit_dict = {'JD': 1200}\n elif last_change_day > today >= lst_lst_change_day:\n limit_dict = {'JD': 400}\n elif change_day > today >= last_change_day:\n limit_dict = {'JD': 120}\n else:\n limit_dict = {'JD': 20}\n data_dict[symbol] = limit_dict[product]\n elif product in ['CU', 'AL', 'ZN', 'PB', 'NI', 'SN', 'RB', 'WR',\n 'HC', 'RU', 'BU', 'AU', 'AG', 'SP']:\n expireDate = pd.to_datetime(expireDate)\n lst_month_fst_day = (expireDate.replace(day=1) - datetime.\n timedelta(days=1)).replace(day=1)\n last_change_day = self.find_tradeday(-1, date=\n lst_month_fst_day, exchange=exchange_id)\n change_day = 
expireDate.replace(day=1).strftime('%Y-%m-%d')\n if today <= last_change_day:\n limit_dict = {'CU':\n '单边持仓量<16万:8000, 单边持仓量>=16万:单边持仓量×10%', 'AL':\n '单边持仓量<20万:10000, 单边持仓量>=20万:单边持仓量×10%', 'ZN':\n '单边持仓量<12万:6000, 单边持仓量>=12万:单边持仓量×10%', 'PB':\n '单边持仓量<10万:5000, 单边持仓量>=10万:单边持仓量×10%', 'NI':\n '单边持仓量<12万:6000, 单边持仓量>=12万:单边持仓量×10%', 'SN':\n '单边持仓量<3万:1500, 单边持仓量>=3万:单边持仓量×10%', 'RB':\n '单边持仓量<180万:90000, 单边持仓量>=180万:单边持仓量×10%', 'WR':\n '单边持仓量<45万:22500, 单边持仓量>=45万:单边持仓量×10%', 'HC':\n '单边持仓量<240万:120000, 单边持仓量>=240万:单边持仓量×10%', 'RU': \n 500, 'BU': 8000, 'AU': '非期货公司会员:18000, 客户:9000',\n 'AG': '非期货公司会员:18000手, 客户:9000手', 'SP': 4500}\n elif change_day > today > last_change_day:\n limit_dict = {'CU': 3000, 'AL': 3000, 'ZN': 2400, 'PB':\n 1800, 'NI': 1800, 'SN': 600, 'RB': 4500, 'WR': 1800,\n 'HC': 9000, 'RU': 150, 'BU': 1500, 'AU':\n '非期货公司会员:5400, 客户:2700', 'AG':\n '非期货公司会员:5400手, 客户:2700手', 'SP': 900}\n else:\n limit_dict = {'CU': 1000, 'AL': 1000, 'ZN': 800, 'PB': \n 600, 'NI': 600, 'SN': 200, 'RB': 900, 'WR': 360,\n 'HC': 1800, 'RU': 50, 'BU': 500, 'AU':\n '非期货公司会员:1800手, 客户:900手', 'AG':\n '非期货公司会员:1800手, 客户:900手', 'SP': 300}\n data_dict[symbol] = limit_dict[product]\n elif product in ['FU', 'SC']:\n expireDate = pd.to_datetime(expireDate)\n last_change_day = ((expireDate.replace(day=1) - datetime.\n timedelta(days=1)).replace(day=1) - datetime.timedelta(\n days=1)).replace(day=1).strftime('%Y-%m-%d')\n change_day = (expireDate.replace(day=1) - datetime.\n timedelta(days=1)).replace(day=1).strftime('%Y-%m-%d')\n if today < last_change_day:\n limit_dict = {'FU': 7500, 'SC': 3000}\n elif change_day > today >= last_change_day:\n limit_dict = {'FU': 1500, 'SC': 1500}\n else:\n limit_dict = {'FU': 500, 'SC': 500}\n data_dict[symbol] = limit_dict[product]\n elif product in ['TF', 'TS', 'T']:\n expireDate = pd.to_datetime(expireDate)\n change_day = self.find_tradeday(-1, date=expireDate.replace\n (day=1), exchange=exchange_id)\n if today < change_day:\n limit_dict = {'TF': 2000, 'TS': 2000, 'T': 2000}\n else:\n limit_dict = {'TF': 600, 'TS': 600, 'T': 600}\n data_dict[symbol] = limit_dict[product]\n return data_dict\n",
"<import token>\n<code token>\n\n\nclass Future:\n <docstring token>\n <function token>\n <function token>\n\n def get_PriceTick(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['PriceTick']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n contract_temp = contract\n if len(contract) > 3:\n contract_temp = contract[:-4]\n dict_all[contract] = {i: self.products_base_msg[contract_temp][\n i] for i in info_lst}\n return dict_all\n\n def get_ExchangeID(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ExchangeID']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n <function token>\n\n def get_LongMarginRatio(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['LongMarginRatio']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_ShortMarginRatio(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ShortMarginRatio']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_MaxMarketOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxMarketOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_MaxLimitOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxLimitOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_contract_info(self, contract_lst=None, info_lst=None):\n \"\"\"\n 获取主力合约\n :return:\n :info_lst:['product', 'symbol', 'ProductID', 'ExchangeID', 'MaxLimitOrderVolume', 'MinLimitOrderVolume',\n 'MaxMarketOrderVolume', 'MinMarketOrderVolume', 'LongMarginRatio', 'ShortMarginRatio', 'VolumeMultiple',\n 'ExchangeInstID', 'IsTrading']\n \"\"\"\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def __get_product_mongomsg(self):\n \"\"\"\n 获取mongo里的product数据\n :return:\n \"\"\"\n print(MONGDB_USER, MONGDB_PW, MONGDB_IP)\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n col = m_cl['MARKET']['product']\n df_product = pd.DataFrame(col.find({'ProductID': {'$regex':\n '^[a-zA-Z]{1,2}$'}}))\n self.products = list(df_product['ProductID'])\n self.products.remove('OI')\n df_product.index = df_product['ProductID']\n self.products_base_msg = df_product.T.to_dict()\n col = m_cl['MARKET']['instruments']\n del df_product\n df_symbols = 
pd.DataFrame(col.find({'symbol': {'$regex':\n '^[a-zA-Z]+[0-9]+$'}, 'ASSET_TYPE': 'Future'}))\n df_symbols['product'] = df_symbols['symbol'].str.extract(\n '(^[a-zA-Z]+)', expand=False).str.upper()\n for product, symbols in df_symbols.groupby('product'):\n symbols.index = symbols['symbol']\n symbols_dict = symbols.T.to_dict()\n self.products_symbol_msg[product] = symbols_dict\n\n def __get_main_contract(self):\n \"\"\"\n 获取主力合约\n :return:\n \"\"\"\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n db = m_cl['MARKET']\n for mark in ['', '_OI', '_VOL']:\n mark = 'main_contract' + mark\n col = db[mark]\n df = pd.DataFrame(col.find({}, {'_id': 0}))\n df = df.set_index('date').sort_index()\n df.index = pd.to_datetime(df.index)\n self.main_contract_msg[mark] = df\n\n def get_windy_code(self, code):\n \"\"\"\n 其他code转windy的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'INE', 'CFFEX': 'CFE'}\n pattern = re.compile('^[a-zA-Z]{1,2}')\n product = pattern.match(code).group(0).upper()\n exchange_id = self.products_base_msg[product]['ExchangeID']\n if exchange_id is np.NaN:\n return\n windy_code = code + '.' + change_dict[exchange_id]\n return windy_code\n\n def get_jq_code(self, code):\n \"\"\"\n 其他code转jq的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'XDCE', 'CZCE': 'XZCE', 'SHFE': 'XSGE', 'INE':\n 'XINE', 'CFFEX': 'CCFX'}\n\n def get_main_symbol(self, product=None, date=None):\n \"\"\"\n :param product: str 或者list\n :param date:\n :return:\n \"\"\"\n if product:\n product = product if isinstance(product, list) else [product]\n date = pd.to_datetime(date) if date else pd.to_datetime(datetime.\n date.today())\n df = {}\n for symbol in product:\n print(symbol)\n df[symbol] = get_dominant_future(symbol, date)[:-5]\n return df\n <function token>\n\n def get_product_trading_sessions(self, product, date: str=None):\n \"\"\"\n 获取交易的时间窗口\n :param product: 品种\n :param date: 日期, 默认今日,如果是'all',返回全周期的\n :return:\n \"\"\"\n trade_sessions = self.products_base_msg[product]['trading_session']\n if date != 'all':\n date = pd.to_datetime(date) if date else datetime.date.today()\n date = date.strftime('%Y/%m/%d')\n trade_sessions = trade_sessions.loc[(trade_sessions[\n 'DateRange_Start'] <= date) & (trade_sessions[\n 'DateRange_End'] >= date),]\n trade_sessions = trade_sessions\n return trade_sessions\n\n def __get_tradedays(self):\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'SHF', 'CFFEX': 'CFE'}\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n db = m_cl['Tradedays']\n for jz_code, mongo_code in change_dict.items():\n col = db[mongo_code]\n tradedays_df = pd.DataFrame(col.find({'isTradingday': True}))\n tradedays_df = tradedays_df[['Tradedays_str', 'Tradedays']]\n tradedays_df['Tradedays'] = pd.to_datetime(tradedays_df[\n 'Tradedays'].dt.strftime('%Y-%m-%d %H:%M:%S'))\n tradedays_df.drop_duplicates(subset=['Tradedays_str'],\n inplace=True)\n tradedays_df.set_index('Tradedays', inplace=True)\n tradedays_df.sort_index(inplace=True)\n self.tradedays_msg[jz_code] = tradedays_df\n\n def find_tradeday(self, day: int, date=None, exchange: str='DCE'):\n \"\"\"\n 根据date查询距此日day天的交易日\n :param date: None 默认是今日\n :param day: day为0 时为判断今天是否是交易日,返回Bool\n :param exchange:\n :return: date:str\n \"\"\"\n date = pd.to_datetime(datetime.date.today()\n ) if not date else pd.to_datetime(date).replace(hour=0, 
minute=\n 0, second=0)\n tradeday_df = self.tradedays_msg[exchange]\n if day == 0:\n return date.strftime('%Y-%m-%d') in tradeday_df['Tradedays_str'\n ].values\n if day > 0:\n tradeday_df = tradeday_df.loc[tradeday_df.index > date]\n return tradeday_df.iloc[day - 1]['Tradedays_str']\n if day < 0:\n tradeday_df = tradeday_df.loc[tradeday_df.index < date]\n return tradeday_df.iloc[day]['Tradedays_str']\n\n def get_limit_position(self, symbols):\n \"\"\"\n 获取合约今日的最大持仓限制\n :param symbol: jz格式\n :return:\n \"\"\"\n symbols = symbols if isinstance(symbols, list) else [symbols]\n data_dict = dict()\n for symbol in symbols:\n pattern = re.compile('^[a-zA-Z]{1,2}')\n product = pattern.match(symbol).group(0).upper()\n expireDate = self.products_symbol_msg[product][symbol]['ExpireDate'\n ]\n exchange_id = self.products_base_msg[product]['ExchangeID']\n today = datetime.date.today().strftime('%Y-%m-%d')\n if pd.to_datetime(today).strftime('%Y%m%d') >= expireDate:\n data_dict[symbol] = 'expired'\n elif product in ['SA', 'CF', 'SR', 'TA', 'OI', 'MA', 'FG', 'RM',\n 'ZC', 'PM', 'WH', 'RS', 'RI', 'JR', 'LR', 'SF', 'SM', 'CY',\n 'AP']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) -\n datetime.timedelta(days=1)).replace(day=1\n ) - datetime.timedelta(days=1)\n last_change_day = self.find_tradeday(15, date=\n lst_lst_month_last_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if product == 'AP' and symbol[-1] == '7':\n product = product + '7'\n if today <= last_change_day:\n limit_dict = {'SA':\n '单边持仓量<20万:20000手, 单边持仓量≥20万:单边持仓量×10%', 'CF':\n '单边持仓量<15万:15000手,单边持仓量≥15万:单边持仓量×10%', 'SR':\n '单边持仓量<25万:25000手,单边持仓量≥25万:单边持仓量×10%', 'TA':\n '单边持仓量<25万:25000手,单边持仓量≥25万:单边持仓量×10%', 'OI':\n '单边持仓量<10万:10000手,单边持仓量≥10万:单边持仓量×10%', 'MA':\n '单边持仓量<10万:10000手,单边持仓量≥10万:单边持仓量×10%', 'FG':\n '单边持仓量<20万:20000手,单边持仓量≥20万:单边持仓量×10%', 'RM':\n '单边持仓量<20万:20000手,单边持仓量≥20万:单边持仓量×10%', 'ZC':\n '单边持仓量<60万:60000手,单边持仓量≥60万:单边持仓量×10%', 'PM': 2000,\n 'WH': 2500, 'RS': 10000, 'RI': 7500, 'JR': 20000,\n 'LR': 20000, 'SF': 8000, 'SM': 30000, 'CY': 5000,\n 'AP': 500, 'AP7': 100}\n elif change_day > today > last_change_day:\n limit_dict = {'SA': 4000, 'CF': 4000, 'SR': 5000, 'TA':\n 10000, 'OI': 3000, 'MA': 2000, 'FG': 5000, 'RM': \n 2000, 'ZC': 20000, 'PM': 600, 'WH': 1000, 'RS': \n 1000, 'RI': 2000, 'JR': 3000, 'LR': 3000, 'SF': \n 2000, 'SM': 10000, 'CY': 500, 'AP': 100, 'AP7': 20}\n else:\n limit_dict = {'SA': 800, 'CF': 800, 'SR': 1000, 'TA': \n 5000, 'OI': 1000, 'MA': 1000, 'FG': 1000, 'RM': \n 1000, 'ZC': 4000, 'PM': 200, 'WH': 300, 'RS': 500,\n 'RI': 400, 'JR': 500, 'LR': 500, 'SF': 500, 'SM': \n 2000, 'CY': 100, 'AP': 10, 'AP7': 6}\n data_dict[symbol] = limit_dict[product]\n elif product in ['CJ']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) -\n datetime.timedelta(days=1)).replace(day=1\n ) - datetime.timedelta(days=1)\n lst_month_fst_day = (expireDate.replace(day=1) - datetime.\n timedelta(days=1)).replace(day=1)\n lst_lst_change_day = self.find_tradeday(-1, date=\n lst_month_fst_day, exchange=exchange_id)\n last_change_day = self.find_tradeday(15, date=\n lst_lst_month_last_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if today <= lst_lst_change_day:\n limit_dict = {'CJ': 300}\n elif last_change_day >= today > lst_lst_change_day:\n limit_dict = {'CJ': 60}\n elif change_day > today > last_change_day:\n limit_dict = {'CJ': 20}\n else:\n 
limit_dict = {'CJ': 6}\n data_dict[symbol] = limit_dict[product]\n elif product in ['A', 'V', 'PP', 'C', 'B', 'L', 'P', 'J', 'JM',\n 'I', 'FB', 'BB', 'CS', 'Y', 'M', 'EG']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) -\n datetime.timedelta(days=1)).replace(day=1\n ) - datetime.timedelta(days=1)\n last_change_day = self.find_tradeday(14, date=\n lst_lst_month_last_day, exchange=exchange_id)\n change_day = self.find_tradeday(-1, date=expireDate.replace\n (day=1), exchange=exchange_id)\n if today <= last_change_day:\n limit_dict = {'A':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'V':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'PP':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'C':\n '单边持仓量<=40万:40000手,单边持仓量>40万:单边持仓量×10%', 'B':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'L':\n '单边持仓量<=10万:10000手,单边持仓量>10万:单边持仓量×10%', 'P':\n '单边持仓量<=10万:10000手,单边持仓量>10万:单边持仓量×10%', 'J':\n '单边持仓量<=5万:5000手,单边持仓量>5万:单边持仓量×10%', 'JM':\n '单边持仓量<=8万:8000手,单边持仓量>8万:单边持仓量×10%', 'I':\n '单边持仓量<=40万:40000手,单边持仓量>40万:单边持仓量×10%', 'FB':\n '单边持仓量<=16万:16000手,单边持仓量>16万:单边持仓量×10%', 'BB':\n '单边持仓量<=6万:6000手,单边持仓量>6万:单边持仓量×10%', 'CS':\n '单边持仓量<=15万:15000手,单边持仓量>15万:单边持仓量×10%', 'Y':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'M':\n '单边持仓量<=40万:40000手,单边持仓量>40万:单边持仓量×10%', 'EG':\n '单边持仓量<=8万:8000手,单边持仓量>8万:单边持仓量×10%, 单边持仓>12万:3000手, 提高保证金合约价值×10%'\n }\n elif change_day >= today > last_change_day:\n limit_dict = {'A': 5000, 'V': 5000, 'PP': 5000, 'C': \n 15000, 'B': 4500, 'L': 3000, 'P': 1500, 'J': 900,\n 'JM': 1500, 'I': 6000, 'FB': 400, 'BB': 80, 'CS': \n 4500, 'Y': 3000, 'M': 7500, 'EG':\n '3000手,单边持仓>8万:1000手, 提高保证金合约价值×20% '}\n else:\n limit_dict = {'A': 2500, 'V': 2500, 'PP': 2500, 'C': \n 5000, 'B': 1500, 'L': 1000, 'P': 500, 'J': 300,\n 'JM': 500, 'I': 2000, 'FB': 100, 'BB': 20, 'CS': \n 1500, 'Y': 1000, 'M': 2500, 'EG':\n '1000手,单边持仓>8万:1000手, 提高保证金合约价值×20% '}\n data_dict[symbol] = limit_dict[product]\n elif product in ['JD']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) -\n datetime.timedelta(days=1)).replace(day=1\n ) - datetime.timedelta(days=1)\n lst_lst_change_day = self.find_tradeday(1, date=\n lst_lst_month_last_day, exchange=exchange_id)\n last_change_day = self.find_tradeday(10, date=\n lst_lst_month_last_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if today < lst_lst_change_day:\n limit_dict = {'JD': 1200}\n elif last_change_day > today >= lst_lst_change_day:\n limit_dict = {'JD': 400}\n elif change_day > today >= last_change_day:\n limit_dict = {'JD': 120}\n else:\n limit_dict = {'JD': 20}\n data_dict[symbol] = limit_dict[product]\n elif product in ['CU', 'AL', 'ZN', 'PB', 'NI', 'SN', 'RB', 'WR',\n 'HC', 'RU', 'BU', 'AU', 'AG', 'SP']:\n expireDate = pd.to_datetime(expireDate)\n lst_month_fst_day = (expireDate.replace(day=1) - datetime.\n timedelta(days=1)).replace(day=1)\n last_change_day = self.find_tradeday(-1, date=\n lst_month_fst_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if today <= last_change_day:\n limit_dict = {'CU':\n '单边持仓量<16万:8000, 单边持仓量>=16万:单边持仓量×10%', 'AL':\n '单边持仓量<20万:10000, 单边持仓量>=20万:单边持仓量×10%', 'ZN':\n '单边持仓量<12万:6000, 单边持仓量>=12万:单边持仓量×10%', 'PB':\n '单边持仓量<10万:5000, 单边持仓量>=10万:单边持仓量×10%', 'NI':\n '单边持仓量<12万:6000, 单边持仓量>=12万:单边持仓量×10%', 'SN':\n '单边持仓量<3万:1500, 单边持仓量>=3万:单边持仓量×10%', 'RB':\n '单边持仓量<180万:90000, 单边持仓量>=180万:单边持仓量×10%', 'WR':\n '单边持仓量<45万:22500, 单边持仓量>=45万:单边持仓量×10%', 'HC':\n 
'单边持仓量<240万:120000, 单边持仓量>=240万:单边持仓量×10%', 'RU': \n 500, 'BU': 8000, 'AU': '非期货公司会员:18000, 客户:9000',\n 'AG': '非期货公司会员:18000手, 客户:9000手', 'SP': 4500}\n elif change_day > today > last_change_day:\n limit_dict = {'CU': 3000, 'AL': 3000, 'ZN': 2400, 'PB':\n 1800, 'NI': 1800, 'SN': 600, 'RB': 4500, 'WR': 1800,\n 'HC': 9000, 'RU': 150, 'BU': 1500, 'AU':\n '非期货公司会员:5400, 客户:2700', 'AG':\n '非期货公司会员:5400手, 客户:2700手', 'SP': 900}\n else:\n limit_dict = {'CU': 1000, 'AL': 1000, 'ZN': 800, 'PB': \n 600, 'NI': 600, 'SN': 200, 'RB': 900, 'WR': 360,\n 'HC': 1800, 'RU': 50, 'BU': 500, 'AU':\n '非期货公司会员:1800手, 客户:900手', 'AG':\n '非期货公司会员:1800手, 客户:900手', 'SP': 300}\n data_dict[symbol] = limit_dict[product]\n elif product in ['FU', 'SC']:\n expireDate = pd.to_datetime(expireDate)\n last_change_day = ((expireDate.replace(day=1) - datetime.\n timedelta(days=1)).replace(day=1) - datetime.timedelta(\n days=1)).replace(day=1).strftime('%Y-%m-%d')\n change_day = (expireDate.replace(day=1) - datetime.\n timedelta(days=1)).replace(day=1).strftime('%Y-%m-%d')\n if today < last_change_day:\n limit_dict = {'FU': 7500, 'SC': 3000}\n elif change_day > today >= last_change_day:\n limit_dict = {'FU': 1500, 'SC': 1500}\n else:\n limit_dict = {'FU': 500, 'SC': 500}\n data_dict[symbol] = limit_dict[product]\n elif product in ['TF', 'TS', 'T']:\n expireDate = pd.to_datetime(expireDate)\n change_day = self.find_tradeday(-1, date=expireDate.replace\n (day=1), exchange=exchange_id)\n if today < change_day:\n limit_dict = {'TF': 2000, 'TS': 2000, 'T': 2000}\n else:\n limit_dict = {'TF': 600, 'TS': 600, 'T': 600}\n data_dict[symbol] = limit_dict[product]\n return data_dict\n",
"<import token>\n<code token>\n\n\nclass Future:\n <docstring token>\n <function token>\n <function token>\n\n def get_PriceTick(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['PriceTick']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n contract_temp = contract\n if len(contract) > 3:\n contract_temp = contract[:-4]\n dict_all[contract] = {i: self.products_base_msg[contract_temp][\n i] for i in info_lst}\n return dict_all\n\n def get_ExchangeID(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ExchangeID']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n <function token>\n\n def get_LongMarginRatio(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['LongMarginRatio']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_ShortMarginRatio(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ShortMarginRatio']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_MaxMarketOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxMarketOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_MaxLimitOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxLimitOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_contract_info(self, contract_lst=None, info_lst=None):\n \"\"\"\n 获取主力合约\n :return:\n :info_lst:['product', 'symbol', 'ProductID', 'ExchangeID', 'MaxLimitOrderVolume', 'MinLimitOrderVolume',\n 'MaxMarketOrderVolume', 'MinMarketOrderVolume', 'LongMarginRatio', 'ShortMarginRatio', 'VolumeMultiple',\n 'ExchangeInstID', 'IsTrading']\n \"\"\"\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def __get_product_mongomsg(self):\n \"\"\"\n 获取mongo里的product数据\n :return:\n \"\"\"\n print(MONGDB_USER, MONGDB_PW, MONGDB_IP)\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n col = m_cl['MARKET']['product']\n df_product = pd.DataFrame(col.find({'ProductID': {'$regex':\n '^[a-zA-Z]{1,2}$'}}))\n self.products = list(df_product['ProductID'])\n self.products.remove('OI')\n df_product.index = df_product['ProductID']\n self.products_base_msg = df_product.T.to_dict()\n col = m_cl['MARKET']['instruments']\n del df_product\n df_symbols = 
pd.DataFrame(col.find({'symbol': {'$regex':\n '^[a-zA-Z]+[0-9]+$'}, 'ASSET_TYPE': 'Future'}))\n df_symbols['product'] = df_symbols['symbol'].str.extract(\n '(^[a-zA-Z]+)', expand=False).str.upper()\n for product, symbols in df_symbols.groupby('product'):\n symbols.index = symbols['symbol']\n symbols_dict = symbols.T.to_dict()\n self.products_symbol_msg[product] = symbols_dict\n\n def __get_main_contract(self):\n \"\"\"\n 获取主力合约\n :return:\n \"\"\"\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n db = m_cl['MARKET']\n for mark in ['', '_OI', '_VOL']:\n mark = 'main_contract' + mark\n col = db[mark]\n df = pd.DataFrame(col.find({}, {'_id': 0}))\n df = df.set_index('date').sort_index()\n df.index = pd.to_datetime(df.index)\n self.main_contract_msg[mark] = df\n\n def get_windy_code(self, code):\n \"\"\"\n 其他code转windy的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'INE', 'CFFEX': 'CFE'}\n pattern = re.compile('^[a-zA-Z]{1,2}')\n product = pattern.match(code).group(0).upper()\n exchange_id = self.products_base_msg[product]['ExchangeID']\n if exchange_id is np.NaN:\n return\n windy_code = code + '.' + change_dict[exchange_id]\n return windy_code\n\n def get_jq_code(self, code):\n \"\"\"\n 其他code转jq的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'XDCE', 'CZCE': 'XZCE', 'SHFE': 'XSGE', 'INE':\n 'XINE', 'CFFEX': 'CCFX'}\n\n def get_main_symbol(self, product=None, date=None):\n \"\"\"\n :param product: str 或者list\n :param date:\n :return:\n \"\"\"\n if product:\n product = product if isinstance(product, list) else [product]\n date = pd.to_datetime(date) if date else pd.to_datetime(datetime.\n date.today())\n df = {}\n for symbol in product:\n print(symbol)\n df[symbol] = get_dominant_future(symbol, date)[:-5]\n return df\n <function token>\n <function token>\n\n def __get_tradedays(self):\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'SHF', 'CFFEX': 'CFE'}\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n db = m_cl['Tradedays']\n for jz_code, mongo_code in change_dict.items():\n col = db[mongo_code]\n tradedays_df = pd.DataFrame(col.find({'isTradingday': True}))\n tradedays_df = tradedays_df[['Tradedays_str', 'Tradedays']]\n tradedays_df['Tradedays'] = pd.to_datetime(tradedays_df[\n 'Tradedays'].dt.strftime('%Y-%m-%d %H:%M:%S'))\n tradedays_df.drop_duplicates(subset=['Tradedays_str'],\n inplace=True)\n tradedays_df.set_index('Tradedays', inplace=True)\n tradedays_df.sort_index(inplace=True)\n self.tradedays_msg[jz_code] = tradedays_df\n\n def find_tradeday(self, day: int, date=None, exchange: str='DCE'):\n \"\"\"\n 根据date查询距此日day天的交易日\n :param date: None 默认是今日\n :param day: day为0 时为判断今天是否是交易日,返回Bool\n :param exchange:\n :return: date:str\n \"\"\"\n date = pd.to_datetime(datetime.date.today()\n ) if not date else pd.to_datetime(date).replace(hour=0, minute=\n 0, second=0)\n tradeday_df = self.tradedays_msg[exchange]\n if day == 0:\n return date.strftime('%Y-%m-%d') in tradeday_df['Tradedays_str'\n ].values\n if day > 0:\n tradeday_df = tradeday_df.loc[tradeday_df.index > date]\n return tradeday_df.iloc[day - 1]['Tradedays_str']\n if day < 0:\n tradeday_df = tradeday_df.loc[tradeday_df.index < date]\n return tradeday_df.iloc[day]['Tradedays_str']\n\n def get_limit_position(self, symbols):\n \"\"\"\n 获取合约今日的最大持仓限制\n :param symbol: jz格式\n :return:\n \"\"\"\n symbols = symbols if 
isinstance(symbols, list) else [symbols]\n data_dict = dict()\n for symbol in symbols:\n pattern = re.compile('^[a-zA-Z]{1,2}')\n product = pattern.match(symbol).group(0).upper()\n expireDate = self.products_symbol_msg[product][symbol]['ExpireDate'\n ]\n exchange_id = self.products_base_msg[product]['ExchangeID']\n today = datetime.date.today().strftime('%Y-%m-%d')\n if pd.to_datetime(today).strftime('%Y%m%d') >= expireDate:\n data_dict[symbol] = 'expired'\n elif product in ['SA', 'CF', 'SR', 'TA', 'OI', 'MA', 'FG', 'RM',\n 'ZC', 'PM', 'WH', 'RS', 'RI', 'JR', 'LR', 'SF', 'SM', 'CY',\n 'AP']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) -\n datetime.timedelta(days=1)).replace(day=1\n ) - datetime.timedelta(days=1)\n last_change_day = self.find_tradeday(15, date=\n lst_lst_month_last_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if product == 'AP' and symbol[-1] == '7':\n product = product + '7'\n if today <= last_change_day:\n limit_dict = {'SA':\n '单边持仓量<20万:20000手, 单边持仓量≥20万:单边持仓量×10%', 'CF':\n '单边持仓量<15万:15000手,单边持仓量≥15万:单边持仓量×10%', 'SR':\n '单边持仓量<25万:25000手,单边持仓量≥25万:单边持仓量×10%', 'TA':\n '单边持仓量<25万:25000手,单边持仓量≥25万:单边持仓量×10%', 'OI':\n '单边持仓量<10万:10000手,单边持仓量≥10万:单边持仓量×10%', 'MA':\n '单边持仓量<10万:10000手,单边持仓量≥10万:单边持仓量×10%', 'FG':\n '单边持仓量<20万:20000手,单边持仓量≥20万:单边持仓量×10%', 'RM':\n '单边持仓量<20万:20000手,单边持仓量≥20万:单边持仓量×10%', 'ZC':\n '单边持仓量<60万:60000手,单边持仓量≥60万:单边持仓量×10%', 'PM': 2000,\n 'WH': 2500, 'RS': 10000, 'RI': 7500, 'JR': 20000,\n 'LR': 20000, 'SF': 8000, 'SM': 30000, 'CY': 5000,\n 'AP': 500, 'AP7': 100}\n elif change_day > today > last_change_day:\n limit_dict = {'SA': 4000, 'CF': 4000, 'SR': 5000, 'TA':\n 10000, 'OI': 3000, 'MA': 2000, 'FG': 5000, 'RM': \n 2000, 'ZC': 20000, 'PM': 600, 'WH': 1000, 'RS': \n 1000, 'RI': 2000, 'JR': 3000, 'LR': 3000, 'SF': \n 2000, 'SM': 10000, 'CY': 500, 'AP': 100, 'AP7': 20}\n else:\n limit_dict = {'SA': 800, 'CF': 800, 'SR': 1000, 'TA': \n 5000, 'OI': 1000, 'MA': 1000, 'FG': 1000, 'RM': \n 1000, 'ZC': 4000, 'PM': 200, 'WH': 300, 'RS': 500,\n 'RI': 400, 'JR': 500, 'LR': 500, 'SF': 500, 'SM': \n 2000, 'CY': 100, 'AP': 10, 'AP7': 6}\n data_dict[symbol] = limit_dict[product]\n elif product in ['CJ']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) -\n datetime.timedelta(days=1)).replace(day=1\n ) - datetime.timedelta(days=1)\n lst_month_fst_day = (expireDate.replace(day=1) - datetime.\n timedelta(days=1)).replace(day=1)\n lst_lst_change_day = self.find_tradeday(-1, date=\n lst_month_fst_day, exchange=exchange_id)\n last_change_day = self.find_tradeday(15, date=\n lst_lst_month_last_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if today <= lst_lst_change_day:\n limit_dict = {'CJ': 300}\n elif last_change_day >= today > lst_lst_change_day:\n limit_dict = {'CJ': 60}\n elif change_day > today > last_change_day:\n limit_dict = {'CJ': 20}\n else:\n limit_dict = {'CJ': 6}\n data_dict[symbol] = limit_dict[product]\n elif product in ['A', 'V', 'PP', 'C', 'B', 'L', 'P', 'J', 'JM',\n 'I', 'FB', 'BB', 'CS', 'Y', 'M', 'EG']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) -\n datetime.timedelta(days=1)).replace(day=1\n ) - datetime.timedelta(days=1)\n last_change_day = self.find_tradeday(14, date=\n lst_lst_month_last_day, exchange=exchange_id)\n change_day = self.find_tradeday(-1, date=expireDate.replace\n (day=1), exchange=exchange_id)\n 
if today <= last_change_day:\n limit_dict = {'A':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'V':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'PP':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'C':\n '单边持仓量<=40万:40000手,单边持仓量>40万:单边持仓量×10%', 'B':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'L':\n '单边持仓量<=10万:10000手,单边持仓量>10万:单边持仓量×10%', 'P':\n '单边持仓量<=10万:10000手,单边持仓量>10万:单边持仓量×10%', 'J':\n '单边持仓量<=5万:5000手,单边持仓量>5万:单边持仓量×10%', 'JM':\n '单边持仓量<=8万:8000手,单边持仓量>8万:单边持仓量×10%', 'I':\n '单边持仓量<=40万:40000手,单边持仓量>40万:单边持仓量×10%', 'FB':\n '单边持仓量<=16万:16000手,单边持仓量>16万:单边持仓量×10%', 'BB':\n '单边持仓量<=6万:6000手,单边持仓量>6万:单边持仓量×10%', 'CS':\n '单边持仓量<=15万:15000手,单边持仓量>15万:单边持仓量×10%', 'Y':\n '单边持仓量<=20万:20000手,单边持仓量>20万:单边持仓量×10%', 'M':\n '单边持仓量<=40万:40000手,单边持仓量>40万:单边持仓量×10%', 'EG':\n '单边持仓量<=8万:8000手,单边持仓量>8万:单边持仓量×10%, 单边持仓>12万:3000手, 提高保证金合约价值×10%'\n }\n elif change_day >= today > last_change_day:\n limit_dict = {'A': 5000, 'V': 5000, 'PP': 5000, 'C': \n 15000, 'B': 4500, 'L': 3000, 'P': 1500, 'J': 900,\n 'JM': 1500, 'I': 6000, 'FB': 400, 'BB': 80, 'CS': \n 4500, 'Y': 3000, 'M': 7500, 'EG':\n '3000手,单边持仓>8万:1000手, 提高保证金合约价值×20% '}\n else:\n limit_dict = {'A': 2500, 'V': 2500, 'PP': 2500, 'C': \n 5000, 'B': 1500, 'L': 1000, 'P': 500, 'J': 300,\n 'JM': 500, 'I': 2000, 'FB': 100, 'BB': 20, 'CS': \n 1500, 'Y': 1000, 'M': 2500, 'EG':\n '1000手,单边持仓>8万:1000手, 提高保证金合约价值×20% '}\n data_dict[symbol] = limit_dict[product]\n elif product in ['JD']:\n expireDate = pd.to_datetime(expireDate)\n lst_lst_month_last_day = (expireDate.replace(day=1) -\n datetime.timedelta(days=1)).replace(day=1\n ) - datetime.timedelta(days=1)\n lst_lst_change_day = self.find_tradeday(1, date=\n lst_lst_month_last_day, exchange=exchange_id)\n last_change_day = self.find_tradeday(10, date=\n lst_lst_month_last_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if today < lst_lst_change_day:\n limit_dict = {'JD': 1200}\n elif last_change_day > today >= lst_lst_change_day:\n limit_dict = {'JD': 400}\n elif change_day > today >= last_change_day:\n limit_dict = {'JD': 120}\n else:\n limit_dict = {'JD': 20}\n data_dict[symbol] = limit_dict[product]\n elif product in ['CU', 'AL', 'ZN', 'PB', 'NI', 'SN', 'RB', 'WR',\n 'HC', 'RU', 'BU', 'AU', 'AG', 'SP']:\n expireDate = pd.to_datetime(expireDate)\n lst_month_fst_day = (expireDate.replace(day=1) - datetime.\n timedelta(days=1)).replace(day=1)\n last_change_day = self.find_tradeday(-1, date=\n lst_month_fst_day, exchange=exchange_id)\n change_day = expireDate.replace(day=1).strftime('%Y-%m-%d')\n if today <= last_change_day:\n limit_dict = {'CU':\n '单边持仓量<16万:8000, 单边持仓量>=16万:单边持仓量×10%', 'AL':\n '单边持仓量<20万:10000, 单边持仓量>=20万:单边持仓量×10%', 'ZN':\n '单边持仓量<12万:6000, 单边持仓量>=12万:单边持仓量×10%', 'PB':\n '单边持仓量<10万:5000, 单边持仓量>=10万:单边持仓量×10%', 'NI':\n '单边持仓量<12万:6000, 单边持仓量>=12万:单边持仓量×10%', 'SN':\n '单边持仓量<3万:1500, 单边持仓量>=3万:单边持仓量×10%', 'RB':\n '单边持仓量<180万:90000, 单边持仓量>=180万:单边持仓量×10%', 'WR':\n '单边持仓量<45万:22500, 单边持仓量>=45万:单边持仓量×10%', 'HC':\n '单边持仓量<240万:120000, 单边持仓量>=240万:单边持仓量×10%', 'RU': \n 500, 'BU': 8000, 'AU': '非期货公司会员:18000, 客户:9000',\n 'AG': '非期货公司会员:18000手, 客户:9000手', 'SP': 4500}\n elif change_day > today > last_change_day:\n limit_dict = {'CU': 3000, 'AL': 3000, 'ZN': 2400, 'PB':\n 1800, 'NI': 1800, 'SN': 600, 'RB': 4500, 'WR': 1800,\n 'HC': 9000, 'RU': 150, 'BU': 1500, 'AU':\n '非期货公司会员:5400, 客户:2700', 'AG':\n '非期货公司会员:5400手, 客户:2700手', 'SP': 900}\n else:\n limit_dict = {'CU': 1000, 'AL': 1000, 'ZN': 800, 'PB': \n 600, 'NI': 600, 'SN': 200, 'RB': 900, 'WR': 360,\n 'HC': 
1800, 'RU': 50, 'BU': 500, 'AU':\n '非期货公司会员:1800手, 客户:900手', 'AG':\n '非期货公司会员:1800手, 客户:900手', 'SP': 300}\n data_dict[symbol] = limit_dict[product]\n elif product in ['FU', 'SC']:\n expireDate = pd.to_datetime(expireDate)\n last_change_day = ((expireDate.replace(day=1) - datetime.\n timedelta(days=1)).replace(day=1) - datetime.timedelta(\n days=1)).replace(day=1).strftime('%Y-%m-%d')\n change_day = (expireDate.replace(day=1) - datetime.\n timedelta(days=1)).replace(day=1).strftime('%Y-%m-%d')\n if today < last_change_day:\n limit_dict = {'FU': 7500, 'SC': 3000}\n elif change_day > today >= last_change_day:\n limit_dict = {'FU': 1500, 'SC': 1500}\n else:\n limit_dict = {'FU': 500, 'SC': 500}\n data_dict[symbol] = limit_dict[product]\n elif product in ['TF', 'TS', 'T']:\n expireDate = pd.to_datetime(expireDate)\n change_day = self.find_tradeday(-1, date=expireDate.replace\n (day=1), exchange=exchange_id)\n if today < change_day:\n limit_dict = {'TF': 2000, 'TS': 2000, 'T': 2000}\n else:\n limit_dict = {'TF': 600, 'TS': 600, 'T': 600}\n data_dict[symbol] = limit_dict[product]\n return data_dict\n",
"<import token>\n<code token>\n\n\nclass Future:\n <docstring token>\n <function token>\n <function token>\n\n def get_PriceTick(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['PriceTick']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n contract_temp = contract\n if len(contract) > 3:\n contract_temp = contract[:-4]\n dict_all[contract] = {i: self.products_base_msg[contract_temp][\n i] for i in info_lst}\n return dict_all\n\n def get_ExchangeID(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ExchangeID']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n <function token>\n\n def get_LongMarginRatio(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['LongMarginRatio']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_ShortMarginRatio(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ShortMarginRatio']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_MaxMarketOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxMarketOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_MaxLimitOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxLimitOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_contract_info(self, contract_lst=None, info_lst=None):\n \"\"\"\n 获取主力合约\n :return:\n :info_lst:['product', 'symbol', 'ProductID', 'ExchangeID', 'MaxLimitOrderVolume', 'MinLimitOrderVolume',\n 'MaxMarketOrderVolume', 'MinMarketOrderVolume', 'LongMarginRatio', 'ShortMarginRatio', 'VolumeMultiple',\n 'ExchangeInstID', 'IsTrading']\n \"\"\"\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def __get_product_mongomsg(self):\n \"\"\"\n 获取mongo里的product数据\n :return:\n \"\"\"\n print(MONGDB_USER, MONGDB_PW, MONGDB_IP)\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n col = m_cl['MARKET']['product']\n df_product = pd.DataFrame(col.find({'ProductID': {'$regex':\n '^[a-zA-Z]{1,2}$'}}))\n self.products = list(df_product['ProductID'])\n self.products.remove('OI')\n df_product.index = df_product['ProductID']\n self.products_base_msg = df_product.T.to_dict()\n col = m_cl['MARKET']['instruments']\n del df_product\n df_symbols = 
pd.DataFrame(col.find({'symbol': {'$regex':\n '^[a-zA-Z]+[0-9]+$'}, 'ASSET_TYPE': 'Future'}))\n df_symbols['product'] = df_symbols['symbol'].str.extract(\n '(^[a-zA-Z]+)', expand=False).str.upper()\n for product, symbols in df_symbols.groupby('product'):\n symbols.index = symbols['symbol']\n symbols_dict = symbols.T.to_dict()\n self.products_symbol_msg[product] = symbols_dict\n\n def __get_main_contract(self):\n \"\"\"\n 获取主力合约\n :return:\n \"\"\"\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n db = m_cl['MARKET']\n for mark in ['', '_OI', '_VOL']:\n mark = 'main_contract' + mark\n col = db[mark]\n df = pd.DataFrame(col.find({}, {'_id': 0}))\n df = df.set_index('date').sort_index()\n df.index = pd.to_datetime(df.index)\n self.main_contract_msg[mark] = df\n\n def get_windy_code(self, code):\n \"\"\"\n 其他code转windy的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'INE', 'CFFEX': 'CFE'}\n pattern = re.compile('^[a-zA-Z]{1,2}')\n product = pattern.match(code).group(0).upper()\n exchange_id = self.products_base_msg[product]['ExchangeID']\n if exchange_id is np.NaN:\n return\n windy_code = code + '.' + change_dict[exchange_id]\n return windy_code\n\n def get_jq_code(self, code):\n \"\"\"\n 其他code转jq的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'XDCE', 'CZCE': 'XZCE', 'SHFE': 'XSGE', 'INE':\n 'XINE', 'CFFEX': 'CCFX'}\n\n def get_main_symbol(self, product=None, date=None):\n \"\"\"\n :param product: str 或者list\n :param date:\n :return:\n \"\"\"\n if product:\n product = product if isinstance(product, list) else [product]\n date = pd.to_datetime(date) if date else pd.to_datetime(datetime.\n date.today())\n df = {}\n for symbol in product:\n print(symbol)\n df[symbol] = get_dominant_future(symbol, date)[:-5]\n return df\n <function token>\n <function token>\n\n def __get_tradedays(self):\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'SHF', 'CFFEX': 'CFE'}\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n db = m_cl['Tradedays']\n for jz_code, mongo_code in change_dict.items():\n col = db[mongo_code]\n tradedays_df = pd.DataFrame(col.find({'isTradingday': True}))\n tradedays_df = tradedays_df[['Tradedays_str', 'Tradedays']]\n tradedays_df['Tradedays'] = pd.to_datetime(tradedays_df[\n 'Tradedays'].dt.strftime('%Y-%m-%d %H:%M:%S'))\n tradedays_df.drop_duplicates(subset=['Tradedays_str'],\n inplace=True)\n tradedays_df.set_index('Tradedays', inplace=True)\n tradedays_df.sort_index(inplace=True)\n self.tradedays_msg[jz_code] = tradedays_df\n\n def find_tradeday(self, day: int, date=None, exchange: str='DCE'):\n \"\"\"\n 根据date查询距此日day天的交易日\n :param date: None 默认是今日\n :param day: day为0 时为判断今天是否是交易日,返回Bool\n :param exchange:\n :return: date:str\n \"\"\"\n date = pd.to_datetime(datetime.date.today()\n ) if not date else pd.to_datetime(date).replace(hour=0, minute=\n 0, second=0)\n tradeday_df = self.tradedays_msg[exchange]\n if day == 0:\n return date.strftime('%Y-%m-%d') in tradeday_df['Tradedays_str'\n ].values\n if day > 0:\n tradeday_df = tradeday_df.loc[tradeday_df.index > date]\n return tradeday_df.iloc[day - 1]['Tradedays_str']\n if day < 0:\n tradeday_df = tradeday_df.loc[tradeday_df.index < date]\n return tradeday_df.iloc[day]['Tradedays_str']\n <function token>\n",
"<import token>\n<code token>\n\n\nclass Future:\n <docstring token>\n <function token>\n <function token>\n\n def get_PriceTick(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['PriceTick']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n contract_temp = contract\n if len(contract) > 3:\n contract_temp = contract[:-4]\n dict_all[contract] = {i: self.products_base_msg[contract_temp][\n i] for i in info_lst}\n return dict_all\n\n def get_ExchangeID(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ExchangeID']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n <function token>\n\n def get_LongMarginRatio(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['LongMarginRatio']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_ShortMarginRatio(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ShortMarginRatio']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_MaxMarketOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxMarketOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_MaxLimitOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxLimitOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_contract_info(self, contract_lst=None, info_lst=None):\n \"\"\"\n 获取主力合约\n :return:\n :info_lst:['product', 'symbol', 'ProductID', 'ExchangeID', 'MaxLimitOrderVolume', 'MinLimitOrderVolume',\n 'MaxMarketOrderVolume', 'MinMarketOrderVolume', 'LongMarginRatio', 'ShortMarginRatio', 'VolumeMultiple',\n 'ExchangeInstID', 'IsTrading']\n \"\"\"\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def __get_product_mongomsg(self):\n \"\"\"\n 获取mongo里的product数据\n :return:\n \"\"\"\n print(MONGDB_USER, MONGDB_PW, MONGDB_IP)\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n col = m_cl['MARKET']['product']\n df_product = pd.DataFrame(col.find({'ProductID': {'$regex':\n '^[a-zA-Z]{1,2}$'}}))\n self.products = list(df_product['ProductID'])\n self.products.remove('OI')\n df_product.index = df_product['ProductID']\n self.products_base_msg = df_product.T.to_dict()\n col = m_cl['MARKET']['instruments']\n del df_product\n df_symbols = 
pd.DataFrame(col.find({'symbol': {'$regex':\n '^[a-zA-Z]+[0-9]+$'}, 'ASSET_TYPE': 'Future'}))\n df_symbols['product'] = df_symbols['symbol'].str.extract(\n '(^[a-zA-Z]+)', expand=False).str.upper()\n for product, symbols in df_symbols.groupby('product'):\n symbols.index = symbols['symbol']\n symbols_dict = symbols.T.to_dict()\n self.products_symbol_msg[product] = symbols_dict\n <function token>\n\n def get_windy_code(self, code):\n \"\"\"\n 其他code转windy的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'INE', 'CFFEX': 'CFE'}\n pattern = re.compile('^[a-zA-Z]{1,2}')\n product = pattern.match(code).group(0).upper()\n exchange_id = self.products_base_msg[product]['ExchangeID']\n if exchange_id is np.NaN:\n return\n windy_code = code + '.' + change_dict[exchange_id]\n return windy_code\n\n def get_jq_code(self, code):\n \"\"\"\n 其他code转jq的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'XDCE', 'CZCE': 'XZCE', 'SHFE': 'XSGE', 'INE':\n 'XINE', 'CFFEX': 'CCFX'}\n\n def get_main_symbol(self, product=None, date=None):\n \"\"\"\n :param product: str 或者list\n :param date:\n :return:\n \"\"\"\n if product:\n product = product if isinstance(product, list) else [product]\n date = pd.to_datetime(date) if date else pd.to_datetime(datetime.\n date.today())\n df = {}\n for symbol in product:\n print(symbol)\n df[symbol] = get_dominant_future(symbol, date)[:-5]\n return df\n <function token>\n <function token>\n\n def __get_tradedays(self):\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'SHF', 'CFFEX': 'CFE'}\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n db = m_cl['Tradedays']\n for jz_code, mongo_code in change_dict.items():\n col = db[mongo_code]\n tradedays_df = pd.DataFrame(col.find({'isTradingday': True}))\n tradedays_df = tradedays_df[['Tradedays_str', 'Tradedays']]\n tradedays_df['Tradedays'] = pd.to_datetime(tradedays_df[\n 'Tradedays'].dt.strftime('%Y-%m-%d %H:%M:%S'))\n tradedays_df.drop_duplicates(subset=['Tradedays_str'],\n inplace=True)\n tradedays_df.set_index('Tradedays', inplace=True)\n tradedays_df.sort_index(inplace=True)\n self.tradedays_msg[jz_code] = tradedays_df\n\n def find_tradeday(self, day: int, date=None, exchange: str='DCE'):\n \"\"\"\n 根据date查询距此日day天的交易日\n :param date: None 默认是今日\n :param day: day为0 时为判断今天是否是交易日,返回Bool\n :param exchange:\n :return: date:str\n \"\"\"\n date = pd.to_datetime(datetime.date.today()\n ) if not date else pd.to_datetime(date).replace(hour=0, minute=\n 0, second=0)\n tradeday_df = self.tradedays_msg[exchange]\n if day == 0:\n return date.strftime('%Y-%m-%d') in tradeday_df['Tradedays_str'\n ].values\n if day > 0:\n tradeday_df = tradeday_df.loc[tradeday_df.index > date]\n return tradeday_df.iloc[day - 1]['Tradedays_str']\n if day < 0:\n tradeday_df = tradeday_df.loc[tradeday_df.index < date]\n return tradeday_df.iloc[day]['Tradedays_str']\n <function token>\n",
"<import token>\n<code token>\n\n\nclass Future:\n <docstring token>\n <function token>\n <function token>\n\n def get_PriceTick(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['PriceTick']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n contract_temp = contract\n if len(contract) > 3:\n contract_temp = contract[:-4]\n dict_all[contract] = {i: self.products_base_msg[contract_temp][\n i] for i in info_lst}\n return dict_all\n\n def get_ExchangeID(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ExchangeID']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n <function token>\n\n def get_LongMarginRatio(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['LongMarginRatio']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_ShortMarginRatio(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ShortMarginRatio']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_MaxMarketOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxMarketOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_MaxLimitOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxLimitOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_contract_info(self, contract_lst=None, info_lst=None):\n \"\"\"\n 获取主力合约\n :return:\n :info_lst:['product', 'symbol', 'ProductID', 'ExchangeID', 'MaxLimitOrderVolume', 'MinLimitOrderVolume',\n 'MaxMarketOrderVolume', 'MinMarketOrderVolume', 'LongMarginRatio', 'ShortMarginRatio', 'VolumeMultiple',\n 'ExchangeInstID', 'IsTrading']\n \"\"\"\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n <function token>\n <function token>\n\n def get_windy_code(self, code):\n \"\"\"\n 其他code转windy的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'INE', 'CFFEX': 'CFE'}\n pattern = re.compile('^[a-zA-Z]{1,2}')\n product = pattern.match(code).group(0).upper()\n exchange_id = self.products_base_msg[product]['ExchangeID']\n if exchange_id is np.NaN:\n return\n windy_code = code + '.' 
+ change_dict[exchange_id]\n return windy_code\n\n def get_jq_code(self, code):\n \"\"\"\n 其他code转jq的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'XDCE', 'CZCE': 'XZCE', 'SHFE': 'XSGE', 'INE':\n 'XINE', 'CFFEX': 'CCFX'}\n\n def get_main_symbol(self, product=None, date=None):\n \"\"\"\n :param product: str 或者list\n :param date:\n :return:\n \"\"\"\n if product:\n product = product if isinstance(product, list) else [product]\n date = pd.to_datetime(date) if date else pd.to_datetime(datetime.\n date.today())\n df = {}\n for symbol in product:\n print(symbol)\n df[symbol] = get_dominant_future(symbol, date)[:-5]\n return df\n <function token>\n <function token>\n\n def __get_tradedays(self):\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'SHF', 'CFFEX': 'CFE'}\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n db = m_cl['Tradedays']\n for jz_code, mongo_code in change_dict.items():\n col = db[mongo_code]\n tradedays_df = pd.DataFrame(col.find({'isTradingday': True}))\n tradedays_df = tradedays_df[['Tradedays_str', 'Tradedays']]\n tradedays_df['Tradedays'] = pd.to_datetime(tradedays_df[\n 'Tradedays'].dt.strftime('%Y-%m-%d %H:%M:%S'))\n tradedays_df.drop_duplicates(subset=['Tradedays_str'],\n inplace=True)\n tradedays_df.set_index('Tradedays', inplace=True)\n tradedays_df.sort_index(inplace=True)\n self.tradedays_msg[jz_code] = tradedays_df\n\n def find_tradeday(self, day: int, date=None, exchange: str='DCE'):\n \"\"\"\n 根据date查询距此日day天的交易日\n :param date: None 默认是今日\n :param day: day为0 时为判断今天是否是交易日,返回Bool\n :param exchange:\n :return: date:str\n \"\"\"\n date = pd.to_datetime(datetime.date.today()\n ) if not date else pd.to_datetime(date).replace(hour=0, minute=\n 0, second=0)\n tradeday_df = self.tradedays_msg[exchange]\n if day == 0:\n return date.strftime('%Y-%m-%d') in tradeday_df['Tradedays_str'\n ].values\n if day > 0:\n tradeday_df = tradeday_df.loc[tradeday_df.index > date]\n return tradeday_df.iloc[day - 1]['Tradedays_str']\n if day < 0:\n tradeday_df = tradeday_df.loc[tradeday_df.index < date]\n return tradeday_df.iloc[day]['Tradedays_str']\n <function token>\n",
"<import token>\n<code token>\n\n\nclass Future:\n <docstring token>\n <function token>\n <function token>\n\n def get_PriceTick(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['PriceTick']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n contract_temp = contract\n if len(contract) > 3:\n contract_temp = contract[:-4]\n dict_all[contract] = {i: self.products_base_msg[contract_temp][\n i] for i in info_lst}\n return dict_all\n\n def get_ExchangeID(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ExchangeID']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n <function token>\n <function token>\n\n def get_ShortMarginRatio(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ShortMarginRatio']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_MaxMarketOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxMarketOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_MaxLimitOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxLimitOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_contract_info(self, contract_lst=None, info_lst=None):\n \"\"\"\n 获取主力合约\n :return:\n :info_lst:['product', 'symbol', 'ProductID', 'ExchangeID', 'MaxLimitOrderVolume', 'MinLimitOrderVolume',\n 'MaxMarketOrderVolume', 'MinMarketOrderVolume', 'LongMarginRatio', 'ShortMarginRatio', 'VolumeMultiple',\n 'ExchangeInstID', 'IsTrading']\n \"\"\"\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n <function token>\n <function token>\n\n def get_windy_code(self, code):\n \"\"\"\n 其他code转windy的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'INE', 'CFFEX': 'CFE'}\n pattern = re.compile('^[a-zA-Z]{1,2}')\n product = pattern.match(code).group(0).upper()\n exchange_id = self.products_base_msg[product]['ExchangeID']\n if exchange_id is np.NaN:\n return\n windy_code = code + '.' 
+ change_dict[exchange_id]\n return windy_code\n\n def get_jq_code(self, code):\n \"\"\"\n 其他code转jq的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'XDCE', 'CZCE': 'XZCE', 'SHFE': 'XSGE', 'INE':\n 'XINE', 'CFFEX': 'CCFX'}\n\n def get_main_symbol(self, product=None, date=None):\n \"\"\"\n :param product: str 或者list\n :param date:\n :return:\n \"\"\"\n if product:\n product = product if isinstance(product, list) else [product]\n date = pd.to_datetime(date) if date else pd.to_datetime(datetime.\n date.today())\n df = {}\n for symbol in product:\n print(symbol)\n df[symbol] = get_dominant_future(symbol, date)[:-5]\n return df\n <function token>\n <function token>\n\n def __get_tradedays(self):\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'SHF', 'CFFEX': 'CFE'}\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n db = m_cl['Tradedays']\n for jz_code, mongo_code in change_dict.items():\n col = db[mongo_code]\n tradedays_df = pd.DataFrame(col.find({'isTradingday': True}))\n tradedays_df = tradedays_df[['Tradedays_str', 'Tradedays']]\n tradedays_df['Tradedays'] = pd.to_datetime(tradedays_df[\n 'Tradedays'].dt.strftime('%Y-%m-%d %H:%M:%S'))\n tradedays_df.drop_duplicates(subset=['Tradedays_str'],\n inplace=True)\n tradedays_df.set_index('Tradedays', inplace=True)\n tradedays_df.sort_index(inplace=True)\n self.tradedays_msg[jz_code] = tradedays_df\n\n def find_tradeday(self, day: int, date=None, exchange: str='DCE'):\n \"\"\"\n 根据date查询距此日day天的交易日\n :param date: None 默认是今日\n :param day: day为0 时为判断今天是否是交易日,返回Bool\n :param exchange:\n :return: date:str\n \"\"\"\n date = pd.to_datetime(datetime.date.today()\n ) if not date else pd.to_datetime(date).replace(hour=0, minute=\n 0, second=0)\n tradeday_df = self.tradedays_msg[exchange]\n if day == 0:\n return date.strftime('%Y-%m-%d') in tradeday_df['Tradedays_str'\n ].values\n if day > 0:\n tradeday_df = tradeday_df.loc[tradeday_df.index > date]\n return tradeday_df.iloc[day - 1]['Tradedays_str']\n if day < 0:\n tradeday_df = tradeday_df.loc[tradeday_df.index < date]\n return tradeday_df.iloc[day]['Tradedays_str']\n <function token>\n",
"<import token>\n<code token>\n\n\nclass Future:\n <docstring token>\n <function token>\n <function token>\n\n def get_PriceTick(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['PriceTick']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n contract_temp = contract\n if len(contract) > 3:\n contract_temp = contract[:-4]\n dict_all[contract] = {i: self.products_base_msg[contract_temp][\n i] for i in info_lst}\n return dict_all\n\n def get_ExchangeID(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ExchangeID']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n <function token>\n <function token>\n\n def get_ShortMarginRatio(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ShortMarginRatio']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_MaxMarketOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxMarketOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_MaxLimitOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxLimitOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n <function token>\n <function token>\n <function token>\n\n def get_windy_code(self, code):\n \"\"\"\n 其他code转windy的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'INE', 'CFFEX': 'CFE'}\n pattern = re.compile('^[a-zA-Z]{1,2}')\n product = pattern.match(code).group(0).upper()\n exchange_id = self.products_base_msg[product]['ExchangeID']\n if exchange_id is np.NaN:\n return\n windy_code = code + '.' 
+ change_dict[exchange_id]\n return windy_code\n\n def get_jq_code(self, code):\n \"\"\"\n 其他code转jq的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'XDCE', 'CZCE': 'XZCE', 'SHFE': 'XSGE', 'INE':\n 'XINE', 'CFFEX': 'CCFX'}\n\n def get_main_symbol(self, product=None, date=None):\n \"\"\"\n :param product: str 或者list\n :param date:\n :return:\n \"\"\"\n if product:\n product = product if isinstance(product, list) else [product]\n date = pd.to_datetime(date) if date else pd.to_datetime(datetime.\n date.today())\n df = {}\n for symbol in product:\n print(symbol)\n df[symbol] = get_dominant_future(symbol, date)[:-5]\n return df\n <function token>\n <function token>\n\n def __get_tradedays(self):\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'SHF', 'CFFEX': 'CFE'}\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n db = m_cl['Tradedays']\n for jz_code, mongo_code in change_dict.items():\n col = db[mongo_code]\n tradedays_df = pd.DataFrame(col.find({'isTradingday': True}))\n tradedays_df = tradedays_df[['Tradedays_str', 'Tradedays']]\n tradedays_df['Tradedays'] = pd.to_datetime(tradedays_df[\n 'Tradedays'].dt.strftime('%Y-%m-%d %H:%M:%S'))\n tradedays_df.drop_duplicates(subset=['Tradedays_str'],\n inplace=True)\n tradedays_df.set_index('Tradedays', inplace=True)\n tradedays_df.sort_index(inplace=True)\n self.tradedays_msg[jz_code] = tradedays_df\n\n def find_tradeday(self, day: int, date=None, exchange: str='DCE'):\n \"\"\"\n 根据date查询距此日day天的交易日\n :param date: None 默认是今日\n :param day: day为0 时为判断今天是否是交易日,返回Bool\n :param exchange:\n :return: date:str\n \"\"\"\n date = pd.to_datetime(datetime.date.today()\n ) if not date else pd.to_datetime(date).replace(hour=0, minute=\n 0, second=0)\n tradeday_df = self.tradedays_msg[exchange]\n if day == 0:\n return date.strftime('%Y-%m-%d') in tradeday_df['Tradedays_str'\n ].values\n if day > 0:\n tradeday_df = tradeday_df.loc[tradeday_df.index > date]\n return tradeday_df.iloc[day - 1]['Tradedays_str']\n if day < 0:\n tradeday_df = tradeday_df.loc[tradeday_df.index < date]\n return tradeday_df.iloc[day]['Tradedays_str']\n <function token>\n",
"<import token>\n<code token>\n\n\nclass Future:\n <docstring token>\n <function token>\n <function token>\n\n def get_PriceTick(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['PriceTick']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n contract_temp = contract\n if len(contract) > 3:\n contract_temp = contract[:-4]\n dict_all[contract] = {i: self.products_base_msg[contract_temp][\n i] for i in info_lst}\n return dict_all\n\n def get_ExchangeID(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ExchangeID']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n <function token>\n <function token>\n\n def get_ShortMarginRatio(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ShortMarginRatio']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_MaxMarketOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxMarketOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_MaxLimitOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxLimitOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n <function token>\n <function token>\n <function token>\n\n def get_windy_code(self, code):\n \"\"\"\n 其他code转windy的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'INE', 'CFFEX': 'CFE'}\n pattern = re.compile('^[a-zA-Z]{1,2}')\n product = pattern.match(code).group(0).upper()\n exchange_id = self.products_base_msg[product]['ExchangeID']\n if exchange_id is np.NaN:\n return\n windy_code = code + '.' 
+ change_dict[exchange_id]\n return windy_code\n <function token>\n\n def get_main_symbol(self, product=None, date=None):\n \"\"\"\n :param product: str 或者list\n :param date:\n :return:\n \"\"\"\n if product:\n product = product if isinstance(product, list) else [product]\n date = pd.to_datetime(date) if date else pd.to_datetime(datetime.\n date.today())\n df = {}\n for symbol in product:\n print(symbol)\n df[symbol] = get_dominant_future(symbol, date)[:-5]\n return df\n <function token>\n <function token>\n\n def __get_tradedays(self):\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'SHF', 'CFFEX': 'CFE'}\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n db = m_cl['Tradedays']\n for jz_code, mongo_code in change_dict.items():\n col = db[mongo_code]\n tradedays_df = pd.DataFrame(col.find({'isTradingday': True}))\n tradedays_df = tradedays_df[['Tradedays_str', 'Tradedays']]\n tradedays_df['Tradedays'] = pd.to_datetime(tradedays_df[\n 'Tradedays'].dt.strftime('%Y-%m-%d %H:%M:%S'))\n tradedays_df.drop_duplicates(subset=['Tradedays_str'],\n inplace=True)\n tradedays_df.set_index('Tradedays', inplace=True)\n tradedays_df.sort_index(inplace=True)\n self.tradedays_msg[jz_code] = tradedays_df\n\n def find_tradeday(self, day: int, date=None, exchange: str='DCE'):\n \"\"\"\n 根据date查询距此日day天的交易日\n :param date: None 默认是今日\n :param day: day为0 时为判断今天是否是交易日,返回Bool\n :param exchange:\n :return: date:str\n \"\"\"\n date = pd.to_datetime(datetime.date.today()\n ) if not date else pd.to_datetime(date).replace(hour=0, minute=\n 0, second=0)\n tradeday_df = self.tradedays_msg[exchange]\n if day == 0:\n return date.strftime('%Y-%m-%d') in tradeday_df['Tradedays_str'\n ].values\n if day > 0:\n tradeday_df = tradeday_df.loc[tradeday_df.index > date]\n return tradeday_df.iloc[day - 1]['Tradedays_str']\n if day < 0:\n tradeday_df = tradeday_df.loc[tradeday_df.index < date]\n return tradeday_df.iloc[day]['Tradedays_str']\n <function token>\n",
"<import token>\n<code token>\n\n\nclass Future:\n <docstring token>\n <function token>\n <function token>\n\n def get_PriceTick(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['PriceTick']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n contract_temp = contract\n if len(contract) > 3:\n contract_temp = contract[:-4]\n dict_all[contract] = {i: self.products_base_msg[contract_temp][\n i] for i in info_lst}\n return dict_all\n\n def get_ExchangeID(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ExchangeID']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n <function token>\n <function token>\n\n def get_ShortMarginRatio(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ShortMarginRatio']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_MaxMarketOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxMarketOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_MaxLimitOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxLimitOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n <function token>\n <function token>\n <function token>\n\n def get_windy_code(self, code):\n \"\"\"\n 其他code转windy的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'INE', 'CFFEX': 'CFE'}\n pattern = re.compile('^[a-zA-Z]{1,2}')\n product = pattern.match(code).group(0).upper()\n exchange_id = self.products_base_msg[product]['ExchangeID']\n if exchange_id is np.NaN:\n return\n windy_code = code + '.' 
+ change_dict[exchange_id]\n return windy_code\n <function token>\n\n def get_main_symbol(self, product=None, date=None):\n \"\"\"\n :param product: str 或者list\n :param date:\n :return:\n \"\"\"\n if product:\n product = product if isinstance(product, list) else [product]\n date = pd.to_datetime(date) if date else pd.to_datetime(datetime.\n date.today())\n df = {}\n for symbol in product:\n print(symbol)\n df[symbol] = get_dominant_future(symbol, date)[:-5]\n return df\n <function token>\n <function token>\n\n def __get_tradedays(self):\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'SHF', 'CFFEX': 'CFE'}\n with pymongo.MongoClient(\n f'mongodb://{MONGDB_USER}:{MONGDB_PW}@{MONGDB_IP}:27017/') as m_cl:\n db = m_cl['Tradedays']\n for jz_code, mongo_code in change_dict.items():\n col = db[mongo_code]\n tradedays_df = pd.DataFrame(col.find({'isTradingday': True}))\n tradedays_df = tradedays_df[['Tradedays_str', 'Tradedays']]\n tradedays_df['Tradedays'] = pd.to_datetime(tradedays_df[\n 'Tradedays'].dt.strftime('%Y-%m-%d %H:%M:%S'))\n tradedays_df.drop_duplicates(subset=['Tradedays_str'],\n inplace=True)\n tradedays_df.set_index('Tradedays', inplace=True)\n tradedays_df.sort_index(inplace=True)\n self.tradedays_msg[jz_code] = tradedays_df\n <function token>\n <function token>\n",
"<import token>\n<code token>\n\n\nclass Future:\n <docstring token>\n <function token>\n <function token>\n\n def get_PriceTick(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['PriceTick']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n contract_temp = contract\n if len(contract) > 3:\n contract_temp = contract[:-4]\n dict_all[contract] = {i: self.products_base_msg[contract_temp][\n i] for i in info_lst}\n return dict_all\n\n def get_ExchangeID(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ExchangeID']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n <function token>\n <function token>\n\n def get_ShortMarginRatio(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ShortMarginRatio']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_MaxMarketOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxMarketOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_MaxLimitOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxLimitOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n <function token>\n <function token>\n <function token>\n\n def get_windy_code(self, code):\n \"\"\"\n 其他code转windy的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'INE', 'CFFEX': 'CFE'}\n pattern = re.compile('^[a-zA-Z]{1,2}')\n product = pattern.match(code).group(0).upper()\n exchange_id = self.products_base_msg[product]['ExchangeID']\n if exchange_id is np.NaN:\n return\n windy_code = code + '.' + change_dict[exchange_id]\n return windy_code\n <function token>\n\n def get_main_symbol(self, product=None, date=None):\n \"\"\"\n :param product: str 或者list\n :param date:\n :return:\n \"\"\"\n if product:\n product = product if isinstance(product, list) else [product]\n date = pd.to_datetime(date) if date else pd.to_datetime(datetime.\n date.today())\n df = {}\n for symbol in product:\n print(symbol)\n df[symbol] = get_dominant_future(symbol, date)[:-5]\n return df\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<code token>\n\n\nclass Future:\n <docstring token>\n <function token>\n <function token>\n\n def get_PriceTick(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['PriceTick']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n contract_temp = contract\n if len(contract) > 3:\n contract_temp = contract[:-4]\n dict_all[contract] = {i: self.products_base_msg[contract_temp][\n i] for i in info_lst}\n return dict_all\n\n def get_ExchangeID(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ExchangeID']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n <function token>\n <function token>\n <function token>\n\n def get_MaxMarketOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxMarketOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n\n def get_MaxLimitOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxLimitOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n <function token>\n <function token>\n <function token>\n\n def get_windy_code(self, code):\n \"\"\"\n 其他code转windy的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'INE', 'CFFEX': 'CFE'}\n pattern = re.compile('^[a-zA-Z]{1,2}')\n product = pattern.match(code).group(0).upper()\n exchange_id = self.products_base_msg[product]['ExchangeID']\n if exchange_id is np.NaN:\n return\n windy_code = code + '.' + change_dict[exchange_id]\n return windy_code\n <function token>\n\n def get_main_symbol(self, product=None, date=None):\n \"\"\"\n :param product: str 或者list\n :param date:\n :return:\n \"\"\"\n if product:\n product = product if isinstance(product, list) else [product]\n date = pd.to_datetime(date) if date else pd.to_datetime(datetime.\n date.today())\n df = {}\n for symbol in product:\n print(symbol)\n df[symbol] = get_dominant_future(symbol, date)[:-5]\n return df\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<code token>\n\n\nclass Future:\n <docstring token>\n <function token>\n <function token>\n\n def get_PriceTick(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['PriceTick']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n contract_temp = contract\n if len(contract) > 3:\n contract_temp = contract[:-4]\n dict_all[contract] = {i: self.products_base_msg[contract_temp][\n i] for i in info_lst}\n return dict_all\n\n def get_ExchangeID(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ExchangeID']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n <function token>\n <function token>\n <function token>\n\n def get_MaxMarketOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxMarketOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n <function token>\n <function token>\n <function token>\n <function token>\n\n def get_windy_code(self, code):\n \"\"\"\n 其他code转windy的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'INE', 'CFFEX': 'CFE'}\n pattern = re.compile('^[a-zA-Z]{1,2}')\n product = pattern.match(code).group(0).upper()\n exchange_id = self.products_base_msg[product]['ExchangeID']\n if exchange_id is np.NaN:\n return\n windy_code = code + '.' + change_dict[exchange_id]\n return windy_code\n <function token>\n\n def get_main_symbol(self, product=None, date=None):\n \"\"\"\n :param product: str 或者list\n :param date:\n :return:\n \"\"\"\n if product:\n product = product if isinstance(product, list) else [product]\n date = pd.to_datetime(date) if date else pd.to_datetime(datetime.\n date.today())\n df = {}\n for symbol in product:\n print(symbol)\n df[symbol] = get_dominant_future(symbol, date)[:-5]\n return df\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<code token>\n\n\nclass Future:\n <docstring token>\n <function token>\n <function token>\n\n def get_PriceTick(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['PriceTick']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n contract_temp = contract\n if len(contract) > 3:\n contract_temp = contract[:-4]\n dict_all[contract] = {i: self.products_base_msg[contract_temp][\n i] for i in info_lst}\n return dict_all\n\n def get_ExchangeID(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ExchangeID']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n <function token>\n <function token>\n <function token>\n\n def get_MaxMarketOrderVolume(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['MaxMarketOrderVolume']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n <function token>\n <function token>\n <function token>\n <function token>\n\n def get_windy_code(self, code):\n \"\"\"\n 其他code转windy的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'INE', 'CFFEX': 'CFE'}\n pattern = re.compile('^[a-zA-Z]{1,2}')\n product = pattern.match(code).group(0).upper()\n exchange_id = self.products_base_msg[product]['ExchangeID']\n if exchange_id is np.NaN:\n return\n windy_code = code + '.' + change_dict[exchange_id]\n return windy_code\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<code token>\n\n\nclass Future:\n <docstring token>\n <function token>\n <function token>\n\n def get_PriceTick(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['PriceTick']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n contract_temp = contract\n if len(contract) > 3:\n contract_temp = contract[:-4]\n dict_all[contract] = {i: self.products_base_msg[contract_temp][\n i] for i in info_lst}\n return dict_all\n\n def get_ExchangeID(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['ExchangeID']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n dict_all[contract] = {i: self.products_symbol_msg[''.join(re.\n split('[^A-Za-z]', contract))][contract][i] for i in info_lst}\n return dict_all\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def get_windy_code(self, code):\n \"\"\"\n 其他code转windy的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'INE', 'CFFEX': 'CFE'}\n pattern = re.compile('^[a-zA-Z]{1,2}')\n product = pattern.match(code).group(0).upper()\n exchange_id = self.products_base_msg[product]['ExchangeID']\n if exchange_id is np.NaN:\n return\n windy_code = code + '.' + change_dict[exchange_id]\n return windy_code\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<code token>\n\n\nclass Future:\n <docstring token>\n <function token>\n <function token>\n\n def get_PriceTick(self, contract_lst=None):\n \"\"\"\n 获取交易所名称\n \"\"\"\n info_lst = ['PriceTick']\n dict_all = dict()\n contract_lst = [i.upper() for i in contract_lst]\n for contract in contract_lst:\n contract_temp = contract\n if len(contract) > 3:\n contract_temp = contract[:-4]\n dict_all[contract] = {i: self.products_base_msg[contract_temp][\n i] for i in info_lst}\n return dict_all\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def get_windy_code(self, code):\n \"\"\"\n 其他code转windy的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'INE', 'CFFEX': 'CFE'}\n pattern = re.compile('^[a-zA-Z]{1,2}')\n product = pattern.match(code).group(0).upper()\n exchange_id = self.products_base_msg[product]['ExchangeID']\n if exchange_id is np.NaN:\n return\n windy_code = code + '.' + change_dict[exchange_id]\n return windy_code\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<code token>\n\n\nclass Future:\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def get_windy_code(self, code):\n \"\"\"\n 其他code转windy的code\n :param code: 需要转的code\n :return:\n \"\"\"\n change_dict = {'DCE': 'DCE', 'CZCE': 'CZC', 'SHFE': 'SHF', 'INE':\n 'INE', 'CFFEX': 'CFE'}\n pattern = re.compile('^[a-zA-Z]{1,2}')\n product = pattern.match(code).group(0).upper()\n exchange_id = self.products_base_msg[product]['ExchangeID']\n if exchange_id is np.NaN:\n return\n windy_code = code + '.' + change_dict[exchange_id]\n return windy_code\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<code token>\n\n\nclass Future:\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<code token>\n<class token>\n"
] | false |
98,513 |
b104d230d4979b778ac61630220660a935aa0004
|
from django.contrib import admin
from .models import Post
class PostAdmin(admin.ModelAdmin):
#readonly_fields = ["user","Student","Entreprise","promo","title","introduction","tools","details","tags"]
list_display = ('title', "user","Student","Entreprise",'promo','creating_date','approved')
list_filter = ('promo','approved')
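    # Unconditionally allow add/delete for this model in the admin, bypassing the
    # per-user model permission checks Django would normally apply.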
def has_add_permission(self, request):
return True
def has_delete_permission(self, request, obj=None):
return True
admin.site.register(Post,PostAdmin)
#admin.site.register(StudentPost)
|
[
"from django.contrib import admin\r\nfrom .models import Post\r\n\r\nclass PostAdmin(admin.ModelAdmin):\r\n #readonly_fields = [\"user\",\"Student\",\"Entreprise\",\"promo\",\"title\",\"introduction\",\"tools\",\"details\",\"tags\"]\r\n list_display = ('title', \"user\",\"Student\",\"Entreprise\",'promo','creating_date','approved')\r\n list_filter = ('promo','approved')\r\n \r\n def has_add_permission(self, request):\r\n return True\r\n def has_delete_permission(self, request, obj=None):\r\n return True\r\n\r\nadmin.site.register(Post,PostAdmin)\r\n#admin.site.register(StudentPost)",
"from django.contrib import admin\nfrom .models import Post\n\n\nclass PostAdmin(admin.ModelAdmin):\n list_display = ('title', 'user', 'Student', 'Entreprise', 'promo',\n 'creating_date', 'approved')\n list_filter = 'promo', 'approved'\n\n def has_add_permission(self, request):\n return True\n\n def has_delete_permission(self, request, obj=None):\n return True\n\n\nadmin.site.register(Post, PostAdmin)\n",
"<import token>\n\n\nclass PostAdmin(admin.ModelAdmin):\n list_display = ('title', 'user', 'Student', 'Entreprise', 'promo',\n 'creating_date', 'approved')\n list_filter = 'promo', 'approved'\n\n def has_add_permission(self, request):\n return True\n\n def has_delete_permission(self, request, obj=None):\n return True\n\n\nadmin.site.register(Post, PostAdmin)\n",
"<import token>\n\n\nclass PostAdmin(admin.ModelAdmin):\n list_display = ('title', 'user', 'Student', 'Entreprise', 'promo',\n 'creating_date', 'approved')\n list_filter = 'promo', 'approved'\n\n def has_add_permission(self, request):\n return True\n\n def has_delete_permission(self, request, obj=None):\n return True\n\n\n<code token>\n",
"<import token>\n\n\nclass PostAdmin(admin.ModelAdmin):\n <assignment token>\n <assignment token>\n\n def has_add_permission(self, request):\n return True\n\n def has_delete_permission(self, request, obj=None):\n return True\n\n\n<code token>\n",
"<import token>\n\n\nclass PostAdmin(admin.ModelAdmin):\n <assignment token>\n <assignment token>\n\n def has_add_permission(self, request):\n return True\n <function token>\n\n\n<code token>\n",
"<import token>\n\n\nclass PostAdmin(admin.ModelAdmin):\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n<class token>\n<code token>\n"
] | false |
98,514 |
75da4c23a4e909f3e273a07fb98cec7c5d15e435
|
""" Core tensorflow model that basically encapsulates all the basic ops
in order to run an experiment.
"""
import os
from absl import logging
import tensorflow as tf
from tensorflow.contrib import tpu
from .core_datamanager_estimator import DataManagerTPU as DataManager
class CoreModelTPU(object):
def __init__(self,
tf_session: tf.Session,
learning_rate: float,
training_dataset: DataManager = None,
validation_dataset: DataManager = None,
output_path: str = '../outputs',
                 use_tpu: bool = False,
tpu_name: list = [],
data_dir= '/mnt/iowa_bucket/cifar10/data/'
):
self.data_dir = data_dir
if output_path[-1] == '/':
output_path = output_path[:-1]
self.output_path = output_path + '/' + self.__class__.__name__
self.session = tf_session
# TODO Get rid of the .datasource thing
self.dataset = {}
# if training_dataset: self.dataset['train'] = training_dataset.datasource
# if validation_dataset: self.dataset['validation'] = validation_dataset.datasource
self.datasource = {}
self.datasource['train'] = training_dataset
self.datasource['validation'] = validation_dataset
self._train_model = True if training_dataset is not None else False
self._validate_model = True if validation_dataset is not None else False
self.learning_rate = learning_rate
self.use_tpu = use_tpu
def define_model(self, data_source: DataManager , mode: str): #pylint: disable=E0202
"""Definition of the model to use. Do not modify the function here
placeholder for the actual definition in `model/` (see example)
Args:
data_source (DataManager): Data manager object for the input data
mode (str): Training and testing? # TODO Properly implement
Raises:
NotImplementedError: Model has to be implemented yet (in a separate instance in model/)
"""
raise NotImplementedError('No model defined.')
def build_model(self):
""" Build the model. """
if self.use_tpu:
self._tpu_build()
else:
self._regular_build()
def _tpu_build(self):
"""Build with TPUEstimators for TPU usage"""
def _define_model(features, labels, mode, params):
data_source = (features, labels)
self.outputs = {}
self.losses = {}
self.otters = {}
outputs, losses, others = self.define_model(data_source, mode)
if mode == tf.estimator.ModeKeys.EVAL:
return tpu.TPUEstimatorSpec(
mode=mode, loss=losses, eval_metrics=others)
if mode == tf.estimator.ModeKeys.PREDICT:
return tpu.TPUEstimatorSpec(
mode=mode, predictions=outputs
)
if mode == tf.estimator.ModeKeys.TRAIN:
self.losses['train'] = losses
self._build_optimizer(tpu_support=True)
if not len(self.optimize_ops) == 1:
                    logging.error('Implementation error: more than one optimizer defined')
logging.warning(' [*] Selecting only the first optimizer')
return tpu.TPUEstimatorSpec(
mode=mode, loss=losses[0], train_op=self.optimize_ops[0]
)
tpu_name = ['node-1'] # TODO Bring outside
tpu_iterations = 500 # TODO Bring outside
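        # The cluster resolver looks up the TPU worker's address from its name
        # (e.g. through the Cloud TPU metadata/API service).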
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
tpu_name)
run_config = tf.contrib.tpu.RunConfig(
model_dir=self.output_path,
cluster=tpu_cluster_resolver,
session_config=tf.ConfigProto(
allow_soft_placement=True, log_device_placement=True),
tpu_config=tpu.TPUConfig(tpu_iterations),
)
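        # Note: TPUEstimator treats train/eval_batch_size as the *global* batch
        # size and splits it evenly across the available TPU shards.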
self.estimator = tpu.TPUEstimator(
model_fn=_define_model,
use_tpu=True,
train_batch_size=32*4, #self.dataset['train'].batch_size,
eval_batch_size=32*4, #self.dataset['validation'].batch_size,
config=run_config,
params={"data_dir": self.data_dir}
)
def _regular_build(self):
"""Normal build for CPU/GPU usage"""
# This overwrites define_model, is that ok?
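        # tf.make_template makes define_model variable-sharing: variables created on
        # the first call are reused on later calls, so the train and validation
        # graphs share the same weights.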
self.define_model = tf.make_template(self.define_model.__name__, #pylint: disable=E1101
self.define_model,
create_scope_now_=True)
self.outputs = {}
self.losses = {}
self.otters = {}
def _build(mode):
outputs, losses, others = self.define_model(data_source=self.dataset[mode], mode=mode)
self.outputs[mode] = outputs
self.losses[mode] = losses
self.otters[mode] = others
if mode == 'train':
self._build_optimizer()
# TODO Move clean and summary to proper section
self.summary_ops = {}
if self._train_model:
_build('train')
summary = []
for idx, loss in enumerate(self.losses['train']):
summary.append(
tf.summary.scalar(name='train/loss_{}'.format(idx), tensor=loss))
for idx, element in enumerate(self.otters['train']):
summary.append(
tf.summary.scalar(name='train/otter_{}'.format(idx), tensor=element))
self.summary_ops['train'] = tf.summary.merge(summary)
if self._validate_model:
_build('validation')
summary = []
for idx, loss in enumerate(self.losses['validation']):
summary.append(
tf.summary.scalar(name='val/loss_{}'.format(idx), tensor=loss))
for idx, element in enumerate(self.otters['validation']):
summary.append(
tf.summary.scalar(name='val/otter_{}'.format(idx), tensor=element))
self.summary_ops['validation'] = tf.summary.merge(summary)
self.writer = tf.summary.FileWriter(self.output_path,
self.session.graph)
self.saver = tf.train.Saver()
# TODO Add routine to save
logging.info('Model construction complete.')
def _build_optimizer(self, optimizer_to_use=tf.train.AdamOptimizer, tpu_support=False):
"""Buids the optimizer(s) to minimize the loss(es) of the model.
Args:
optimizer_to_use (tf optimizer, optional): Defaults to tf.train.AdamOptimizer. Which
optimizer to use.
            tpu_support (bool, optional): Defaults to False. Whether the optimizer must be
                wrapped to support sharded execution, as required for TPU usage.
"""
self.optimize_ops = []
        for loss in self.losses['train']: # TODO Create appropriate external training scheme
optimize_op = optimizer_to_use(
learning_rate=self.learning_rate
)
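            # CrossShardOptimizer aggregates (sums) the gradients across all TPU
            # shards before applying a single, consistent update.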
if tpu_support:
optimize_op = tpu.CrossShardOptimizer(optimize_op)
optimize_op = optimize_op.minimize(
loss=loss,
global_step=tf.train.get_global_step()
)
self.optimize_ops.append(optimize_op)
logging.info('Optimizers built')
def train(self, steps, input_fn=None):
if self.use_tpu:
self._tpu_train(steps, input_fn)
else:
self._regular_train(steps)
def _tpu_train(self, steps, input_fn):
# def _input_fn(params):
# featuers, labels = self.datasource['train'].input_fn(params['batch_size'])
# return featuers, labels
self.estimator.train(
input_fn=input_fn,
max_steps=steps)
        logging.info('Training finished, running evaluation.')
        eval_result = self.estimator.evaluate(
            input_fn=self.dataset['validation'],
            steps=steps // 50
        )
        print("\nTest set accuracy: {accuracy:0.3f}\n".format(**eval_result))
def _regular_train(self, steps):
# Initialize or check if checkpoint # TODO add checkpoint manager
self.session.run(tf.global_variables_initializer())
initial_step = self._restore()
fetches = {}
fetches['optimize_ops'] = self.optimize_ops
# fetches['losses'] = self.losses['train']
# if self.otters['train']:
# fetches['others'] = self.otters['train']
fetches['summary_ops'] = self.summary_ops['train']
for step in range(initial_step, steps): # TODO start from checkpoint steps
# TODO clean code and optimize ops
train_out = self.session.run(fetches=fetches)
self.writer.add_summary(train_out['summary_ops'], global_step=step)
if step % 50 == 0: # TODO every how many steps? Automate?
val = self._validate(step)
logging.info('Step {} -- Validation result: {}'.format(step, val))
if step % 1000 == 0: # For now just another arbitrary number (how heavy is saving?)
self._save(step)
logging.info('Done training.')
def _validate(self, global_step):
""" Run network on validation set """
# Todo clean summaries and add example outputs
fetches = {}
fetches['losses'] = self.losses['validation']
        if self.otters['validation']:
fetches['others'] = self.otters['validation']
fetches['summary_ops'] = self.summary_ops['validation']
validation_out = self.session.run(fetches=fetches)
self.writer.add_summary(validation_out['summary_ops'], global_step=global_step)
del validation_out['summary_ops']
return validation_out
def _save(self, step):
"""Save the model weights.
Args:
step (int): Training step.
"""
output_path = self.output_path + '/checkpoints/'
if not os.path.isdir(output_path):
os.makedirs(output_path)
self.saver.save(self.session, save_path=output_path,global_step=step)
def _restore(self):
"""Restore the trained variables from the last stored checkpoint
Returns:
int: The training step when this model was saved.
"""
output_path = self.output_path + '/checkpoints/'
checkpoint = tf.train.latest_checkpoint(output_path)
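        # latest_checkpoint returns None when no checkpoint exists yet, in which
        # case training starts from step 0.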
if checkpoint:
self.saver.restore(self.session, save_path=checkpoint)
restored_step = int(checkpoint.split('-')[-1]) # Robust enough?
return restored_step
logging.info('Starting training from scratch.')
return 0
def evaluate(self):
pass
|
[
"\"\"\" Core tensorflow model that basically encapsulates all the basic ops\n in order to run an experiment.\n\"\"\"\n\nimport os\nfrom absl import logging\n\nimport tensorflow as tf\nfrom tensorflow.contrib import tpu\n\nfrom .core_datamanager_estimator import DataManagerTPU as DataManager\n\nclass CoreModelTPU(object):\n\n def __init__(self,\n tf_session: tf.Session,\n learning_rate: float,\n training_dataset: DataManager = None,\n validation_dataset: DataManager = None,\n output_path: str = '../outputs',\n use_tpu: str = False,\n tpu_name: list = [],\n data_dir= '/mnt/iowa_bucket/cifar10/data/'\n ):\n self.data_dir = data_dir\n if output_path[-1] == '/':\n output_path = output_path[:-1]\n self.output_path = output_path + '/' + self.__class__.__name__\n\n self.session = tf_session\n\n # TODO Get rid of the .datasource thing\n self.dataset = {}\n # if training_dataset: self.dataset['train'] = training_dataset.datasource\n # if validation_dataset: self.dataset['validation'] = validation_dataset.datasource\n\n self.datasource = {}\n self.datasource['train'] = training_dataset\n self.datasource['validation'] = validation_dataset\n\n self._train_model = True if training_dataset is not None else False\n self._validate_model = True if validation_dataset is not None else False\n self.learning_rate = learning_rate\n\n self.use_tpu = use_tpu\n\n\n def define_model(self, data_source: DataManager , mode: str): #pylint: disable=E0202\n \"\"\"Definition of the model to use. Do not modify the function here\n placeholder for the actual definition in `model/` (see example)\n\n Args:\n data_source (DataManager): Data manager object for the input data\n mode (str): Training and testing? # TODO Properly implement\n\n Raises:\n NotImplementedError: Model has to be implemented yet (in a separate instance in model/)\n \"\"\"\n\n raise NotImplementedError('No model defined.')\n\n def build_model(self):\n \"\"\" Build the model. 
\"\"\"\n if self.use_tpu:\n self._tpu_build()\n else:\n self._regular_build()\n\n def _tpu_build(self):\n \"\"\"Build with TPUEstimators for TPU usage\"\"\"\n def _define_model(features, labels, mode, params):\n data_source = (features, labels)\n self.outputs = {}\n self.losses = {}\n self.otters = {}\n outputs, losses, others = self.define_model(data_source, mode)\n\n if mode == tf.estimator.ModeKeys.EVAL:\n return tpu.TPUEstimatorSpec(\n mode=mode, loss=losses, eval_metrics=others)\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tpu.TPUEstimatorSpec(\n mode=mode, predictions=outputs\n )\n if mode == tf.estimator.ModeKeys.TRAIN:\n self.losses['train'] = losses\n self._build_optimizer(tpu_support=True)\n if not len(self.optimize_ops) == 1:\n logging.error('Implementati Error: More than one optimizer defined')\n logging.warning(' [*] Selecting only the first optimizer')\n return tpu.TPUEstimatorSpec(\n mode=mode, loss=losses[0], train_op=self.optimize_ops[0]\n )\n\n tpu_name = ['node-1'] # TODO Bring outside\n tpu_iterations = 500 # TODO Bring outside\n tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(\n tpu_name)\n\n run_config = tf.contrib.tpu.RunConfig(\n model_dir=self.output_path,\n cluster=tpu_cluster_resolver,\n session_config=tf.ConfigProto(\n allow_soft_placement=True, log_device_placement=True),\n tpu_config=tpu.TPUConfig(tpu_iterations),\n )\n\n self.estimator = tpu.TPUEstimator(\n model_fn=_define_model,\n use_tpu=True,\n train_batch_size=32*4, #self.dataset['train'].batch_size,\n eval_batch_size=32*4, #self.dataset['validation'].batch_size,\n config=run_config,\n params={\"data_dir\": self.data_dir}\n )\n\n\n def _regular_build(self):\n \"\"\"Normal build for CPU/GPU usage\"\"\"\n # This overwrites define_model, is that ok?\n self.define_model = tf.make_template(self.define_model.__name__, #pylint: disable=E1101\n self.define_model,\n create_scope_now_=True)\n\n self.outputs = {}\n self.losses = {}\n self.otters = {}\n\n def _build(mode):\n outputs, losses, others = self.define_model(data_source=self.dataset[mode], mode=mode)\n self.outputs[mode] = outputs\n self.losses[mode] = losses\n self.otters[mode] = others\n if mode == 'train':\n self._build_optimizer()\n\n # TODO Move clean and summary to proper section\n self.summary_ops = {}\n if self._train_model:\n _build('train')\n summary = []\n for idx, loss in enumerate(self.losses['train']):\n summary.append(\n tf.summary.scalar(name='train/loss_{}'.format(idx), tensor=loss))\n for idx, element in enumerate(self.otters['train']):\n summary.append(\n tf.summary.scalar(name='train/otter_{}'.format(idx), tensor=element))\n self.summary_ops['train'] = tf.summary.merge(summary)\n\n if self._validate_model:\n _build('validation')\n summary = []\n for idx, loss in enumerate(self.losses['validation']):\n summary.append(\n tf.summary.scalar(name='val/loss_{}'.format(idx), tensor=loss))\n for idx, element in enumerate(self.otters['validation']):\n summary.append(\n tf.summary.scalar(name='val/otter_{}'.format(idx), tensor=element))\n self.summary_ops['validation'] = tf.summary.merge(summary)\n\n self.writer = tf.summary.FileWriter(self.output_path,\n self.session.graph)\n self.saver = tf.train.Saver()\n # TODO Add routine to save\n logging.info('Model construction complete.')\n\n def _build_optimizer(self, optimizer_to_use=tf.train.AdamOptimizer, tpu_support=False):\n \"\"\"Buids the optimizer(s) to minimize the loss(es) of the model.\n\n Args:\n optimizer_to_use (tf optimizer, optional): Defaults to 
tf.train.AdamOptimizer. Which\n optimizer to use.\n tpu_support (bool, optional): Defaults to False. If the optimizer has to support shard\n optimier, required for TPU usage.\n \"\"\"\n self.optimize_ops = []\n for loss in self.losses['train']: # TODO Create apropoiate external training scheme\n optimize_op = optimizer_to_use(\n learning_rate=self.learning_rate\n )\n if tpu_support:\n optimize_op = tpu.CrossShardOptimizer(optimize_op)\n optimize_op = optimize_op.minimize(\n loss=loss,\n global_step=tf.train.get_global_step()\n )\n self.optimize_ops.append(optimize_op)\n logging.info('Optimizers built')\n\n def train(self, steps, input_fn=None):\n if self.use_tpu:\n self._tpu_train(steps, input_fn)\n else:\n self._regular_train(steps)\n\n def _tpu_train(self, steps, input_fn):\n\n # def _input_fn(params):\n # featuers, labels = self.datasource['train'].input_fn(params['batch_size'])\n # return featuers, labels\n\n self.estimator.train(\n input_fn=input_fn,\n max_steps=steps)\n logging.info('Es ist train?')\n self.estimator.evaluate(\n input_fn=self.dataset['validation'],\n steps=steps/50\n )\n print(\"\\nTest set accuracy: {accuracy:0.3f}\\n\".format(**eval_result))\n\n def _regular_train(self, steps):\n # Initialize or check if checkpoint # TODO add checkpoint manager\n self.session.run(tf.global_variables_initializer())\n initial_step = self._restore()\n\n fetches = {}\n fetches['optimize_ops'] = self.optimize_ops\n # fetches['losses'] = self.losses['train']\n # if self.otters['train']:\n # fetches['others'] = self.otters['train']\n fetches['summary_ops'] = self.summary_ops['train']\n\n for step in range(initial_step, steps): # TODO start from checkpoint steps\n # TODO clean code and optimize ops\n train_out = self.session.run(fetches=fetches)\n self.writer.add_summary(train_out['summary_ops'], global_step=step)\n if step % 50 == 0: # TODO every how many steps? Automate?\n val = self._validate(step)\n logging.info('Step {} -- Validation result: {}'.format(step, val))\n if step % 1000 == 0: # For now just another arbitrary number (how heavy is saving?)\n self._save(step)\n logging.info('Done training.')\n\n def _validate(self, global_step):\n \"\"\" Run network on validation set \"\"\"\n # Todo clean summaries and add example outputs\n fetches = {}\n fetches['losses'] = self.losses['validation']\n if self.otters['train']:\n fetches['others'] = self.otters['validation']\n fetches['summary_ops'] = self.summary_ops['validation']\n validation_out = self.session.run(fetches=fetches)\n self.writer.add_summary(validation_out['summary_ops'], global_step=global_step)\n del validation_out['summary_ops']\n return validation_out\n\n def _save(self, step):\n \"\"\"Save the model weights.\n\n Args:\n step (int): Training step.\n \"\"\"\n\n output_path = self.output_path + '/checkpoints/'\n if not os.path.isdir(output_path):\n os.makedirs(output_path)\n self.saver.save(self.session, save_path=output_path,global_step=step)\n\n def _restore(self):\n \"\"\"Restore the trained variables from the last stored checkpoint\n\n Returns:\n int: The training step when this model was saved.\n \"\"\"\n\n output_path = self.output_path + '/checkpoints/'\n checkpoint = tf.train.latest_checkpoint(output_path)\n if checkpoint:\n self.saver.restore(self.session, save_path=checkpoint)\n restored_step = int(checkpoint.split('-')[-1]) # Robust enough?\n return restored_step\n logging.info('Starting training from scratch.')\n return 0\n\n def evaluate(self):\n pass\n",
"<docstring token>\nimport os\nfrom absl import logging\nimport tensorflow as tf\nfrom tensorflow.contrib import tpu\nfrom .core_datamanager_estimator import DataManagerTPU as DataManager\n\n\nclass CoreModelTPU(object):\n\n def __init__(self, tf_session: tf.Session, learning_rate: float,\n training_dataset: DataManager=None, validation_dataset: DataManager\n =None, output_path: str='../outputs', use_tpu: str=False, tpu_name:\n list=[], data_dir='/mnt/iowa_bucket/cifar10/data/'):\n self.data_dir = data_dir\n if output_path[-1] == '/':\n output_path = output_path[:-1]\n self.output_path = output_path + '/' + self.__class__.__name__\n self.session = tf_session\n self.dataset = {}\n self.datasource = {}\n self.datasource['train'] = training_dataset\n self.datasource['validation'] = validation_dataset\n self._train_model = True if training_dataset is not None else False\n self._validate_model = (True if validation_dataset is not None else\n False)\n self.learning_rate = learning_rate\n self.use_tpu = use_tpu\n\n def define_model(self, data_source: DataManager, mode: str):\n \"\"\"Definition of the model to use. Do not modify the function here\n placeholder for the actual definition in `model/` (see example)\n\n Args:\n data_source (DataManager): Data manager object for the input data\n mode (str): Training and testing? # TODO Properly implement\n\n Raises:\n NotImplementedError: Model has to be implemented yet (in a separate instance in model/)\n \"\"\"\n raise NotImplementedError('No model defined.')\n\n def build_model(self):\n \"\"\" Build the model. \"\"\"\n if self.use_tpu:\n self._tpu_build()\n else:\n self._regular_build()\n\n def _tpu_build(self):\n \"\"\"Build with TPUEstimators for TPU usage\"\"\"\n\n def _define_model(features, labels, mode, params):\n data_source = features, labels\n self.outputs = {}\n self.losses = {}\n self.otters = {}\n outputs, losses, others = self.define_model(data_source, mode)\n if mode == tf.estimator.ModeKeys.EVAL:\n return tpu.TPUEstimatorSpec(mode=mode, loss=losses,\n eval_metrics=others)\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tpu.TPUEstimatorSpec(mode=mode, predictions=outputs)\n if mode == tf.estimator.ModeKeys.TRAIN:\n self.losses['train'] = losses\n self._build_optimizer(tpu_support=True)\n if not len(self.optimize_ops) == 1:\n logging.error(\n 'Implementati Error: More than one optimizer defined')\n logging.warning(' [*] Selecting only the first optimizer')\n return tpu.TPUEstimatorSpec(mode=mode, loss=losses[0],\n train_op=self.optimize_ops[0])\n tpu_name = ['node-1']\n tpu_iterations = 500\n tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(\n tpu_name)\n run_config = tf.contrib.tpu.RunConfig(model_dir=self.output_path,\n cluster=tpu_cluster_resolver, session_config=tf.ConfigProto(\n allow_soft_placement=True, log_device_placement=True),\n tpu_config=tpu.TPUConfig(tpu_iterations))\n self.estimator = tpu.TPUEstimator(model_fn=_define_model, use_tpu=\n True, train_batch_size=32 * 4, eval_batch_size=32 * 4, config=\n run_config, params={'data_dir': self.data_dir})\n\n def _regular_build(self):\n \"\"\"Normal build for CPU/GPU usage\"\"\"\n self.define_model = tf.make_template(self.define_model.__name__,\n self.define_model, create_scope_now_=True)\n self.outputs = {}\n self.losses = {}\n self.otters = {}\n\n def _build(mode):\n outputs, losses, others = self.define_model(data_source=self.\n dataset[mode], mode=mode)\n self.outputs[mode] = outputs\n self.losses[mode] = losses\n self.otters[mode] = others\n if mode 
== 'train':\n self._build_optimizer()\n self.summary_ops = {}\n if self._train_model:\n _build('train')\n summary = []\n for idx, loss in enumerate(self.losses['train']):\n summary.append(tf.summary.scalar(name='train/loss_{}'.\n format(idx), tensor=loss))\n for idx, element in enumerate(self.otters['train']):\n summary.append(tf.summary.scalar(name='train/otter_{}'.\n format(idx), tensor=element))\n self.summary_ops['train'] = tf.summary.merge(summary)\n if self._validate_model:\n _build('validation')\n summary = []\n for idx, loss in enumerate(self.losses['validation']):\n summary.append(tf.summary.scalar(name='val/loss_{}'.format(\n idx), tensor=loss))\n for idx, element in enumerate(self.otters['validation']):\n summary.append(tf.summary.scalar(name='val/otter_{}'.format\n (idx), tensor=element))\n self.summary_ops['validation'] = tf.summary.merge(summary)\n self.writer = tf.summary.FileWriter(self.output_path, self.session.\n graph)\n self.saver = tf.train.Saver()\n logging.info('Model construction complete.')\n\n def _build_optimizer(self, optimizer_to_use=tf.train.AdamOptimizer,\n tpu_support=False):\n \"\"\"Buids the optimizer(s) to minimize the loss(es) of the model.\n\n Args:\n optimizer_to_use (tf optimizer, optional): Defaults to tf.train.AdamOptimizer. Which\n optimizer to use.\n tpu_support (bool, optional): Defaults to False. If the optimizer has to support shard\n optimier, required for TPU usage.\n \"\"\"\n self.optimize_ops = []\n for loss in self.losses['train']:\n optimize_op = optimizer_to_use(learning_rate=self.learning_rate)\n if tpu_support:\n optimize_op = tpu.CrossShardOptimizer(optimize_op)\n optimize_op = optimize_op.minimize(loss=loss, global_step=tf.\n train.get_global_step())\n self.optimize_ops.append(optimize_op)\n logging.info('Optimizers built')\n\n def train(self, steps, input_fn=None):\n if self.use_tpu:\n self._tpu_train(steps, input_fn)\n else:\n self._regular_train(steps)\n\n def _tpu_train(self, steps, input_fn):\n self.estimator.train(input_fn=input_fn, max_steps=steps)\n logging.info('Es ist train?')\n self.estimator.evaluate(input_fn=self.dataset['validation'], steps=\n steps / 50)\n print('\\nTest set accuracy: {accuracy:0.3f}\\n'.format(**eval_result))\n\n def _regular_train(self, steps):\n self.session.run(tf.global_variables_initializer())\n initial_step = self._restore()\n fetches = {}\n fetches['optimize_ops'] = self.optimize_ops\n fetches['summary_ops'] = self.summary_ops['train']\n for step in range(initial_step, steps):\n train_out = self.session.run(fetches=fetches)\n self.writer.add_summary(train_out['summary_ops'], global_step=step)\n if step % 50 == 0:\n val = self._validate(step)\n logging.info('Step {} -- Validation result: {}'.format(step,\n val))\n if step % 1000 == 0:\n self._save(step)\n logging.info('Done training.')\n\n def _validate(self, global_step):\n \"\"\" Run network on validation set \"\"\"\n fetches = {}\n fetches['losses'] = self.losses['validation']\n if self.otters['train']:\n fetches['others'] = self.otters['validation']\n fetches['summary_ops'] = self.summary_ops['validation']\n validation_out = self.session.run(fetches=fetches)\n self.writer.add_summary(validation_out['summary_ops'], global_step=\n global_step)\n del validation_out['summary_ops']\n return validation_out\n\n def _save(self, step):\n \"\"\"Save the model weights.\n\n Args:\n step (int): Training step.\n \"\"\"\n output_path = self.output_path + '/checkpoints/'\n if not os.path.isdir(output_path):\n os.makedirs(output_path)\n 
self.saver.save(self.session, save_path=output_path, global_step=step)\n\n def _restore(self):\n \"\"\"Restore the trained variables from the last stored checkpoint\n\n Returns:\n int: The training step when this model was saved.\n \"\"\"\n output_path = self.output_path + '/checkpoints/'\n checkpoint = tf.train.latest_checkpoint(output_path)\n if checkpoint:\n self.saver.restore(self.session, save_path=checkpoint)\n restored_step = int(checkpoint.split('-')[-1])\n return restored_step\n logging.info('Starting training from scratch.')\n return 0\n\n def evaluate(self):\n pass\n",
"<docstring token>\n<import token>\n\n\nclass CoreModelTPU(object):\n\n def __init__(self, tf_session: tf.Session, learning_rate: float,\n training_dataset: DataManager=None, validation_dataset: DataManager\n =None, output_path: str='../outputs', use_tpu: str=False, tpu_name:\n list=[], data_dir='/mnt/iowa_bucket/cifar10/data/'):\n self.data_dir = data_dir\n if output_path[-1] == '/':\n output_path = output_path[:-1]\n self.output_path = output_path + '/' + self.__class__.__name__\n self.session = tf_session\n self.dataset = {}\n self.datasource = {}\n self.datasource['train'] = training_dataset\n self.datasource['validation'] = validation_dataset\n self._train_model = True if training_dataset is not None else False\n self._validate_model = (True if validation_dataset is not None else\n False)\n self.learning_rate = learning_rate\n self.use_tpu = use_tpu\n\n def define_model(self, data_source: DataManager, mode: str):\n \"\"\"Definition of the model to use. Do not modify the function here\n placeholder for the actual definition in `model/` (see example)\n\n Args:\n data_source (DataManager): Data manager object for the input data\n mode (str): Training and testing? # TODO Properly implement\n\n Raises:\n NotImplementedError: Model has to be implemented yet (in a separate instance in model/)\n \"\"\"\n raise NotImplementedError('No model defined.')\n\n def build_model(self):\n \"\"\" Build the model. \"\"\"\n if self.use_tpu:\n self._tpu_build()\n else:\n self._regular_build()\n\n def _tpu_build(self):\n \"\"\"Build with TPUEstimators for TPU usage\"\"\"\n\n def _define_model(features, labels, mode, params):\n data_source = features, labels\n self.outputs = {}\n self.losses = {}\n self.otters = {}\n outputs, losses, others = self.define_model(data_source, mode)\n if mode == tf.estimator.ModeKeys.EVAL:\n return tpu.TPUEstimatorSpec(mode=mode, loss=losses,\n eval_metrics=others)\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tpu.TPUEstimatorSpec(mode=mode, predictions=outputs)\n if mode == tf.estimator.ModeKeys.TRAIN:\n self.losses['train'] = losses\n self._build_optimizer(tpu_support=True)\n if not len(self.optimize_ops) == 1:\n logging.error(\n 'Implementati Error: More than one optimizer defined')\n logging.warning(' [*] Selecting only the first optimizer')\n return tpu.TPUEstimatorSpec(mode=mode, loss=losses[0],\n train_op=self.optimize_ops[0])\n tpu_name = ['node-1']\n tpu_iterations = 500\n tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(\n tpu_name)\n run_config = tf.contrib.tpu.RunConfig(model_dir=self.output_path,\n cluster=tpu_cluster_resolver, session_config=tf.ConfigProto(\n allow_soft_placement=True, log_device_placement=True),\n tpu_config=tpu.TPUConfig(tpu_iterations))\n self.estimator = tpu.TPUEstimator(model_fn=_define_model, use_tpu=\n True, train_batch_size=32 * 4, eval_batch_size=32 * 4, config=\n run_config, params={'data_dir': self.data_dir})\n\n def _regular_build(self):\n \"\"\"Normal build for CPU/GPU usage\"\"\"\n self.define_model = tf.make_template(self.define_model.__name__,\n self.define_model, create_scope_now_=True)\n self.outputs = {}\n self.losses = {}\n self.otters = {}\n\n def _build(mode):\n outputs, losses, others = self.define_model(data_source=self.\n dataset[mode], mode=mode)\n self.outputs[mode] = outputs\n self.losses[mode] = losses\n self.otters[mode] = others\n if mode == 'train':\n self._build_optimizer()\n self.summary_ops = {}\n if self._train_model:\n _build('train')\n summary = []\n for idx, loss in 
enumerate(self.losses['train']):\n summary.append(tf.summary.scalar(name='train/loss_{}'.\n format(idx), tensor=loss))\n for idx, element in enumerate(self.otters['train']):\n summary.append(tf.summary.scalar(name='train/otter_{}'.\n format(idx), tensor=element))\n self.summary_ops['train'] = tf.summary.merge(summary)\n if self._validate_model:\n _build('validation')\n summary = []\n for idx, loss in enumerate(self.losses['validation']):\n summary.append(tf.summary.scalar(name='val/loss_{}'.format(\n idx), tensor=loss))\n for idx, element in enumerate(self.otters['validation']):\n summary.append(tf.summary.scalar(name='val/otter_{}'.format\n (idx), tensor=element))\n self.summary_ops['validation'] = tf.summary.merge(summary)\n self.writer = tf.summary.FileWriter(self.output_path, self.session.\n graph)\n self.saver = tf.train.Saver()\n logging.info('Model construction complete.')\n\n def _build_optimizer(self, optimizer_to_use=tf.train.AdamOptimizer,\n tpu_support=False):\n \"\"\"Buids the optimizer(s) to minimize the loss(es) of the model.\n\n Args:\n optimizer_to_use (tf optimizer, optional): Defaults to tf.train.AdamOptimizer. Which\n optimizer to use.\n tpu_support (bool, optional): Defaults to False. If the optimizer has to support shard\n optimier, required for TPU usage.\n \"\"\"\n self.optimize_ops = []\n for loss in self.losses['train']:\n optimize_op = optimizer_to_use(learning_rate=self.learning_rate)\n if tpu_support:\n optimize_op = tpu.CrossShardOptimizer(optimize_op)\n optimize_op = optimize_op.minimize(loss=loss, global_step=tf.\n train.get_global_step())\n self.optimize_ops.append(optimize_op)\n logging.info('Optimizers built')\n\n def train(self, steps, input_fn=None):\n if self.use_tpu:\n self._tpu_train(steps, input_fn)\n else:\n self._regular_train(steps)\n\n def _tpu_train(self, steps, input_fn):\n self.estimator.train(input_fn=input_fn, max_steps=steps)\n logging.info('Es ist train?')\n self.estimator.evaluate(input_fn=self.dataset['validation'], steps=\n steps / 50)\n print('\\nTest set accuracy: {accuracy:0.3f}\\n'.format(**eval_result))\n\n def _regular_train(self, steps):\n self.session.run(tf.global_variables_initializer())\n initial_step = self._restore()\n fetches = {}\n fetches['optimize_ops'] = self.optimize_ops\n fetches['summary_ops'] = self.summary_ops['train']\n for step in range(initial_step, steps):\n train_out = self.session.run(fetches=fetches)\n self.writer.add_summary(train_out['summary_ops'], global_step=step)\n if step % 50 == 0:\n val = self._validate(step)\n logging.info('Step {} -- Validation result: {}'.format(step,\n val))\n if step % 1000 == 0:\n self._save(step)\n logging.info('Done training.')\n\n def _validate(self, global_step):\n \"\"\" Run network on validation set \"\"\"\n fetches = {}\n fetches['losses'] = self.losses['validation']\n if self.otters['train']:\n fetches['others'] = self.otters['validation']\n fetches['summary_ops'] = self.summary_ops['validation']\n validation_out = self.session.run(fetches=fetches)\n self.writer.add_summary(validation_out['summary_ops'], global_step=\n global_step)\n del validation_out['summary_ops']\n return validation_out\n\n def _save(self, step):\n \"\"\"Save the model weights.\n\n Args:\n step (int): Training step.\n \"\"\"\n output_path = self.output_path + '/checkpoints/'\n if not os.path.isdir(output_path):\n os.makedirs(output_path)\n self.saver.save(self.session, save_path=output_path, global_step=step)\n\n def _restore(self):\n \"\"\"Restore the trained variables from the last stored 
checkpoint\n\n Returns:\n int: The training step when this model was saved.\n \"\"\"\n output_path = self.output_path + '/checkpoints/'\n checkpoint = tf.train.latest_checkpoint(output_path)\n if checkpoint:\n self.saver.restore(self.session, save_path=checkpoint)\n restored_step = int(checkpoint.split('-')[-1])\n return restored_step\n logging.info('Starting training from scratch.')\n return 0\n\n def evaluate(self):\n pass\n",
"<docstring token>\n<import token>\n\n\nclass CoreModelTPU(object):\n\n def __init__(self, tf_session: tf.Session, learning_rate: float,\n training_dataset: DataManager=None, validation_dataset: DataManager\n =None, output_path: str='../outputs', use_tpu: str=False, tpu_name:\n list=[], data_dir='/mnt/iowa_bucket/cifar10/data/'):\n self.data_dir = data_dir\n if output_path[-1] == '/':\n output_path = output_path[:-1]\n self.output_path = output_path + '/' + self.__class__.__name__\n self.session = tf_session\n self.dataset = {}\n self.datasource = {}\n self.datasource['train'] = training_dataset\n self.datasource['validation'] = validation_dataset\n self._train_model = True if training_dataset is not None else False\n self._validate_model = (True if validation_dataset is not None else\n False)\n self.learning_rate = learning_rate\n self.use_tpu = use_tpu\n\n def define_model(self, data_source: DataManager, mode: str):\n \"\"\"Definition of the model to use. Do not modify the function here\n placeholder for the actual definition in `model/` (see example)\n\n Args:\n data_source (DataManager): Data manager object for the input data\n mode (str): Training and testing? # TODO Properly implement\n\n Raises:\n NotImplementedError: Model has to be implemented yet (in a separate instance in model/)\n \"\"\"\n raise NotImplementedError('No model defined.')\n\n def build_model(self):\n \"\"\" Build the model. \"\"\"\n if self.use_tpu:\n self._tpu_build()\n else:\n self._regular_build()\n\n def _tpu_build(self):\n \"\"\"Build with TPUEstimators for TPU usage\"\"\"\n\n def _define_model(features, labels, mode, params):\n data_source = features, labels\n self.outputs = {}\n self.losses = {}\n self.otters = {}\n outputs, losses, others = self.define_model(data_source, mode)\n if mode == tf.estimator.ModeKeys.EVAL:\n return tpu.TPUEstimatorSpec(mode=mode, loss=losses,\n eval_metrics=others)\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tpu.TPUEstimatorSpec(mode=mode, predictions=outputs)\n if mode == tf.estimator.ModeKeys.TRAIN:\n self.losses['train'] = losses\n self._build_optimizer(tpu_support=True)\n if not len(self.optimize_ops) == 1:\n logging.error(\n 'Implementati Error: More than one optimizer defined')\n logging.warning(' [*] Selecting only the first optimizer')\n return tpu.TPUEstimatorSpec(mode=mode, loss=losses[0],\n train_op=self.optimize_ops[0])\n tpu_name = ['node-1']\n tpu_iterations = 500\n tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(\n tpu_name)\n run_config = tf.contrib.tpu.RunConfig(model_dir=self.output_path,\n cluster=tpu_cluster_resolver, session_config=tf.ConfigProto(\n allow_soft_placement=True, log_device_placement=True),\n tpu_config=tpu.TPUConfig(tpu_iterations))\n self.estimator = tpu.TPUEstimator(model_fn=_define_model, use_tpu=\n True, train_batch_size=32 * 4, eval_batch_size=32 * 4, config=\n run_config, params={'data_dir': self.data_dir})\n\n def _regular_build(self):\n \"\"\"Normal build for CPU/GPU usage\"\"\"\n self.define_model = tf.make_template(self.define_model.__name__,\n self.define_model, create_scope_now_=True)\n self.outputs = {}\n self.losses = {}\n self.otters = {}\n\n def _build(mode):\n outputs, losses, others = self.define_model(data_source=self.\n dataset[mode], mode=mode)\n self.outputs[mode] = outputs\n self.losses[mode] = losses\n self.otters[mode] = others\n if mode == 'train':\n self._build_optimizer()\n self.summary_ops = {}\n if self._train_model:\n _build('train')\n summary = []\n for idx, loss in 
enumerate(self.losses['train']):\n summary.append(tf.summary.scalar(name='train/loss_{}'.\n format(idx), tensor=loss))\n for idx, element in enumerate(self.otters['train']):\n summary.append(tf.summary.scalar(name='train/otter_{}'.\n format(idx), tensor=element))\n self.summary_ops['train'] = tf.summary.merge(summary)\n if self._validate_model:\n _build('validation')\n summary = []\n for idx, loss in enumerate(self.losses['validation']):\n summary.append(tf.summary.scalar(name='val/loss_{}'.format(\n idx), tensor=loss))\n for idx, element in enumerate(self.otters['validation']):\n summary.append(tf.summary.scalar(name='val/otter_{}'.format\n (idx), tensor=element))\n self.summary_ops['validation'] = tf.summary.merge(summary)\n self.writer = tf.summary.FileWriter(self.output_path, self.session.\n graph)\n self.saver = tf.train.Saver()\n logging.info('Model construction complete.')\n\n def _build_optimizer(self, optimizer_to_use=tf.train.AdamOptimizer,\n tpu_support=False):\n \"\"\"Buids the optimizer(s) to minimize the loss(es) of the model.\n\n Args:\n optimizer_to_use (tf optimizer, optional): Defaults to tf.train.AdamOptimizer. Which\n optimizer to use.\n tpu_support (bool, optional): Defaults to False. If the optimizer has to support shard\n optimier, required for TPU usage.\n \"\"\"\n self.optimize_ops = []\n for loss in self.losses['train']:\n optimize_op = optimizer_to_use(learning_rate=self.learning_rate)\n if tpu_support:\n optimize_op = tpu.CrossShardOptimizer(optimize_op)\n optimize_op = optimize_op.minimize(loss=loss, global_step=tf.\n train.get_global_step())\n self.optimize_ops.append(optimize_op)\n logging.info('Optimizers built')\n\n def train(self, steps, input_fn=None):\n if self.use_tpu:\n self._tpu_train(steps, input_fn)\n else:\n self._regular_train(steps)\n <function token>\n\n def _regular_train(self, steps):\n self.session.run(tf.global_variables_initializer())\n initial_step = self._restore()\n fetches = {}\n fetches['optimize_ops'] = self.optimize_ops\n fetches['summary_ops'] = self.summary_ops['train']\n for step in range(initial_step, steps):\n train_out = self.session.run(fetches=fetches)\n self.writer.add_summary(train_out['summary_ops'], global_step=step)\n if step % 50 == 0:\n val = self._validate(step)\n logging.info('Step {} -- Validation result: {}'.format(step,\n val))\n if step % 1000 == 0:\n self._save(step)\n logging.info('Done training.')\n\n def _validate(self, global_step):\n \"\"\" Run network on validation set \"\"\"\n fetches = {}\n fetches['losses'] = self.losses['validation']\n if self.otters['train']:\n fetches['others'] = self.otters['validation']\n fetches['summary_ops'] = self.summary_ops['validation']\n validation_out = self.session.run(fetches=fetches)\n self.writer.add_summary(validation_out['summary_ops'], global_step=\n global_step)\n del validation_out['summary_ops']\n return validation_out\n\n def _save(self, step):\n \"\"\"Save the model weights.\n\n Args:\n step (int): Training step.\n \"\"\"\n output_path = self.output_path + '/checkpoints/'\n if not os.path.isdir(output_path):\n os.makedirs(output_path)\n self.saver.save(self.session, save_path=output_path, global_step=step)\n\n def _restore(self):\n \"\"\"Restore the trained variables from the last stored checkpoint\n\n Returns:\n int: The training step when this model was saved.\n \"\"\"\n output_path = self.output_path + '/checkpoints/'\n checkpoint = tf.train.latest_checkpoint(output_path)\n if checkpoint:\n self.saver.restore(self.session, save_path=checkpoint)\n 
restored_step = int(checkpoint.split('-')[-1])\n return restored_step\n logging.info('Starting training from scratch.')\n return 0\n\n def evaluate(self):\n pass\n",
"<docstring token>\n<import token>\n\n\nclass CoreModelTPU(object):\n\n def __init__(self, tf_session: tf.Session, learning_rate: float,\n training_dataset: DataManager=None, validation_dataset: DataManager\n =None, output_path: str='../outputs', use_tpu: str=False, tpu_name:\n list=[], data_dir='/mnt/iowa_bucket/cifar10/data/'):\n self.data_dir = data_dir\n if output_path[-1] == '/':\n output_path = output_path[:-1]\n self.output_path = output_path + '/' + self.__class__.__name__\n self.session = tf_session\n self.dataset = {}\n self.datasource = {}\n self.datasource['train'] = training_dataset\n self.datasource['validation'] = validation_dataset\n self._train_model = True if training_dataset is not None else False\n self._validate_model = (True if validation_dataset is not None else\n False)\n self.learning_rate = learning_rate\n self.use_tpu = use_tpu\n\n def define_model(self, data_source: DataManager, mode: str):\n \"\"\"Definition of the model to use. Do not modify the function here\n placeholder for the actual definition in `model/` (see example)\n\n Args:\n data_source (DataManager): Data manager object for the input data\n mode (str): Training and testing? # TODO Properly implement\n\n Raises:\n NotImplementedError: Model has to be implemented yet (in a separate instance in model/)\n \"\"\"\n raise NotImplementedError('No model defined.')\n\n def build_model(self):\n \"\"\" Build the model. \"\"\"\n if self.use_tpu:\n self._tpu_build()\n else:\n self._regular_build()\n\n def _tpu_build(self):\n \"\"\"Build with TPUEstimators for TPU usage\"\"\"\n\n def _define_model(features, labels, mode, params):\n data_source = features, labels\n self.outputs = {}\n self.losses = {}\n self.otters = {}\n outputs, losses, others = self.define_model(data_source, mode)\n if mode == tf.estimator.ModeKeys.EVAL:\n return tpu.TPUEstimatorSpec(mode=mode, loss=losses,\n eval_metrics=others)\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tpu.TPUEstimatorSpec(mode=mode, predictions=outputs)\n if mode == tf.estimator.ModeKeys.TRAIN:\n self.losses['train'] = losses\n self._build_optimizer(tpu_support=True)\n if not len(self.optimize_ops) == 1:\n logging.error(\n 'Implementati Error: More than one optimizer defined')\n logging.warning(' [*] Selecting only the first optimizer')\n return tpu.TPUEstimatorSpec(mode=mode, loss=losses[0],\n train_op=self.optimize_ops[0])\n tpu_name = ['node-1']\n tpu_iterations = 500\n tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(\n tpu_name)\n run_config = tf.contrib.tpu.RunConfig(model_dir=self.output_path,\n cluster=tpu_cluster_resolver, session_config=tf.ConfigProto(\n allow_soft_placement=True, log_device_placement=True),\n tpu_config=tpu.TPUConfig(tpu_iterations))\n self.estimator = tpu.TPUEstimator(model_fn=_define_model, use_tpu=\n True, train_batch_size=32 * 4, eval_batch_size=32 * 4, config=\n run_config, params={'data_dir': self.data_dir})\n\n def _regular_build(self):\n \"\"\"Normal build for CPU/GPU usage\"\"\"\n self.define_model = tf.make_template(self.define_model.__name__,\n self.define_model, create_scope_now_=True)\n self.outputs = {}\n self.losses = {}\n self.otters = {}\n\n def _build(mode):\n outputs, losses, others = self.define_model(data_source=self.\n dataset[mode], mode=mode)\n self.outputs[mode] = outputs\n self.losses[mode] = losses\n self.otters[mode] = others\n if mode == 'train':\n self._build_optimizer()\n self.summary_ops = {}\n if self._train_model:\n _build('train')\n summary = []\n for idx, loss in 
enumerate(self.losses['train']):\n summary.append(tf.summary.scalar(name='train/loss_{}'.\n format(idx), tensor=loss))\n for idx, element in enumerate(self.otters['train']):\n summary.append(tf.summary.scalar(name='train/otter_{}'.\n format(idx), tensor=element))\n self.summary_ops['train'] = tf.summary.merge(summary)\n if self._validate_model:\n _build('validation')\n summary = []\n for idx, loss in enumerate(self.losses['validation']):\n summary.append(tf.summary.scalar(name='val/loss_{}'.format(\n idx), tensor=loss))\n for idx, element in enumerate(self.otters['validation']):\n summary.append(tf.summary.scalar(name='val/otter_{}'.format\n (idx), tensor=element))\n self.summary_ops['validation'] = tf.summary.merge(summary)\n self.writer = tf.summary.FileWriter(self.output_path, self.session.\n graph)\n self.saver = tf.train.Saver()\n logging.info('Model construction complete.')\n\n def _build_optimizer(self, optimizer_to_use=tf.train.AdamOptimizer,\n tpu_support=False):\n \"\"\"Buids the optimizer(s) to minimize the loss(es) of the model.\n\n Args:\n optimizer_to_use (tf optimizer, optional): Defaults to tf.train.AdamOptimizer. Which\n optimizer to use.\n tpu_support (bool, optional): Defaults to False. If the optimizer has to support shard\n optimier, required for TPU usage.\n \"\"\"\n self.optimize_ops = []\n for loss in self.losses['train']:\n optimize_op = optimizer_to_use(learning_rate=self.learning_rate)\n if tpu_support:\n optimize_op = tpu.CrossShardOptimizer(optimize_op)\n optimize_op = optimize_op.minimize(loss=loss, global_step=tf.\n train.get_global_step())\n self.optimize_ops.append(optimize_op)\n logging.info('Optimizers built')\n\n def train(self, steps, input_fn=None):\n if self.use_tpu:\n self._tpu_train(steps, input_fn)\n else:\n self._regular_train(steps)\n <function token>\n\n def _regular_train(self, steps):\n self.session.run(tf.global_variables_initializer())\n initial_step = self._restore()\n fetches = {}\n fetches['optimize_ops'] = self.optimize_ops\n fetches['summary_ops'] = self.summary_ops['train']\n for step in range(initial_step, steps):\n train_out = self.session.run(fetches=fetches)\n self.writer.add_summary(train_out['summary_ops'], global_step=step)\n if step % 50 == 0:\n val = self._validate(step)\n logging.info('Step {} -- Validation result: {}'.format(step,\n val))\n if step % 1000 == 0:\n self._save(step)\n logging.info('Done training.')\n\n def _validate(self, global_step):\n \"\"\" Run network on validation set \"\"\"\n fetches = {}\n fetches['losses'] = self.losses['validation']\n if self.otters['train']:\n fetches['others'] = self.otters['validation']\n fetches['summary_ops'] = self.summary_ops['validation']\n validation_out = self.session.run(fetches=fetches)\n self.writer.add_summary(validation_out['summary_ops'], global_step=\n global_step)\n del validation_out['summary_ops']\n return validation_out\n\n def _save(self, step):\n \"\"\"Save the model weights.\n\n Args:\n step (int): Training step.\n \"\"\"\n output_path = self.output_path + '/checkpoints/'\n if not os.path.isdir(output_path):\n os.makedirs(output_path)\n self.saver.save(self.session, save_path=output_path, global_step=step)\n\n def _restore(self):\n \"\"\"Restore the trained variables from the last stored checkpoint\n\n Returns:\n int: The training step when this model was saved.\n \"\"\"\n output_path = self.output_path + '/checkpoints/'\n checkpoint = tf.train.latest_checkpoint(output_path)\n if checkpoint:\n self.saver.restore(self.session, save_path=checkpoint)\n 
restored_step = int(checkpoint.split('-')[-1])\n return restored_step\n logging.info('Starting training from scratch.')\n return 0\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass CoreModelTPU(object):\n\n def __init__(self, tf_session: tf.Session, learning_rate: float,\n training_dataset: DataManager=None, validation_dataset: DataManager\n =None, output_path: str='../outputs', use_tpu: str=False, tpu_name:\n list=[], data_dir='/mnt/iowa_bucket/cifar10/data/'):\n self.data_dir = data_dir\n if output_path[-1] == '/':\n output_path = output_path[:-1]\n self.output_path = output_path + '/' + self.__class__.__name__\n self.session = tf_session\n self.dataset = {}\n self.datasource = {}\n self.datasource['train'] = training_dataset\n self.datasource['validation'] = validation_dataset\n self._train_model = True if training_dataset is not None else False\n self._validate_model = (True if validation_dataset is not None else\n False)\n self.learning_rate = learning_rate\n self.use_tpu = use_tpu\n\n def define_model(self, data_source: DataManager, mode: str):\n \"\"\"Definition of the model to use. Do not modify the function here\n placeholder for the actual definition in `model/` (see example)\n\n Args:\n data_source (DataManager): Data manager object for the input data\n mode (str): Training and testing? # TODO Properly implement\n\n Raises:\n NotImplementedError: Model has to be implemented yet (in a separate instance in model/)\n \"\"\"\n raise NotImplementedError('No model defined.')\n\n def build_model(self):\n \"\"\" Build the model. \"\"\"\n if self.use_tpu:\n self._tpu_build()\n else:\n self._regular_build()\n\n def _tpu_build(self):\n \"\"\"Build with TPUEstimators for TPU usage\"\"\"\n\n def _define_model(features, labels, mode, params):\n data_source = features, labels\n self.outputs = {}\n self.losses = {}\n self.otters = {}\n outputs, losses, others = self.define_model(data_source, mode)\n if mode == tf.estimator.ModeKeys.EVAL:\n return tpu.TPUEstimatorSpec(mode=mode, loss=losses,\n eval_metrics=others)\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tpu.TPUEstimatorSpec(mode=mode, predictions=outputs)\n if mode == tf.estimator.ModeKeys.TRAIN:\n self.losses['train'] = losses\n self._build_optimizer(tpu_support=True)\n if not len(self.optimize_ops) == 1:\n logging.error(\n 'Implementati Error: More than one optimizer defined')\n logging.warning(' [*] Selecting only the first optimizer')\n return tpu.TPUEstimatorSpec(mode=mode, loss=losses[0],\n train_op=self.optimize_ops[0])\n tpu_name = ['node-1']\n tpu_iterations = 500\n tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(\n tpu_name)\n run_config = tf.contrib.tpu.RunConfig(model_dir=self.output_path,\n cluster=tpu_cluster_resolver, session_config=tf.ConfigProto(\n allow_soft_placement=True, log_device_placement=True),\n tpu_config=tpu.TPUConfig(tpu_iterations))\n self.estimator = tpu.TPUEstimator(model_fn=_define_model, use_tpu=\n True, train_batch_size=32 * 4, eval_batch_size=32 * 4, config=\n run_config, params={'data_dir': self.data_dir})\n <function token>\n\n def _build_optimizer(self, optimizer_to_use=tf.train.AdamOptimizer,\n tpu_support=False):\n \"\"\"Buids the optimizer(s) to minimize the loss(es) of the model.\n\n Args:\n optimizer_to_use (tf optimizer, optional): Defaults to tf.train.AdamOptimizer. Which\n optimizer to use.\n tpu_support (bool, optional): Defaults to False. 
If the optimizer has to support shard\n optimier, required for TPU usage.\n \"\"\"\n self.optimize_ops = []\n for loss in self.losses['train']:\n optimize_op = optimizer_to_use(learning_rate=self.learning_rate)\n if tpu_support:\n optimize_op = tpu.CrossShardOptimizer(optimize_op)\n optimize_op = optimize_op.minimize(loss=loss, global_step=tf.\n train.get_global_step())\n self.optimize_ops.append(optimize_op)\n logging.info('Optimizers built')\n\n def train(self, steps, input_fn=None):\n if self.use_tpu:\n self._tpu_train(steps, input_fn)\n else:\n self._regular_train(steps)\n <function token>\n\n def _regular_train(self, steps):\n self.session.run(tf.global_variables_initializer())\n initial_step = self._restore()\n fetches = {}\n fetches['optimize_ops'] = self.optimize_ops\n fetches['summary_ops'] = self.summary_ops['train']\n for step in range(initial_step, steps):\n train_out = self.session.run(fetches=fetches)\n self.writer.add_summary(train_out['summary_ops'], global_step=step)\n if step % 50 == 0:\n val = self._validate(step)\n logging.info('Step {} -- Validation result: {}'.format(step,\n val))\n if step % 1000 == 0:\n self._save(step)\n logging.info('Done training.')\n\n def _validate(self, global_step):\n \"\"\" Run network on validation set \"\"\"\n fetches = {}\n fetches['losses'] = self.losses['validation']\n if self.otters['train']:\n fetches['others'] = self.otters['validation']\n fetches['summary_ops'] = self.summary_ops['validation']\n validation_out = self.session.run(fetches=fetches)\n self.writer.add_summary(validation_out['summary_ops'], global_step=\n global_step)\n del validation_out['summary_ops']\n return validation_out\n\n def _save(self, step):\n \"\"\"Save the model weights.\n\n Args:\n step (int): Training step.\n \"\"\"\n output_path = self.output_path + '/checkpoints/'\n if not os.path.isdir(output_path):\n os.makedirs(output_path)\n self.saver.save(self.session, save_path=output_path, global_step=step)\n\n def _restore(self):\n \"\"\"Restore the trained variables from the last stored checkpoint\n\n Returns:\n int: The training step when this model was saved.\n \"\"\"\n output_path = self.output_path + '/checkpoints/'\n checkpoint = tf.train.latest_checkpoint(output_path)\n if checkpoint:\n self.saver.restore(self.session, save_path=checkpoint)\n restored_step = int(checkpoint.split('-')[-1])\n return restored_step\n logging.info('Starting training from scratch.')\n return 0\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass CoreModelTPU(object):\n\n def __init__(self, tf_session: tf.Session, learning_rate: float,\n training_dataset: DataManager=None, validation_dataset: DataManager\n =None, output_path: str='../outputs', use_tpu: str=False, tpu_name:\n list=[], data_dir='/mnt/iowa_bucket/cifar10/data/'):\n self.data_dir = data_dir\n if output_path[-1] == '/':\n output_path = output_path[:-1]\n self.output_path = output_path + '/' + self.__class__.__name__\n self.session = tf_session\n self.dataset = {}\n self.datasource = {}\n self.datasource['train'] = training_dataset\n self.datasource['validation'] = validation_dataset\n self._train_model = True if training_dataset is not None else False\n self._validate_model = (True if validation_dataset is not None else\n False)\n self.learning_rate = learning_rate\n self.use_tpu = use_tpu\n\n def define_model(self, data_source: DataManager, mode: str):\n \"\"\"Definition of the model to use. Do not modify the function here\n placeholder for the actual definition in `model/` (see example)\n\n Args:\n data_source (DataManager): Data manager object for the input data\n mode (str): Training and testing? # TODO Properly implement\n\n Raises:\n NotImplementedError: Model has to be implemented yet (in a separate instance in model/)\n \"\"\"\n raise NotImplementedError('No model defined.')\n\n def build_model(self):\n \"\"\" Build the model. \"\"\"\n if self.use_tpu:\n self._tpu_build()\n else:\n self._regular_build()\n\n def _tpu_build(self):\n \"\"\"Build with TPUEstimators for TPU usage\"\"\"\n\n def _define_model(features, labels, mode, params):\n data_source = features, labels\n self.outputs = {}\n self.losses = {}\n self.otters = {}\n outputs, losses, others = self.define_model(data_source, mode)\n if mode == tf.estimator.ModeKeys.EVAL:\n return tpu.TPUEstimatorSpec(mode=mode, loss=losses,\n eval_metrics=others)\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tpu.TPUEstimatorSpec(mode=mode, predictions=outputs)\n if mode == tf.estimator.ModeKeys.TRAIN:\n self.losses['train'] = losses\n self._build_optimizer(tpu_support=True)\n if not len(self.optimize_ops) == 1:\n logging.error(\n 'Implementati Error: More than one optimizer defined')\n logging.warning(' [*] Selecting only the first optimizer')\n return tpu.TPUEstimatorSpec(mode=mode, loss=losses[0],\n train_op=self.optimize_ops[0])\n tpu_name = ['node-1']\n tpu_iterations = 500\n tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(\n tpu_name)\n run_config = tf.contrib.tpu.RunConfig(model_dir=self.output_path,\n cluster=tpu_cluster_resolver, session_config=tf.ConfigProto(\n allow_soft_placement=True, log_device_placement=True),\n tpu_config=tpu.TPUConfig(tpu_iterations))\n self.estimator = tpu.TPUEstimator(model_fn=_define_model, use_tpu=\n True, train_batch_size=32 * 4, eval_batch_size=32 * 4, config=\n run_config, params={'data_dir': self.data_dir})\n <function token>\n\n def _build_optimizer(self, optimizer_to_use=tf.train.AdamOptimizer,\n tpu_support=False):\n \"\"\"Buids the optimizer(s) to minimize the loss(es) of the model.\n\n Args:\n optimizer_to_use (tf optimizer, optional): Defaults to tf.train.AdamOptimizer. Which\n optimizer to use.\n tpu_support (bool, optional): Defaults to False. 
If the optimizer has to support shard\n optimier, required for TPU usage.\n \"\"\"\n self.optimize_ops = []\n for loss in self.losses['train']:\n optimize_op = optimizer_to_use(learning_rate=self.learning_rate)\n if tpu_support:\n optimize_op = tpu.CrossShardOptimizer(optimize_op)\n optimize_op = optimize_op.minimize(loss=loss, global_step=tf.\n train.get_global_step())\n self.optimize_ops.append(optimize_op)\n logging.info('Optimizers built')\n\n def train(self, steps, input_fn=None):\n if self.use_tpu:\n self._tpu_train(steps, input_fn)\n else:\n self._regular_train(steps)\n <function token>\n <function token>\n\n def _validate(self, global_step):\n \"\"\" Run network on validation set \"\"\"\n fetches = {}\n fetches['losses'] = self.losses['validation']\n if self.otters['train']:\n fetches['others'] = self.otters['validation']\n fetches['summary_ops'] = self.summary_ops['validation']\n validation_out = self.session.run(fetches=fetches)\n self.writer.add_summary(validation_out['summary_ops'], global_step=\n global_step)\n del validation_out['summary_ops']\n return validation_out\n\n def _save(self, step):\n \"\"\"Save the model weights.\n\n Args:\n step (int): Training step.\n \"\"\"\n output_path = self.output_path + '/checkpoints/'\n if not os.path.isdir(output_path):\n os.makedirs(output_path)\n self.saver.save(self.session, save_path=output_path, global_step=step)\n\n def _restore(self):\n \"\"\"Restore the trained variables from the last stored checkpoint\n\n Returns:\n int: The training step when this model was saved.\n \"\"\"\n output_path = self.output_path + '/checkpoints/'\n checkpoint = tf.train.latest_checkpoint(output_path)\n if checkpoint:\n self.saver.restore(self.session, save_path=checkpoint)\n restored_step = int(checkpoint.split('-')[-1])\n return restored_step\n logging.info('Starting training from scratch.')\n return 0\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass CoreModelTPU(object):\n\n def __init__(self, tf_session: tf.Session, learning_rate: float,\n training_dataset: DataManager=None, validation_dataset: DataManager\n =None, output_path: str='../outputs', use_tpu: str=False, tpu_name:\n list=[], data_dir='/mnt/iowa_bucket/cifar10/data/'):\n self.data_dir = data_dir\n if output_path[-1] == '/':\n output_path = output_path[:-1]\n self.output_path = output_path + '/' + self.__class__.__name__\n self.session = tf_session\n self.dataset = {}\n self.datasource = {}\n self.datasource['train'] = training_dataset\n self.datasource['validation'] = validation_dataset\n self._train_model = True if training_dataset is not None else False\n self._validate_model = (True if validation_dataset is not None else\n False)\n self.learning_rate = learning_rate\n self.use_tpu = use_tpu\n\n def define_model(self, data_source: DataManager, mode: str):\n \"\"\"Definition of the model to use. Do not modify the function here\n placeholder for the actual definition in `model/` (see example)\n\n Args:\n data_source (DataManager): Data manager object for the input data\n mode (str): Training and testing? # TODO Properly implement\n\n Raises:\n NotImplementedError: Model has to be implemented yet (in a separate instance in model/)\n \"\"\"\n raise NotImplementedError('No model defined.')\n\n def build_model(self):\n \"\"\" Build the model. \"\"\"\n if self.use_tpu:\n self._tpu_build()\n else:\n self._regular_build()\n\n def _tpu_build(self):\n \"\"\"Build with TPUEstimators for TPU usage\"\"\"\n\n def _define_model(features, labels, mode, params):\n data_source = features, labels\n self.outputs = {}\n self.losses = {}\n self.otters = {}\n outputs, losses, others = self.define_model(data_source, mode)\n if mode == tf.estimator.ModeKeys.EVAL:\n return tpu.TPUEstimatorSpec(mode=mode, loss=losses,\n eval_metrics=others)\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tpu.TPUEstimatorSpec(mode=mode, predictions=outputs)\n if mode == tf.estimator.ModeKeys.TRAIN:\n self.losses['train'] = losses\n self._build_optimizer(tpu_support=True)\n if not len(self.optimize_ops) == 1:\n logging.error(\n 'Implementati Error: More than one optimizer defined')\n logging.warning(' [*] Selecting only the first optimizer')\n return tpu.TPUEstimatorSpec(mode=mode, loss=losses[0],\n train_op=self.optimize_ops[0])\n tpu_name = ['node-1']\n tpu_iterations = 500\n tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(\n tpu_name)\n run_config = tf.contrib.tpu.RunConfig(model_dir=self.output_path,\n cluster=tpu_cluster_resolver, session_config=tf.ConfigProto(\n allow_soft_placement=True, log_device_placement=True),\n tpu_config=tpu.TPUConfig(tpu_iterations))\n self.estimator = tpu.TPUEstimator(model_fn=_define_model, use_tpu=\n True, train_batch_size=32 * 4, eval_batch_size=32 * 4, config=\n run_config, params={'data_dir': self.data_dir})\n <function token>\n\n def _build_optimizer(self, optimizer_to_use=tf.train.AdamOptimizer,\n tpu_support=False):\n \"\"\"Buids the optimizer(s) to minimize the loss(es) of the model.\n\n Args:\n optimizer_to_use (tf optimizer, optional): Defaults to tf.train.AdamOptimizer. Which\n optimizer to use.\n tpu_support (bool, optional): Defaults to False. 
If the optimizer has to support shard\n optimier, required for TPU usage.\n \"\"\"\n self.optimize_ops = []\n for loss in self.losses['train']:\n optimize_op = optimizer_to_use(learning_rate=self.learning_rate)\n if tpu_support:\n optimize_op = tpu.CrossShardOptimizer(optimize_op)\n optimize_op = optimize_op.minimize(loss=loss, global_step=tf.\n train.get_global_step())\n self.optimize_ops.append(optimize_op)\n logging.info('Optimizers built')\n <function token>\n <function token>\n <function token>\n\n def _validate(self, global_step):\n \"\"\" Run network on validation set \"\"\"\n fetches = {}\n fetches['losses'] = self.losses['validation']\n if self.otters['train']:\n fetches['others'] = self.otters['validation']\n fetches['summary_ops'] = self.summary_ops['validation']\n validation_out = self.session.run(fetches=fetches)\n self.writer.add_summary(validation_out['summary_ops'], global_step=\n global_step)\n del validation_out['summary_ops']\n return validation_out\n\n def _save(self, step):\n \"\"\"Save the model weights.\n\n Args:\n step (int): Training step.\n \"\"\"\n output_path = self.output_path + '/checkpoints/'\n if not os.path.isdir(output_path):\n os.makedirs(output_path)\n self.saver.save(self.session, save_path=output_path, global_step=step)\n\n def _restore(self):\n \"\"\"Restore the trained variables from the last stored checkpoint\n\n Returns:\n int: The training step when this model was saved.\n \"\"\"\n output_path = self.output_path + '/checkpoints/'\n checkpoint = tf.train.latest_checkpoint(output_path)\n if checkpoint:\n self.saver.restore(self.session, save_path=checkpoint)\n restored_step = int(checkpoint.split('-')[-1])\n return restored_step\n logging.info('Starting training from scratch.')\n return 0\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass CoreModelTPU(object):\n\n def __init__(self, tf_session: tf.Session, learning_rate: float,\n training_dataset: DataManager=None, validation_dataset: DataManager\n =None, output_path: str='../outputs', use_tpu: str=False, tpu_name:\n list=[], data_dir='/mnt/iowa_bucket/cifar10/data/'):\n self.data_dir = data_dir\n if output_path[-1] == '/':\n output_path = output_path[:-1]\n self.output_path = output_path + '/' + self.__class__.__name__\n self.session = tf_session\n self.dataset = {}\n self.datasource = {}\n self.datasource['train'] = training_dataset\n self.datasource['validation'] = validation_dataset\n self._train_model = True if training_dataset is not None else False\n self._validate_model = (True if validation_dataset is not None else\n False)\n self.learning_rate = learning_rate\n self.use_tpu = use_tpu\n\n def define_model(self, data_source: DataManager, mode: str):\n \"\"\"Definition of the model to use. Do not modify the function here\n placeholder for the actual definition in `model/` (see example)\n\n Args:\n data_source (DataManager): Data manager object for the input data\n mode (str): Training and testing? # TODO Properly implement\n\n Raises:\n NotImplementedError: Model has to be implemented yet (in a separate instance in model/)\n \"\"\"\n raise NotImplementedError('No model defined.')\n\n def build_model(self):\n \"\"\" Build the model. \"\"\"\n if self.use_tpu:\n self._tpu_build()\n else:\n self._regular_build()\n <function token>\n <function token>\n\n def _build_optimizer(self, optimizer_to_use=tf.train.AdamOptimizer,\n tpu_support=False):\n \"\"\"Buids the optimizer(s) to minimize the loss(es) of the model.\n\n Args:\n optimizer_to_use (tf optimizer, optional): Defaults to tf.train.AdamOptimizer. Which\n optimizer to use.\n tpu_support (bool, optional): Defaults to False. 
If the optimizer has to support shard\n optimier, required for TPU usage.\n \"\"\"\n self.optimize_ops = []\n for loss in self.losses['train']:\n optimize_op = optimizer_to_use(learning_rate=self.learning_rate)\n if tpu_support:\n optimize_op = tpu.CrossShardOptimizer(optimize_op)\n optimize_op = optimize_op.minimize(loss=loss, global_step=tf.\n train.get_global_step())\n self.optimize_ops.append(optimize_op)\n logging.info('Optimizers built')\n <function token>\n <function token>\n <function token>\n\n def _validate(self, global_step):\n \"\"\" Run network on validation set \"\"\"\n fetches = {}\n fetches['losses'] = self.losses['validation']\n if self.otters['train']:\n fetches['others'] = self.otters['validation']\n fetches['summary_ops'] = self.summary_ops['validation']\n validation_out = self.session.run(fetches=fetches)\n self.writer.add_summary(validation_out['summary_ops'], global_step=\n global_step)\n del validation_out['summary_ops']\n return validation_out\n\n def _save(self, step):\n \"\"\"Save the model weights.\n\n Args:\n step (int): Training step.\n \"\"\"\n output_path = self.output_path + '/checkpoints/'\n if not os.path.isdir(output_path):\n os.makedirs(output_path)\n self.saver.save(self.session, save_path=output_path, global_step=step)\n\n def _restore(self):\n \"\"\"Restore the trained variables from the last stored checkpoint\n\n Returns:\n int: The training step when this model was saved.\n \"\"\"\n output_path = self.output_path + '/checkpoints/'\n checkpoint = tf.train.latest_checkpoint(output_path)\n if checkpoint:\n self.saver.restore(self.session, save_path=checkpoint)\n restored_step = int(checkpoint.split('-')[-1])\n return restored_step\n logging.info('Starting training from scratch.')\n return 0\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass CoreModelTPU(object):\n\n def __init__(self, tf_session: tf.Session, learning_rate: float,\n training_dataset: DataManager=None, validation_dataset: DataManager\n =None, output_path: str='../outputs', use_tpu: str=False, tpu_name:\n list=[], data_dir='/mnt/iowa_bucket/cifar10/data/'):\n self.data_dir = data_dir\n if output_path[-1] == '/':\n output_path = output_path[:-1]\n self.output_path = output_path + '/' + self.__class__.__name__\n self.session = tf_session\n self.dataset = {}\n self.datasource = {}\n self.datasource['train'] = training_dataset\n self.datasource['validation'] = validation_dataset\n self._train_model = True if training_dataset is not None else False\n self._validate_model = (True if validation_dataset is not None else\n False)\n self.learning_rate = learning_rate\n self.use_tpu = use_tpu\n\n def define_model(self, data_source: DataManager, mode: str):\n \"\"\"Definition of the model to use. Do not modify the function here\n placeholder for the actual definition in `model/` (see example)\n\n Args:\n data_source (DataManager): Data manager object for the input data\n mode (str): Training and testing? # TODO Properly implement\n\n Raises:\n NotImplementedError: Model has to be implemented yet (in a separate instance in model/)\n \"\"\"\n raise NotImplementedError('No model defined.')\n\n def build_model(self):\n \"\"\" Build the model. \"\"\"\n if self.use_tpu:\n self._tpu_build()\n else:\n self._regular_build()\n <function token>\n <function token>\n\n def _build_optimizer(self, optimizer_to_use=tf.train.AdamOptimizer,\n tpu_support=False):\n \"\"\"Buids the optimizer(s) to minimize the loss(es) of the model.\n\n Args:\n optimizer_to_use (tf optimizer, optional): Defaults to tf.train.AdamOptimizer. Which\n optimizer to use.\n tpu_support (bool, optional): Defaults to False. If the optimizer has to support shard\n optimier, required for TPU usage.\n \"\"\"\n self.optimize_ops = []\n for loss in self.losses['train']:\n optimize_op = optimizer_to_use(learning_rate=self.learning_rate)\n if tpu_support:\n optimize_op = tpu.CrossShardOptimizer(optimize_op)\n optimize_op = optimize_op.minimize(loss=loss, global_step=tf.\n train.get_global_step())\n self.optimize_ops.append(optimize_op)\n logging.info('Optimizers built')\n <function token>\n <function token>\n <function token>\n\n def _validate(self, global_step):\n \"\"\" Run network on validation set \"\"\"\n fetches = {}\n fetches['losses'] = self.losses['validation']\n if self.otters['train']:\n fetches['others'] = self.otters['validation']\n fetches['summary_ops'] = self.summary_ops['validation']\n validation_out = self.session.run(fetches=fetches)\n self.writer.add_summary(validation_out['summary_ops'], global_step=\n global_step)\n del validation_out['summary_ops']\n return validation_out\n\n def _save(self, step):\n \"\"\"Save the model weights.\n\n Args:\n step (int): Training step.\n \"\"\"\n output_path = self.output_path + '/checkpoints/'\n if not os.path.isdir(output_path):\n os.makedirs(output_path)\n self.saver.save(self.session, save_path=output_path, global_step=step)\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass CoreModelTPU(object):\n\n def __init__(self, tf_session: tf.Session, learning_rate: float,\n training_dataset: DataManager=None, validation_dataset: DataManager\n =None, output_path: str='../outputs', use_tpu: str=False, tpu_name:\n list=[], data_dir='/mnt/iowa_bucket/cifar10/data/'):\n self.data_dir = data_dir\n if output_path[-1] == '/':\n output_path = output_path[:-1]\n self.output_path = output_path + '/' + self.__class__.__name__\n self.session = tf_session\n self.dataset = {}\n self.datasource = {}\n self.datasource['train'] = training_dataset\n self.datasource['validation'] = validation_dataset\n self._train_model = True if training_dataset is not None else False\n self._validate_model = (True if validation_dataset is not None else\n False)\n self.learning_rate = learning_rate\n self.use_tpu = use_tpu\n\n def define_model(self, data_source: DataManager, mode: str):\n \"\"\"Definition of the model to use. Do not modify the function here\n placeholder for the actual definition in `model/` (see example)\n\n Args:\n data_source (DataManager): Data manager object for the input data\n mode (str): Training and testing? # TODO Properly implement\n\n Raises:\n NotImplementedError: Model has to be implemented yet (in a separate instance in model/)\n \"\"\"\n raise NotImplementedError('No model defined.')\n\n def build_model(self):\n \"\"\" Build the model. \"\"\"\n if self.use_tpu:\n self._tpu_build()\n else:\n self._regular_build()\n <function token>\n <function token>\n\n def _build_optimizer(self, optimizer_to_use=tf.train.AdamOptimizer,\n tpu_support=False):\n \"\"\"Buids the optimizer(s) to minimize the loss(es) of the model.\n\n Args:\n optimizer_to_use (tf optimizer, optional): Defaults to tf.train.AdamOptimizer. Which\n optimizer to use.\n tpu_support (bool, optional): Defaults to False. If the optimizer has to support shard\n optimier, required for TPU usage.\n \"\"\"\n self.optimize_ops = []\n for loss in self.losses['train']:\n optimize_op = optimizer_to_use(learning_rate=self.learning_rate)\n if tpu_support:\n optimize_op = tpu.CrossShardOptimizer(optimize_op)\n optimize_op = optimize_op.minimize(loss=loss, global_step=tf.\n train.get_global_step())\n self.optimize_ops.append(optimize_op)\n logging.info('Optimizers built')\n <function token>\n <function token>\n <function token>\n\n def _validate(self, global_step):\n \"\"\" Run network on validation set \"\"\"\n fetches = {}\n fetches['losses'] = self.losses['validation']\n if self.otters['train']:\n fetches['others'] = self.otters['validation']\n fetches['summary_ops'] = self.summary_ops['validation']\n validation_out = self.session.run(fetches=fetches)\n self.writer.add_summary(validation_out['summary_ops'], global_step=\n global_step)\n del validation_out['summary_ops']\n return validation_out\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass CoreModelTPU(object):\n <function token>\n\n def define_model(self, data_source: DataManager, mode: str):\n \"\"\"Definition of the model to use. Do not modify the function here\n placeholder for the actual definition in `model/` (see example)\n\n Args:\n data_source (DataManager): Data manager object for the input data\n mode (str): Training and testing? # TODO Properly implement\n\n Raises:\n NotImplementedError: Model has to be implemented yet (in a separate instance in model/)\n \"\"\"\n raise NotImplementedError('No model defined.')\n\n def build_model(self):\n \"\"\" Build the model. \"\"\"\n if self.use_tpu:\n self._tpu_build()\n else:\n self._regular_build()\n <function token>\n <function token>\n\n def _build_optimizer(self, optimizer_to_use=tf.train.AdamOptimizer,\n tpu_support=False):\n \"\"\"Buids the optimizer(s) to minimize the loss(es) of the model.\n\n Args:\n optimizer_to_use (tf optimizer, optional): Defaults to tf.train.AdamOptimizer. Which\n optimizer to use.\n tpu_support (bool, optional): Defaults to False. If the optimizer has to support shard\n optimier, required for TPU usage.\n \"\"\"\n self.optimize_ops = []\n for loss in self.losses['train']:\n optimize_op = optimizer_to_use(learning_rate=self.learning_rate)\n if tpu_support:\n optimize_op = tpu.CrossShardOptimizer(optimize_op)\n optimize_op = optimize_op.minimize(loss=loss, global_step=tf.\n train.get_global_step())\n self.optimize_ops.append(optimize_op)\n logging.info('Optimizers built')\n <function token>\n <function token>\n <function token>\n\n def _validate(self, global_step):\n \"\"\" Run network on validation set \"\"\"\n fetches = {}\n fetches['losses'] = self.losses['validation']\n if self.otters['train']:\n fetches['others'] = self.otters['validation']\n fetches['summary_ops'] = self.summary_ops['validation']\n validation_out = self.session.run(fetches=fetches)\n self.writer.add_summary(validation_out['summary_ops'], global_step=\n global_step)\n del validation_out['summary_ops']\n return validation_out\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass CoreModelTPU(object):\n <function token>\n\n def define_model(self, data_source: DataManager, mode: str):\n \"\"\"Definition of the model to use. Do not modify the function here\n placeholder for the actual definition in `model/` (see example)\n\n Args:\n data_source (DataManager): Data manager object for the input data\n mode (str): Training and testing? # TODO Properly implement\n\n Raises:\n NotImplementedError: Model has to be implemented yet (in a separate instance in model/)\n \"\"\"\n raise NotImplementedError('No model defined.')\n <function token>\n <function token>\n <function token>\n\n def _build_optimizer(self, optimizer_to_use=tf.train.AdamOptimizer,\n tpu_support=False):\n \"\"\"Buids the optimizer(s) to minimize the loss(es) of the model.\n\n Args:\n optimizer_to_use (tf optimizer, optional): Defaults to tf.train.AdamOptimizer. Which\n optimizer to use.\n tpu_support (bool, optional): Defaults to False. If the optimizer has to support shard\n optimier, required for TPU usage.\n \"\"\"\n self.optimize_ops = []\n for loss in self.losses['train']:\n optimize_op = optimizer_to_use(learning_rate=self.learning_rate)\n if tpu_support:\n optimize_op = tpu.CrossShardOptimizer(optimize_op)\n optimize_op = optimize_op.minimize(loss=loss, global_step=tf.\n train.get_global_step())\n self.optimize_ops.append(optimize_op)\n logging.info('Optimizers built')\n <function token>\n <function token>\n <function token>\n\n def _validate(self, global_step):\n \"\"\" Run network on validation set \"\"\"\n fetches = {}\n fetches['losses'] = self.losses['validation']\n if self.otters['train']:\n fetches['others'] = self.otters['validation']\n fetches['summary_ops'] = self.summary_ops['validation']\n validation_out = self.session.run(fetches=fetches)\n self.writer.add_summary(validation_out['summary_ops'], global_step=\n global_step)\n del validation_out['summary_ops']\n return validation_out\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass CoreModelTPU(object):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _build_optimizer(self, optimizer_to_use=tf.train.AdamOptimizer,\n tpu_support=False):\n \"\"\"Buids the optimizer(s) to minimize the loss(es) of the model.\n\n Args:\n optimizer_to_use (tf optimizer, optional): Defaults to tf.train.AdamOptimizer. Which\n optimizer to use.\n tpu_support (bool, optional): Defaults to False. If the optimizer has to support shard\n optimier, required for TPU usage.\n \"\"\"\n self.optimize_ops = []\n for loss in self.losses['train']:\n optimize_op = optimizer_to_use(learning_rate=self.learning_rate)\n if tpu_support:\n optimize_op = tpu.CrossShardOptimizer(optimize_op)\n optimize_op = optimize_op.minimize(loss=loss, global_step=tf.\n train.get_global_step())\n self.optimize_ops.append(optimize_op)\n logging.info('Optimizers built')\n <function token>\n <function token>\n <function token>\n\n def _validate(self, global_step):\n \"\"\" Run network on validation set \"\"\"\n fetches = {}\n fetches['losses'] = self.losses['validation']\n if self.otters['train']:\n fetches['others'] = self.otters['validation']\n fetches['summary_ops'] = self.summary_ops['validation']\n validation_out = self.session.run(fetches=fetches)\n self.writer.add_summary(validation_out['summary_ops'], global_step=\n global_step)\n del validation_out['summary_ops']\n return validation_out\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass CoreModelTPU(object):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _build_optimizer(self, optimizer_to_use=tf.train.AdamOptimizer,\n tpu_support=False):\n \"\"\"Buids the optimizer(s) to minimize the loss(es) of the model.\n\n Args:\n optimizer_to_use (tf optimizer, optional): Defaults to tf.train.AdamOptimizer. Which\n optimizer to use.\n tpu_support (bool, optional): Defaults to False. If the optimizer has to support shard\n optimier, required for TPU usage.\n \"\"\"\n self.optimize_ops = []\n for loss in self.losses['train']:\n optimize_op = optimizer_to_use(learning_rate=self.learning_rate)\n if tpu_support:\n optimize_op = tpu.CrossShardOptimizer(optimize_op)\n optimize_op = optimize_op.minimize(loss=loss, global_step=tf.\n train.get_global_step())\n self.optimize_ops.append(optimize_op)\n logging.info('Optimizers built')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass CoreModelTPU(object):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<class token>\n"
] | false |
98,515 |
8ebf9626b062ddbae4d014a90fc9adeb52f54a44
|
from collections import deque
# r: number of rows, c: number of columns
r, c = map(int, input().split())
# coordinates of the start cell (1-indexed)
sy, sx = map(int, input().split())
# coordinates of the goal cell (1-indexed)
gy, gx = map(int, input().split())
# the maze itself (read one line per row)
maze = [input() for _ in range(r)]
# the four possible moves
dx = [0, 1, 0, -1]
dy = [1, 0, -1, 0]
# deque used as a FIFO queue, seeded with (start y, start x, distance = 0)
q = deque([(sy-1, sx-1, 0)])
# memo array recording the shortest distance found so far for each cell
memo = [[0 for _ in range(c)] for __ in range(r)]
while q:
    # popleft() takes the front element; dist is the distance travelled so far
    y, x, dist = q.popleft()
    # try each of the four moves (the maze is assumed to be bordered by '#',
    # so no bounds check is needed)
    for x_, y_ in zip(dx, dy):
        # if the destination is '.' (walkable) and has not been visited yet
        if maze[y + y_][x + x_] == '.' and memo[y + y_][x + x_] == 0:
            # record the distance in the memo
            memo[y + y_][x + x_] = dist + 1
            # enqueue the new position together with its distance
            q.append((y + y_, x + x_, dist + 1))
print(memo[gy-1][gx-1])
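# A usage sketch with a hypothetical input (not part of the original problem data).
# The grid must be bordered by '#' walls, since the loop above never bounds-checks.
# For the input
#   5 4
#   2 2
#   4 3
#   ####
#   #..#
#   #..#
#   #..#
#   ####
# the BFS fills `memo` outward from the start cell (2, 2) and the script prints 3,
# the shortest number of steps from the start to the goal cell (4, 3).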
|
[
"from collections import deque\n\n# r: 行数 c: 列数\nr, c = map(int, input().split())\n\n# スタート地点の座標\nsy, sx = map(int, input().split())\n\n# ゴール地点の座標\ngy, gx = map(int, input().split())\n\n# 迷路自体(行数だけinputする)\nmaze = [input() for _ in range(r)]\n\n# 移動を表現\ndx = [0, 1, 0, -1]\ndy = [1, 0, -1, 0]\n\n\n# dequeはキューを生成する [スタートy, スタートx, 距離=0]\nq = deque([(sy-1, sx-1, 0)])\n\n# メモ用の配列を生成\nmemo = [[0 for _ in range(c)] for __ in range(r)]\n\nwhile q:\n # popleft() 先頭要素の取り出し, dist: 距離\n y, x, dist = q.popleft()\n\n # dx, dy で移動を表現する\n for x_, y_ in zip(dx, dy):\n # 移動した先が . (=進めるところ)で、かつ、メモになかったら\n if maze[y + y_][x + x_] == '.' and memo[y + y_][x + x_] == 0:\n # メモに距離を記録\n memo[y + y_][x + x_] = dist + 1\n # キューに移動後の座標とそこまでの距離を記録\n q.append((y + y_, x + x_, dist + 1))\n\n\nprint(memo[gy-1][gx-1])\n",
"from collections import deque\nr, c = map(int, input().split())\nsy, sx = map(int, input().split())\ngy, gx = map(int, input().split())\nmaze = [input() for _ in range(r)]\ndx = [0, 1, 0, -1]\ndy = [1, 0, -1, 0]\nq = deque([(sy - 1, sx - 1, 0)])\nmemo = [[(0) for _ in range(c)] for __ in range(r)]\nwhile q:\n y, x, dist = q.popleft()\n for x_, y_ in zip(dx, dy):\n if maze[y + y_][x + x_] == '.' and memo[y + y_][x + x_] == 0:\n memo[y + y_][x + x_] = dist + 1\n q.append((y + y_, x + x_, dist + 1))\nprint(memo[gy - 1][gx - 1])\n",
"<import token>\nr, c = map(int, input().split())\nsy, sx = map(int, input().split())\ngy, gx = map(int, input().split())\nmaze = [input() for _ in range(r)]\ndx = [0, 1, 0, -1]\ndy = [1, 0, -1, 0]\nq = deque([(sy - 1, sx - 1, 0)])\nmemo = [[(0) for _ in range(c)] for __ in range(r)]\nwhile q:\n y, x, dist = q.popleft()\n for x_, y_ in zip(dx, dy):\n if maze[y + y_][x + x_] == '.' and memo[y + y_][x + x_] == 0:\n memo[y + y_][x + x_] = dist + 1\n q.append((y + y_, x + x_, dist + 1))\nprint(memo[gy - 1][gx - 1])\n",
"<import token>\n<assignment token>\nwhile q:\n y, x, dist = q.popleft()\n for x_, y_ in zip(dx, dy):\n if maze[y + y_][x + x_] == '.' and memo[y + y_][x + x_] == 0:\n memo[y + y_][x + x_] = dist + 1\n q.append((y + y_, x + x_, dist + 1))\nprint(memo[gy - 1][gx - 1])\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
98,516 |
6cc7a196e7c9ad261510008b18188830c7ae7790
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 1 17:19:42 2018
@author: jmonroe
This script exists to count area of JJ from SEM images
"""
'''
Open CV's modified watershed algorithm:
Watershed: given a potential landscape one slowly increases a height threshold.
As different local minima are surpassed, area on either side is combined.
Continuing gives a segmentation hierarchy
CV's modification:
Do a bit of filtering for "definite signal" and "definite background"
Enables smoother watershedding (one "flooding event")
'''
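# A minimal sketch of that two-threshold idea (parameter values are assumptions;
# the full pipeline applied to real data is in main() below):
#   kernel     = np.ones((3, 3), np.uint8)
#   opening    = cv.morphologyEx(binary, cv.MORPH_OPEN, kernel, iterations=2)  # remove speckle noise
#   sure_bg    = cv.dilate(opening, kernel, iterations=3)                      # definite background
#   dist       = cv.distanceTransform(opening, cv.DIST_L2, 5)
#   _, sure_fg = cv.threshold(dist, 0.7 * dist.max(), 255, 0)                  # definite foreground
#   unknown    = cv.subtract(sure_bg, sure_fg)                                 # region the flood must decide
#   _, markers = cv.connectedComponents(np.uint8(sure_fg))
#   markers[unknown == 255] = 0                                                # 0 marks "unknown" for cv.watershed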
#import PIL
import os
import numpy as np
import matplotlib.pyplot as plt
import cv2 as cv
data_dir = "/Users/jmonroe/Projects/machineLearning/areaCounting/data/091718_paramp/"
data_name = "deviceC_JJ10,12.tif"
def my_data(show=False):
## load data
tif_data = cv.imread(data_dir+data_name)
## cut off SEM label
label_width = 64
tif_noLabel = tif_data[:-label_width, :]
left,right = 660, 760
up, down = 785, 810
single_squid = tif_data[up:down, left:left+40]
single_slice = single_squid[:, 20]
    ## cluster the slice's (row index, mean intensity) pairs into two groups
    ## (bright junction vs. dark background); k = 2 and the use of the mean
    ## channel intensity are assumptions, not fixed by the data
    slice_points = [(i, float(np.mean(px))) for i, px in enumerate(single_slice)]
    cluster_indices, dist = k_means(slice_points, 2)
if show:
#plt.figure()
#plt.imshow(tif_noLabel)
plt.figure()
plt.imshow(single_squid)
plt.figure()
plt.plot(single_slice)
return single_squid
##END my_data
def calc_distance(xy_tuple,centers_list):
## make a separate function for extensibility
dx = abs(centers_list[:, 0] - xy_tuple[0])
dy = abs(centers_list[:, 1] - xy_tuple[1])
#return np.sqrt( dx**2 + dy**2 )
return dx+dy
##END calc_distance
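# Usage sketch with made-up numbers: for centers_list = np.array([[0, 0], [3, 4]]),
# calc_distance((1, 1), centers_list) returns array([2, 5]) -- the L1 (Manhattan)
# distance from the point to each center, one entry per center.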
def k_means(data, k, show=False):
'''
data: (n, dim) array
k: number of clusters
'''
data = np.array(data)
if len(data.shape)>1:
dim = data.shape[1]
else:
dim = 1
numPoints = data.shape[0]
color_list = "rgbcmyk"
num_iter = 6
centers = np.zeros((k,dim))
cluster_counts = np.zeros(k)
cluster_ids_fullList = np.zeros((num_iter+1, numPoints) ,dtype="int")
distance_fullList = np.zeros(num_iter+1)
cluster_indices = np.random.randint(0,k,size=numPoints)
cluster_ids_fullList[0] = cluster_indices
## Initial calculations
# centers
for j,index in enumerate(cluster_indices):
centers[index] += data[j]
cluster_counts[index] += 1
for k_index in range(k):
centers[k_index] /= cluster_counts[k_index]
# figure
if show:
fig = plt.figure()
plt.title("Initial Assignment")
tot_dist = 0
for i,(x,y) in enumerate(data):
plt.scatter(x,y,color=color_list[cluster_indices[i]])
            tot_dist += min(calc_distance((x,y), centers))  # distance from this point to its nearest center
plt.scatter(centers[:, 0], centers[:, 1], marker='x',s=20,color='k')
distance_fullList[0] = tot_dist
## k-means assignment
for i in range(1,num_iter+1):
## reassign each point to nearest cluster
tot_distance = 0
#print(i, centers[0], centers[1])
for j,(x,y) in enumerate(data):
distances = calc_distance((x,y), centers)
new_cluster_index = np.argmin(distances)
cluster_indices[j] = new_cluster_index
tot_distance += min(distances)
##END data loop
## define clusters
cluster_list = [ [] for j in range(k)]
for j,index in enumerate(cluster_indices):
cluster_list[index].append(data[j])
for j in range(k):
if len(cluster_list[j]): centers[j] = np.mean(cluster_list[j],axis=0)
#print str(i)+ "\t", centers
#print cluster_list[1]
## track progress
distance_fullList[i] = tot_distance
cluster_ids_fullList[i] = cluster_indices
plt.show()
##END iterations
## iteration-wise plots
if show:
for i in range(1,num_iter+1):
plt.figure()
plt.title(str(i)+"th iteration")
for j,(x,y) in enumerate(data):
plt.scatter(x,y,color=color_list[cluster_ids_fullList[i][j]])
plt.scatter(centers[:,0], centers[:,1], marker='x',s=20,color='k')
    return cluster_ids_fullList, distance_fullList
##END k_means
def main():
    img = cv.imread("water_coins.jpg")  # sample image from the OpenCV watershed tutorial; overwritten just below
img = my_data(True)
gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
ret, thresh = cv.threshold(gray,0,255,cv.THRESH_BINARY_INV+cv.THRESH_OTSU)
## first estimation of noise
# noise removal
kernel = np.ones((3,3),np.uint8)
opening = cv.morphologyEx(thresh,cv.MORPH_OPEN,kernel, iterations = 2)
# sure background area
sure_bg = cv.dilate(opening,kernel,iterations=3)
# Finding sure foreground area
dist_transform = cv.distanceTransform(opening,cv.DIST_L2,5)
ret, sure_fg = cv.threshold(dist_transform,0.7*dist_transform.max(),255,0)
# Finding unknown region
sure_fg = np.uint8(sure_fg)
unknown = cv.subtract(sure_bg,sure_fg)
# Marker labelling
ret, markers = cv.connectedComponents(sure_fg)
# Add one to all labels so that sure background is not 0, but 1
markers = markers+1
# Now, mark the region of unknown with zero
markers[unknown==255] = 0
markers = cv.watershed(img,markers)
img[markers == -1] = [255,0,0]
plt.figure()
plt.imshow(markers)
plt.show()
##
if __name__ == '__main__':
my_data(True)
#main()
'''
kernel = np.ones((3,3),np.uint8)
opening = cv.morphologyEx(thresh,cv.MORPH_OPEN,kernel, iterations = 2)
# sure background area
sure_bg = cv.dilate(opening,kernel,iterations=3)
# Finding sure foreground area
dist_transform = cv.distanceTransform(opening,cv.DIST_L2,5)
ret, sure_fg = cv.threshold(dist_transform,0.7*dist_transform.max(),255,0)
# Finding unknown region
sure_fg = np.uint8(sure_fg)
unknown = cv.subtract(sure_bg,sure_fg)
'''
|
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 1 17:19:42 2018\n\n@author: jmonroe\n\nThis script exists to count area of JJ from SEM images\n\"\"\"\n\n\n'''\nOpen CV's modified watershed algorithm: \n Watershed: given a potential landscape one slowly increases a height threshold.\n As different local minima are surpassed, area on either side is combined.\n Continuing gives a segmentation hierarchy\n CV's modification:\n Do a bit of filtering for \"definite signal\" and \"definite background\"\n Enables smoother watershedding (one \"flooding event\")\n'''\n\n#import PIL\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport cv2 as cv\n\n\ndata_dir = \"/Users/jmonroe/Projects/machineLearning/areaCounting/data/091718_paramp/\"\ndata_name = \"deviceC_JJ10,12.tif\"\n\ndef my_data(show=False):\n ## load data\n tif_data = cv.imread(data_dir+data_name)\n\n ## cut off SEM label\n label_width = 64\n tif_noLabel = tif_data[:-label_width, :]\n \n left,right = 660, 760\n up, down = 785, 810\n single_squid = tif_data[up:down, left:left+40]\n single_slice = single_squid[:, 20]\n \n cluster_indices, dist = k_means()\n \n \n if show:\n #plt.figure()\n #plt.imshow(tif_noLabel)\n plt.figure()\n plt.imshow(single_squid)\n plt.figure()\n plt.plot(single_slice)\n return single_squid\n##END my_data\n \n\ndef calc_distance(xy_tuple,centers_list):\n ## make a separate function for extensibility\n dx = abs(centers_list[:, 0] - xy_tuple[0])\n dy = abs(centers_list[:, 1] - xy_tuple[1]) \n #return np.sqrt( dx**2 + dy**2 )\n return dx+dy \n##END calc_distance\n \n\ndef k_means(data, k, show=False):\n '''\n data: (n, dim) array \n k: number of clusters\n '''\n data = np.array(data)\n if len(data.shape)>1:\n dim = data.shape[1]\n else:\n dim = 1\n numPoints = data.shape[0]\n color_list = \"rgbcmyk\"\n \n num_iter = 6\n centers = np.zeros((k,dim))\n cluster_counts = np.zeros(k)\n cluster_ids_fullList = np.zeros((num_iter+1, numPoints) ,dtype=\"int\") \n distance_fullList = np.zeros(num_iter+1)\n \n cluster_indices = np.random.randint(0,k,size=numPoints)\n cluster_ids_fullList[0] = cluster_indices\n\n ## Initial calculations\n # centers\n for j,index in enumerate(cluster_indices):\n centers[index] += data[j]\n cluster_counts[index] += 1\n for k_index in range(k):\n centers[k_index] /= cluster_counts[k_index]\n\n # figure\n if show:\n fig = plt.figure()\n plt.title(\"Initial Assignment\")\n tot_dist = 0\n for i,(x,y) in enumerate(data):\n plt.scatter(x,y,color=color_list[cluster_indices[i]])\n tot_dist += min(calc_distance((0,0), centers))\n plt.scatter(centers[:, 0], centers[:, 1], marker='x',s=20,color='k')\n distance_fullList[0] = tot_dist\n \n ## k-means assignment\n for i in range(1,num_iter+1):\n ## reassign each point to nearest cluster \n tot_distance = 0 \n #print(i, centers[0], centers[1])\n for j,(x,y) in enumerate(data):\n distances = calc_distance((x,y), centers)\n new_cluster_index = np.argmin(distances)\n cluster_indices[j] = new_cluster_index\n tot_distance += min(distances)\n ##END data loop\n \n ## define clusters\n cluster_list = [ [] for j in range(k)]\n for j,index in enumerate(cluster_indices):\n cluster_list[index].append(data[j])\n for j in range(k):\n if len(cluster_list[j]): centers[j] = np.mean(cluster_list[j],axis=0)\n #print str(i)+ \"\\t\", centers\n #print cluster_list[1]\n ## track progress\n distance_fullList[i] = tot_distance\n cluster_ids_fullList[i] = cluster_indices\n plt.show()\n ##END iterations\n \n ## iteration-wise plots\n if show:\n 
for i in range(1,num_iter+1):\n plt.figure()\n plt.title(str(i)+\"th iteration\")\n for j,(x,y) in enumerate(data):\n plt.scatter(x,y,color=color_list[cluster_ids_fullList[i][j]])\n plt.scatter(centers[:,0], centers[:,1], marker='x',s=20,color='k')\n \n return cluster_ids_fullList, distance_fullList;\n##END k_means\n \n \ndef main():\n img = cv.imread(\"water_coins.jpg\")\n img = my_data(True)\n \n gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)\n ret, thresh = cv.threshold(gray,0,255,cv.THRESH_BINARY_INV+cv.THRESH_OTSU)\n \n ## first estimation of noise\n \n \n # noise removal\n kernel = np.ones((3,3),np.uint8)\n opening = cv.morphologyEx(thresh,cv.MORPH_OPEN,kernel, iterations = 2)\n # sure background area\n sure_bg = cv.dilate(opening,kernel,iterations=3)\n # Finding sure foreground area\n dist_transform = cv.distanceTransform(opening,cv.DIST_L2,5)\n ret, sure_fg = cv.threshold(dist_transform,0.7*dist_transform.max(),255,0)\n # Finding unknown region\n sure_fg = np.uint8(sure_fg)\n unknown = cv.subtract(sure_bg,sure_fg)\n\n \n # Marker labelling\n ret, markers = cv.connectedComponents(sure_fg)\n # Add one to all labels so that sure background is not 0, but 1\n markers = markers+1\n # Now, mark the region of unknown with zero\n markers[unknown==255] = 0\n \n markers = cv.watershed(img,markers)\n img[markers == -1] = [255,0,0]\n \n plt.figure()\n plt.imshow(markers)\n plt.show()\n##\n \nif __name__ == '__main__':\n my_data(True)\n #main()\n\n'''\nkernel = np.ones((3,3),np.uint8)\nopening = cv.morphologyEx(thresh,cv.MORPH_OPEN,kernel, iterations = 2)\n# sure background area\nsure_bg = cv.dilate(opening,kernel,iterations=3)\n# Finding sure foreground area\ndist_transform = cv.distanceTransform(opening,cv.DIST_L2,5)\nret, sure_fg = cv.threshold(dist_transform,0.7*dist_transform.max(),255,0)\n# Finding unknown region\nsure_fg = np.uint8(sure_fg)\nunknown = cv.subtract(sure_bg,sure_fg)\n'''\n\n\n\n\n\n\n\n\n",
"<docstring token>\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2 as cv\ndata_dir = (\n '/Users/jmonroe/Projects/machineLearning/areaCounting/data/091718_paramp/')\ndata_name = 'deviceC_JJ10,12.tif'\n\n\ndef my_data(show=False):\n tif_data = cv.imread(data_dir + data_name)\n label_width = 64\n tif_noLabel = tif_data[:-label_width, :]\n left, right = 660, 760\n up, down = 785, 810\n single_squid = tif_data[up:down, left:left + 40]\n single_slice = single_squid[:, 20]\n cluster_indices, dist = k_means()\n if show:\n plt.figure()\n plt.imshow(single_squid)\n plt.figure()\n plt.plot(single_slice)\n return single_squid\n\n\ndef calc_distance(xy_tuple, centers_list):\n dx = abs(centers_list[:, 0] - xy_tuple[0])\n dy = abs(centers_list[:, 1] - xy_tuple[1])\n return dx + dy\n\n\ndef k_means(data, k, show=False):\n \"\"\"\n data: (n, dim) array \n k: number of clusters\n \"\"\"\n data = np.array(data)\n if len(data.shape) > 1:\n dim = data.shape[1]\n else:\n dim = 1\n numPoints = data.shape[0]\n color_list = 'rgbcmyk'\n num_iter = 6\n centers = np.zeros((k, dim))\n cluster_counts = np.zeros(k)\n cluster_ids_fullList = np.zeros((num_iter + 1, numPoints), dtype='int')\n distance_fullList = np.zeros(num_iter + 1)\n cluster_indices = np.random.randint(0, k, size=numPoints)\n cluster_ids_fullList[0] = cluster_indices\n for j, index in enumerate(cluster_indices):\n centers[index] += data[j]\n cluster_counts[index] += 1\n for k_index in range(k):\n centers[k_index] /= cluster_counts[k_index]\n if show:\n fig = plt.figure()\n plt.title('Initial Assignment')\n tot_dist = 0\n for i, (x, y) in enumerate(data):\n plt.scatter(x, y, color=color_list[cluster_indices[i]])\n tot_dist += min(calc_distance((0, 0), centers))\n plt.scatter(centers[:, 0], centers[:, 1], marker='x', s=20, color='k')\n distance_fullList[0] = tot_dist\n for i in range(1, num_iter + 1):\n tot_distance = 0\n for j, (x, y) in enumerate(data):\n distances = calc_distance((x, y), centers)\n new_cluster_index = np.argmin(distances)\n cluster_indices[j] = new_cluster_index\n tot_distance += min(distances)\n cluster_list = [[] for j in range(k)]\n for j, index in enumerate(cluster_indices):\n cluster_list[index].append(data[j])\n for j in range(k):\n if len(cluster_list[j]):\n centers[j] = np.mean(cluster_list[j], axis=0)\n distance_fullList[i] = tot_distance\n cluster_ids_fullList[i] = cluster_indices\n plt.show()\n if show:\n for i in range(1, num_iter + 1):\n plt.figure()\n plt.title(str(i) + 'th iteration')\n for j, (x, y) in enumerate(data):\n plt.scatter(x, y, color=color_list[cluster_ids_fullList[i][j]])\n plt.scatter(centers[:, 0], centers[:, 1], marker='x', s=20,\n color='k')\n return cluster_ids_fullList, distance_fullList\n\n\ndef main():\n img = cv.imread('water_coins.jpg')\n img = my_data(True)\n gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n ret, thresh = cv.threshold(gray, 0, 255, cv.THRESH_BINARY_INV + cv.\n THRESH_OTSU)\n kernel = np.ones((3, 3), np.uint8)\n opening = cv.morphologyEx(thresh, cv.MORPH_OPEN, kernel, iterations=2)\n sure_bg = cv.dilate(opening, kernel, iterations=3)\n dist_transform = cv.distanceTransform(opening, cv.DIST_L2, 5)\n ret, sure_fg = cv.threshold(dist_transform, 0.7 * dist_transform.max(),\n 255, 0)\n sure_fg = np.uint8(sure_fg)\n unknown = cv.subtract(sure_bg, sure_fg)\n ret, markers = cv.connectedComponents(sure_fg)\n markers = markers + 1\n markers[unknown == 255] = 0\n markers = cv.watershed(img, markers)\n img[markers == -1] = [255, 0, 0]\n plt.figure()\n 
plt.imshow(markers)\n plt.show()\n\n\nif __name__ == '__main__':\n my_data(True)\n<docstring token>\n",
"<docstring token>\n<import token>\ndata_dir = (\n '/Users/jmonroe/Projects/machineLearning/areaCounting/data/091718_paramp/')\ndata_name = 'deviceC_JJ10,12.tif'\n\n\ndef my_data(show=False):\n tif_data = cv.imread(data_dir + data_name)\n label_width = 64\n tif_noLabel = tif_data[:-label_width, :]\n left, right = 660, 760\n up, down = 785, 810\n single_squid = tif_data[up:down, left:left + 40]\n single_slice = single_squid[:, 20]\n cluster_indices, dist = k_means()\n if show:\n plt.figure()\n plt.imshow(single_squid)\n plt.figure()\n plt.plot(single_slice)\n return single_squid\n\n\ndef calc_distance(xy_tuple, centers_list):\n dx = abs(centers_list[:, 0] - xy_tuple[0])\n dy = abs(centers_list[:, 1] - xy_tuple[1])\n return dx + dy\n\n\ndef k_means(data, k, show=False):\n \"\"\"\n data: (n, dim) array \n k: number of clusters\n \"\"\"\n data = np.array(data)\n if len(data.shape) > 1:\n dim = data.shape[1]\n else:\n dim = 1\n numPoints = data.shape[0]\n color_list = 'rgbcmyk'\n num_iter = 6\n centers = np.zeros((k, dim))\n cluster_counts = np.zeros(k)\n cluster_ids_fullList = np.zeros((num_iter + 1, numPoints), dtype='int')\n distance_fullList = np.zeros(num_iter + 1)\n cluster_indices = np.random.randint(0, k, size=numPoints)\n cluster_ids_fullList[0] = cluster_indices\n for j, index in enumerate(cluster_indices):\n centers[index] += data[j]\n cluster_counts[index] += 1\n for k_index in range(k):\n centers[k_index] /= cluster_counts[k_index]\n if show:\n fig = plt.figure()\n plt.title('Initial Assignment')\n tot_dist = 0\n for i, (x, y) in enumerate(data):\n plt.scatter(x, y, color=color_list[cluster_indices[i]])\n tot_dist += min(calc_distance((0, 0), centers))\n plt.scatter(centers[:, 0], centers[:, 1], marker='x', s=20, color='k')\n distance_fullList[0] = tot_dist\n for i in range(1, num_iter + 1):\n tot_distance = 0\n for j, (x, y) in enumerate(data):\n distances = calc_distance((x, y), centers)\n new_cluster_index = np.argmin(distances)\n cluster_indices[j] = new_cluster_index\n tot_distance += min(distances)\n cluster_list = [[] for j in range(k)]\n for j, index in enumerate(cluster_indices):\n cluster_list[index].append(data[j])\n for j in range(k):\n if len(cluster_list[j]):\n centers[j] = np.mean(cluster_list[j], axis=0)\n distance_fullList[i] = tot_distance\n cluster_ids_fullList[i] = cluster_indices\n plt.show()\n if show:\n for i in range(1, num_iter + 1):\n plt.figure()\n plt.title(str(i) + 'th iteration')\n for j, (x, y) in enumerate(data):\n plt.scatter(x, y, color=color_list[cluster_ids_fullList[i][j]])\n plt.scatter(centers[:, 0], centers[:, 1], marker='x', s=20,\n color='k')\n return cluster_ids_fullList, distance_fullList\n\n\ndef main():\n img = cv.imread('water_coins.jpg')\n img = my_data(True)\n gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n ret, thresh = cv.threshold(gray, 0, 255, cv.THRESH_BINARY_INV + cv.\n THRESH_OTSU)\n kernel = np.ones((3, 3), np.uint8)\n opening = cv.morphologyEx(thresh, cv.MORPH_OPEN, kernel, iterations=2)\n sure_bg = cv.dilate(opening, kernel, iterations=3)\n dist_transform = cv.distanceTransform(opening, cv.DIST_L2, 5)\n ret, sure_fg = cv.threshold(dist_transform, 0.7 * dist_transform.max(),\n 255, 0)\n sure_fg = np.uint8(sure_fg)\n unknown = cv.subtract(sure_bg, sure_fg)\n ret, markers = cv.connectedComponents(sure_fg)\n markers = markers + 1\n markers[unknown == 255] = 0\n markers = cv.watershed(img, markers)\n img[markers == -1] = [255, 0, 0]\n plt.figure()\n plt.imshow(markers)\n plt.show()\n\n\nif __name__ == '__main__':\n 
my_data(True)\n<docstring token>\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef my_data(show=False):\n tif_data = cv.imread(data_dir + data_name)\n label_width = 64\n tif_noLabel = tif_data[:-label_width, :]\n left, right = 660, 760\n up, down = 785, 810\n single_squid = tif_data[up:down, left:left + 40]\n single_slice = single_squid[:, 20]\n cluster_indices, dist = k_means()\n if show:\n plt.figure()\n plt.imshow(single_squid)\n plt.figure()\n plt.plot(single_slice)\n return single_squid\n\n\ndef calc_distance(xy_tuple, centers_list):\n dx = abs(centers_list[:, 0] - xy_tuple[0])\n dy = abs(centers_list[:, 1] - xy_tuple[1])\n return dx + dy\n\n\ndef k_means(data, k, show=False):\n \"\"\"\n data: (n, dim) array \n k: number of clusters\n \"\"\"\n data = np.array(data)\n if len(data.shape) > 1:\n dim = data.shape[1]\n else:\n dim = 1\n numPoints = data.shape[0]\n color_list = 'rgbcmyk'\n num_iter = 6\n centers = np.zeros((k, dim))\n cluster_counts = np.zeros(k)\n cluster_ids_fullList = np.zeros((num_iter + 1, numPoints), dtype='int')\n distance_fullList = np.zeros(num_iter + 1)\n cluster_indices = np.random.randint(0, k, size=numPoints)\n cluster_ids_fullList[0] = cluster_indices\n for j, index in enumerate(cluster_indices):\n centers[index] += data[j]\n cluster_counts[index] += 1\n for k_index in range(k):\n centers[k_index] /= cluster_counts[k_index]\n if show:\n fig = plt.figure()\n plt.title('Initial Assignment')\n tot_dist = 0\n for i, (x, y) in enumerate(data):\n plt.scatter(x, y, color=color_list[cluster_indices[i]])\n tot_dist += min(calc_distance((0, 0), centers))\n plt.scatter(centers[:, 0], centers[:, 1], marker='x', s=20, color='k')\n distance_fullList[0] = tot_dist\n for i in range(1, num_iter + 1):\n tot_distance = 0\n for j, (x, y) in enumerate(data):\n distances = calc_distance((x, y), centers)\n new_cluster_index = np.argmin(distances)\n cluster_indices[j] = new_cluster_index\n tot_distance += min(distances)\n cluster_list = [[] for j in range(k)]\n for j, index in enumerate(cluster_indices):\n cluster_list[index].append(data[j])\n for j in range(k):\n if len(cluster_list[j]):\n centers[j] = np.mean(cluster_list[j], axis=0)\n distance_fullList[i] = tot_distance\n cluster_ids_fullList[i] = cluster_indices\n plt.show()\n if show:\n for i in range(1, num_iter + 1):\n plt.figure()\n plt.title(str(i) + 'th iteration')\n for j, (x, y) in enumerate(data):\n plt.scatter(x, y, color=color_list[cluster_ids_fullList[i][j]])\n plt.scatter(centers[:, 0], centers[:, 1], marker='x', s=20,\n color='k')\n return cluster_ids_fullList, distance_fullList\n\n\ndef main():\n img = cv.imread('water_coins.jpg')\n img = my_data(True)\n gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n ret, thresh = cv.threshold(gray, 0, 255, cv.THRESH_BINARY_INV + cv.\n THRESH_OTSU)\n kernel = np.ones((3, 3), np.uint8)\n opening = cv.morphologyEx(thresh, cv.MORPH_OPEN, kernel, iterations=2)\n sure_bg = cv.dilate(opening, kernel, iterations=3)\n dist_transform = cv.distanceTransform(opening, cv.DIST_L2, 5)\n ret, sure_fg = cv.threshold(dist_transform, 0.7 * dist_transform.max(),\n 255, 0)\n sure_fg = np.uint8(sure_fg)\n unknown = cv.subtract(sure_bg, sure_fg)\n ret, markers = cv.connectedComponents(sure_fg)\n markers = markers + 1\n markers[unknown == 255] = 0\n markers = cv.watershed(img, markers)\n img[markers == -1] = [255, 0, 0]\n plt.figure()\n plt.imshow(markers)\n plt.show()\n\n\nif __name__ == '__main__':\n my_data(True)\n<docstring token>\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef my_data(show=False):\n tif_data = cv.imread(data_dir + data_name)\n label_width = 64\n tif_noLabel = tif_data[:-label_width, :]\n left, right = 660, 760\n up, down = 785, 810\n single_squid = tif_data[up:down, left:left + 40]\n single_slice = single_squid[:, 20]\n cluster_indices, dist = k_means()\n if show:\n plt.figure()\n plt.imshow(single_squid)\n plt.figure()\n plt.plot(single_slice)\n return single_squid\n\n\ndef calc_distance(xy_tuple, centers_list):\n dx = abs(centers_list[:, 0] - xy_tuple[0])\n dy = abs(centers_list[:, 1] - xy_tuple[1])\n return dx + dy\n\n\ndef k_means(data, k, show=False):\n \"\"\"\n data: (n, dim) array \n k: number of clusters\n \"\"\"\n data = np.array(data)\n if len(data.shape) > 1:\n dim = data.shape[1]\n else:\n dim = 1\n numPoints = data.shape[0]\n color_list = 'rgbcmyk'\n num_iter = 6\n centers = np.zeros((k, dim))\n cluster_counts = np.zeros(k)\n cluster_ids_fullList = np.zeros((num_iter + 1, numPoints), dtype='int')\n distance_fullList = np.zeros(num_iter + 1)\n cluster_indices = np.random.randint(0, k, size=numPoints)\n cluster_ids_fullList[0] = cluster_indices\n for j, index in enumerate(cluster_indices):\n centers[index] += data[j]\n cluster_counts[index] += 1\n for k_index in range(k):\n centers[k_index] /= cluster_counts[k_index]\n if show:\n fig = plt.figure()\n plt.title('Initial Assignment')\n tot_dist = 0\n for i, (x, y) in enumerate(data):\n plt.scatter(x, y, color=color_list[cluster_indices[i]])\n tot_dist += min(calc_distance((0, 0), centers))\n plt.scatter(centers[:, 0], centers[:, 1], marker='x', s=20, color='k')\n distance_fullList[0] = tot_dist\n for i in range(1, num_iter + 1):\n tot_distance = 0\n for j, (x, y) in enumerate(data):\n distances = calc_distance((x, y), centers)\n new_cluster_index = np.argmin(distances)\n cluster_indices[j] = new_cluster_index\n tot_distance += min(distances)\n cluster_list = [[] for j in range(k)]\n for j, index in enumerate(cluster_indices):\n cluster_list[index].append(data[j])\n for j in range(k):\n if len(cluster_list[j]):\n centers[j] = np.mean(cluster_list[j], axis=0)\n distance_fullList[i] = tot_distance\n cluster_ids_fullList[i] = cluster_indices\n plt.show()\n if show:\n for i in range(1, num_iter + 1):\n plt.figure()\n plt.title(str(i) + 'th iteration')\n for j, (x, y) in enumerate(data):\n plt.scatter(x, y, color=color_list[cluster_ids_fullList[i][j]])\n plt.scatter(centers[:, 0], centers[:, 1], marker='x', s=20,\n color='k')\n return cluster_ids_fullList, distance_fullList\n\n\ndef main():\n img = cv.imread('water_coins.jpg')\n img = my_data(True)\n gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n ret, thresh = cv.threshold(gray, 0, 255, cv.THRESH_BINARY_INV + cv.\n THRESH_OTSU)\n kernel = np.ones((3, 3), np.uint8)\n opening = cv.morphologyEx(thresh, cv.MORPH_OPEN, kernel, iterations=2)\n sure_bg = cv.dilate(opening, kernel, iterations=3)\n dist_transform = cv.distanceTransform(opening, cv.DIST_L2, 5)\n ret, sure_fg = cv.threshold(dist_transform, 0.7 * dist_transform.max(),\n 255, 0)\n sure_fg = np.uint8(sure_fg)\n unknown = cv.subtract(sure_bg, sure_fg)\n ret, markers = cv.connectedComponents(sure_fg)\n markers = markers + 1\n markers[unknown == 255] = 0\n markers = cv.watershed(img, markers)\n img[markers == -1] = [255, 0, 0]\n plt.figure()\n plt.imshow(markers)\n plt.show()\n\n\n<code token>\n<docstring token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n\n\ndef calc_distance(xy_tuple, centers_list):\n dx = abs(centers_list[:, 0] - xy_tuple[0])\n dy = abs(centers_list[:, 1] - xy_tuple[1])\n return dx + dy\n\n\ndef k_means(data, k, show=False):\n \"\"\"\n data: (n, dim) array \n k: number of clusters\n \"\"\"\n data = np.array(data)\n if len(data.shape) > 1:\n dim = data.shape[1]\n else:\n dim = 1\n numPoints = data.shape[0]\n color_list = 'rgbcmyk'\n num_iter = 6\n centers = np.zeros((k, dim))\n cluster_counts = np.zeros(k)\n cluster_ids_fullList = np.zeros((num_iter + 1, numPoints), dtype='int')\n distance_fullList = np.zeros(num_iter + 1)\n cluster_indices = np.random.randint(0, k, size=numPoints)\n cluster_ids_fullList[0] = cluster_indices\n for j, index in enumerate(cluster_indices):\n centers[index] += data[j]\n cluster_counts[index] += 1\n for k_index in range(k):\n centers[k_index] /= cluster_counts[k_index]\n if show:\n fig = plt.figure()\n plt.title('Initial Assignment')\n tot_dist = 0\n for i, (x, y) in enumerate(data):\n plt.scatter(x, y, color=color_list[cluster_indices[i]])\n tot_dist += min(calc_distance((0, 0), centers))\n plt.scatter(centers[:, 0], centers[:, 1], marker='x', s=20, color='k')\n distance_fullList[0] = tot_dist\n for i in range(1, num_iter + 1):\n tot_distance = 0\n for j, (x, y) in enumerate(data):\n distances = calc_distance((x, y), centers)\n new_cluster_index = np.argmin(distances)\n cluster_indices[j] = new_cluster_index\n tot_distance += min(distances)\n cluster_list = [[] for j in range(k)]\n for j, index in enumerate(cluster_indices):\n cluster_list[index].append(data[j])\n for j in range(k):\n if len(cluster_list[j]):\n centers[j] = np.mean(cluster_list[j], axis=0)\n distance_fullList[i] = tot_distance\n cluster_ids_fullList[i] = cluster_indices\n plt.show()\n if show:\n for i in range(1, num_iter + 1):\n plt.figure()\n plt.title(str(i) + 'th iteration')\n for j, (x, y) in enumerate(data):\n plt.scatter(x, y, color=color_list[cluster_ids_fullList[i][j]])\n plt.scatter(centers[:, 0], centers[:, 1], marker='x', s=20,\n color='k')\n return cluster_ids_fullList, distance_fullList\n\n\ndef main():\n img = cv.imread('water_coins.jpg')\n img = my_data(True)\n gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n ret, thresh = cv.threshold(gray, 0, 255, cv.THRESH_BINARY_INV + cv.\n THRESH_OTSU)\n kernel = np.ones((3, 3), np.uint8)\n opening = cv.morphologyEx(thresh, cv.MORPH_OPEN, kernel, iterations=2)\n sure_bg = cv.dilate(opening, kernel, iterations=3)\n dist_transform = cv.distanceTransform(opening, cv.DIST_L2, 5)\n ret, sure_fg = cv.threshold(dist_transform, 0.7 * dist_transform.max(),\n 255, 0)\n sure_fg = np.uint8(sure_fg)\n unknown = cv.subtract(sure_bg, sure_fg)\n ret, markers = cv.connectedComponents(sure_fg)\n markers = markers + 1\n markers[unknown == 255] = 0\n markers = cv.watershed(img, markers)\n img[markers == -1] = [255, 0, 0]\n plt.figure()\n plt.imshow(markers)\n plt.show()\n\n\n<code token>\n<docstring token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef k_means(data, k, show=False):\n \"\"\"\n data: (n, dim) array \n k: number of clusters\n \"\"\"\n data = np.array(data)\n if len(data.shape) > 1:\n dim = data.shape[1]\n else:\n dim = 1\n numPoints = data.shape[0]\n color_list = 'rgbcmyk'\n num_iter = 6\n centers = np.zeros((k, dim))\n cluster_counts = np.zeros(k)\n cluster_ids_fullList = np.zeros((num_iter + 1, numPoints), dtype='int')\n distance_fullList = np.zeros(num_iter + 1)\n cluster_indices = np.random.randint(0, k, size=numPoints)\n cluster_ids_fullList[0] = cluster_indices\n for j, index in enumerate(cluster_indices):\n centers[index] += data[j]\n cluster_counts[index] += 1\n for k_index in range(k):\n centers[k_index] /= cluster_counts[k_index]\n if show:\n fig = plt.figure()\n plt.title('Initial Assignment')\n tot_dist = 0\n for i, (x, y) in enumerate(data):\n plt.scatter(x, y, color=color_list[cluster_indices[i]])\n tot_dist += min(calc_distance((0, 0), centers))\n plt.scatter(centers[:, 0], centers[:, 1], marker='x', s=20, color='k')\n distance_fullList[0] = tot_dist\n for i in range(1, num_iter + 1):\n tot_distance = 0\n for j, (x, y) in enumerate(data):\n distances = calc_distance((x, y), centers)\n new_cluster_index = np.argmin(distances)\n cluster_indices[j] = new_cluster_index\n tot_distance += min(distances)\n cluster_list = [[] for j in range(k)]\n for j, index in enumerate(cluster_indices):\n cluster_list[index].append(data[j])\n for j in range(k):\n if len(cluster_list[j]):\n centers[j] = np.mean(cluster_list[j], axis=0)\n distance_fullList[i] = tot_distance\n cluster_ids_fullList[i] = cluster_indices\n plt.show()\n if show:\n for i in range(1, num_iter + 1):\n plt.figure()\n plt.title(str(i) + 'th iteration')\n for j, (x, y) in enumerate(data):\n plt.scatter(x, y, color=color_list[cluster_ids_fullList[i][j]])\n plt.scatter(centers[:, 0], centers[:, 1], marker='x', s=20,\n color='k')\n return cluster_ids_fullList, distance_fullList\n\n\ndef main():\n img = cv.imread('water_coins.jpg')\n img = my_data(True)\n gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n ret, thresh = cv.threshold(gray, 0, 255, cv.THRESH_BINARY_INV + cv.\n THRESH_OTSU)\n kernel = np.ones((3, 3), np.uint8)\n opening = cv.morphologyEx(thresh, cv.MORPH_OPEN, kernel, iterations=2)\n sure_bg = cv.dilate(opening, kernel, iterations=3)\n dist_transform = cv.distanceTransform(opening, cv.DIST_L2, 5)\n ret, sure_fg = cv.threshold(dist_transform, 0.7 * dist_transform.max(),\n 255, 0)\n sure_fg = np.uint8(sure_fg)\n unknown = cv.subtract(sure_bg, sure_fg)\n ret, markers = cv.connectedComponents(sure_fg)\n markers = markers + 1\n markers[unknown == 255] = 0\n markers = cv.watershed(img, markers)\n img[markers == -1] = [255, 0, 0]\n plt.figure()\n plt.imshow(markers)\n plt.show()\n\n\n<code token>\n<docstring token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef main():\n img = cv.imread('water_coins.jpg')\n img = my_data(True)\n gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n ret, thresh = cv.threshold(gray, 0, 255, cv.THRESH_BINARY_INV + cv.\n THRESH_OTSU)\n kernel = np.ones((3, 3), np.uint8)\n opening = cv.morphologyEx(thresh, cv.MORPH_OPEN, kernel, iterations=2)\n sure_bg = cv.dilate(opening, kernel, iterations=3)\n dist_transform = cv.distanceTransform(opening, cv.DIST_L2, 5)\n ret, sure_fg = cv.threshold(dist_transform, 0.7 * dist_transform.max(),\n 255, 0)\n sure_fg = np.uint8(sure_fg)\n unknown = cv.subtract(sure_bg, sure_fg)\n ret, markers = cv.connectedComponents(sure_fg)\n markers = markers + 1\n markers[unknown == 255] = 0\n markers = cv.watershed(img, markers)\n img[markers == -1] = [255, 0, 0]\n plt.figure()\n plt.imshow(markers)\n plt.show()\n\n\n<code token>\n<docstring token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<docstring token>\n"
] | false |
98,517 |
299e1ea141c81e75f1703b8b72f1740b1c01433c
|
s = input()
d = input()
print(s.count(d))
|
[
"s = input()\nd = input()\nprint(s.count(d))",
"s = input()\nd = input()\nprint(s.count(d))\n",
"<assignment token>\nprint(s.count(d))\n",
"<assignment token>\n<code token>\n"
] | false |
98,518 |
0e148a66f3df730427ff0b31aaa2f188bb21ab02
|
"""
This file helps in evaluating individual generation. This will be useful when you need to change the global variables
and see how the objective function value changes.
Do ensure you have the uncertainty.csv which will be obtained by running uncertainty_parameters.py
This is part of the uncertainty analysis
"""
from __future__ import division
import cea.inputlocator
import pandas as pd
import cea.optimization.distribution.network_opt_main as network_opt
import cea.optimization.master.evaluation as evaluation
import json
import csv
__author__ = "Sreepathi Bhargava Krishna"
__copyright__ = "Copyright 2017, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Sreepathi Bhargava Krishna"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "[email protected]"
__status__ = "Production"
def individual_evaluation(generation, level, size, variable_groups):
"""
:param generation: Generation of the optimization in which the individual evaluation is to be done
:type generation: int
:param level: Number of the uncertain scenario. For each scenario, the objectives are calculated
:type level: int
:param size: Total uncertain scenarios developed. See 'uncertainty.csv'
:type size: int
:return: Function saves the new objectives in a json file
"""
from cea.optimization.preprocessing.preprocessing_main import preproccessing
gv = cea.globalvar.GlobalVariables()
scenario_path = gv.scenario_reference
locator = cea.inputlocator.InputLocator(scenario_path)
weather_file = locator.get_default_weather()
with open(locator.get_optimization_master_results_folder() + "\CheckPoint_" + str(generation), "rb") as fp:
data = json.load(fp)
pop = data['population']
ntwList = data['networkList']
# # Uncertainty Part
row = []
with open(locator.get_uncertainty_results_folder() + '\uncertainty.csv') as f:
reader = csv.reader(f)
for i in xrange(size+1):
row.append(next(reader))
j = level + 1
for i in xrange(len(row[0])-1):
setattr(gv, row[0][i+1], float(row[j][i+1]))
total_demand = pd.read_csv(locator.get_total_demand())
building_names = total_demand.Name.values
gv.num_tot_buildings = total_demand.Name.count()
extra_costs, extra_CO2, extra_primary_energy, solarFeat = preproccessing(locator, total_demand,
building_names,
weather_file, gv)
network_features = network_opt.network_opt_main()
def objective_function(ind):
(costs, CO2, prim) = evaluation.evaluation_main(ind, building_names, locator, extra_costs, extra_CO2, extra_primary_energy, solarFeat,
network_features, gv)
# print (costs, CO2, prim)
return (costs, CO2, prim)
fitness = []
for i in xrange(gv.initialInd):
evaluation.checkNtw(pop[i], ntwList, locator, gv)
fitness.append(objective_function(pop[i]))
with open(locator.get_uncertainty_checkpoint(level), "wb") as fp:
cp = dict(population=pop, uncertainty_level=level, population_fitness=fitness)
json.dump(cp, fp)
if __name__ == '__main__':
generation = 50 # generation which you are interested in testing
size = 1000 # number of uncertain scenarios being tested
for i in xrange(size):
individual_evaluation(generation, i, size, variable_groups=('ECONOMIC',))
|
[
"\"\"\"\nThis file helps in evaluating individual generation. This will be useful when you need to change the global variables\nand see how the objective function value changes. \n\nDo ensure you have the uncertainty.csv which will be obtained by running uncertainty_parameters.py\n\nThis is part of the uncertainty analysis\n\"\"\"\nfrom __future__ import division\n\nimport cea.inputlocator\nimport pandas as pd\nimport cea.optimization.distribution.network_opt_main as network_opt\nimport cea.optimization.master.evaluation as evaluation\nimport json\nimport csv\n\n__author__ = \"Sreepathi Bhargava Krishna\"\n__copyright__ = \"Copyright 2017, Architecture and Building Systems - ETH Zurich\"\n__credits__ = [\"Sreepathi Bhargava Krishna\"]\n__license__ = \"MIT\"\n__version__ = \"0.1\"\n__maintainer__ = \"Daren Thomas\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n\ndef individual_evaluation(generation, level, size, variable_groups):\n \"\"\"\n :param generation: Generation of the optimization in which the individual evaluation is to be done\n :type generation: int\n :param level: Number of the uncertain scenario. For each scenario, the objectives are calculated\n :type level: int\n :param size: Total uncertain scenarios developed. See 'uncertainty.csv'\n :type size: int\n :return: Function saves the new objectives in a json file \n \"\"\"\n\n from cea.optimization.preprocessing.preprocessing_main import preproccessing\n gv = cea.globalvar.GlobalVariables()\n scenario_path = gv.scenario_reference\n locator = cea.inputlocator.InputLocator(scenario_path)\n weather_file = locator.get_default_weather()\n\n with open(locator.get_optimization_master_results_folder() + \"\\CheckPoint_\" + str(generation), \"rb\") as fp:\n data = json.load(fp)\n\n pop = data['population']\n ntwList = data['networkList']\n\n # # Uncertainty Part\n row = []\n with open(locator.get_uncertainty_results_folder() + '\\uncertainty.csv') as f:\n reader = csv.reader(f)\n for i in xrange(size+1):\n row.append(next(reader))\n\n j = level + 1\n\n for i in xrange(len(row[0])-1):\n setattr(gv, row[0][i+1], float(row[j][i+1]))\n\n total_demand = pd.read_csv(locator.get_total_demand())\n building_names = total_demand.Name.values\n gv.num_tot_buildings = total_demand.Name.count()\n\n extra_costs, extra_CO2, extra_primary_energy, solarFeat = preproccessing(locator, total_demand,\n building_names,\n weather_file, gv)\n network_features = network_opt.network_opt_main()\n def objective_function(ind):\n (costs, CO2, prim) = evaluation.evaluation_main(ind, building_names, locator, extra_costs, extra_CO2, extra_primary_energy, solarFeat,\n network_features, gv)\n # print (costs, CO2, prim)\n return (costs, CO2, prim)\n\n fitness = []\n for i in xrange(gv.initialInd):\n evaluation.checkNtw(pop[i], ntwList, locator, gv)\n fitness.append(objective_function(pop[i]))\n\n with open(locator.get_uncertainty_checkpoint(level), \"wb\") as fp:\n cp = dict(population=pop, uncertainty_level=level, population_fitness=fitness)\n json.dump(cp, fp)\n\nif __name__ == '__main__':\n generation = 50 # generation which you are interested in testing\n size = 1000 # number of uncertain scenarios being tested\n\n for i in xrange(size):\n individual_evaluation(generation, i, size, variable_groups=('ECONOMIC',))\n"
] | true |
98,519 |
23528971cdd0f8888d5e09066a0c364e65ac1440
|
# coding=utf-8
from django.contrib.auth.models import User
from rest_framework import serializers
from oauth2_provider.models import Application
from tasking.apps.accounts.models import UserProfile
from tasking.common.fields import PhoneField
# Not used for now
def validate_client(client_id, client_secret):
application = Application.objects.filter(client_id=client_id, client_secret=client_secret).first()
if not application:
raise serializers.ValidationError({'client': ['客户端认证失败']})
return application
class SignUpSerializer(serializers.Serializer):
email = serializers.EmailField(
required=True,
max_length=255,
allow_blank=True,
error_messages={
'invalid': '邮箱格式不正确',
'required': '请输入电子邮箱'
}
)
mobile = PhoneField(required=False, max_length=63, allow_blank=True)
password = serializers.CharField(required=True, max_length=255)
redirect_uri = serializers.CharField(required=False, max_length=255)
def validate(self, data):
email = data.get('email', None)
mobile = data.get('mobile', None)
if User.objects.filter(email=email).exists():
raise serializers.ValidationError({'email': '该邮箱已被注册'})
if mobile and UserProfile.objects.filter(mobile=mobile).exists():
raise serializers.ValidationError({'mobile': '该号码已被注册'})
return data
class UserProfileSerializer(serializers.ModelSerializer):
class Meta:
model = UserProfile
fields = ('mobile',)
class UserSerializer(serializers.ModelSerializer):
user_profile = UserProfileSerializer(read_only=True)
class Meta:
model = User
fields = ('id', 'user_profile', 'email',)
read_only_fields = ('id', 'user_profile', 'email',)
|
[
"# coding=utf-8\n\nfrom django.contrib.auth.models import User\nfrom rest_framework import serializers\nfrom oauth2_provider.models import Application\n\nfrom tasking.apps.accounts.models import UserProfile\nfrom tasking.common.fields import PhoneField\n\n\n# 暂时没用\ndef validate_client(client_id, client_secret):\n application = Application.objects.filter(client_id=client_id, client_secret=client_secret).first()\n if not application:\n raise serializers.ValidationError({'client': ['客户端认证失败']})\n return application\n\n\nclass SignUpSerializer(serializers.Serializer):\n email = serializers.EmailField(\n required=True,\n max_length=255,\n allow_blank=True,\n error_messages={\n 'invalid': '邮箱格式不正确',\n 'required': '请输入电子邮箱'\n }\n )\n mobile = PhoneField(required=False, max_length=63, allow_blank=True)\n password = serializers.CharField(required=True, max_length=255)\n redirect_uri = serializers.CharField(required=False, max_length=255)\n\n def validate(self, data):\n email = data.get('email', None)\n mobile = data.get('mobile', None)\n if User.objects.filter(email=email).exists():\n raise serializers.ValidationError({'email': '该邮箱已被注册'})\n if mobile and UserProfile.objects.filter(mobile=mobile).exists():\n raise serializers.ValidationError({'mobile': '该号码已被注册'})\n\n return data\n\n\nclass UserProfileSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = UserProfile\n fields = ('mobile',)\n\n\nclass UserSerializer(serializers.ModelSerializer):\n user_profile = UserProfileSerializer(read_only=True)\n\n class Meta:\n model = User\n fields = ('id', 'user_profile', 'email',)\n read_only_fields = ('id', 'user_profile', 'email',)\n",
"from django.contrib.auth.models import User\nfrom rest_framework import serializers\nfrom oauth2_provider.models import Application\nfrom tasking.apps.accounts.models import UserProfile\nfrom tasking.common.fields import PhoneField\n\n\ndef validate_client(client_id, client_secret):\n application = Application.objects.filter(client_id=client_id,\n client_secret=client_secret).first()\n if not application:\n raise serializers.ValidationError({'client': ['客户端认证失败']})\n return application\n\n\nclass SignUpSerializer(serializers.Serializer):\n email = serializers.EmailField(required=True, max_length=255,\n allow_blank=True, error_messages={'invalid': '邮箱格式不正确', 'required':\n '请输入电子邮箱'})\n mobile = PhoneField(required=False, max_length=63, allow_blank=True)\n password = serializers.CharField(required=True, max_length=255)\n redirect_uri = serializers.CharField(required=False, max_length=255)\n\n def validate(self, data):\n email = data.get('email', None)\n mobile = data.get('mobile', None)\n if User.objects.filter(email=email).exists():\n raise serializers.ValidationError({'email': '该邮箱已被注册'})\n if mobile and UserProfile.objects.filter(mobile=mobile).exists():\n raise serializers.ValidationError({'mobile': '该号码已被注册'})\n return data\n\n\nclass UserProfileSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = UserProfile\n fields = 'mobile',\n\n\nclass UserSerializer(serializers.ModelSerializer):\n user_profile = UserProfileSerializer(read_only=True)\n\n\n class Meta:\n model = User\n fields = 'id', 'user_profile', 'email'\n read_only_fields = 'id', 'user_profile', 'email'\n",
"<import token>\n\n\ndef validate_client(client_id, client_secret):\n application = Application.objects.filter(client_id=client_id,\n client_secret=client_secret).first()\n if not application:\n raise serializers.ValidationError({'client': ['客户端认证失败']})\n return application\n\n\nclass SignUpSerializer(serializers.Serializer):\n email = serializers.EmailField(required=True, max_length=255,\n allow_blank=True, error_messages={'invalid': '邮箱格式不正确', 'required':\n '请输入电子邮箱'})\n mobile = PhoneField(required=False, max_length=63, allow_blank=True)\n password = serializers.CharField(required=True, max_length=255)\n redirect_uri = serializers.CharField(required=False, max_length=255)\n\n def validate(self, data):\n email = data.get('email', None)\n mobile = data.get('mobile', None)\n if User.objects.filter(email=email).exists():\n raise serializers.ValidationError({'email': '该邮箱已被注册'})\n if mobile and UserProfile.objects.filter(mobile=mobile).exists():\n raise serializers.ValidationError({'mobile': '该号码已被注册'})\n return data\n\n\nclass UserProfileSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = UserProfile\n fields = 'mobile',\n\n\nclass UserSerializer(serializers.ModelSerializer):\n user_profile = UserProfileSerializer(read_only=True)\n\n\n class Meta:\n model = User\n fields = 'id', 'user_profile', 'email'\n read_only_fields = 'id', 'user_profile', 'email'\n",
"<import token>\n<function token>\n\n\nclass SignUpSerializer(serializers.Serializer):\n email = serializers.EmailField(required=True, max_length=255,\n allow_blank=True, error_messages={'invalid': '邮箱格式不正确', 'required':\n '请输入电子邮箱'})\n mobile = PhoneField(required=False, max_length=63, allow_blank=True)\n password = serializers.CharField(required=True, max_length=255)\n redirect_uri = serializers.CharField(required=False, max_length=255)\n\n def validate(self, data):\n email = data.get('email', None)\n mobile = data.get('mobile', None)\n if User.objects.filter(email=email).exists():\n raise serializers.ValidationError({'email': '该邮箱已被注册'})\n if mobile and UserProfile.objects.filter(mobile=mobile).exists():\n raise serializers.ValidationError({'mobile': '该号码已被注册'})\n return data\n\n\nclass UserProfileSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = UserProfile\n fields = 'mobile',\n\n\nclass UserSerializer(serializers.ModelSerializer):\n user_profile = UserProfileSerializer(read_only=True)\n\n\n class Meta:\n model = User\n fields = 'id', 'user_profile', 'email'\n read_only_fields = 'id', 'user_profile', 'email'\n",
"<import token>\n<function token>\n\n\nclass SignUpSerializer(serializers.Serializer):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def validate(self, data):\n email = data.get('email', None)\n mobile = data.get('mobile', None)\n if User.objects.filter(email=email).exists():\n raise serializers.ValidationError({'email': '该邮箱已被注册'})\n if mobile and UserProfile.objects.filter(mobile=mobile).exists():\n raise serializers.ValidationError({'mobile': '该号码已被注册'})\n return data\n\n\nclass UserProfileSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = UserProfile\n fields = 'mobile',\n\n\nclass UserSerializer(serializers.ModelSerializer):\n user_profile = UserProfileSerializer(read_only=True)\n\n\n class Meta:\n model = User\n fields = 'id', 'user_profile', 'email'\n read_only_fields = 'id', 'user_profile', 'email'\n",
"<import token>\n<function token>\n\n\nclass SignUpSerializer(serializers.Serializer):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n\nclass UserProfileSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = UserProfile\n fields = 'mobile',\n\n\nclass UserSerializer(serializers.ModelSerializer):\n user_profile = UserProfileSerializer(read_only=True)\n\n\n class Meta:\n model = User\n fields = 'id', 'user_profile', 'email'\n read_only_fields = 'id', 'user_profile', 'email'\n",
"<import token>\n<function token>\n<class token>\n\n\nclass UserProfileSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = UserProfile\n fields = 'mobile',\n\n\nclass UserSerializer(serializers.ModelSerializer):\n user_profile = UserProfileSerializer(read_only=True)\n\n\n class Meta:\n model = User\n fields = 'id', 'user_profile', 'email'\n read_only_fields = 'id', 'user_profile', 'email'\n",
"<import token>\n<function token>\n<class token>\n<class token>\n\n\nclass UserSerializer(serializers.ModelSerializer):\n user_profile = UserProfileSerializer(read_only=True)\n\n\n class Meta:\n model = User\n fields = 'id', 'user_profile', 'email'\n read_only_fields = 'id', 'user_profile', 'email'\n",
"<import token>\n<function token>\n<class token>\n<class token>\n\n\nclass UserSerializer(serializers.ModelSerializer):\n <assignment token>\n\n\n class Meta:\n model = User\n fields = 'id', 'user_profile', 'email'\n read_only_fields = 'id', 'user_profile', 'email'\n",
"<import token>\n<function token>\n<class token>\n<class token>\n<class token>\n"
] | false |
98,520 |
58d121fb716e23c23ad6354f4c24f15f140230ad
|
from funcionario import Funcionario
class Gerente(Funcionario):
def __init__(self, nome, endereco, cpf, data_nascimento, cargo, matricula, senha, salario, qtd_funcionarios):
super().__init__(nome, endereco, cpf, data_nascimento,
cargo, matricula, senha, salario)
self._qtd_funcionarios = qtd_funcionarios
def calcular_gratificacao(self):
return self._salario * 0.15
|
[
"from funcionario import Funcionario\n\n\nclass Gerente(Funcionario):\n\n def __init__(self, nome, endereco, cpf, data_nascimento, cargo, matricula, senha, salario, qtd_funcionarios):\n super().__init__(nome, endereco, cpf, data_nascimento,\n cargo, matricula, senha, salario)\n self._qtd_funcionarios = qtd_funcionarios\n\n def calcular_gratificacao(self):\n return self._salario * 0.15\n",
"from funcionario import Funcionario\n\n\nclass Gerente(Funcionario):\n\n def __init__(self, nome, endereco, cpf, data_nascimento, cargo,\n matricula, senha, salario, qtd_funcionarios):\n super().__init__(nome, endereco, cpf, data_nascimento, cargo,\n matricula, senha, salario)\n self._qtd_funcionarios = qtd_funcionarios\n\n def calcular_gratificacao(self):\n return self._salario * 0.15\n",
"<import token>\n\n\nclass Gerente(Funcionario):\n\n def __init__(self, nome, endereco, cpf, data_nascimento, cargo,\n matricula, senha, salario, qtd_funcionarios):\n super().__init__(nome, endereco, cpf, data_nascimento, cargo,\n matricula, senha, salario)\n self._qtd_funcionarios = qtd_funcionarios\n\n def calcular_gratificacao(self):\n return self._salario * 0.15\n",
"<import token>\n\n\nclass Gerente(Funcionario):\n <function token>\n\n def calcular_gratificacao(self):\n return self._salario * 0.15\n",
"<import token>\n\n\nclass Gerente(Funcionario):\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
98,521 |
38b57c36975040d76abfd05bbe23c2b23c3c4779
|
import requests
from lxml import html
import subprocess
import os
import re
from bs4 import BeautifulSoup
'''
ideas: change the course data structure to a list of dictionaries. Then each
dictionary has an 's_type' (section type: just using 'type' is ill-advised
because it is built in to Python) key-value pair and the TeX creator will know
how to treat them based on that type
course = [
{
s_type: 'title',
value: 'The Course Title',
},
{
s_type: 'section',
heading: 'A Basic Section',
value: 'Some text for the section.',
},
{
s_type: 'section_aims',
heading: 'Course Aims',
value: [
'An aim',
'Another aim',
'One more aim',
],
}
{
s_type: 'info',
heading: 'An Info Heading'
value: 'The info'
}
]
Problems this solves:
+ no need for weird counting in parsing the BSoup
Problems it causes:
- need to figure out how to determine what type data is as it is read in
'''
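# Hedged sketch of the idea above: a writer that dispatches on the proposed
# 's_type' field. Nothing here is wired into the current code; the handler
# branches and section shapes are assumptions taken from the example structure.
def _latex_from_sections(course_sections):
    """Render a list of {'s_type': ...} section dicts into a single TeX string."""
    parts = []
    for section in course_sections:
        s_type = section['s_type']
        if s_type == 'title':
            parts.append('\\subsection{' + section['value'] + '}\n')
        elif s_type == 'info':
            parts.append('\\textbf{' + section['heading'] + '} ' +
                         section['value'] + ' \\break\n')
        elif s_type == 'section_aims':
            items = ''.join('\\item ' + aim + '\n' for aim in section['value'])
            parts.append('\\subsubsection*{' + section['heading'] + '}\n' +
                         '\\begin{itemize}\n' + items + '\\end{itemize}\n')
        else:  # plain 'section'
            parts.append('\\subsubsection*{' + section['heading'] + '}\n' +
                         section['value'] + '\n')
    return ''.join(parts)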
def get_coursepage(code):
"""Given a course code, requests the correspnding course page"""
url = 'http://gla.ac.uk/coursecatalogue/course/?code=' + code
    print(url)
coursepage = requests.get(url)
return coursepage
def get_course_title_only(code):
"""Given a course code, requests the corresponding page and returns only
the name of that course. This is used to generate a list of courses which I
    have opted to exclude, given only their course codes
"""
coursepage = get_coursepage(code)
soup = BeautifulSoup(coursepage.content, 'lxml')
title = [soup.find_all('h1')[2].string][0]
return title
def new_dict(heading, value):
"""Creates a dictionary with a heading-value pair, which is the structure
of all the sections in the courses dictionary
"""
value = value.replace('%', '\%').replace('&', '\&').replace(u'\xa0', ' ')
    # Currently encoding is causing me problems - the quick fix below removes
# all the characters that have broken the code so far. This solution is not
# likely to work if more courses were added
value = value.replace(u'\u25a0', '\\break').replace(u'\u037e', ';')
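    # A more general (untested) alternative to the ad-hoc replacements above
    # would be a single map of LaTeX-special characters, e.g.:
    #   specials = {u'%': u'\\%', u'&': u'\\&', u'\xa0': u' ',
    #               u'\u25a0': u'\\break ', u'\u037e': u';'}
    #   value = u''.join(specials.get(ch, ch) for ch in value)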
return {
'heading': heading,
'value': value,
}
def get_info_list(info_string, course):
"""Each course page has a small info section at the beginning, which I had
    to extract and format differently from the main sections. This
    function constructs the dictionary entries for the course when given a
string with all the details required for the info section
TODO:
There's definitely a better way to do this.
"""
info_list = []
split_on_newline = info_string.split("\n")
for elem in split_on_newline:
split = elem.split(": ")
for s in split:
info_list.append(s)
info_list = info_list[1:-1]
info_tags = [
'session', 'school', 'credits', 'level', 'offered',
'visiting_students', 'erasmus_students',
]
i = 0
for info_tag in info_tags:
course[info_tag] = new_dict(
info_list[i] + ': ', info_list[i + 1])
i += 2
return course
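# Possible tidier pairing for the loop above (an untested sketch, noted against
# the TODO in get_info_list): walk the flattened list two items at a time
# instead of tracking an index by hand, e.g.
#   for tag, heading, value in zip(info_tags, info_list[0::2], info_list[1::2]):
#       course[tag] = new_dict(heading + ': ', value)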
def bsoup(coursepage):
"""Given a course page, takes the context and parses it to extract all the
useful information and construct a dictionary with the information
corresponding to assigned names ready to be written into the TeX file
TODO:
What a mess. There should be a way to do this by counting/incrementing.
"""
soup = BeautifulSoup(coursepage.content, 'lxml')
h1 = soup.find_all('h1')[2]
html = h1.find_next_siblings()
all_strings = [h1.string]
for div in html:
try:
text = div.get_text()
except:
text = div.string
if text is not None:
all_strings.append(text)
course = {'title': all_strings[0]}
course = get_info_list(all_strings[1], course)
course['description'] = new_dict(all_strings[2], all_strings[3])
course['timetable'] = new_dict(all_strings[4], all_strings[5])
course['requirements_of_entry'] = new_dict(all_strings[6], all_strings[7])
course['excluded_courses'] = new_dict(all_strings[8], all_strings[9])
course['co_requisites'] = new_dict(all_strings[10], all_strings[11])
course['assessment_weighting'] = new_dict(all_strings[12], all_strings[13])
course['aims'] = new_dict(all_strings[17], all_strings[18])
date = all_strings[14].split(': ')
course['assessment_date'] = new_dict(date[0] + ": ", date[1])
course['learning_outcomes'] = new_dict(all_strings[19], all_strings[20])
# TODO Doesn't parse Minimum Requirement for Award of Credit or
# Reassessment Options
return course
def create_not_included_list(codes):
"""Given a list of course codes, ge5t their corresponding titles and format
them in a bulletted TeX list. This is used to indicate in the abstract
which courses have been deliberately discluded from the document
"""
string = '\\begin{itemize}\n'
for code in codes:
title = get_course_title_only(code)
string += '\\item{' + title + '}\n'
string += '\\end{itemize}\n'
return string
def write_to_latex(codelist, unwanted_courses):
"""Constructs the entire TeX document from all the courses with key
document details (like author and table of contents)
"""
# TODO: investigate a way to add large amounts of text outside of the
# function
abstract01 = "I created this document to practice parsing html and using\
tools like Beautiful Soup which I've previously had little experience\
in. As a result, it's not perfect.\\newline\
It is also a slightly condensed all-in-one-place look at a selection\
of courses that are available for fourth year computer science\
students at the University of Glasgow. For the purposes of clarity I\
have removed several courses from this selection. The following\
courses have been omitted:"
abstract02 = "For more insight into the project, to report issues or to\
inspect the code, have a look at the GitHub:\
\\url{https://github.com/IS0metric/course-ripper}"
unincluded = create_not_included_list(unwanted_courses)
with open('courses.tex', 'w') as f:
# TODO Try and move all this to a separate function?
# TODO: Check if it's more efficient to write a single, massive string
# to file
f.write('\\documentclass{hitec}\n')
f.write('\\usepackage[document]{ragged2e}\n')
f.write('\\usepackage{url}\n')
f.write('\\usepackage{hyperref}\n')
f.write('\\setcounter{tocdepth}{4}\n')
f.write('\\begin{document}\n')
f.write('\\title{Fourth Year (2016-17) Courses}\n')
f.write('\\author{Jack Parkinson}\n')
f.write('\\date{August 2016}\n')
f.write('\\maketitle\n')
f.write('\\abstract{' + abstract01 + unincluded + abstract02 + '}\n')
f.write('\\newpage\n\n')
f.write('\\tableofcontents\n')
f.write('\\newpage\n\n')
# TODO: Look into alternatives to the three lists
all_courses = []
sem1_courses = []
sem2_courses = []
for code in codelist:
course = bsoup(get_coursepage(code))
if course['offered']['value'] == 'Runs Throughout Semesters 1 and 2':
all_courses.append(course)
elif "1" in course['offered']['value']:
sem1_courses.append(course)
elif "2" in course['offered']['value']:
sem2_courses.append(course)
f.write('\\section{Semester 1 and 2 Courses}\n\n')
for course in all_courses:
f.write(latex_course(course))
f.write('\\section{Semester 1 Only Courses}\n\n')
for course in sem1_courses:
f.write(latex_course(course))
f.write('\\section{Semester 2 Only Courses}\n\n')
for course in sem2_courses:
f.write(latex_course(course))
f.write('\\end{document}')
return None
def latex_info(info):
"""Provides the special treatment that the info section requires"""
return '\\textbf{' + info['heading'] + '} ' + info['value'] + ' \\break\n'
def latex_subsection(section):
"""Creates a TeX formatted string for a given subsubsection"""
string = '\\subsubsection*{' + section['heading'] + '}\n'
string += section['value'] + '\n'
return string
def latex_course(course):
"""Creates a TeX formatted string for a course"""
basic_info_list = [
'session', 'school', 'credits', 'level', 'offered',
'visiting_students', 'erasmus_students'
]
generic_subsection_list = [
'description', 'timetable', 'requirements_of_entry',
'excluded_courses', 'co_requisites', 'assessment_weighting'
]
string = '\\subsection{' + course["title"] + '}\n'
for info in basic_info_list:
string += latex_info(course[info])
for subsection in generic_subsection_list:
string += latex_subsection(course[subsection])
string += '\\break \\textbf{' + course['assessment_date'][
'heading'] + '}' + course['assessment_date']['value'] + '\n'
string += latex_subsection(course['aims'])
string += '\\subsubsection*{' + \
course['learning_outcomes']['heading'] + '}\n'
outcome_list = re.split(
'\d+\. ', course['learning_outcomes']['value'])
string += outcome_list[0] + '\n'
string += '\\begin{enumerate}\n'
for i in outcome_list[1:-1]:
string += '\\item ' + i + '\n'
string += '\\end{enumerate}\n'
return string
def create_tex(unwanted_courses, wanted_courses=None):
"""Creates the TeX document from the Computer Science Course Catalog"""
page = requests.get(
'http://gla.ac.uk/coursecatalogue/courselist/' +
'?code=REG30200000&name=School+of+Computing+Science')
tree = html.fromstring(page.content)
spans = tree.xpath('//span/text()')
codes = []
if wanted_courses is None:
for s in spans:
if s[0:4] == "COMP" and s[7] == '4' and s not in unwanted_courses:
codes.append(s)
else:
for s in wanted_courses:
codes.append(s)
write_to_latex(codes, unwanted_courses)
return None
def pdflatex(unwanted_courses):
"""Generates a TeX document and then runs the pdflatex command to create a
PDF from the TeX
"""
create_tex(unwanted_courses)
cmd = ['pdflatex', '-interaction', 'nonstopmode', 'courses.tex']
proc = subprocess.Popen(cmd)
proc.communicate()
return None
if __name__ == "__main__":
# List of deliberately excluded courses
unwanted_courses = [
'COMPSCI4010', 'COMPSCI4009', 'COMPSCI4013', 'COMPSCI4024P',
'COMPSCI4014', 'COMPSCI4012', 'COMPSCI4011', 'COMPSCI4038',
'COMPSCI4015', 'COMPSCI4016', 'COMPSCI4046', 'COMPSCI4047',
        'COMPSCI4044', 'COMPSCI4070',
]
create_tex(unwanted_courses)
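    # To also compile the PDF in the same run, the pdflatex() helper defined
    # above could be called instead of create_tex() (sketch, not enabled here):
    #   pdflatex(unwanted_courses)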
|
[
"import requests\nfrom lxml import html\nimport subprocess\nimport os\nimport re\nfrom bs4 import BeautifulSoup\n\n\n'''\nideas: change the course data structure toa a list of dictionaries. Then each\ndictionary has a 's_type' (section type: just using 'type' is illadvised\nbecause it is built in to Python) key-value pair and the TeX creator will know\nhow to treat them based on that type\n\ncourse = [\n {\n s_type: 'title',\n value: 'The Course Title',\n },\n {\n s_type: 'section',\n heading: 'A Basic Section',\n value: 'Some text for the section.',\n },\n {\n s_type: 'section_aims',\n heading: 'Course Aims',\n value: [\n 'An aim',\n 'Another aim',\n 'One more aim',\n ],\n }\n {\n s_type: 'info',\n heading: 'An Info Heading'\n value: 'The info'\n }\n]\nProblems this solves:\n + no need for weird counting in parsing the BSoup\nProblems it causes:\n - need to figure out how to determine what type data is as it is read in\n'''\n\n\ndef get_coursepage(code):\n \"\"\"Given a course code, requests the correspnding course page\"\"\"\n url = 'http://gla.ac.uk/coursecatalogue/course/?code=' + code\n print url\n coursepage = requests.get(url)\n return coursepage\n\n\ndef get_course_title_only(code):\n \"\"\"Given a course code, requests the corresponding page and returns only\n the name of that course. This is used to generate a list of courses which I\n have opted to disclude with only their course codes\n \"\"\"\n coursepage = get_coursepage(code)\n soup = BeautifulSoup(coursepage.content, 'lxml')\n title = [soup.find_all('h1')[2].string][0]\n return title\n\n\ndef new_dict(heading, value):\n \"\"\"Creates a dictionary with a heading-value pair, which is the structure\n of all the sections in the courses dictionary\n \"\"\"\n value = value.replace('%', '\\%').replace('&', '\\&').replace(u'\\xa0', ' ')\n # Currently encoding is causeing me problems - the quick fix below removes\n # all the characters that have broken the code so far. This solution is not\n # likely to work if more courses were added\n value = value.replace(u'\\u25a0', '\\\\break').replace(u'\\u037e', ';')\n return {\n 'heading': heading,\n 'value': value,\n }\n\n\ndef get_info_list(info_string, course):\n \"\"\"Each course page has a small info section at the beginning, which I had\n to extract and formulate in a different way to the main sections. This\n function constructs the dictionary entries for he course when given a\n string with all the details required for the info section\n TODO:\n There's definitely a better way to do this.\n \"\"\"\n info_list = []\n split_on_newline = info_string.split(\"\\n\")\n for elem in split_on_newline:\n split = elem.split(\": \")\n for s in split:\n info_list.append(s)\n info_list = info_list[1:-1]\n info_tags = [\n 'session', 'school', 'credits', 'level', 'offered',\n 'visiting_students', 'erasmus_students',\n ]\n i = 0\n for info_tag in info_tags:\n course[info_tag] = new_dict(\n info_list[i] + ': ', info_list[i + 1])\n i += 2\n return course\n\n\ndef bsoup(coursepage):\n \"\"\"Given a course page, takes the context and parses it to extract all the\n useful information and construct a dictionary with the information\n corresponding to assigned names ready to be written into the TeX file\n TODO:\n What a mess. 
There should be a way to do this by counting/incrementing.\n \"\"\"\n soup = BeautifulSoup(coursepage.content, 'lxml')\n h1 = soup.find_all('h1')[2]\n html = h1.find_next_siblings()\n all_strings = [h1.string]\n for div in html:\n try:\n text = div.get_text()\n except:\n text = div.string\n if text is not None:\n all_strings.append(text)\n course = {'title': all_strings[0]}\n course = get_info_list(all_strings[1], course)\n course['description'] = new_dict(all_strings[2], all_strings[3])\n course['timetable'] = new_dict(all_strings[4], all_strings[5])\n course['requirements_of_entry'] = new_dict(all_strings[6], all_strings[7])\n course['excluded_courses'] = new_dict(all_strings[8], all_strings[9])\n course['co_requisites'] = new_dict(all_strings[10], all_strings[11])\n course['assessment_weighting'] = new_dict(all_strings[12], all_strings[13])\n course['aims'] = new_dict(all_strings[17], all_strings[18])\n date = all_strings[14].split(': ')\n course['assessment_date'] = new_dict(date[0] + \": \", date[1])\n course['learning_outcomes'] = new_dict(all_strings[19], all_strings[20])\n # TODO Doesn't parse Minimum Requirement for Award of Credit or\n # Reassessment Options\n return course\n\n\ndef create_not_included_list(codes):\n \"\"\"Given a list of course codes, ge5t their corresponding titles and format\n them in a bulletted TeX list. This is used to indicate in the abstract\n which courses have been deliberately discluded from the document\n \"\"\"\n string = '\\\\begin{itemize}\\n'\n for code in codes:\n title = get_course_title_only(code)\n string += '\\\\item{' + title + '}\\n'\n string += '\\\\end{itemize}\\n'\n return string\n\n\ndef write_to_latex(codelist, unwanted_courses):\n \"\"\"Constructs the entire TeX document from all the courses with key\n document details (like author and table of contents)\n \"\"\"\n # TODO: investigate a way to add large amounts of text outside of the\n # function\n abstract01 = \"I created this document to practice parsing html and using\\\n tools like Beautiful Soup which I've previously had little experience\\\n in. As a result, it's not perfect.\\\\newline\\\n It is also a slightly condensed all-in-one-place look at a selection\\\n of courses that are available for fourth year computer science\\\n students at the University of Glasgow. For the purposes of clarity I\\\n have removed several courses from this selection. 
The following\\\n courses have been omitted:\"\n abstract02 = \"For more insight into the project, to report issues or to\\\n inspect the code, have a look at the GitHub:\\\n \\\\url{https://github.com/IS0metric/course-ripper}\"\n unincluded = create_not_included_list(unwanted_courses)\n with open('courses.tex', 'w') as f:\n # TODO Try and move all this to a separate function?\n # TODO: Check if it's more efficient to write a single, massive string\n # to file\n f.write('\\\\documentclass{hitec}\\n')\n f.write('\\\\usepackage[document]{ragged2e}\\n')\n f.write('\\\\usepackage{url}\\n')\n f.write('\\\\usepackage{hyperref}\\n')\n f.write('\\\\setcounter{tocdepth}{4}\\n')\n f.write('\\\\begin{document}\\n')\n f.write('\\\\title{Fourth Year (2016-17) Courses}\\n')\n f.write('\\\\author{Jack Parkinson}\\n')\n f.write('\\\\date{August 2016}\\n')\n f.write('\\\\maketitle\\n')\n f.write('\\\\abstract{' + abstract01 + unincluded + abstract02 + '}\\n')\n f.write('\\\\newpage\\n\\n')\n f.write('\\\\tableofcontents\\n')\n f.write('\\\\newpage\\n\\n')\n # TODO: Look into alternatives to the three lists\n all_courses = []\n sem1_courses = []\n sem2_courses = []\n for code in codelist:\n course = bsoup(get_coursepage(code))\n if course['offered']['value'] == 'Runs Throughout Semesters 1 and 2':\n all_courses.append(course)\n elif \"1\" in course['offered']['value']:\n sem1_courses.append(course)\n elif \"2\" in course['offered']['value']:\n sem2_courses.append(course)\n f.write('\\\\section{Semester 1 and 2 Courses}\\n\\n')\n for course in all_courses:\n f.write(latex_course(course))\n f.write('\\\\section{Semester 1 Only Courses}\\n\\n')\n for course in sem1_courses:\n f.write(latex_course(course))\n f.write('\\\\section{Semester 2 Only Courses}\\n\\n')\n for course in sem2_courses:\n f.write(latex_course(course))\n f.write('\\\\end{document}')\n return None\n\n\ndef latex_info(info):\n \"\"\"Provides the special treatment that the info section requires\"\"\"\n return '\\\\textbf{' + info['heading'] + '} ' + info['value'] + ' \\\\break\\n'\n\n\ndef latex_subsection(section):\n \"\"\"Creates a TeX formatted string for a given subsubsection\"\"\"\n string = '\\\\subsubsection*{' + section['heading'] + '}\\n'\n string += section['value'] + '\\n'\n return string\n\n\ndef latex_course(course):\n \"\"\"Creates a TeX formatted string for a course\"\"\"\n basic_info_list = [\n 'session', 'school', 'credits', 'level', 'offered',\n 'visiting_students', 'erasmus_students'\n ]\n generic_subsection_list = [\n 'description', 'timetable', 'requirements_of_entry',\n 'excluded_courses', 'co_requisites', 'assessment_weighting'\n ]\n string = '\\\\subsection{' + course[\"title\"] + '}\\n'\n for info in basic_info_list:\n string += latex_info(course[info])\n for subsection in generic_subsection_list:\n string += latex_subsection(course[subsection])\n string += '\\\\break \\\\textbf{' + course['assessment_date'][\n 'heading'] + '}' + course['assessment_date']['value'] + '\\n'\n string += latex_subsection(course['aims'])\n string += '\\\\subsubsection*{' + \\\n course['learning_outcomes']['heading'] + '}\\n'\n outcome_list = re.split(\n '\\d+\\. 
', course['learning_outcomes']['value'])\n string += outcome_list[0] + '\\n'\n string += '\\\\begin{enumerate}\\n'\n for i in outcome_list[1:-1]:\n string += '\\\\item ' + i + '\\n'\n string += '\\\\end{enumerate}\\n'\n return string\n\n\ndef create_tex(unwanted_courses, wanted_courses=None):\n \"\"\"Creates the TeX document from the Computer Science Course Catalog\"\"\"\n page = requests.get(\n 'http://gla.ac.uk/coursecatalogue/courselist/' +\n '?code=REG30200000&name=School+of+Computing+Science')\n tree = html.fromstring(page.content)\n spans = tree.xpath('//span/text()')\n codes = []\n if wanted_courses is None:\n for s in spans:\n if s[0:4] == \"COMP\" and s[7] == '4' and s not in unwanted_courses:\n codes.append(s)\n else:\n for s in wanted_courses:\n codes.append(s)\n write_to_latex(codes, unwanted_courses)\n return None\n\n\ndef pdflatex(unwanted_courses):\n \"\"\"Generates a TeX document and then runs the pdflatex command to create a\n PDF from the TeX\n \"\"\"\n create_tex(unwanted_courses)\n cmd = ['pdflatex', '-interaction', 'nonstopmode', 'courses.tex']\n proc = subprocess.Popen(cmd)\n proc.communicate()\n return None\n\nif __name__ == \"__main__\":\n # List of deliberately excluded courses\n unwanted_courses = [\n 'COMPSCI4010', 'COMPSCI4009', 'COMPSCI4013', 'COMPSCI4024P',\n 'COMPSCI4014', 'COMPSCI4012', 'COMPSCI4011', 'COMPSCI4038',\n 'COMPSCI4015', 'COMPSCI4016', 'COMPSCI4046', 'COMPSCI4047',\n 'COMPSCI4044', 'COMPSCI4070', 'COMPSCI4038',\n ]\n create_tex(unwanted_courses)\n"
] | true |
98,522 |
5e559c0092748c541ab739b71761579e0ac6f134
|
# Author: Zhang Yuan
# Simple interactive shopping cart: the user keeps buying items G1-G6 until
# they type "Stop" or the wallet can no longer afford the cheapest item.
goods_list = [['G1', 256], ['G2', 300], ['G3', 400],
              ['G4', 123], ['G5', 56], ['G6', 321]]
cheapest_price = min(price for _, price in goods_list)

wallet = 1000
ShoppingCart = []
print('your wallet has $', wallet)
while wallet > cheapest_price:
    buy = input("what do you want to buy? G1-G6 or Stop")
    if buy == 'Stop':
        print('you chose to stop shopping')
        print("your shopping cart contains:", ShoppingCart, ' and your wallet has $', wallet)
        break
    # Look up the chosen item; unknown codes are ignored and the prompt repeats.
    selected = next((g for g in goods_list if g[0] == buy), None)
    if selected is None:
        continue
    if wallet > selected[1]:
        ShoppingCart.append(selected)
        wallet = wallet - selected[1]
        print("your shopping cart contains:", ShoppingCart, ' and your wallet has $', wallet)
    else:
        print("you need $", selected[1], 'but you only have $', wallet)
else:
    # This branch runs when the loop exits without a break, i.e. once the
    # wallet can no longer cover the cheapest item.
    print("you have spent most of your money, you can't buy anything else")
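# A minimal alternative sketch (same prices assumed) that keeps the catalogue
# in a dict so the lookup becomes a single get():
#   prices = {'G1': 256, 'G2': 300, 'G3': 400, 'G4': 123, 'G5': 56, 'G6': 321}
#   cost = prices.get(buy)  # None for unknown item codes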
|
[
"# Author:Zhang Yuan\ngoods1=['G1',256]\ngoods2=['G2',300]\ngoods3=['G3',400]\ngoods4=['G4',123]\ngoods5=['G5',56]\ngoods6=['G6',321]\n\nwallet=1000\nShoppingCart=[]\nprint('your wallet have $', wallet)\nwhile (wallet > 56):\n buy=input(\"what do you want to buy? G1-G6 or Stop\")\n if buy==goods1[0]:\n goods=goods1\n if wallet>goods[1]:\n ShoppingCart.append(goods1)\n wallet=wallet-goods[1]\n print(\"your shopping carts are:\",ShoppingCart,' your wallet have',wallet)\n else:\n print(\"your need$\", goods[1], 'but your have $', wallet)\n elif buy==goods2[0]:\n goods=goods2\n if wallet>goods[1]:\n ShoppingCart.append(goods2)\n wallet=wallet-goods[1]\n print(\"your shopping carts are:\",ShoppingCart,' your wallet have',wallet)\n else:\n print(\"your need$\", goods[1], 'but your have $', wallet)\n elif buy==goods3[0]:\n goods=goods3\n if wallet>goods[1]:\n ShoppingCart.append(goods3)\n wallet=wallet-goods[1]\n print(\"your shopping carts are:\",ShoppingCart,' your wallet have',wallet)\n else:\n print(\"your need$\", goods[1], 'but your have $', wallet)\n elif buy==goods4[0]:\n goods=goods4\n if wallet>goods[1]:\n ShoppingCart.append(goods4)\n wallet=wallet-goods[1]\n print(\"your shopping carts are:\",ShoppingCart,' your wallet have',wallet)\n else:\n print(\"your need$\", goods[1], 'but your have $', wallet)\n elif buy==goods5[0]:\n goods=goods5\n if wallet>goods[1]:\n ShoppingCart.append(goods5)\n wallet=wallet-goods[1]\n print(\"your shopping carts are:\",ShoppingCart,' your wallet have',wallet)\n else:\n print(\"your need$\", goods[1], 'but your have $', wallet)\n elif buy==goods6[0]:\n goods=goods6\n if wallet>goods[1]:\n ShoppingCart.append(goods6)\n wallet=wallet-goods[1]\n print(\"your shopping carts are:\",ShoppingCart,' your wallet have',wallet)\n else:\n print(\"your need$\",goods[1],'but your have $',wallet)\n elif buy=='Stop':\n print('your stop buy by yourself')\n print(\"your shopping carts are:\", ShoppingCart, ' your wallet have', wallet)\n break\nelse:\n print(\"your have spend most of your money, your can't buy any more\")\n\n\n",
"goods1 = ['G1', 256]\ngoods2 = ['G2', 300]\ngoods3 = ['G3', 400]\ngoods4 = ['G4', 123]\ngoods5 = ['G5', 56]\ngoods6 = ['G6', 321]\nwallet = 1000\nShoppingCart = []\nprint('your wallet have $', wallet)\nwhile wallet > 56:\n buy = input('what do you want to buy? G1-G6 or Stop')\n if buy == goods1[0]:\n goods = goods1\n if wallet > goods[1]:\n ShoppingCart.append(goods1)\n wallet = wallet - goods[1]\n print('your shopping carts are:', ShoppingCart,\n ' your wallet have', wallet)\n else:\n print('your need$', goods[1], 'but your have $', wallet)\n elif buy == goods2[0]:\n goods = goods2\n if wallet > goods[1]:\n ShoppingCart.append(goods2)\n wallet = wallet - goods[1]\n print('your shopping carts are:', ShoppingCart,\n ' your wallet have', wallet)\n else:\n print('your need$', goods[1], 'but your have $', wallet)\n elif buy == goods3[0]:\n goods = goods3\n if wallet > goods[1]:\n ShoppingCart.append(goods3)\n wallet = wallet - goods[1]\n print('your shopping carts are:', ShoppingCart,\n ' your wallet have', wallet)\n else:\n print('your need$', goods[1], 'but your have $', wallet)\n elif buy == goods4[0]:\n goods = goods4\n if wallet > goods[1]:\n ShoppingCart.append(goods4)\n wallet = wallet - goods[1]\n print('your shopping carts are:', ShoppingCart,\n ' your wallet have', wallet)\n else:\n print('your need$', goods[1], 'but your have $', wallet)\n elif buy == goods5[0]:\n goods = goods5\n if wallet > goods[1]:\n ShoppingCart.append(goods5)\n wallet = wallet - goods[1]\n print('your shopping carts are:', ShoppingCart,\n ' your wallet have', wallet)\n else:\n print('your need$', goods[1], 'but your have $', wallet)\n elif buy == goods6[0]:\n goods = goods6\n if wallet > goods[1]:\n ShoppingCart.append(goods6)\n wallet = wallet - goods[1]\n print('your shopping carts are:', ShoppingCart,\n ' your wallet have', wallet)\n else:\n print('your need$', goods[1], 'but your have $', wallet)\n elif buy == 'Stop':\n print('your stop buy by yourself')\n print('your shopping carts are:', ShoppingCart, ' your wallet have',\n wallet)\n break\nelse:\n print(\"your have spend most of your money, your can't buy any more\")\n",
"<assignment token>\nprint('your wallet have $', wallet)\nwhile wallet > 56:\n buy = input('what do you want to buy? G1-G6 or Stop')\n if buy == goods1[0]:\n goods = goods1\n if wallet > goods[1]:\n ShoppingCart.append(goods1)\n wallet = wallet - goods[1]\n print('your shopping carts are:', ShoppingCart,\n ' your wallet have', wallet)\n else:\n print('your need$', goods[1], 'but your have $', wallet)\n elif buy == goods2[0]:\n goods = goods2\n if wallet > goods[1]:\n ShoppingCart.append(goods2)\n wallet = wallet - goods[1]\n print('your shopping carts are:', ShoppingCart,\n ' your wallet have', wallet)\n else:\n print('your need$', goods[1], 'but your have $', wallet)\n elif buy == goods3[0]:\n goods = goods3\n if wallet > goods[1]:\n ShoppingCart.append(goods3)\n wallet = wallet - goods[1]\n print('your shopping carts are:', ShoppingCart,\n ' your wallet have', wallet)\n else:\n print('your need$', goods[1], 'but your have $', wallet)\n elif buy == goods4[0]:\n goods = goods4\n if wallet > goods[1]:\n ShoppingCart.append(goods4)\n wallet = wallet - goods[1]\n print('your shopping carts are:', ShoppingCart,\n ' your wallet have', wallet)\n else:\n print('your need$', goods[1], 'but your have $', wallet)\n elif buy == goods5[0]:\n goods = goods5\n if wallet > goods[1]:\n ShoppingCart.append(goods5)\n wallet = wallet - goods[1]\n print('your shopping carts are:', ShoppingCart,\n ' your wallet have', wallet)\n else:\n print('your need$', goods[1], 'but your have $', wallet)\n elif buy == goods6[0]:\n goods = goods6\n if wallet > goods[1]:\n ShoppingCart.append(goods6)\n wallet = wallet - goods[1]\n print('your shopping carts are:', ShoppingCart,\n ' your wallet have', wallet)\n else:\n print('your need$', goods[1], 'but your have $', wallet)\n elif buy == 'Stop':\n print('your stop buy by yourself')\n print('your shopping carts are:', ShoppingCart, ' your wallet have',\n wallet)\n break\nelse:\n print(\"your have spend most of your money, your can't buy any more\")\n",
"<assignment token>\n<code token>\n"
] | false |
98,523 |
38b75abbd2444a6ffed296cf1dea8795893cb456
|
# parse a passport string
def parsePassport(passport):
parsedPassport = dict()
passport = passport.replace("\n"," ")
for p in passport.split(" "):
if p.find(":") != -1:
parsedPassport[(p.split(":")[0]).strip("\n")] = p.split(":")[1].strip("\n")
return parsedPassport
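# Illustrative example (doctest-style, not run automatically):
#   >>> parsePassport("ecl:gry pid:860033327\nbyr:1937")
#   {'ecl': 'gry', 'pid': '860033327', 'byr': '1937'}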
def checkInRange(value,min,max):
value = int(value)
validValues = range(min,max+1)
return value in validValues
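# Example: checkInRange('1995', 1920, 2002) returns True -- the value is cast
# to int first, which is why string field values work here.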
def test_checkInRange():
assert checkInRange(1919,1920,2002) == False, "Should be False"
assert checkInRange(2002,1920,2002) == True, "Should be True"
assert checkInRange(2003,1920,2002) == False, "Should be False"
assert checkInRange(1950,1920,2002) == True, "Should be True"
assert checkInRange(2020,2020,2030) == True, "Should be True"
def parseHgt(hgtField):
    if len(hgtField) == 5 and hgtField.find("cm") != -1:
value = hgtField[:3]
units = "cm"
return {"units": units, "value": int(value)}
    elif len(hgtField) == 4 and hgtField.find("in") != -1:
value = hgtField[:2]
units = "in"
return {"units": units, "value": int(value)}
else:
return {"units": "xx", "value": 0}
def test_parseHgt():
assert parseHgt('60in') == {'units':'in','value':60}
assert parseHgt('190cm') == {'units':'cm','value':190}
assert parseHgt('60cm') == {'units':'xx','value':0}
assert parseHgt('600in') == {'units':'xx','value':0}
def validateHgt(hgtField):
parsedHgt = parseHgt(hgtField)
if parsedHgt["units"] == "in":
return checkInRange(parsedHgt["value"], 59, 76)
elif parsedHgt["units"] == "cm":
return checkInRange(parsedHgt["value"], 150, 193)
else:
return False
def test_validateHgt():
assert validateHgt('60in') == True, "Should be True"
assert validateHgt('190cm') == True, "Should be True"
assert validateHgt('190in') == False, "Should be False"
assert validateHgt('190') == False, "Should be False"
def validateByr(byrField):
return checkInRange(byrField,1920,2002)
def test_validateByr():
assert validateByr(2002) == True, "Should be True"
assert validateByr(2003) == False, "Should be False"
def validateIyr(iyrField):
return checkInRange(iyrField,2010,2020)
def test_validateIyr():
assert validateIyr(2010) == True, "Should be True"
assert validateIyr(2012) == True, "Should be True"
assert validateIyr(2020) == True, "Should be True"
assert validateIyr(2015) == True, "Should be True"
assert validateIyr(2002) == False, "Should be False"
assert validateIyr(2021) == False, "Should be False"
def validateEyr(eyrField):
return checkInRange(eyrField,2020,2030)
def test_validateEyr():
assert validateEyr(2020) == True, "Should be True"
assert validateEyr(2030) == True, "Should be True"
assert validateEyr(2025) == True, "Should be True"
assert validateEyr(2002) == False, "Should be False"
assert validateEyr(2031) == False, "Should be False"
def validateHcl(hclField):
if hclField[0] != "#":
return False
else:
colour = hclField[1:]
validChrs = ([str(x) for x in range(0,10)] + [chr(x) for x in range(ord('a'),ord('f')+1)])
return all([c in validChrs for c in colour])
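# Note: only the character set is checked here, not the length, so a short
# value such as '#abc' would also be accepted by this helper.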
def test_validateHcl():
assert validateHcl("#123abc") == True
assert validateHcl("#123abz") == False
assert validateHcl("123abc") == False
def validateEcl(eclField):
return eclField in ['amb','blu','brn','gry','grn','hzl','oth']
def test_validateEcl():
assert validateEcl("brn") == True
assert validateEcl("wat") == False
def validatePid(pidField):
if len(pidField) != 9:
return False
else:
validChrs = [str(x) for x in range(0,10)]
return all([c in validChrs for c in pidField])
def test_validatePid():
assert validatePid("000000001") == True
assert validatePid("0123456789") == False
def validateCid(cidField):
return True
def checkFields(passport):
parsedPassport = parsePassport(passport)
hasFields = list(parsedPassport.keys())
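    # Since 'cid' is optional, both branches below reduce to checking that the
    # seven mandatory fields are present.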
if 'cid' in hasFields:
needsFields = ['byr','iyr','eyr','hgt','hcl','ecl','pid','cid']
return (all([c in hasFields for c in needsFields]))
else:
needsFields = ['byr','iyr','eyr','hgt','hcl','ecl','pid']
return (all([c in hasFields for c in needsFields]))
def test_checkFields():
x = """
ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
byr:1937 iyr:2017 cid:147 hgt:183cm
"""
assert checkFields(x) == True
x = """
iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884
hcl:#cfa07d byr:1929
"""
assert checkFields(x) == False
x = """
hcl:#ae17e1 iyr:2013
eyr:2024
ecl:brn pid:760753108 byr:1931
hgt:179cm
"""
assert checkFields(x) == True
x = """
hcl:#cfa07d eyr:2025 pid:166559648
iyr:2011 ecl:brn hgt:59in
"""
assert checkFields(x) == False
    x = """
pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980
hcl:#623a2f
"""
assert checkFields(x) == True
def checkPassport(passport):
if checkFields(passport) == False:
return False
else:
parsedPassport = parsePassport(passport)
valids = []
for f in list(parsedPassport.keys()):
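            # Dispatch by name to the matching validator (validateByr,
            # validateHgt, ...); the field value is interpolated into the
            # eval'd call as a string literal, so every validator receives
            # a str argument.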
if eval("validate" + f.capitalize() + "('" + parsedPassport[f] + "')") == False:
valids.append(False)
else:
valids.append(True)
return all(valids)
def test_checkPassport():
x = """
eyr:1972 cid:100
hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926
"""
assert checkPassport(x) == False, "Should be false"
x = """
iyr:2019
hcl:#602927 eyr:1967 hgt:170cm
ecl:grn pid:012533040 byr:1946
"""
assert checkPassport(x) == False, "Should be false"
x = """
hcl:dab227 iyr:2012
ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277
"""
assert checkPassport(x) == False, "Should be false"
x = """
hgt:59cm ecl:zzz
eyr:2038 hcl:74454a iyr:2023
pid:3556412378 byr:2007
"""
assert checkPassport(x) == False, "Should be false"
x = """
pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980
hcl:#623a2f
"""
assert checkPassport(x) == True, "Should be true"
x = """
eyr:2029 ecl:blu cid:129 byr:1989
iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm
"""
assert checkPassport(x) == True, "Should be true"
x = """
hcl:#888785
hgt:164cm byr:2001 iyr:2015 cid:88
pid:545766238 ecl:hzl
eyr:2022
"""
assert checkPassport(x) == True, "Should be true"
x = """
iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719
"""
assert checkPassport(x) == True, "Should be true"
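# Note that the guard below uses "!=", the opposite of the usual idiom: the
# self-tests therefore run when this module is imported, not when the script
# is executed directly, while the puzzle-solving code further down always runs.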
if __name__ != "__main__":
test_checkInRange()
print("checkInRange() tests passed!")
test_validateByr()
print("validateByr() tests passed!")
test_validateIyr()
print("validateIyr() tests passed!")
test_validateEyr()
print("validateEyr() tests passed!")
test_parseHgt()
print("parseHgt() tests passed!")
test_validateHgt()
print("validateHgt() tests passed!")
test_validateHcl()
print("validateHcl() tests passed!")
test_validateEcl()
print("validateEcl() tests passed!")
test_validatePid()
print("validatePid() tests passed!")
test_checkFields()
print("checkFields() tests passed!")
test_checkPassport()
    print("checkPassport() tests passed!")
print("All tests passed!")
file = "puzzle_data.txt"
passports = []
count = 1
# First pass: count the passport records -- they are separated by blank lines,
# so the code counts blank lines and starts from one.
with open(file) as f:
for line in f:
if line == "\n":
count = count + 1
passports = [''] * count
count = 0
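# Second pass: concatenate each record's lines into a single string per
# passport, advancing to the next slot whenever a blank line is seen.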
with open(file) as f:
for line in f:
if line == "\n":
count = count + 1
passports[count] = passports[count] + " " + line.strip("\n")
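# A more compact alternative sketch (assumes the whole file fits in memory):
#   with open(file) as f:
#       passports = f.read().split("\n\n")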
ptI = 0
ptII = 0
for p in passports:
ptI = ptI + int(checkFields(p))
ptII = ptII + int(checkPassport(p))
print(ptI)
print(ptII)
|
[
"\n# parse a passport string\ndef parsePassport(passport):\n parsedPassport = dict()\n passport = passport.replace(\"\\n\",\" \")\n for p in passport.split(\" \"):\n if p.find(\":\") != -1:\n parsedPassport[(p.split(\":\")[0]).strip(\"\\n\")] = p.split(\":\")[1].strip(\"\\n\")\n\n return parsedPassport\n\n\ndef checkInRange(value,min,max):\n value = int(value)\n validValues = range(min,max+1)\n return value in validValues\n\n\n\ndef test_checkInRange():\n assert checkInRange(1919,1920,2002) == False, \"Should be False\"\n assert checkInRange(2002,1920,2002) == True, \"Should be True\"\n assert checkInRange(2003,1920,2002) == False, \"Should be False\"\n assert checkInRange(1950,1920,2002) == True, \"Should be True\"\n assert checkInRange(2020,2020,2030) == True, \"Should be True\"\n\ndef parseHgt(hgtField):\n if (len(hgtField) == 5) & (hgtField.find(\"cm\") != -1):\n value = hgtField[:3]\n units = \"cm\"\n return {\"units\": units, \"value\": int(value)}\n\n elif (len(hgtField) == 4) & (hgtField.find(\"in\") != -1):\n\n value = hgtField[:2]\n units = \"in\"\n return {\"units\": units, \"value\": int(value)}\n else:\n return {\"units\": \"xx\", \"value\": 0}\n \n\ndef test_parseHgt():\n assert parseHgt('60in') == {'units':'in','value':60}\n assert parseHgt('190cm') == {'units':'cm','value':190}\n assert parseHgt('60cm') == {'units':'xx','value':0}\n assert parseHgt('600in') == {'units':'xx','value':0}\n\n\n\ndef validateHgt(hgtField):\n parsedHgt = parseHgt(hgtField)\n if parsedHgt[\"units\"] == \"in\":\n return checkInRange(parsedHgt[\"value\"], 59, 76)\n elif parsedHgt[\"units\"] == \"cm\":\n return checkInRange(parsedHgt[\"value\"], 150, 193)\n else:\n return False\n\n\ndef test_validateHgt():\n assert validateHgt('60in') == True, \"Should be True\"\n assert validateHgt('190cm') == True, \"Should be True\"\n assert validateHgt('190in') == False, \"Should be False\"\n assert validateHgt('190') == False, \"Should be False\"\n\n\ndef validateByr(byrField):\n return checkInRange(byrField,1920,2002)\n\ndef test_validateByr():\n assert validateByr(2002) == True, \"Should be True\"\n assert validateByr(2003) == False, \"Should be False\"\n\n\ndef validateIyr(iyrField):\n return checkInRange(iyrField,2010,2020)\n\ndef test_validateIyr():\n assert validateIyr(2010) == True, \"Should be True\"\n assert validateIyr(2012) == True, \"Should be True\"\n assert validateIyr(2020) == True, \"Should be True\"\n assert validateIyr(2015) == True, \"Should be True\"\n assert validateIyr(2002) == False, \"Should be False\"\n assert validateIyr(2021) == False, \"Should be False\"\n\n\ndef validateEyr(eyrField):\n return checkInRange(eyrField,2020,2030)\n\n\ndef test_validateEyr():\n assert validateEyr(2020) == True, \"Should be True\"\n assert validateEyr(2030) == True, \"Should be True\"\n assert validateEyr(2025) == True, \"Should be True\"\n assert validateEyr(2002) == False, \"Should be False\"\n assert validateEyr(2031) == False, \"Should be False\"\n\n\ndef validateHcl(hclField):\n if hclField[0] != \"#\":\n return False\n else:\n colour = hclField[1:]\n validChrs = ([str(x) for x in range(0,10)] + [chr(x) for x in range(ord('a'),ord('f')+1)])\n return all([c in validChrs for c in colour])\n\ndef test_validateHcl():\n assert validateHcl(\"#123abc\") == True\n assert validateHcl(\"#123abz\") == False\n assert validateHcl(\"123abc\") == False\n\n \ndef validateEcl(eclField):\n return eclField in ['amb','blu','brn','gry','grn','hzl','oth']\n\ndef test_validateEcl():\n assert validateEcl(\"brn\") == True\n 
assert validateEcl(\"wat\") == False \n\n\ndef validatePid(pidField):\n if len(pidField) != 9:\n return False\n else:\n validChrs = [str(x) for x in range(0,10)]\n return all([c in validChrs for c in pidField])\n\ndef test_validatePid():\n assert validatePid(\"000000001\") == True\n assert validatePid(\"0123456789\") == False \n\n\ndef validateCid(cidField):\n return True\n\n\n\ndef checkFields(passport):\n parsedPassport = parsePassport(passport)\n hasFields = list(parsedPassport.keys())\n if 'cid' in hasFields:\n needsFields = ['byr','iyr','eyr','hgt','hcl','ecl','pid','cid']\n return (all([c in hasFields for c in needsFields]))\n else:\n needsFields = ['byr','iyr','eyr','hgt','hcl','ecl','pid']\n return (all([c in hasFields for c in needsFields]))\n\n\n\ndef test_checkFields():\n x = \"\"\"\n ecl:gry pid:860033327 eyr:2020 hcl:#fffffd\n byr:1937 iyr:2017 cid:147 hgt:183cm \n \"\"\"\n assert checkFields(x) == True\n \n x = \"\"\"\n iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884\n hcl:#cfa07d byr:1929 \n \"\"\"\n assert checkFields(x) == False\n\n x = \"\"\"\n hcl:#ae17e1 iyr:2013\n eyr:2024\n ecl:brn pid:760753108 byr:1931\n hgt:179cm\n \"\"\"\n assert checkFields(x) == True\n\n x = \"\"\"\n hcl:#cfa07d eyr:2025 pid:166559648\n iyr:2011 ecl:brn hgt:59in\n \"\"\"\n assert checkFields(x) == False\n\n\n x = \"\"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f \n \"\"\"\n assert checkFields(x) == True\n\n\ndef checkPassport(passport):\n if checkFields(passport) == False:\n return False\n else:\n parsedPassport = parsePassport(passport)\n\n valids = []\n for f in list(parsedPassport.keys()):\n if eval(\"validate\" + f.capitalize() + \"('\" + parsedPassport[f] + \"')\") == False:\n valids.append(False)\n else:\n valids.append(True)\n \n return all(valids) \n\n\n\ndef test_checkPassport():\n\n x = \"\"\"\n eyr:1972 cid:100\n hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926\n \"\"\"\n assert checkPassport(x) == False, \"Should be false\" \n\n x = \"\"\"\n iyr:2019\n hcl:#602927 eyr:1967 hgt:170cm\n ecl:grn pid:012533040 byr:1946\n \"\"\"\n assert checkPassport(x) == False, \"Should be false\" \n\n x = \"\"\"\n hcl:dab227 iyr:2012\n ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277\n \"\"\"\n assert checkPassport(x) == False, \"Should be false\" \n\n x = \"\"\"\n hgt:59cm ecl:zzz\n eyr:2038 hcl:74454a iyr:2023\n pid:3556412378 byr:2007\n \"\"\"\n assert checkPassport(x) == False, \"Should be false\" \n\n\n x = \"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f\n \"\"\"\n assert checkPassport(x) == True, \"Should be true\" \n\n x = \"\"\"\n eyr:2029 ecl:blu cid:129 byr:1989\n iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm\n \"\"\"\n assert checkPassport(x) == True, \"Should be true\" \n\n x = \"\"\"\n hcl:#888785\n hgt:164cm byr:2001 iyr:2015 cid:88\n pid:545766238 ecl:hzl\n eyr:2022\n \"\"\"\n assert checkPassport(x) == True, \"Should be true\" \n\n x = \"\"\"\n iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719\n\n \"\"\"\n assert checkPassport(x) == True, \"Should be true\" \n\n\nif __name__ != \"__main__\":\n test_checkInRange()\n print(\"checkInRange() tests passed!\")\n\n test_validateByr()\n print(\"validateByr() tests passed!\")\n\n test_validateIyr()\n print(\"validateIyr() tests passed!\")\n\n test_validateEyr()\n print(\"validateEyr() tests passed!\")\n\n test_parseHgt()\n print(\"parseHgt() tests passed!\")\n\n test_validateHgt()\n print(\"validateHgt() tests passed!\")\n\n 
test_validateHcl()\n print(\"validateHcl() tests passed!\")\n\n test_validateEcl()\n print(\"validateEcl() tests passed!\")\n\n test_validatePid()\n print(\"validatePid() tests passed!\")\n\n test_checkFields()\n print(\"checkFields() tests passed!\")\n\n test_checkPassport()\n print(\"checkPasport() tests passed!\")\n\n print(\"All tests passed!\")\n\n\nfile = \"puzzle_data.txt\"\npassports = []\ncount = 1\n\n\n# First just count the number of passports\nwith open(file) as f:\n for line in f:\n if line == \"\\n\":\n count = count + 1\n\n\npassports = [''] * count\ncount = 0\nwith open(file) as f:\n for line in f:\n if line == \"\\n\":\n count = count + 1\n\n passports[count] = passports[count] + \" \" + line.strip(\"\\n\")\n\nptI = 0\nptII = 0\nfor p in passports:\n ptI = ptI + int(checkFields(p))\n ptII = ptII + int(checkPassport(p))\n\nprint(ptI)\nprint(ptII)\n\n",
"def parsePassport(passport):\n parsedPassport = dict()\n passport = passport.replace('\\n', ' ')\n for p in passport.split(' '):\n if p.find(':') != -1:\n parsedPassport[p.split(':')[0].strip('\\n')] = p.split(':')[1\n ].strip('\\n')\n return parsedPassport\n\n\ndef checkInRange(value, min, max):\n value = int(value)\n validValues = range(min, max + 1)\n return value in validValues\n\n\ndef test_checkInRange():\n assert checkInRange(1919, 1920, 2002) == False, 'Should be False'\n assert checkInRange(2002, 1920, 2002) == True, 'Should be True'\n assert checkInRange(2003, 1920, 2002) == False, 'Should be False'\n assert checkInRange(1950, 1920, 2002) == True, 'Should be True'\n assert checkInRange(2020, 2020, 2030) == True, 'Should be True'\n\n\ndef parseHgt(hgtField):\n if (len(hgtField) == 5) & (hgtField.find('cm') != -1):\n value = hgtField[:3]\n units = 'cm'\n return {'units': units, 'value': int(value)}\n elif (len(hgtField) == 4) & (hgtField.find('in') != -1):\n value = hgtField[:2]\n units = 'in'\n return {'units': units, 'value': int(value)}\n else:\n return {'units': 'xx', 'value': 0}\n\n\ndef test_parseHgt():\n assert parseHgt('60in') == {'units': 'in', 'value': 60}\n assert parseHgt('190cm') == {'units': 'cm', 'value': 190}\n assert parseHgt('60cm') == {'units': 'xx', 'value': 0}\n assert parseHgt('600in') == {'units': 'xx', 'value': 0}\n\n\ndef validateHgt(hgtField):\n parsedHgt = parseHgt(hgtField)\n if parsedHgt['units'] == 'in':\n return checkInRange(parsedHgt['value'], 59, 76)\n elif parsedHgt['units'] == 'cm':\n return checkInRange(parsedHgt['value'], 150, 193)\n else:\n return False\n\n\ndef test_validateHgt():\n assert validateHgt('60in') == True, 'Should be True'\n assert validateHgt('190cm') == True, 'Should be True'\n assert validateHgt('190in') == False, 'Should be False'\n assert validateHgt('190') == False, 'Should be False'\n\n\ndef validateByr(byrField):\n return checkInRange(byrField, 1920, 2002)\n\n\ndef test_validateByr():\n assert validateByr(2002) == True, 'Should be True'\n assert validateByr(2003) == False, 'Should be False'\n\n\ndef validateIyr(iyrField):\n return checkInRange(iyrField, 2010, 2020)\n\n\ndef test_validateIyr():\n assert validateIyr(2010) == True, 'Should be True'\n assert validateIyr(2012) == True, 'Should be True'\n assert validateIyr(2020) == True, 'Should be True'\n assert validateIyr(2015) == True, 'Should be True'\n assert validateIyr(2002) == False, 'Should be False'\n assert validateIyr(2021) == False, 'Should be False'\n\n\ndef validateEyr(eyrField):\n return checkInRange(eyrField, 2020, 2030)\n\n\ndef test_validateEyr():\n assert validateEyr(2020) == True, 'Should be True'\n assert validateEyr(2030) == True, 'Should be True'\n assert validateEyr(2025) == True, 'Should be True'\n assert validateEyr(2002) == False, 'Should be False'\n assert validateEyr(2031) == False, 'Should be False'\n\n\ndef validateHcl(hclField):\n if hclField[0] != '#':\n return False\n else:\n colour = hclField[1:]\n validChrs = [str(x) for x in range(0, 10)] + [chr(x) for x in range\n (ord('a'), ord('f') + 1)]\n return all([(c in validChrs) for c in colour])\n\n\ndef test_validateHcl():\n assert validateHcl('#123abc') == True\n assert validateHcl('#123abz') == False\n assert validateHcl('123abc') == False\n\n\ndef validateEcl(eclField):\n return eclField in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\n\n\ndef test_validateEcl():\n assert validateEcl('brn') == True\n assert validateEcl('wat') == False\n\n\ndef validatePid(pidField):\n if len(pidField) != 
9:\n return False\n else:\n validChrs = [str(x) for x in range(0, 10)]\n return all([(c in validChrs) for c in pidField])\n\n\ndef test_validatePid():\n assert validatePid('000000001') == True\n assert validatePid('0123456789') == False\n\n\ndef validateCid(cidField):\n return True\n\n\ndef checkFields(passport):\n parsedPassport = parsePassport(passport)\n hasFields = list(parsedPassport.keys())\n if 'cid' in hasFields:\n needsFields = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid', 'cid']\n return all([(c in hasFields) for c in needsFields])\n else:\n needsFields = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid']\n return all([(c in hasFields) for c in needsFields])\n\n\ndef test_checkFields():\n x = \"\"\"\n ecl:gry pid:860033327 eyr:2020 hcl:#fffffd\n byr:1937 iyr:2017 cid:147 hgt:183cm \n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884\n hcl:#cfa07d byr:1929 \n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\n hcl:#ae17e1 iyr:2013\n eyr:2024\n ecl:brn pid:760753108 byr:1931\n hgt:179cm\n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n hcl:#cfa07d eyr:2025 pid:166559648\n iyr:2011 ecl:brn hgt:59in\n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f \n \"\"\"\n assert checkFields(x) == True\n\n\ndef checkPassport(passport):\n if checkFields(passport) == False:\n return False\n else:\n parsedPassport = parsePassport(passport)\n valids = []\n for f in list(parsedPassport.keys()):\n if eval('validate' + f.capitalize() + \"('\" + parsedPassport[f] +\n \"')\") == False:\n valids.append(False)\n else:\n valids.append(True)\n return all(valids)\n\n\ndef test_checkPassport():\n x = \"\"\"\n eyr:1972 cid:100\n hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n iyr:2019\n hcl:#602927 eyr:1967 hgt:170cm\n ecl:grn pid:012533040 byr:1946\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hcl:dab227 iyr:2012\n ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hgt:59cm ecl:zzz\n eyr:2038 hcl:74454a iyr:2023\n pid:3556412378 byr:2007\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n eyr:2029 ecl:blu cid:129 byr:1989\n iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n hcl:#888785\n hgt:164cm byr:2001 iyr:2015 cid:88\n pid:545766238 ecl:hzl\n eyr:2022\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719\n\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n\n\nif __name__ != '__main__':\n test_checkInRange()\n print('checkInRange() tests passed!')\n test_validateByr()\n print('validateByr() tests passed!')\n test_validateIyr()\n print('validateIyr() tests passed!')\n test_validateEyr()\n print('validateEyr() tests passed!')\n test_parseHgt()\n print('parseHgt() tests passed!')\n test_validateHgt()\n print('validateHgt() tests passed!')\n test_validateHcl()\n print('validateHcl() tests passed!')\n test_validateEcl()\n print('validateEcl() tests passed!')\n test_validatePid()\n print('validatePid() tests passed!')\n 
test_checkFields()\n print('checkFields() tests passed!')\n test_checkPassport()\n print('checkPasport() tests passed!')\n print('All tests passed!')\nfile = 'puzzle_data.txt'\npassports = []\ncount = 1\nwith open(file) as f:\n for line in f:\n if line == '\\n':\n count = count + 1\npassports = [''] * count\ncount = 0\nwith open(file) as f:\n for line in f:\n if line == '\\n':\n count = count + 1\n passports[count] = passports[count] + ' ' + line.strip('\\n')\nptI = 0\nptII = 0\nfor p in passports:\n ptI = ptI + int(checkFields(p))\n ptII = ptII + int(checkPassport(p))\nprint(ptI)\nprint(ptII)\n",
"def parsePassport(passport):\n parsedPassport = dict()\n passport = passport.replace('\\n', ' ')\n for p in passport.split(' '):\n if p.find(':') != -1:\n parsedPassport[p.split(':')[0].strip('\\n')] = p.split(':')[1\n ].strip('\\n')\n return parsedPassport\n\n\ndef checkInRange(value, min, max):\n value = int(value)\n validValues = range(min, max + 1)\n return value in validValues\n\n\ndef test_checkInRange():\n assert checkInRange(1919, 1920, 2002) == False, 'Should be False'\n assert checkInRange(2002, 1920, 2002) == True, 'Should be True'\n assert checkInRange(2003, 1920, 2002) == False, 'Should be False'\n assert checkInRange(1950, 1920, 2002) == True, 'Should be True'\n assert checkInRange(2020, 2020, 2030) == True, 'Should be True'\n\n\ndef parseHgt(hgtField):\n if (len(hgtField) == 5) & (hgtField.find('cm') != -1):\n value = hgtField[:3]\n units = 'cm'\n return {'units': units, 'value': int(value)}\n elif (len(hgtField) == 4) & (hgtField.find('in') != -1):\n value = hgtField[:2]\n units = 'in'\n return {'units': units, 'value': int(value)}\n else:\n return {'units': 'xx', 'value': 0}\n\n\ndef test_parseHgt():\n assert parseHgt('60in') == {'units': 'in', 'value': 60}\n assert parseHgt('190cm') == {'units': 'cm', 'value': 190}\n assert parseHgt('60cm') == {'units': 'xx', 'value': 0}\n assert parseHgt('600in') == {'units': 'xx', 'value': 0}\n\n\ndef validateHgt(hgtField):\n parsedHgt = parseHgt(hgtField)\n if parsedHgt['units'] == 'in':\n return checkInRange(parsedHgt['value'], 59, 76)\n elif parsedHgt['units'] == 'cm':\n return checkInRange(parsedHgt['value'], 150, 193)\n else:\n return False\n\n\ndef test_validateHgt():\n assert validateHgt('60in') == True, 'Should be True'\n assert validateHgt('190cm') == True, 'Should be True'\n assert validateHgt('190in') == False, 'Should be False'\n assert validateHgt('190') == False, 'Should be False'\n\n\ndef validateByr(byrField):\n return checkInRange(byrField, 1920, 2002)\n\n\ndef test_validateByr():\n assert validateByr(2002) == True, 'Should be True'\n assert validateByr(2003) == False, 'Should be False'\n\n\ndef validateIyr(iyrField):\n return checkInRange(iyrField, 2010, 2020)\n\n\ndef test_validateIyr():\n assert validateIyr(2010) == True, 'Should be True'\n assert validateIyr(2012) == True, 'Should be True'\n assert validateIyr(2020) == True, 'Should be True'\n assert validateIyr(2015) == True, 'Should be True'\n assert validateIyr(2002) == False, 'Should be False'\n assert validateIyr(2021) == False, 'Should be False'\n\n\ndef validateEyr(eyrField):\n return checkInRange(eyrField, 2020, 2030)\n\n\ndef test_validateEyr():\n assert validateEyr(2020) == True, 'Should be True'\n assert validateEyr(2030) == True, 'Should be True'\n assert validateEyr(2025) == True, 'Should be True'\n assert validateEyr(2002) == False, 'Should be False'\n assert validateEyr(2031) == False, 'Should be False'\n\n\ndef validateHcl(hclField):\n if hclField[0] != '#':\n return False\n else:\n colour = hclField[1:]\n validChrs = [str(x) for x in range(0, 10)] + [chr(x) for x in range\n (ord('a'), ord('f') + 1)]\n return all([(c in validChrs) for c in colour])\n\n\ndef test_validateHcl():\n assert validateHcl('#123abc') == True\n assert validateHcl('#123abz') == False\n assert validateHcl('123abc') == False\n\n\ndef validateEcl(eclField):\n return eclField in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\n\n\ndef test_validateEcl():\n assert validateEcl('brn') == True\n assert validateEcl('wat') == False\n\n\ndef validatePid(pidField):\n if len(pidField) != 
9:\n return False\n else:\n validChrs = [str(x) for x in range(0, 10)]\n return all([(c in validChrs) for c in pidField])\n\n\ndef test_validatePid():\n assert validatePid('000000001') == True\n assert validatePid('0123456789') == False\n\n\ndef validateCid(cidField):\n return True\n\n\ndef checkFields(passport):\n parsedPassport = parsePassport(passport)\n hasFields = list(parsedPassport.keys())\n if 'cid' in hasFields:\n needsFields = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid', 'cid']\n return all([(c in hasFields) for c in needsFields])\n else:\n needsFields = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid']\n return all([(c in hasFields) for c in needsFields])\n\n\ndef test_checkFields():\n x = \"\"\"\n ecl:gry pid:860033327 eyr:2020 hcl:#fffffd\n byr:1937 iyr:2017 cid:147 hgt:183cm \n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884\n hcl:#cfa07d byr:1929 \n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\n hcl:#ae17e1 iyr:2013\n eyr:2024\n ecl:brn pid:760753108 byr:1931\n hgt:179cm\n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n hcl:#cfa07d eyr:2025 pid:166559648\n iyr:2011 ecl:brn hgt:59in\n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f \n \"\"\"\n assert checkFields(x) == True\n\n\ndef checkPassport(passport):\n if checkFields(passport) == False:\n return False\n else:\n parsedPassport = parsePassport(passport)\n valids = []\n for f in list(parsedPassport.keys()):\n if eval('validate' + f.capitalize() + \"('\" + parsedPassport[f] +\n \"')\") == False:\n valids.append(False)\n else:\n valids.append(True)\n return all(valids)\n\n\ndef test_checkPassport():\n x = \"\"\"\n eyr:1972 cid:100\n hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n iyr:2019\n hcl:#602927 eyr:1967 hgt:170cm\n ecl:grn pid:012533040 byr:1946\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hcl:dab227 iyr:2012\n ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hgt:59cm ecl:zzz\n eyr:2038 hcl:74454a iyr:2023\n pid:3556412378 byr:2007\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n eyr:2029 ecl:blu cid:129 byr:1989\n iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n hcl:#888785\n hgt:164cm byr:2001 iyr:2015 cid:88\n pid:545766238 ecl:hzl\n eyr:2022\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719\n\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n\n\nif __name__ != '__main__':\n test_checkInRange()\n print('checkInRange() tests passed!')\n test_validateByr()\n print('validateByr() tests passed!')\n test_validateIyr()\n print('validateIyr() tests passed!')\n test_validateEyr()\n print('validateEyr() tests passed!')\n test_parseHgt()\n print('parseHgt() tests passed!')\n test_validateHgt()\n print('validateHgt() tests passed!')\n test_validateHcl()\n print('validateHcl() tests passed!')\n test_validateEcl()\n print('validateEcl() tests passed!')\n test_validatePid()\n print('validatePid() tests passed!')\n 
test_checkFields()\n print('checkFields() tests passed!')\n test_checkPassport()\n print('checkPasport() tests passed!')\n print('All tests passed!')\n<assignment token>\nwith open(file) as f:\n for line in f:\n if line == '\\n':\n count = count + 1\n<assignment token>\nwith open(file) as f:\n for line in f:\n if line == '\\n':\n count = count + 1\n passports[count] = passports[count] + ' ' + line.strip('\\n')\n<assignment token>\nfor p in passports:\n ptI = ptI + int(checkFields(p))\n ptII = ptII + int(checkPassport(p))\nprint(ptI)\nprint(ptII)\n",
"def parsePassport(passport):\n parsedPassport = dict()\n passport = passport.replace('\\n', ' ')\n for p in passport.split(' '):\n if p.find(':') != -1:\n parsedPassport[p.split(':')[0].strip('\\n')] = p.split(':')[1\n ].strip('\\n')\n return parsedPassport\n\n\ndef checkInRange(value, min, max):\n value = int(value)\n validValues = range(min, max + 1)\n return value in validValues\n\n\ndef test_checkInRange():\n assert checkInRange(1919, 1920, 2002) == False, 'Should be False'\n assert checkInRange(2002, 1920, 2002) == True, 'Should be True'\n assert checkInRange(2003, 1920, 2002) == False, 'Should be False'\n assert checkInRange(1950, 1920, 2002) == True, 'Should be True'\n assert checkInRange(2020, 2020, 2030) == True, 'Should be True'\n\n\ndef parseHgt(hgtField):\n if (len(hgtField) == 5) & (hgtField.find('cm') != -1):\n value = hgtField[:3]\n units = 'cm'\n return {'units': units, 'value': int(value)}\n elif (len(hgtField) == 4) & (hgtField.find('in') != -1):\n value = hgtField[:2]\n units = 'in'\n return {'units': units, 'value': int(value)}\n else:\n return {'units': 'xx', 'value': 0}\n\n\ndef test_parseHgt():\n assert parseHgt('60in') == {'units': 'in', 'value': 60}\n assert parseHgt('190cm') == {'units': 'cm', 'value': 190}\n assert parseHgt('60cm') == {'units': 'xx', 'value': 0}\n assert parseHgt('600in') == {'units': 'xx', 'value': 0}\n\n\ndef validateHgt(hgtField):\n parsedHgt = parseHgt(hgtField)\n if parsedHgt['units'] == 'in':\n return checkInRange(parsedHgt['value'], 59, 76)\n elif parsedHgt['units'] == 'cm':\n return checkInRange(parsedHgt['value'], 150, 193)\n else:\n return False\n\n\ndef test_validateHgt():\n assert validateHgt('60in') == True, 'Should be True'\n assert validateHgt('190cm') == True, 'Should be True'\n assert validateHgt('190in') == False, 'Should be False'\n assert validateHgt('190') == False, 'Should be False'\n\n\ndef validateByr(byrField):\n return checkInRange(byrField, 1920, 2002)\n\n\ndef test_validateByr():\n assert validateByr(2002) == True, 'Should be True'\n assert validateByr(2003) == False, 'Should be False'\n\n\ndef validateIyr(iyrField):\n return checkInRange(iyrField, 2010, 2020)\n\n\ndef test_validateIyr():\n assert validateIyr(2010) == True, 'Should be True'\n assert validateIyr(2012) == True, 'Should be True'\n assert validateIyr(2020) == True, 'Should be True'\n assert validateIyr(2015) == True, 'Should be True'\n assert validateIyr(2002) == False, 'Should be False'\n assert validateIyr(2021) == False, 'Should be False'\n\n\ndef validateEyr(eyrField):\n return checkInRange(eyrField, 2020, 2030)\n\n\ndef test_validateEyr():\n assert validateEyr(2020) == True, 'Should be True'\n assert validateEyr(2030) == True, 'Should be True'\n assert validateEyr(2025) == True, 'Should be True'\n assert validateEyr(2002) == False, 'Should be False'\n assert validateEyr(2031) == False, 'Should be False'\n\n\ndef validateHcl(hclField):\n if hclField[0] != '#':\n return False\n else:\n colour = hclField[1:]\n validChrs = [str(x) for x in range(0, 10)] + [chr(x) for x in range\n (ord('a'), ord('f') + 1)]\n return all([(c in validChrs) for c in colour])\n\n\ndef test_validateHcl():\n assert validateHcl('#123abc') == True\n assert validateHcl('#123abz') == False\n assert validateHcl('123abc') == False\n\n\ndef validateEcl(eclField):\n return eclField in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\n\n\ndef test_validateEcl():\n assert validateEcl('brn') == True\n assert validateEcl('wat') == False\n\n\ndef validatePid(pidField):\n if len(pidField) != 
9:\n return False\n else:\n validChrs = [str(x) for x in range(0, 10)]\n return all([(c in validChrs) for c in pidField])\n\n\ndef test_validatePid():\n assert validatePid('000000001') == True\n assert validatePid('0123456789') == False\n\n\ndef validateCid(cidField):\n return True\n\n\ndef checkFields(passport):\n parsedPassport = parsePassport(passport)\n hasFields = list(parsedPassport.keys())\n if 'cid' in hasFields:\n needsFields = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid', 'cid']\n return all([(c in hasFields) for c in needsFields])\n else:\n needsFields = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid']\n return all([(c in hasFields) for c in needsFields])\n\n\ndef test_checkFields():\n x = \"\"\"\n ecl:gry pid:860033327 eyr:2020 hcl:#fffffd\n byr:1937 iyr:2017 cid:147 hgt:183cm \n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884\n hcl:#cfa07d byr:1929 \n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\n hcl:#ae17e1 iyr:2013\n eyr:2024\n ecl:brn pid:760753108 byr:1931\n hgt:179cm\n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n hcl:#cfa07d eyr:2025 pid:166559648\n iyr:2011 ecl:brn hgt:59in\n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f \n \"\"\"\n assert checkFields(x) == True\n\n\ndef checkPassport(passport):\n if checkFields(passport) == False:\n return False\n else:\n parsedPassport = parsePassport(passport)\n valids = []\n for f in list(parsedPassport.keys()):\n if eval('validate' + f.capitalize() + \"('\" + parsedPassport[f] +\n \"')\") == False:\n valids.append(False)\n else:\n valids.append(True)\n return all(valids)\n\n\ndef test_checkPassport():\n x = \"\"\"\n eyr:1972 cid:100\n hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n iyr:2019\n hcl:#602927 eyr:1967 hgt:170cm\n ecl:grn pid:012533040 byr:1946\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hcl:dab227 iyr:2012\n ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hgt:59cm ecl:zzz\n eyr:2038 hcl:74454a iyr:2023\n pid:3556412378 byr:2007\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n eyr:2029 ecl:blu cid:129 byr:1989\n iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n hcl:#888785\n hgt:164cm byr:2001 iyr:2015 cid:88\n pid:545766238 ecl:hzl\n eyr:2022\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719\n\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"def parsePassport(passport):\n parsedPassport = dict()\n passport = passport.replace('\\n', ' ')\n for p in passport.split(' '):\n if p.find(':') != -1:\n parsedPassport[p.split(':')[0].strip('\\n')] = p.split(':')[1\n ].strip('\\n')\n return parsedPassport\n\n\ndef checkInRange(value, min, max):\n value = int(value)\n validValues = range(min, max + 1)\n return value in validValues\n\n\ndef test_checkInRange():\n assert checkInRange(1919, 1920, 2002) == False, 'Should be False'\n assert checkInRange(2002, 1920, 2002) == True, 'Should be True'\n assert checkInRange(2003, 1920, 2002) == False, 'Should be False'\n assert checkInRange(1950, 1920, 2002) == True, 'Should be True'\n assert checkInRange(2020, 2020, 2030) == True, 'Should be True'\n\n\ndef parseHgt(hgtField):\n if (len(hgtField) == 5) & (hgtField.find('cm') != -1):\n value = hgtField[:3]\n units = 'cm'\n return {'units': units, 'value': int(value)}\n elif (len(hgtField) == 4) & (hgtField.find('in') != -1):\n value = hgtField[:2]\n units = 'in'\n return {'units': units, 'value': int(value)}\n else:\n return {'units': 'xx', 'value': 0}\n\n\ndef test_parseHgt():\n assert parseHgt('60in') == {'units': 'in', 'value': 60}\n assert parseHgt('190cm') == {'units': 'cm', 'value': 190}\n assert parseHgt('60cm') == {'units': 'xx', 'value': 0}\n assert parseHgt('600in') == {'units': 'xx', 'value': 0}\n\n\ndef validateHgt(hgtField):\n parsedHgt = parseHgt(hgtField)\n if parsedHgt['units'] == 'in':\n return checkInRange(parsedHgt['value'], 59, 76)\n elif parsedHgt['units'] == 'cm':\n return checkInRange(parsedHgt['value'], 150, 193)\n else:\n return False\n\n\ndef test_validateHgt():\n assert validateHgt('60in') == True, 'Should be True'\n assert validateHgt('190cm') == True, 'Should be True'\n assert validateHgt('190in') == False, 'Should be False'\n assert validateHgt('190') == False, 'Should be False'\n\n\ndef validateByr(byrField):\n return checkInRange(byrField, 1920, 2002)\n\n\n<function token>\n\n\ndef validateIyr(iyrField):\n return checkInRange(iyrField, 2010, 2020)\n\n\ndef test_validateIyr():\n assert validateIyr(2010) == True, 'Should be True'\n assert validateIyr(2012) == True, 'Should be True'\n assert validateIyr(2020) == True, 'Should be True'\n assert validateIyr(2015) == True, 'Should be True'\n assert validateIyr(2002) == False, 'Should be False'\n assert validateIyr(2021) == False, 'Should be False'\n\n\ndef validateEyr(eyrField):\n return checkInRange(eyrField, 2020, 2030)\n\n\ndef test_validateEyr():\n assert validateEyr(2020) == True, 'Should be True'\n assert validateEyr(2030) == True, 'Should be True'\n assert validateEyr(2025) == True, 'Should be True'\n assert validateEyr(2002) == False, 'Should be False'\n assert validateEyr(2031) == False, 'Should be False'\n\n\ndef validateHcl(hclField):\n if hclField[0] != '#':\n return False\n else:\n colour = hclField[1:]\n validChrs = [str(x) for x in range(0, 10)] + [chr(x) for x in range\n (ord('a'), ord('f') + 1)]\n return all([(c in validChrs) for c in colour])\n\n\ndef test_validateHcl():\n assert validateHcl('#123abc') == True\n assert validateHcl('#123abz') == False\n assert validateHcl('123abc') == False\n\n\ndef validateEcl(eclField):\n return eclField in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\n\n\ndef test_validateEcl():\n assert validateEcl('brn') == True\n assert validateEcl('wat') == False\n\n\ndef validatePid(pidField):\n if len(pidField) != 9:\n return False\n else:\n validChrs = [str(x) for x in range(0, 10)]\n return all([(c in validChrs) for c in 
pidField])\n\n\ndef test_validatePid():\n assert validatePid('000000001') == True\n assert validatePid('0123456789') == False\n\n\ndef validateCid(cidField):\n return True\n\n\ndef checkFields(passport):\n parsedPassport = parsePassport(passport)\n hasFields = list(parsedPassport.keys())\n if 'cid' in hasFields:\n needsFields = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid', 'cid']\n return all([(c in hasFields) for c in needsFields])\n else:\n needsFields = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid']\n return all([(c in hasFields) for c in needsFields])\n\n\ndef test_checkFields():\n x = \"\"\"\n ecl:gry pid:860033327 eyr:2020 hcl:#fffffd\n byr:1937 iyr:2017 cid:147 hgt:183cm \n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884\n hcl:#cfa07d byr:1929 \n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\n hcl:#ae17e1 iyr:2013\n eyr:2024\n ecl:brn pid:760753108 byr:1931\n hgt:179cm\n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n hcl:#cfa07d eyr:2025 pid:166559648\n iyr:2011 ecl:brn hgt:59in\n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f \n \"\"\"\n assert checkFields(x) == True\n\n\ndef checkPassport(passport):\n if checkFields(passport) == False:\n return False\n else:\n parsedPassport = parsePassport(passport)\n valids = []\n for f in list(parsedPassport.keys()):\n if eval('validate' + f.capitalize() + \"('\" + parsedPassport[f] +\n \"')\") == False:\n valids.append(False)\n else:\n valids.append(True)\n return all(valids)\n\n\ndef test_checkPassport():\n x = \"\"\"\n eyr:1972 cid:100\n hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n iyr:2019\n hcl:#602927 eyr:1967 hgt:170cm\n ecl:grn pid:012533040 byr:1946\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hcl:dab227 iyr:2012\n ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hgt:59cm ecl:zzz\n eyr:2038 hcl:74454a iyr:2023\n pid:3556412378 byr:2007\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n eyr:2029 ecl:blu cid:129 byr:1989\n iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n hcl:#888785\n hgt:164cm byr:2001 iyr:2015 cid:88\n pid:545766238 ecl:hzl\n eyr:2022\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719\n\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
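# The step string above still carries the complete passport-validation module,
# with newlines stored as literal \n escapes. As an orientation aid, here is a
# minimal runnable sketch of its height validator, reconstructed from that
# string; the snake_case names are mine, the rules (59-76 in, 150-193 cm) are
# taken verbatim from the embedded parseHgt/validateHgt code.
def parse_hgt(hgt_field):
    # "190cm" -> ('cm', 190), "60in" -> ('in', 60); anything else -> ('xx', 0)
    if len(hgt_field) == 5 and hgt_field.endswith('cm'):
        return 'cm', int(hgt_field[:3])
    if len(hgt_field) == 4 and hgt_field.endswith('in'):
        return 'in', int(hgt_field[:2])
    return 'xx', 0


def validate_hgt(hgt_field):
    units, value = parse_hgt(hgt_field)
    if units == 'in':
        return 59 <= value <= 76
    if units == 'cm':
        return 150 <= value <= 193
    return False


assert validate_hgt('60in') and validate_hgt('190cm')
assert not validate_hgt('190in') and not validate_hgt('190')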
"<function token>\n\n\ndef checkInRange(value, min, max):\n value = int(value)\n validValues = range(min, max + 1)\n return value in validValues\n\n\ndef test_checkInRange():\n assert checkInRange(1919, 1920, 2002) == False, 'Should be False'\n assert checkInRange(2002, 1920, 2002) == True, 'Should be True'\n assert checkInRange(2003, 1920, 2002) == False, 'Should be False'\n assert checkInRange(1950, 1920, 2002) == True, 'Should be True'\n assert checkInRange(2020, 2020, 2030) == True, 'Should be True'\n\n\ndef parseHgt(hgtField):\n if (len(hgtField) == 5) & (hgtField.find('cm') != -1):\n value = hgtField[:3]\n units = 'cm'\n return {'units': units, 'value': int(value)}\n elif (len(hgtField) == 4) & (hgtField.find('in') != -1):\n value = hgtField[:2]\n units = 'in'\n return {'units': units, 'value': int(value)}\n else:\n return {'units': 'xx', 'value': 0}\n\n\ndef test_parseHgt():\n assert parseHgt('60in') == {'units': 'in', 'value': 60}\n assert parseHgt('190cm') == {'units': 'cm', 'value': 190}\n assert parseHgt('60cm') == {'units': 'xx', 'value': 0}\n assert parseHgt('600in') == {'units': 'xx', 'value': 0}\n\n\ndef validateHgt(hgtField):\n parsedHgt = parseHgt(hgtField)\n if parsedHgt['units'] == 'in':\n return checkInRange(parsedHgt['value'], 59, 76)\n elif parsedHgt['units'] == 'cm':\n return checkInRange(parsedHgt['value'], 150, 193)\n else:\n return False\n\n\ndef test_validateHgt():\n assert validateHgt('60in') == True, 'Should be True'\n assert validateHgt('190cm') == True, 'Should be True'\n assert validateHgt('190in') == False, 'Should be False'\n assert validateHgt('190') == False, 'Should be False'\n\n\ndef validateByr(byrField):\n return checkInRange(byrField, 1920, 2002)\n\n\n<function token>\n\n\ndef validateIyr(iyrField):\n return checkInRange(iyrField, 2010, 2020)\n\n\ndef test_validateIyr():\n assert validateIyr(2010) == True, 'Should be True'\n assert validateIyr(2012) == True, 'Should be True'\n assert validateIyr(2020) == True, 'Should be True'\n assert validateIyr(2015) == True, 'Should be True'\n assert validateIyr(2002) == False, 'Should be False'\n assert validateIyr(2021) == False, 'Should be False'\n\n\ndef validateEyr(eyrField):\n return checkInRange(eyrField, 2020, 2030)\n\n\ndef test_validateEyr():\n assert validateEyr(2020) == True, 'Should be True'\n assert validateEyr(2030) == True, 'Should be True'\n assert validateEyr(2025) == True, 'Should be True'\n assert validateEyr(2002) == False, 'Should be False'\n assert validateEyr(2031) == False, 'Should be False'\n\n\ndef validateHcl(hclField):\n if hclField[0] != '#':\n return False\n else:\n colour = hclField[1:]\n validChrs = [str(x) for x in range(0, 10)] + [chr(x) for x in range\n (ord('a'), ord('f') + 1)]\n return all([(c in validChrs) for c in colour])\n\n\ndef test_validateHcl():\n assert validateHcl('#123abc') == True\n assert validateHcl('#123abz') == False\n assert validateHcl('123abc') == False\n\n\ndef validateEcl(eclField):\n return eclField in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\n\n\ndef test_validateEcl():\n assert validateEcl('brn') == True\n assert validateEcl('wat') == False\n\n\ndef validatePid(pidField):\n if len(pidField) != 9:\n return False\n else:\n validChrs = [str(x) for x in range(0, 10)]\n return all([(c in validChrs) for c in pidField])\n\n\ndef test_validatePid():\n assert validatePid('000000001') == True\n assert validatePid('0123456789') == False\n\n\ndef validateCid(cidField):\n return True\n\n\ndef checkFields(passport):\n parsedPassport = 
parsePassport(passport)\n hasFields = list(parsedPassport.keys())\n if 'cid' in hasFields:\n needsFields = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid', 'cid']\n return all([(c in hasFields) for c in needsFields])\n else:\n needsFields = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid']\n return all([(c in hasFields) for c in needsFields])\n\n\ndef test_checkFields():\n x = \"\"\"\n ecl:gry pid:860033327 eyr:2020 hcl:#fffffd\n byr:1937 iyr:2017 cid:147 hgt:183cm \n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884\n hcl:#cfa07d byr:1929 \n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\n hcl:#ae17e1 iyr:2013\n eyr:2024\n ecl:brn pid:760753108 byr:1931\n hgt:179cm\n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n hcl:#cfa07d eyr:2025 pid:166559648\n iyr:2011 ecl:brn hgt:59in\n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f \n \"\"\"\n assert checkFields(x) == True\n\n\ndef checkPassport(passport):\n if checkFields(passport) == False:\n return False\n else:\n parsedPassport = parsePassport(passport)\n valids = []\n for f in list(parsedPassport.keys()):\n if eval('validate' + f.capitalize() + \"('\" + parsedPassport[f] +\n \"')\") == False:\n valids.append(False)\n else:\n valids.append(True)\n return all(valids)\n\n\ndef test_checkPassport():\n x = \"\"\"\n eyr:1972 cid:100\n hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n iyr:2019\n hcl:#602927 eyr:1967 hgt:170cm\n ecl:grn pid:012533040 byr:1946\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hcl:dab227 iyr:2012\n ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hgt:59cm ecl:zzz\n eyr:2038 hcl:74454a iyr:2023\n pid:3556412378 byr:2007\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n eyr:2029 ecl:blu cid:129 byr:1989\n iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n hcl:#888785\n hgt:164cm byr:2001 iyr:2015 cid:88\n pid:545766238 ecl:hzl\n eyr:2022\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719\n\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<function token>\n\n\ndef checkInRange(value, min, max):\n value = int(value)\n validValues = range(min, max + 1)\n return value in validValues\n\n\ndef test_checkInRange():\n assert checkInRange(1919, 1920, 2002) == False, 'Should be False'\n assert checkInRange(2002, 1920, 2002) == True, 'Should be True'\n assert checkInRange(2003, 1920, 2002) == False, 'Should be False'\n assert checkInRange(1950, 1920, 2002) == True, 'Should be True'\n assert checkInRange(2020, 2020, 2030) == True, 'Should be True'\n\n\ndef parseHgt(hgtField):\n if (len(hgtField) == 5) & (hgtField.find('cm') != -1):\n value = hgtField[:3]\n units = 'cm'\n return {'units': units, 'value': int(value)}\n elif (len(hgtField) == 4) & (hgtField.find('in') != -1):\n value = hgtField[:2]\n units = 'in'\n return {'units': units, 'value': int(value)}\n else:\n return {'units': 'xx', 'value': 0}\n\n\ndef test_parseHgt():\n assert parseHgt('60in') == {'units': 'in', 'value': 60}\n assert parseHgt('190cm') == {'units': 'cm', 'value': 190}\n assert parseHgt('60cm') == {'units': 'xx', 'value': 0}\n assert parseHgt('600in') == {'units': 'xx', 'value': 0}\n\n\ndef validateHgt(hgtField):\n parsedHgt = parseHgt(hgtField)\n if parsedHgt['units'] == 'in':\n return checkInRange(parsedHgt['value'], 59, 76)\n elif parsedHgt['units'] == 'cm':\n return checkInRange(parsedHgt['value'], 150, 193)\n else:\n return False\n\n\ndef test_validateHgt():\n assert validateHgt('60in') == True, 'Should be True'\n assert validateHgt('190cm') == True, 'Should be True'\n assert validateHgt('190in') == False, 'Should be False'\n assert validateHgt('190') == False, 'Should be False'\n\n\ndef validateByr(byrField):\n return checkInRange(byrField, 1920, 2002)\n\n\n<function token>\n\n\ndef validateIyr(iyrField):\n return checkInRange(iyrField, 2010, 2020)\n\n\ndef test_validateIyr():\n assert validateIyr(2010) == True, 'Should be True'\n assert validateIyr(2012) == True, 'Should be True'\n assert validateIyr(2020) == True, 'Should be True'\n assert validateIyr(2015) == True, 'Should be True'\n assert validateIyr(2002) == False, 'Should be False'\n assert validateIyr(2021) == False, 'Should be False'\n\n\ndef validateEyr(eyrField):\n return checkInRange(eyrField, 2020, 2030)\n\n\ndef test_validateEyr():\n assert validateEyr(2020) == True, 'Should be True'\n assert validateEyr(2030) == True, 'Should be True'\n assert validateEyr(2025) == True, 'Should be True'\n assert validateEyr(2002) == False, 'Should be False'\n assert validateEyr(2031) == False, 'Should be False'\n\n\ndef validateHcl(hclField):\n if hclField[0] != '#':\n return False\n else:\n colour = hclField[1:]\n validChrs = [str(x) for x in range(0, 10)] + [chr(x) for x in range\n (ord('a'), ord('f') + 1)]\n return all([(c in validChrs) for c in colour])\n\n\ndef test_validateHcl():\n assert validateHcl('#123abc') == True\n assert validateHcl('#123abz') == False\n assert validateHcl('123abc') == False\n\n\ndef validateEcl(eclField):\n return eclField in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\n\n\n<function token>\n\n\ndef validatePid(pidField):\n if len(pidField) != 9:\n return False\n else:\n validChrs = [str(x) for x in range(0, 10)]\n return all([(c in validChrs) for c in pidField])\n\n\ndef test_validatePid():\n assert validatePid('000000001') == True\n assert validatePid('0123456789') == False\n\n\ndef validateCid(cidField):\n return True\n\n\ndef checkFields(passport):\n parsedPassport = parsePassport(passport)\n hasFields = list(parsedPassport.keys())\n if 'cid' in hasFields:\n 
needsFields = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid', 'cid']\n return all([(c in hasFields) for c in needsFields])\n else:\n needsFields = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid']\n return all([(c in hasFields) for c in needsFields])\n\n\ndef test_checkFields():\n x = \"\"\"\n ecl:gry pid:860033327 eyr:2020 hcl:#fffffd\n byr:1937 iyr:2017 cid:147 hgt:183cm \n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884\n hcl:#cfa07d byr:1929 \n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\n hcl:#ae17e1 iyr:2013\n eyr:2024\n ecl:brn pid:760753108 byr:1931\n hgt:179cm\n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n hcl:#cfa07d eyr:2025 pid:166559648\n iyr:2011 ecl:brn hgt:59in\n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f \n \"\"\"\n assert checkFields(x) == True\n\n\ndef checkPassport(passport):\n if checkFields(passport) == False:\n return False\n else:\n parsedPassport = parsePassport(passport)\n valids = []\n for f in list(parsedPassport.keys()):\n if eval('validate' + f.capitalize() + \"('\" + parsedPassport[f] +\n \"')\") == False:\n valids.append(False)\n else:\n valids.append(True)\n return all(valids)\n\n\ndef test_checkPassport():\n x = \"\"\"\n eyr:1972 cid:100\n hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n iyr:2019\n hcl:#602927 eyr:1967 hgt:170cm\n ecl:grn pid:012533040 byr:1946\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hcl:dab227 iyr:2012\n ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hgt:59cm ecl:zzz\n eyr:2038 hcl:74454a iyr:2023\n pid:3556412378 byr:2007\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n eyr:2029 ecl:blu cid:129 byr:1989\n iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n hcl:#888785\n hgt:164cm byr:2001 iyr:2015 cid:88\n pid:545766238 ecl:hzl\n eyr:2022\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719\n\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<function token>\n\n\ndef checkInRange(value, min, max):\n value = int(value)\n validValues = range(min, max + 1)\n return value in validValues\n\n\ndef test_checkInRange():\n assert checkInRange(1919, 1920, 2002) == False, 'Should be False'\n assert checkInRange(2002, 1920, 2002) == True, 'Should be True'\n assert checkInRange(2003, 1920, 2002) == False, 'Should be False'\n assert checkInRange(1950, 1920, 2002) == True, 'Should be True'\n assert checkInRange(2020, 2020, 2030) == True, 'Should be True'\n\n\ndef parseHgt(hgtField):\n if (len(hgtField) == 5) & (hgtField.find('cm') != -1):\n value = hgtField[:3]\n units = 'cm'\n return {'units': units, 'value': int(value)}\n elif (len(hgtField) == 4) & (hgtField.find('in') != -1):\n value = hgtField[:2]\n units = 'in'\n return {'units': units, 'value': int(value)}\n else:\n return {'units': 'xx', 'value': 0}\n\n\ndef test_parseHgt():\n assert parseHgt('60in') == {'units': 'in', 'value': 60}\n assert parseHgt('190cm') == {'units': 'cm', 'value': 190}\n assert parseHgt('60cm') == {'units': 'xx', 'value': 0}\n assert parseHgt('600in') == {'units': 'xx', 'value': 0}\n\n\ndef validateHgt(hgtField):\n parsedHgt = parseHgt(hgtField)\n if parsedHgt['units'] == 'in':\n return checkInRange(parsedHgt['value'], 59, 76)\n elif parsedHgt['units'] == 'cm':\n return checkInRange(parsedHgt['value'], 150, 193)\n else:\n return False\n\n\ndef test_validateHgt():\n assert validateHgt('60in') == True, 'Should be True'\n assert validateHgt('190cm') == True, 'Should be True'\n assert validateHgt('190in') == False, 'Should be False'\n assert validateHgt('190') == False, 'Should be False'\n\n\ndef validateByr(byrField):\n return checkInRange(byrField, 1920, 2002)\n\n\n<function token>\n<function token>\n\n\ndef test_validateIyr():\n assert validateIyr(2010) == True, 'Should be True'\n assert validateIyr(2012) == True, 'Should be True'\n assert validateIyr(2020) == True, 'Should be True'\n assert validateIyr(2015) == True, 'Should be True'\n assert validateIyr(2002) == False, 'Should be False'\n assert validateIyr(2021) == False, 'Should be False'\n\n\ndef validateEyr(eyrField):\n return checkInRange(eyrField, 2020, 2030)\n\n\ndef test_validateEyr():\n assert validateEyr(2020) == True, 'Should be True'\n assert validateEyr(2030) == True, 'Should be True'\n assert validateEyr(2025) == True, 'Should be True'\n assert validateEyr(2002) == False, 'Should be False'\n assert validateEyr(2031) == False, 'Should be False'\n\n\ndef validateHcl(hclField):\n if hclField[0] != '#':\n return False\n else:\n colour = hclField[1:]\n validChrs = [str(x) for x in range(0, 10)] + [chr(x) for x in range\n (ord('a'), ord('f') + 1)]\n return all([(c in validChrs) for c in colour])\n\n\ndef test_validateHcl():\n assert validateHcl('#123abc') == True\n assert validateHcl('#123abz') == False\n assert validateHcl('123abc') == False\n\n\ndef validateEcl(eclField):\n return eclField in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\n\n\n<function token>\n\n\ndef validatePid(pidField):\n if len(pidField) != 9:\n return False\n else:\n validChrs = [str(x) for x in range(0, 10)]\n return all([(c in validChrs) for c in pidField])\n\n\ndef test_validatePid():\n assert validatePid('000000001') == True\n assert validatePid('0123456789') == False\n\n\ndef validateCid(cidField):\n return True\n\n\ndef checkFields(passport):\n parsedPassport = parsePassport(passport)\n hasFields = list(parsedPassport.keys())\n if 'cid' in hasFields:\n needsFields = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid', 
'cid']\n return all([(c in hasFields) for c in needsFields])\n else:\n needsFields = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid']\n return all([(c in hasFields) for c in needsFields])\n\n\ndef test_checkFields():\n x = \"\"\"\n ecl:gry pid:860033327 eyr:2020 hcl:#fffffd\n byr:1937 iyr:2017 cid:147 hgt:183cm \n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884\n hcl:#cfa07d byr:1929 \n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\n hcl:#ae17e1 iyr:2013\n eyr:2024\n ecl:brn pid:760753108 byr:1931\n hgt:179cm\n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n hcl:#cfa07d eyr:2025 pid:166559648\n iyr:2011 ecl:brn hgt:59in\n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f \n \"\"\"\n assert checkFields(x) == True\n\n\ndef checkPassport(passport):\n if checkFields(passport) == False:\n return False\n else:\n parsedPassport = parsePassport(passport)\n valids = []\n for f in list(parsedPassport.keys()):\n if eval('validate' + f.capitalize() + \"('\" + parsedPassport[f] +\n \"')\") == False:\n valids.append(False)\n else:\n valids.append(True)\n return all(valids)\n\n\ndef test_checkPassport():\n x = \"\"\"\n eyr:1972 cid:100\n hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n iyr:2019\n hcl:#602927 eyr:1967 hgt:170cm\n ecl:grn pid:012533040 byr:1946\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hcl:dab227 iyr:2012\n ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hgt:59cm ecl:zzz\n eyr:2038 hcl:74454a iyr:2023\n pid:3556412378 byr:2007\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n eyr:2029 ecl:blu cid:129 byr:1989\n iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n hcl:#888785\n hgt:164cm byr:2001 iyr:2015 cid:88\n pid:545766238 ecl:hzl\n eyr:2022\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719\n\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<function token>\n\n\ndef checkInRange(value, min, max):\n value = int(value)\n validValues = range(min, max + 1)\n return value in validValues\n\n\n<function token>\n\n\ndef parseHgt(hgtField):\n if (len(hgtField) == 5) & (hgtField.find('cm') != -1):\n value = hgtField[:3]\n units = 'cm'\n return {'units': units, 'value': int(value)}\n elif (len(hgtField) == 4) & (hgtField.find('in') != -1):\n value = hgtField[:2]\n units = 'in'\n return {'units': units, 'value': int(value)}\n else:\n return {'units': 'xx', 'value': 0}\n\n\ndef test_parseHgt():\n assert parseHgt('60in') == {'units': 'in', 'value': 60}\n assert parseHgt('190cm') == {'units': 'cm', 'value': 190}\n assert parseHgt('60cm') == {'units': 'xx', 'value': 0}\n assert parseHgt('600in') == {'units': 'xx', 'value': 0}\n\n\ndef validateHgt(hgtField):\n parsedHgt = parseHgt(hgtField)\n if parsedHgt['units'] == 'in':\n return checkInRange(parsedHgt['value'], 59, 76)\n elif parsedHgt['units'] == 'cm':\n return checkInRange(parsedHgt['value'], 150, 193)\n else:\n return False\n\n\ndef test_validateHgt():\n assert validateHgt('60in') == True, 'Should be True'\n assert validateHgt('190cm') == True, 'Should be True'\n assert validateHgt('190in') == False, 'Should be False'\n assert validateHgt('190') == False, 'Should be False'\n\n\ndef validateByr(byrField):\n return checkInRange(byrField, 1920, 2002)\n\n\n<function token>\n<function token>\n\n\ndef test_validateIyr():\n assert validateIyr(2010) == True, 'Should be True'\n assert validateIyr(2012) == True, 'Should be True'\n assert validateIyr(2020) == True, 'Should be True'\n assert validateIyr(2015) == True, 'Should be True'\n assert validateIyr(2002) == False, 'Should be False'\n assert validateIyr(2021) == False, 'Should be False'\n\n\ndef validateEyr(eyrField):\n return checkInRange(eyrField, 2020, 2030)\n\n\ndef test_validateEyr():\n assert validateEyr(2020) == True, 'Should be True'\n assert validateEyr(2030) == True, 'Should be True'\n assert validateEyr(2025) == True, 'Should be True'\n assert validateEyr(2002) == False, 'Should be False'\n assert validateEyr(2031) == False, 'Should be False'\n\n\ndef validateHcl(hclField):\n if hclField[0] != '#':\n return False\n else:\n colour = hclField[1:]\n validChrs = [str(x) for x in range(0, 10)] + [chr(x) for x in range\n (ord('a'), ord('f') + 1)]\n return all([(c in validChrs) for c in colour])\n\n\ndef test_validateHcl():\n assert validateHcl('#123abc') == True\n assert validateHcl('#123abz') == False\n assert validateHcl('123abc') == False\n\n\ndef validateEcl(eclField):\n return eclField in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\n\n\n<function token>\n\n\ndef validatePid(pidField):\n if len(pidField) != 9:\n return False\n else:\n validChrs = [str(x) for x in range(0, 10)]\n return all([(c in validChrs) for c in pidField])\n\n\ndef test_validatePid():\n assert validatePid('000000001') == True\n assert validatePid('0123456789') == False\n\n\ndef validateCid(cidField):\n return True\n\n\ndef checkFields(passport):\n parsedPassport = parsePassport(passport)\n hasFields = list(parsedPassport.keys())\n if 'cid' in hasFields:\n needsFields = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid', 'cid']\n return all([(c in hasFields) for c in needsFields])\n else:\n needsFields = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid']\n return all([(c in hasFields) for c in needsFields])\n\n\ndef test_checkFields():\n x = \"\"\"\n ecl:gry pid:860033327 eyr:2020 hcl:#fffffd\n byr:1937 iyr:2017 cid:147 hgt:183cm \n \"\"\"\n assert 
checkFields(x) == True\n x = \"\"\"\n iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884\n hcl:#cfa07d byr:1929 \n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\n hcl:#ae17e1 iyr:2013\n eyr:2024\n ecl:brn pid:760753108 byr:1931\n hgt:179cm\n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n hcl:#cfa07d eyr:2025 pid:166559648\n iyr:2011 ecl:brn hgt:59in\n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f \n \"\"\"\n assert checkFields(x) == True\n\n\ndef checkPassport(passport):\n if checkFields(passport) == False:\n return False\n else:\n parsedPassport = parsePassport(passport)\n valids = []\n for f in list(parsedPassport.keys()):\n if eval('validate' + f.capitalize() + \"('\" + parsedPassport[f] +\n \"')\") == False:\n valids.append(False)\n else:\n valids.append(True)\n return all(valids)\n\n\ndef test_checkPassport():\n x = \"\"\"\n eyr:1972 cid:100\n hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n iyr:2019\n hcl:#602927 eyr:1967 hgt:170cm\n ecl:grn pid:012533040 byr:1946\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hcl:dab227 iyr:2012\n ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hgt:59cm ecl:zzz\n eyr:2038 hcl:74454a iyr:2023\n pid:3556412378 byr:2007\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n eyr:2029 ecl:blu cid:129 byr:1989\n iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n hcl:#888785\n hgt:164cm byr:2001 iyr:2015 cid:88\n pid:545766238 ecl:hzl\n eyr:2022\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719\n\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<function token>\n\n\ndef checkInRange(value, min, max):\n value = int(value)\n validValues = range(min, max + 1)\n return value in validValues\n\n\n<function token>\n\n\ndef parseHgt(hgtField):\n if (len(hgtField) == 5) & (hgtField.find('cm') != -1):\n value = hgtField[:3]\n units = 'cm'\n return {'units': units, 'value': int(value)}\n elif (len(hgtField) == 4) & (hgtField.find('in') != -1):\n value = hgtField[:2]\n units = 'in'\n return {'units': units, 'value': int(value)}\n else:\n return {'units': 'xx', 'value': 0}\n\n\ndef test_parseHgt():\n assert parseHgt('60in') == {'units': 'in', 'value': 60}\n assert parseHgt('190cm') == {'units': 'cm', 'value': 190}\n assert parseHgt('60cm') == {'units': 'xx', 'value': 0}\n assert parseHgt('600in') == {'units': 'xx', 'value': 0}\n\n\ndef validateHgt(hgtField):\n parsedHgt = parseHgt(hgtField)\n if parsedHgt['units'] == 'in':\n return checkInRange(parsedHgt['value'], 59, 76)\n elif parsedHgt['units'] == 'cm':\n return checkInRange(parsedHgt['value'], 150, 193)\n else:\n return False\n\n\n<function token>\n\n\ndef validateByr(byrField):\n return checkInRange(byrField, 1920, 2002)\n\n\n<function token>\n<function token>\n\n\ndef test_validateIyr():\n assert validateIyr(2010) == True, 'Should be True'\n assert validateIyr(2012) == True, 'Should be True'\n assert validateIyr(2020) == True, 'Should be True'\n assert validateIyr(2015) == True, 'Should be True'\n assert validateIyr(2002) == False, 'Should be False'\n assert validateIyr(2021) == False, 'Should be False'\n\n\ndef validateEyr(eyrField):\n return checkInRange(eyrField, 2020, 2030)\n\n\ndef test_validateEyr():\n assert validateEyr(2020) == True, 'Should be True'\n assert validateEyr(2030) == True, 'Should be True'\n assert validateEyr(2025) == True, 'Should be True'\n assert validateEyr(2002) == False, 'Should be False'\n assert validateEyr(2031) == False, 'Should be False'\n\n\ndef validateHcl(hclField):\n if hclField[0] != '#':\n return False\n else:\n colour = hclField[1:]\n validChrs = [str(x) for x in range(0, 10)] + [chr(x) for x in range\n (ord('a'), ord('f') + 1)]\n return all([(c in validChrs) for c in colour])\n\n\ndef test_validateHcl():\n assert validateHcl('#123abc') == True\n assert validateHcl('#123abz') == False\n assert validateHcl('123abc') == False\n\n\ndef validateEcl(eclField):\n return eclField in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\n\n\n<function token>\n\n\ndef validatePid(pidField):\n if len(pidField) != 9:\n return False\n else:\n validChrs = [str(x) for x in range(0, 10)]\n return all([(c in validChrs) for c in pidField])\n\n\ndef test_validatePid():\n assert validatePid('000000001') == True\n assert validatePid('0123456789') == False\n\n\ndef validateCid(cidField):\n return True\n\n\ndef checkFields(passport):\n parsedPassport = parsePassport(passport)\n hasFields = list(parsedPassport.keys())\n if 'cid' in hasFields:\n needsFields = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid', 'cid']\n return all([(c in hasFields) for c in needsFields])\n else:\n needsFields = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid']\n return all([(c in hasFields) for c in needsFields])\n\n\ndef test_checkFields():\n x = \"\"\"\n ecl:gry pid:860033327 eyr:2020 hcl:#fffffd\n byr:1937 iyr:2017 cid:147 hgt:183cm \n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884\n hcl:#cfa07d byr:1929 \n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\n hcl:#ae17e1 iyr:2013\n eyr:2024\n ecl:brn pid:760753108 byr:1931\n 
hgt:179cm\n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n hcl:#cfa07d eyr:2025 pid:166559648\n iyr:2011 ecl:brn hgt:59in\n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f \n \"\"\"\n assert checkFields(x) == True\n\n\ndef checkPassport(passport):\n if checkFields(passport) == False:\n return False\n else:\n parsedPassport = parsePassport(passport)\n valids = []\n for f in list(parsedPassport.keys()):\n if eval('validate' + f.capitalize() + \"('\" + parsedPassport[f] +\n \"')\") == False:\n valids.append(False)\n else:\n valids.append(True)\n return all(valids)\n\n\ndef test_checkPassport():\n x = \"\"\"\n eyr:1972 cid:100\n hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n iyr:2019\n hcl:#602927 eyr:1967 hgt:170cm\n ecl:grn pid:012533040 byr:1946\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hcl:dab227 iyr:2012\n ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hgt:59cm ecl:zzz\n eyr:2038 hcl:74454a iyr:2023\n pid:3556412378 byr:2007\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n eyr:2029 ecl:blu cid:129 byr:1989\n iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n hcl:#888785\n hgt:164cm byr:2001 iyr:2015 cid:88\n pid:545766238 ecl:hzl\n eyr:2022\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719\n\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<function token>\n\n\ndef checkInRange(value, min, max):\n value = int(value)\n validValues = range(min, max + 1)\n return value in validValues\n\n\n<function token>\n\n\ndef parseHgt(hgtField):\n if (len(hgtField) == 5) & (hgtField.find('cm') != -1):\n value = hgtField[:3]\n units = 'cm'\n return {'units': units, 'value': int(value)}\n elif (len(hgtField) == 4) & (hgtField.find('in') != -1):\n value = hgtField[:2]\n units = 'in'\n return {'units': units, 'value': int(value)}\n else:\n return {'units': 'xx', 'value': 0}\n\n\ndef test_parseHgt():\n assert parseHgt('60in') == {'units': 'in', 'value': 60}\n assert parseHgt('190cm') == {'units': 'cm', 'value': 190}\n assert parseHgt('60cm') == {'units': 'xx', 'value': 0}\n assert parseHgt('600in') == {'units': 'xx', 'value': 0}\n\n\n<function token>\n<function token>\n\n\ndef validateByr(byrField):\n return checkInRange(byrField, 1920, 2002)\n\n\n<function token>\n<function token>\n\n\ndef test_validateIyr():\n assert validateIyr(2010) == True, 'Should be True'\n assert validateIyr(2012) == True, 'Should be True'\n assert validateIyr(2020) == True, 'Should be True'\n assert validateIyr(2015) == True, 'Should be True'\n assert validateIyr(2002) == False, 'Should be False'\n assert validateIyr(2021) == False, 'Should be False'\n\n\ndef validateEyr(eyrField):\n return checkInRange(eyrField, 2020, 2030)\n\n\ndef test_validateEyr():\n assert validateEyr(2020) == True, 'Should be True'\n assert validateEyr(2030) == True, 'Should be True'\n assert validateEyr(2025) == True, 'Should be True'\n assert validateEyr(2002) == False, 'Should be False'\n assert validateEyr(2031) == False, 'Should be False'\n\n\ndef validateHcl(hclField):\n if hclField[0] != '#':\n return False\n else:\n colour = hclField[1:]\n validChrs = [str(x) for x in range(0, 10)] + [chr(x) for x in range\n (ord('a'), ord('f') + 1)]\n return all([(c in validChrs) for c in colour])\n\n\ndef test_validateHcl():\n assert validateHcl('#123abc') == True\n assert validateHcl('#123abz') == False\n assert validateHcl('123abc') == False\n\n\ndef validateEcl(eclField):\n return eclField in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\n\n\n<function token>\n\n\ndef validatePid(pidField):\n if len(pidField) != 9:\n return False\n else:\n validChrs = [str(x) for x in range(0, 10)]\n return all([(c in validChrs) for c in pidField])\n\n\ndef test_validatePid():\n assert validatePid('000000001') == True\n assert validatePid('0123456789') == False\n\n\ndef validateCid(cidField):\n return True\n\n\ndef checkFields(passport):\n parsedPassport = parsePassport(passport)\n hasFields = list(parsedPassport.keys())\n if 'cid' in hasFields:\n needsFields = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid', 'cid']\n return all([(c in hasFields) for c in needsFields])\n else:\n needsFields = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid']\n return all([(c in hasFields) for c in needsFields])\n\n\ndef test_checkFields():\n x = \"\"\"\n ecl:gry pid:860033327 eyr:2020 hcl:#fffffd\n byr:1937 iyr:2017 cid:147 hgt:183cm \n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884\n hcl:#cfa07d byr:1929 \n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\n hcl:#ae17e1 iyr:2013\n eyr:2024\n ecl:brn pid:760753108 byr:1931\n hgt:179cm\n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n hcl:#cfa07d eyr:2025 pid:166559648\n iyr:2011 ecl:brn hgt:59in\n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n 
hcl:#623a2f \n \"\"\"\n assert checkFields(x) == True\n\n\ndef checkPassport(passport):\n if checkFields(passport) == False:\n return False\n else:\n parsedPassport = parsePassport(passport)\n valids = []\n for f in list(parsedPassport.keys()):\n if eval('validate' + f.capitalize() + \"('\" + parsedPassport[f] +\n \"')\") == False:\n valids.append(False)\n else:\n valids.append(True)\n return all(valids)\n\n\ndef test_checkPassport():\n x = \"\"\"\n eyr:1972 cid:100\n hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n iyr:2019\n hcl:#602927 eyr:1967 hgt:170cm\n ecl:grn pid:012533040 byr:1946\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hcl:dab227 iyr:2012\n ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hgt:59cm ecl:zzz\n eyr:2038 hcl:74454a iyr:2023\n pid:3556412378 byr:2007\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n eyr:2029 ecl:blu cid:129 byr:1989\n iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n hcl:#888785\n hgt:164cm byr:2001 iyr:2015 cid:88\n pid:545766238 ecl:hzl\n eyr:2022\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719\n\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
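# checkFields in the steps above enumerates the required keys and tests each one
# with a list comprehension. A self-contained equivalent sketch using a set
# difference (same seven required fields, cid optional) would be:
REQUIRED_FIELDS = {'byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid'}  # cid may be absent


def check_fields(parsed):
    """parsed: dict of field -> value, as produced by parsePassport."""
    return REQUIRED_FIELDS <= set(parsed)


assert check_fields({'byr': 1, 'iyr': 1, 'eyr': 1, 'hgt': 1,
                     'hcl': 1, 'ecl': 1, 'pid': 1, 'cid': 1})
assert not check_fields({'byr': 1, 'iyr': 1})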
"<function token>\n\n\ndef checkInRange(value, min, max):\n value = int(value)\n validValues = range(min, max + 1)\n return value in validValues\n\n\n<function token>\n\n\ndef parseHgt(hgtField):\n if (len(hgtField) == 5) & (hgtField.find('cm') != -1):\n value = hgtField[:3]\n units = 'cm'\n return {'units': units, 'value': int(value)}\n elif (len(hgtField) == 4) & (hgtField.find('in') != -1):\n value = hgtField[:2]\n units = 'in'\n return {'units': units, 'value': int(value)}\n else:\n return {'units': 'xx', 'value': 0}\n\n\ndef test_parseHgt():\n assert parseHgt('60in') == {'units': 'in', 'value': 60}\n assert parseHgt('190cm') == {'units': 'cm', 'value': 190}\n assert parseHgt('60cm') == {'units': 'xx', 'value': 0}\n assert parseHgt('600in') == {'units': 'xx', 'value': 0}\n\n\n<function token>\n<function token>\n\n\ndef validateByr(byrField):\n return checkInRange(byrField, 1920, 2002)\n\n\n<function token>\n<function token>\n\n\ndef test_validateIyr():\n assert validateIyr(2010) == True, 'Should be True'\n assert validateIyr(2012) == True, 'Should be True'\n assert validateIyr(2020) == True, 'Should be True'\n assert validateIyr(2015) == True, 'Should be True'\n assert validateIyr(2002) == False, 'Should be False'\n assert validateIyr(2021) == False, 'Should be False'\n\n\ndef validateEyr(eyrField):\n return checkInRange(eyrField, 2020, 2030)\n\n\ndef test_validateEyr():\n assert validateEyr(2020) == True, 'Should be True'\n assert validateEyr(2030) == True, 'Should be True'\n assert validateEyr(2025) == True, 'Should be True'\n assert validateEyr(2002) == False, 'Should be False'\n assert validateEyr(2031) == False, 'Should be False'\n\n\ndef validateHcl(hclField):\n if hclField[0] != '#':\n return False\n else:\n colour = hclField[1:]\n validChrs = [str(x) for x in range(0, 10)] + [chr(x) for x in range\n (ord('a'), ord('f') + 1)]\n return all([(c in validChrs) for c in colour])\n\n\ndef test_validateHcl():\n assert validateHcl('#123abc') == True\n assert validateHcl('#123abz') == False\n assert validateHcl('123abc') == False\n\n\ndef validateEcl(eclField):\n return eclField in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\n\n\n<function token>\n\n\ndef validatePid(pidField):\n if len(pidField) != 9:\n return False\n else:\n validChrs = [str(x) for x in range(0, 10)]\n return all([(c in validChrs) for c in pidField])\n\n\n<function token>\n\n\ndef validateCid(cidField):\n return True\n\n\ndef checkFields(passport):\n parsedPassport = parsePassport(passport)\n hasFields = list(parsedPassport.keys())\n if 'cid' in hasFields:\n needsFields = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid', 'cid']\n return all([(c in hasFields) for c in needsFields])\n else:\n needsFields = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid']\n return all([(c in hasFields) for c in needsFields])\n\n\ndef test_checkFields():\n x = \"\"\"\n ecl:gry pid:860033327 eyr:2020 hcl:#fffffd\n byr:1937 iyr:2017 cid:147 hgt:183cm \n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884\n hcl:#cfa07d byr:1929 \n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\n hcl:#ae17e1 iyr:2013\n eyr:2024\n ecl:brn pid:760753108 byr:1931\n hgt:179cm\n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n hcl:#cfa07d eyr:2025 pid:166559648\n iyr:2011 ecl:brn hgt:59in\n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f \n \"\"\"\n assert checkFields(x) == True\n\n\ndef checkPassport(passport):\n if 
checkFields(passport) == False:\n return False\n else:\n parsedPassport = parsePassport(passport)\n valids = []\n for f in list(parsedPassport.keys()):\n if eval('validate' + f.capitalize() + \"('\" + parsedPassport[f] +\n \"')\") == False:\n valids.append(False)\n else:\n valids.append(True)\n return all(valids)\n\n\ndef test_checkPassport():\n x = \"\"\"\n eyr:1972 cid:100\n hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n iyr:2019\n hcl:#602927 eyr:1967 hgt:170cm\n ecl:grn pid:012533040 byr:1946\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hcl:dab227 iyr:2012\n ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hgt:59cm ecl:zzz\n eyr:2038 hcl:74454a iyr:2023\n pid:3556412378 byr:2007\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n eyr:2029 ecl:blu cid:129 byr:1989\n iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n hcl:#888785\n hgt:164cm byr:2001 iyr:2015 cid:88\n pid:545766238 ecl:hzl\n eyr:2022\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719\n\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
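# checkPassport in the step above dispatches by building a call string and
# running it through eval(). A sketch of an explicit-mapping alternative (same
# field rules as the embedded validators; lambdas stand in for them, and
# hgt/hcl are omitted here for brevity):
FIELD_RULES = {
    'byr': lambda v: 1920 <= int(v) <= 2002,
    'iyr': lambda v: 2010 <= int(v) <= 2020,
    'eyr': lambda v: 2020 <= int(v) <= 2030,
    'ecl': lambda v: v in {'amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth'},
    'pid': lambda v: len(v) == 9 and v.isdigit(),
    'cid': lambda v: True,
}


def validate_parsed(parsed):
    """Return True only if every present field passes its rule."""
    return all(FIELD_RULES.get(field, lambda v: False)(value)
               for field, value in parsed.items())


assert validate_parsed({'byr': '1980', 'pid': '087499704', 'cid': '88'})
assert not validate_parsed({'byr': '2007', 'pid': '3556412378'})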
"<function token>\n\n\ndef checkInRange(value, min, max):\n value = int(value)\n validValues = range(min, max + 1)\n return value in validValues\n\n\n<function token>\n\n\ndef parseHgt(hgtField):\n if (len(hgtField) == 5) & (hgtField.find('cm') != -1):\n value = hgtField[:3]\n units = 'cm'\n return {'units': units, 'value': int(value)}\n elif (len(hgtField) == 4) & (hgtField.find('in') != -1):\n value = hgtField[:2]\n units = 'in'\n return {'units': units, 'value': int(value)}\n else:\n return {'units': 'xx', 'value': 0}\n\n\ndef test_parseHgt():\n assert parseHgt('60in') == {'units': 'in', 'value': 60}\n assert parseHgt('190cm') == {'units': 'cm', 'value': 190}\n assert parseHgt('60cm') == {'units': 'xx', 'value': 0}\n assert parseHgt('600in') == {'units': 'xx', 'value': 0}\n\n\n<function token>\n<function token>\n\n\ndef validateByr(byrField):\n return checkInRange(byrField, 1920, 2002)\n\n\n<function token>\n<function token>\n\n\ndef test_validateIyr():\n assert validateIyr(2010) == True, 'Should be True'\n assert validateIyr(2012) == True, 'Should be True'\n assert validateIyr(2020) == True, 'Should be True'\n assert validateIyr(2015) == True, 'Should be True'\n assert validateIyr(2002) == False, 'Should be False'\n assert validateIyr(2021) == False, 'Should be False'\n\n\ndef validateEyr(eyrField):\n return checkInRange(eyrField, 2020, 2030)\n\n\ndef test_validateEyr():\n assert validateEyr(2020) == True, 'Should be True'\n assert validateEyr(2030) == True, 'Should be True'\n assert validateEyr(2025) == True, 'Should be True'\n assert validateEyr(2002) == False, 'Should be False'\n assert validateEyr(2031) == False, 'Should be False'\n\n\ndef validateHcl(hclField):\n if hclField[0] != '#':\n return False\n else:\n colour = hclField[1:]\n validChrs = [str(x) for x in range(0, 10)] + [chr(x) for x in range\n (ord('a'), ord('f') + 1)]\n return all([(c in validChrs) for c in colour])\n\n\ndef test_validateHcl():\n assert validateHcl('#123abc') == True\n assert validateHcl('#123abz') == False\n assert validateHcl('123abc') == False\n\n\ndef validateEcl(eclField):\n return eclField in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\n\n\n<function token>\n\n\ndef validatePid(pidField):\n if len(pidField) != 9:\n return False\n else:\n validChrs = [str(x) for x in range(0, 10)]\n return all([(c in validChrs) for c in pidField])\n\n\n<function token>\n\n\ndef validateCid(cidField):\n return True\n\n\ndef checkFields(passport):\n parsedPassport = parsePassport(passport)\n hasFields = list(parsedPassport.keys())\n if 'cid' in hasFields:\n needsFields = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid', 'cid']\n return all([(c in hasFields) for c in needsFields])\n else:\n needsFields = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid']\n return all([(c in hasFields) for c in needsFields])\n\n\ndef test_checkFields():\n x = \"\"\"\n ecl:gry pid:860033327 eyr:2020 hcl:#fffffd\n byr:1937 iyr:2017 cid:147 hgt:183cm \n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884\n hcl:#cfa07d byr:1929 \n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\n hcl:#ae17e1 iyr:2013\n eyr:2024\n ecl:brn pid:760753108 byr:1931\n hgt:179cm\n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n hcl:#cfa07d eyr:2025 pid:166559648\n iyr:2011 ecl:brn hgt:59in\n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f \n \"\"\"\n assert checkFields(x) == True\n\n\n<function token>\n\n\ndef 
test_checkPassport():\n x = \"\"\"\n eyr:1972 cid:100\n hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n iyr:2019\n hcl:#602927 eyr:1967 hgt:170cm\n ecl:grn pid:012533040 byr:1946\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hcl:dab227 iyr:2012\n ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hgt:59cm ecl:zzz\n eyr:2038 hcl:74454a iyr:2023\n pid:3556412378 byr:2007\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n eyr:2029 ecl:blu cid:129 byr:1989\n iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n hcl:#888785\n hgt:164cm byr:2001 iyr:2015 cid:88\n pid:545766238 ecl:hzl\n eyr:2022\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719\n\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<function token>\n\n\ndef checkInRange(value, min, max):\n value = int(value)\n validValues = range(min, max + 1)\n return value in validValues\n\n\n<function token>\n\n\ndef parseHgt(hgtField):\n if (len(hgtField) == 5) & (hgtField.find('cm') != -1):\n value = hgtField[:3]\n units = 'cm'\n return {'units': units, 'value': int(value)}\n elif (len(hgtField) == 4) & (hgtField.find('in') != -1):\n value = hgtField[:2]\n units = 'in'\n return {'units': units, 'value': int(value)}\n else:\n return {'units': 'xx', 'value': 0}\n\n\ndef test_parseHgt():\n assert parseHgt('60in') == {'units': 'in', 'value': 60}\n assert parseHgt('190cm') == {'units': 'cm', 'value': 190}\n assert parseHgt('60cm') == {'units': 'xx', 'value': 0}\n assert parseHgt('600in') == {'units': 'xx', 'value': 0}\n\n\n<function token>\n<function token>\n\n\ndef validateByr(byrField):\n return checkInRange(byrField, 1920, 2002)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef validateEyr(eyrField):\n return checkInRange(eyrField, 2020, 2030)\n\n\ndef test_validateEyr():\n assert validateEyr(2020) == True, 'Should be True'\n assert validateEyr(2030) == True, 'Should be True'\n assert validateEyr(2025) == True, 'Should be True'\n assert validateEyr(2002) == False, 'Should be False'\n assert validateEyr(2031) == False, 'Should be False'\n\n\ndef validateHcl(hclField):\n if hclField[0] != '#':\n return False\n else:\n colour = hclField[1:]\n validChrs = [str(x) for x in range(0, 10)] + [chr(x) for x in range\n (ord('a'), ord('f') + 1)]\n return all([(c in validChrs) for c in colour])\n\n\ndef test_validateHcl():\n assert validateHcl('#123abc') == True\n assert validateHcl('#123abz') == False\n assert validateHcl('123abc') == False\n\n\ndef validateEcl(eclField):\n return eclField in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\n\n\n<function token>\n\n\ndef validatePid(pidField):\n if len(pidField) != 9:\n return False\n else:\n validChrs = [str(x) for x in range(0, 10)]\n return all([(c in validChrs) for c in pidField])\n\n\n<function token>\n\n\ndef validateCid(cidField):\n return True\n\n\ndef checkFields(passport):\n parsedPassport = parsePassport(passport)\n hasFields = list(parsedPassport.keys())\n if 'cid' in hasFields:\n needsFields = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid', 'cid']\n return all([(c in hasFields) for c in needsFields])\n else:\n needsFields = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid']\n return all([(c in hasFields) for c in needsFields])\n\n\ndef test_checkFields():\n x = \"\"\"\n ecl:gry pid:860033327 eyr:2020 hcl:#fffffd\n byr:1937 iyr:2017 cid:147 hgt:183cm \n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884\n hcl:#cfa07d byr:1929 \n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\n hcl:#ae17e1 iyr:2013\n eyr:2024\n ecl:brn pid:760753108 byr:1931\n hgt:179cm\n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n hcl:#cfa07d eyr:2025 pid:166559648\n iyr:2011 ecl:brn hgt:59in\n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f \n \"\"\"\n assert checkFields(x) == True\n\n\n<function token>\n\n\ndef test_checkPassport():\n x = \"\"\"\n eyr:1972 cid:100\n hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n iyr:2019\n hcl:#602927 eyr:1967 hgt:170cm\n ecl:grn pid:012533040 byr:1946\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n 
hcl:dab227 iyr:2012\n ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hgt:59cm ecl:zzz\n eyr:2038 hcl:74454a iyr:2023\n pid:3556412378 byr:2007\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n eyr:2029 ecl:blu cid:129 byr:1989\n iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n hcl:#888785\n hgt:164cm byr:2001 iyr:2015 cid:88\n pid:545766238 ecl:hzl\n eyr:2022\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719\n\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<function token>\n\n\ndef checkInRange(value, min, max):\n value = int(value)\n validValues = range(min, max + 1)\n return value in validValues\n\n\n<function token>\n\n\ndef parseHgt(hgtField):\n if (len(hgtField) == 5) & (hgtField.find('cm') != -1):\n value = hgtField[:3]\n units = 'cm'\n return {'units': units, 'value': int(value)}\n elif (len(hgtField) == 4) & (hgtField.find('in') != -1):\n value = hgtField[:2]\n units = 'in'\n return {'units': units, 'value': int(value)}\n else:\n return {'units': 'xx', 'value': 0}\n\n\ndef test_parseHgt():\n assert parseHgt('60in') == {'units': 'in', 'value': 60}\n assert parseHgt('190cm') == {'units': 'cm', 'value': 190}\n assert parseHgt('60cm') == {'units': 'xx', 'value': 0}\n assert parseHgt('600in') == {'units': 'xx', 'value': 0}\n\n\n<function token>\n<function token>\n\n\ndef validateByr(byrField):\n return checkInRange(byrField, 1920, 2002)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef validateEyr(eyrField):\n return checkInRange(eyrField, 2020, 2030)\n\n\ndef test_validateEyr():\n assert validateEyr(2020) == True, 'Should be True'\n assert validateEyr(2030) == True, 'Should be True'\n assert validateEyr(2025) == True, 'Should be True'\n assert validateEyr(2002) == False, 'Should be False'\n assert validateEyr(2031) == False, 'Should be False'\n\n\ndef validateHcl(hclField):\n if hclField[0] != '#':\n return False\n else:\n colour = hclField[1:]\n validChrs = [str(x) for x in range(0, 10)] + [chr(x) for x in range\n (ord('a'), ord('f') + 1)]\n return all([(c in validChrs) for c in colour])\n\n\ndef test_validateHcl():\n assert validateHcl('#123abc') == True\n assert validateHcl('#123abz') == False\n assert validateHcl('123abc') == False\n\n\ndef validateEcl(eclField):\n return eclField in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\n\n\n<function token>\n\n\ndef validatePid(pidField):\n if len(pidField) != 9:\n return False\n else:\n validChrs = [str(x) for x in range(0, 10)]\n return all([(c in validChrs) for c in pidField])\n\n\n<function token>\n\n\ndef validateCid(cidField):\n return True\n\n\n<function token>\n\n\ndef test_checkFields():\n x = \"\"\"\n ecl:gry pid:860033327 eyr:2020 hcl:#fffffd\n byr:1937 iyr:2017 cid:147 hgt:183cm \n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884\n hcl:#cfa07d byr:1929 \n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\n hcl:#ae17e1 iyr:2013\n eyr:2024\n ecl:brn pid:760753108 byr:1931\n hgt:179cm\n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n hcl:#cfa07d eyr:2025 pid:166559648\n iyr:2011 ecl:brn hgt:59in\n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f \n \"\"\"\n assert checkFields(x) == True\n\n\n<function token>\n\n\ndef test_checkPassport():\n x = \"\"\"\n eyr:1972 cid:100\n hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n iyr:2019\n hcl:#602927 eyr:1967 hgt:170cm\n ecl:grn pid:012533040 byr:1946\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hcl:dab227 iyr:2012\n ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hgt:59cm ecl:zzz\n eyr:2038 hcl:74454a iyr:2023\n pid:3556412378 byr:2007\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n 
hcl:#623a2f\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n eyr:2029 ecl:blu cid:129 byr:1989\n iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n hcl:#888785\n hgt:164cm byr:2001 iyr:2015 cid:88\n pid:545766238 ecl:hzl\n eyr:2022\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719\n\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<function token>\n\n\ndef checkInRange(value, min, max):\n value = int(value)\n validValues = range(min, max + 1)\n return value in validValues\n\n\n<function token>\n\n\ndef parseHgt(hgtField):\n if (len(hgtField) == 5) & (hgtField.find('cm') != -1):\n value = hgtField[:3]\n units = 'cm'\n return {'units': units, 'value': int(value)}\n elif (len(hgtField) == 4) & (hgtField.find('in') != -1):\n value = hgtField[:2]\n units = 'in'\n return {'units': units, 'value': int(value)}\n else:\n return {'units': 'xx', 'value': 0}\n\n\ndef test_parseHgt():\n assert parseHgt('60in') == {'units': 'in', 'value': 60}\n assert parseHgt('190cm') == {'units': 'cm', 'value': 190}\n assert parseHgt('60cm') == {'units': 'xx', 'value': 0}\n assert parseHgt('600in') == {'units': 'xx', 'value': 0}\n\n\n<function token>\n<function token>\n\n\ndef validateByr(byrField):\n return checkInRange(byrField, 1920, 2002)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef validateEyr(eyrField):\n return checkInRange(eyrField, 2020, 2030)\n\n\ndef test_validateEyr():\n assert validateEyr(2020) == True, 'Should be True'\n assert validateEyr(2030) == True, 'Should be True'\n assert validateEyr(2025) == True, 'Should be True'\n assert validateEyr(2002) == False, 'Should be False'\n assert validateEyr(2031) == False, 'Should be False'\n\n\ndef validateHcl(hclField):\n if hclField[0] != '#':\n return False\n else:\n colour = hclField[1:]\n validChrs = [str(x) for x in range(0, 10)] + [chr(x) for x in range\n (ord('a'), ord('f') + 1)]\n return all([(c in validChrs) for c in colour])\n\n\ndef test_validateHcl():\n assert validateHcl('#123abc') == True\n assert validateHcl('#123abz') == False\n assert validateHcl('123abc') == False\n\n\n<function token>\n<function token>\n\n\ndef validatePid(pidField):\n if len(pidField) != 9:\n return False\n else:\n validChrs = [str(x) for x in range(0, 10)]\n return all([(c in validChrs) for c in pidField])\n\n\n<function token>\n\n\ndef validateCid(cidField):\n return True\n\n\n<function token>\n\n\ndef test_checkFields():\n x = \"\"\"\n ecl:gry pid:860033327 eyr:2020 hcl:#fffffd\n byr:1937 iyr:2017 cid:147 hgt:183cm \n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884\n hcl:#cfa07d byr:1929 \n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\n hcl:#ae17e1 iyr:2013\n eyr:2024\n ecl:brn pid:760753108 byr:1931\n hgt:179cm\n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n hcl:#cfa07d eyr:2025 pid:166559648\n iyr:2011 ecl:brn hgt:59in\n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f \n \"\"\"\n assert checkFields(x) == True\n\n\n<function token>\n\n\ndef test_checkPassport():\n x = \"\"\"\n eyr:1972 cid:100\n hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n iyr:2019\n hcl:#602927 eyr:1967 hgt:170cm\n ecl:grn pid:012533040 byr:1946\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hcl:dab227 iyr:2012\n ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hgt:59cm ecl:zzz\n eyr:2038 hcl:74454a iyr:2023\n pid:3556412378 byr:2007\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n 
eyr:2029 ecl:blu cid:129 byr:1989\n iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n hcl:#888785\n hgt:164cm byr:2001 iyr:2015 cid:88\n pid:545766238 ecl:hzl\n eyr:2022\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719\n\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<function token>\n\n\ndef checkInRange(value, min, max):\n value = int(value)\n validValues = range(min, max + 1)\n return value in validValues\n\n\n<function token>\n\n\ndef parseHgt(hgtField):\n if (len(hgtField) == 5) & (hgtField.find('cm') != -1):\n value = hgtField[:3]\n units = 'cm'\n return {'units': units, 'value': int(value)}\n elif (len(hgtField) == 4) & (hgtField.find('in') != -1):\n value = hgtField[:2]\n units = 'in'\n return {'units': units, 'value': int(value)}\n else:\n return {'units': 'xx', 'value': 0}\n\n\ndef test_parseHgt():\n assert parseHgt('60in') == {'units': 'in', 'value': 60}\n assert parseHgt('190cm') == {'units': 'cm', 'value': 190}\n assert parseHgt('60cm') == {'units': 'xx', 'value': 0}\n assert parseHgt('600in') == {'units': 'xx', 'value': 0}\n\n\n<function token>\n<function token>\n\n\ndef validateByr(byrField):\n return checkInRange(byrField, 1920, 2002)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef validateEyr(eyrField):\n return checkInRange(eyrField, 2020, 2030)\n\n\ndef test_validateEyr():\n assert validateEyr(2020) == True, 'Should be True'\n assert validateEyr(2030) == True, 'Should be True'\n assert validateEyr(2025) == True, 'Should be True'\n assert validateEyr(2002) == False, 'Should be False'\n assert validateEyr(2031) == False, 'Should be False'\n\n\ndef validateHcl(hclField):\n if hclField[0] != '#':\n return False\n else:\n colour = hclField[1:]\n validChrs = [str(x) for x in range(0, 10)] + [chr(x) for x in range\n (ord('a'), ord('f') + 1)]\n return all([(c in validChrs) for c in colour])\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef validatePid(pidField):\n if len(pidField) != 9:\n return False\n else:\n validChrs = [str(x) for x in range(0, 10)]\n return all([(c in validChrs) for c in pidField])\n\n\n<function token>\n\n\ndef validateCid(cidField):\n return True\n\n\n<function token>\n\n\ndef test_checkFields():\n x = \"\"\"\n ecl:gry pid:860033327 eyr:2020 hcl:#fffffd\n byr:1937 iyr:2017 cid:147 hgt:183cm \n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884\n hcl:#cfa07d byr:1929 \n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\n hcl:#ae17e1 iyr:2013\n eyr:2024\n ecl:brn pid:760753108 byr:1931\n hgt:179cm\n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n hcl:#cfa07d eyr:2025 pid:166559648\n iyr:2011 ecl:brn hgt:59in\n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f \n \"\"\"\n assert checkFields(x) == True\n\n\n<function token>\n\n\ndef test_checkPassport():\n x = \"\"\"\n eyr:1972 cid:100\n hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n iyr:2019\n hcl:#602927 eyr:1967 hgt:170cm\n ecl:grn pid:012533040 byr:1946\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hcl:dab227 iyr:2012\n ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hgt:59cm ecl:zzz\n eyr:2038 hcl:74454a iyr:2023\n pid:3556412378 byr:2007\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n eyr:2029 ecl:blu cid:129 byr:1989\n iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm\n \"\"\"\n assert checkPassport(x) == True, 'Should 
be true'\n x = \"\"\"\n hcl:#888785\n hgt:164cm byr:2001 iyr:2015 cid:88\n pid:545766238 ecl:hzl\n eyr:2022\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719\n\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<function token>\n\n\ndef checkInRange(value, min, max):\n value = int(value)\n validValues = range(min, max + 1)\n return value in validValues\n\n\n<function token>\n\n\ndef parseHgt(hgtField):\n if (len(hgtField) == 5) & (hgtField.find('cm') != -1):\n value = hgtField[:3]\n units = 'cm'\n return {'units': units, 'value': int(value)}\n elif (len(hgtField) == 4) & (hgtField.find('in') != -1):\n value = hgtField[:2]\n units = 'in'\n return {'units': units, 'value': int(value)}\n else:\n return {'units': 'xx', 'value': 0}\n\n\ndef test_parseHgt():\n assert parseHgt('60in') == {'units': 'in', 'value': 60}\n assert parseHgt('190cm') == {'units': 'cm', 'value': 190}\n assert parseHgt('60cm') == {'units': 'xx', 'value': 0}\n assert parseHgt('600in') == {'units': 'xx', 'value': 0}\n\n\n<function token>\n<function token>\n\n\ndef validateByr(byrField):\n return checkInRange(byrField, 1920, 2002)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_validateEyr():\n assert validateEyr(2020) == True, 'Should be True'\n assert validateEyr(2030) == True, 'Should be True'\n assert validateEyr(2025) == True, 'Should be True'\n assert validateEyr(2002) == False, 'Should be False'\n assert validateEyr(2031) == False, 'Should be False'\n\n\ndef validateHcl(hclField):\n if hclField[0] != '#':\n return False\n else:\n colour = hclField[1:]\n validChrs = [str(x) for x in range(0, 10)] + [chr(x) for x in range\n (ord('a'), ord('f') + 1)]\n return all([(c in validChrs) for c in colour])\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef validatePid(pidField):\n if len(pidField) != 9:\n return False\n else:\n validChrs = [str(x) for x in range(0, 10)]\n return all([(c in validChrs) for c in pidField])\n\n\n<function token>\n\n\ndef validateCid(cidField):\n return True\n\n\n<function token>\n\n\ndef test_checkFields():\n x = \"\"\"\n ecl:gry pid:860033327 eyr:2020 hcl:#fffffd\n byr:1937 iyr:2017 cid:147 hgt:183cm \n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884\n hcl:#cfa07d byr:1929 \n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\n hcl:#ae17e1 iyr:2013\n eyr:2024\n ecl:brn pid:760753108 byr:1931\n hgt:179cm\n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n hcl:#cfa07d eyr:2025 pid:166559648\n iyr:2011 ecl:brn hgt:59in\n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f \n \"\"\"\n assert checkFields(x) == True\n\n\n<function token>\n\n\ndef test_checkPassport():\n x = \"\"\"\n eyr:1972 cid:100\n hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n iyr:2019\n hcl:#602927 eyr:1967 hgt:170cm\n ecl:grn pid:012533040 byr:1946\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hcl:dab227 iyr:2012\n ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hgt:59cm ecl:zzz\n eyr:2038 hcl:74454a iyr:2023\n pid:3556412378 byr:2007\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n eyr:2029 ecl:blu cid:129 byr:1989\n iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n hcl:#888785\n hgt:164cm byr:2001 
iyr:2015 cid:88\n pid:545766238 ecl:hzl\n eyr:2022\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719\n\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<function token>\n\n\ndef checkInRange(value, min, max):\n value = int(value)\n validValues = range(min, max + 1)\n return value in validValues\n\n\n<function token>\n\n\ndef parseHgt(hgtField):\n if (len(hgtField) == 5) & (hgtField.find('cm') != -1):\n value = hgtField[:3]\n units = 'cm'\n return {'units': units, 'value': int(value)}\n elif (len(hgtField) == 4) & (hgtField.find('in') != -1):\n value = hgtField[:2]\n units = 'in'\n return {'units': units, 'value': int(value)}\n else:\n return {'units': 'xx', 'value': 0}\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef validateByr(byrField):\n return checkInRange(byrField, 1920, 2002)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_validateEyr():\n assert validateEyr(2020) == True, 'Should be True'\n assert validateEyr(2030) == True, 'Should be True'\n assert validateEyr(2025) == True, 'Should be True'\n assert validateEyr(2002) == False, 'Should be False'\n assert validateEyr(2031) == False, 'Should be False'\n\n\ndef validateHcl(hclField):\n if hclField[0] != '#':\n return False\n else:\n colour = hclField[1:]\n validChrs = [str(x) for x in range(0, 10)] + [chr(x) for x in range\n (ord('a'), ord('f') + 1)]\n return all([(c in validChrs) for c in colour])\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef validatePid(pidField):\n if len(pidField) != 9:\n return False\n else:\n validChrs = [str(x) for x in range(0, 10)]\n return all([(c in validChrs) for c in pidField])\n\n\n<function token>\n\n\ndef validateCid(cidField):\n return True\n\n\n<function token>\n\n\ndef test_checkFields():\n x = \"\"\"\n ecl:gry pid:860033327 eyr:2020 hcl:#fffffd\n byr:1937 iyr:2017 cid:147 hgt:183cm \n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884\n hcl:#cfa07d byr:1929 \n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\n hcl:#ae17e1 iyr:2013\n eyr:2024\n ecl:brn pid:760753108 byr:1931\n hgt:179cm\n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n hcl:#cfa07d eyr:2025 pid:166559648\n iyr:2011 ecl:brn hgt:59in\n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f \n \"\"\"\n assert checkFields(x) == True\n\n\n<function token>\n\n\ndef test_checkPassport():\n x = \"\"\"\n eyr:1972 cid:100\n hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n iyr:2019\n hcl:#602927 eyr:1967 hgt:170cm\n ecl:grn pid:012533040 byr:1946\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hcl:dab227 iyr:2012\n ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hgt:59cm ecl:zzz\n eyr:2038 hcl:74454a iyr:2023\n pid:3556412378 byr:2007\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n eyr:2029 ecl:blu cid:129 byr:1989\n iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n hcl:#888785\n hgt:164cm byr:2001 iyr:2015 cid:88\n pid:545766238 ecl:hzl\n eyr:2022\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719\n\n \"\"\"\n assert checkPassport(x) == 
True, 'Should be true'\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<function token>\n\n\ndef checkInRange(value, min, max):\n value = int(value)\n validValues = range(min, max + 1)\n return value in validValues\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef validateByr(byrField):\n return checkInRange(byrField, 1920, 2002)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_validateEyr():\n assert validateEyr(2020) == True, 'Should be True'\n assert validateEyr(2030) == True, 'Should be True'\n assert validateEyr(2025) == True, 'Should be True'\n assert validateEyr(2002) == False, 'Should be False'\n assert validateEyr(2031) == False, 'Should be False'\n\n\ndef validateHcl(hclField):\n if hclField[0] != '#':\n return False\n else:\n colour = hclField[1:]\n validChrs = [str(x) for x in range(0, 10)] + [chr(x) for x in range\n (ord('a'), ord('f') + 1)]\n return all([(c in validChrs) for c in colour])\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef validatePid(pidField):\n if len(pidField) != 9:\n return False\n else:\n validChrs = [str(x) for x in range(0, 10)]\n return all([(c in validChrs) for c in pidField])\n\n\n<function token>\n\n\ndef validateCid(cidField):\n return True\n\n\n<function token>\n\n\ndef test_checkFields():\n x = \"\"\"\n ecl:gry pid:860033327 eyr:2020 hcl:#fffffd\n byr:1937 iyr:2017 cid:147 hgt:183cm \n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884\n hcl:#cfa07d byr:1929 \n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\n hcl:#ae17e1 iyr:2013\n eyr:2024\n ecl:brn pid:760753108 byr:1931\n hgt:179cm\n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n hcl:#cfa07d eyr:2025 pid:166559648\n iyr:2011 ecl:brn hgt:59in\n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f \n \"\"\"\n assert checkFields(x) == True\n\n\n<function token>\n\n\ndef test_checkPassport():\n x = \"\"\"\n eyr:1972 cid:100\n hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n iyr:2019\n hcl:#602927 eyr:1967 hgt:170cm\n ecl:grn pid:012533040 byr:1946\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hcl:dab227 iyr:2012\n ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hgt:59cm ecl:zzz\n eyr:2038 hcl:74454a iyr:2023\n pid:3556412378 byr:2007\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n eyr:2029 ecl:blu cid:129 byr:1989\n iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n hcl:#888785\n hgt:164cm byr:2001 iyr:2015 cid:88\n pid:545766238 ecl:hzl\n eyr:2022\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719\n\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<function token>\n\n\ndef checkInRange(value, min, max):\n value = int(value)\n validValues = range(min, max + 1)\n return value in validValues\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_validateEyr():\n assert validateEyr(2020) == True, 'Should be True'\n assert validateEyr(2030) == True, 'Should be True'\n assert validateEyr(2025) == True, 'Should be True'\n assert validateEyr(2002) == False, 'Should be False'\n assert validateEyr(2031) == False, 'Should be False'\n\n\ndef validateHcl(hclField):\n if hclField[0] != '#':\n return False\n else:\n colour = hclField[1:]\n validChrs = [str(x) for x in range(0, 10)] + [chr(x) for x in range\n (ord('a'), ord('f') + 1)]\n return all([(c in validChrs) for c in colour])\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef validatePid(pidField):\n if len(pidField) != 9:\n return False\n else:\n validChrs = [str(x) for x in range(0, 10)]\n return all([(c in validChrs) for c in pidField])\n\n\n<function token>\n\n\ndef validateCid(cidField):\n return True\n\n\n<function token>\n\n\ndef test_checkFields():\n x = \"\"\"\n ecl:gry pid:860033327 eyr:2020 hcl:#fffffd\n byr:1937 iyr:2017 cid:147 hgt:183cm \n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884\n hcl:#cfa07d byr:1929 \n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\n hcl:#ae17e1 iyr:2013\n eyr:2024\n ecl:brn pid:760753108 byr:1931\n hgt:179cm\n \"\"\"\n assert checkFields(x) == True\n x = \"\"\"\n hcl:#cfa07d eyr:2025 pid:166559648\n iyr:2011 ecl:brn hgt:59in\n \"\"\"\n assert checkFields(x) == False\n x = \"\"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f \n \"\"\"\n assert checkFields(x) == True\n\n\n<function token>\n\n\ndef test_checkPassport():\n x = \"\"\"\n eyr:1972 cid:100\n hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n iyr:2019\n hcl:#602927 eyr:1967 hgt:170cm\n ecl:grn pid:012533040 byr:1946\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hcl:dab227 iyr:2012\n ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hgt:59cm ecl:zzz\n eyr:2038 hcl:74454a iyr:2023\n pid:3556412378 byr:2007\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n eyr:2029 ecl:blu cid:129 byr:1989\n iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n hcl:#888785\n hgt:164cm byr:2001 iyr:2015 cid:88\n pid:545766238 ecl:hzl\n eyr:2022\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719\n\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<function token>\n\n\ndef checkInRange(value, min, max):\n value = int(value)\n validValues = range(min, max + 1)\n return value in validValues\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_validateEyr():\n assert validateEyr(2020) == True, 'Should be True'\n assert validateEyr(2030) == True, 'Should be True'\n assert validateEyr(2025) == True, 'Should be True'\n assert validateEyr(2002) == False, 'Should be False'\n assert validateEyr(2031) == False, 'Should be False'\n\n\ndef validateHcl(hclField):\n if hclField[0] != '#':\n return False\n else:\n colour = hclField[1:]\n validChrs = [str(x) for x in range(0, 10)] + [chr(x) for x in range\n (ord('a'), ord('f') + 1)]\n return all([(c in validChrs) for c in colour])\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef validatePid(pidField):\n if len(pidField) != 9:\n return False\n else:\n validChrs = [str(x) for x in range(0, 10)]\n return all([(c in validChrs) for c in pidField])\n\n\n<function token>\n\n\ndef validateCid(cidField):\n return True\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef test_checkPassport():\n x = \"\"\"\n eyr:1972 cid:100\n hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n iyr:2019\n hcl:#602927 eyr:1967 hgt:170cm\n ecl:grn pid:012533040 byr:1946\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hcl:dab227 iyr:2012\n ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hgt:59cm ecl:zzz\n eyr:2038 hcl:74454a iyr:2023\n pid:3556412378 byr:2007\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n eyr:2029 ecl:blu cid:129 byr:1989\n iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n hcl:#888785\n hgt:164cm byr:2001 iyr:2015 cid:88\n pid:545766238 ecl:hzl\n eyr:2022\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719\n\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<function token>\n\n\ndef checkInRange(value, min, max):\n value = int(value)\n validValues = range(min, max + 1)\n return value in validValues\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_validateEyr():\n assert validateEyr(2020) == True, 'Should be True'\n assert validateEyr(2030) == True, 'Should be True'\n assert validateEyr(2025) == True, 'Should be True'\n assert validateEyr(2002) == False, 'Should be False'\n assert validateEyr(2031) == False, 'Should be False'\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef validatePid(pidField):\n if len(pidField) != 9:\n return False\n else:\n validChrs = [str(x) for x in range(0, 10)]\n return all([(c in validChrs) for c in pidField])\n\n\n<function token>\n\n\ndef validateCid(cidField):\n return True\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef test_checkPassport():\n x = \"\"\"\n eyr:1972 cid:100\n hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n iyr:2019\n hcl:#602927 eyr:1967 hgt:170cm\n ecl:grn pid:012533040 byr:1946\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hcl:dab227 iyr:2012\n ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n hgt:59cm ecl:zzz\n eyr:2038 hcl:74454a iyr:2023\n pid:3556412378 byr:2007\n \"\"\"\n assert checkPassport(x) == False, 'Should be false'\n x = \"\"\"\n pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n hcl:#623a2f\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n eyr:2029 ecl:blu cid:129 byr:1989\n iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n hcl:#888785\n hgt:164cm byr:2001 iyr:2015 cid:88\n pid:545766238 ecl:hzl\n eyr:2022\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n x = \"\"\"\n iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719\n\n \"\"\"\n assert checkPassport(x) == True, 'Should be true'\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<function token>\n\n\ndef checkInRange(value, min, max):\n value = int(value)\n validValues = range(min, max + 1)\n return value in validValues\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_validateEyr():\n assert validateEyr(2020) == True, 'Should be True'\n assert validateEyr(2030) == True, 'Should be True'\n assert validateEyr(2025) == True, 'Should be True'\n assert validateEyr(2002) == False, 'Should be False'\n assert validateEyr(2031) == False, 'Should be False'\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef validatePid(pidField):\n if len(pidField) != 9:\n return False\n else:\n validChrs = [str(x) for x in range(0, 10)]\n return all([(c in validChrs) for c in pidField])\n\n\n<function token>\n\n\ndef validateCid(cidField):\n return True\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<function token>\n\n\ndef checkInRange(value, min, max):\n value = int(value)\n validValues = range(min, max + 1)\n return value in validValues\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_validateEyr():\n assert validateEyr(2020) == True, 'Should be True'\n assert validateEyr(2030) == True, 'Should be True'\n assert validateEyr(2025) == True, 'Should be True'\n assert validateEyr(2002) == False, 'Should be False'\n assert validateEyr(2031) == False, 'Should be False'\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef validatePid(pidField):\n if len(pidField) != 9:\n return False\n else:\n validChrs = [str(x) for x in range(0, 10)]\n return all([(c in validChrs) for c in pidField])\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_validateEyr():\n assert validateEyr(2020) == True, 'Should be True'\n assert validateEyr(2030) == True, 'Should be True'\n assert validateEyr(2025) == True, 'Should be True'\n assert validateEyr(2002) == False, 'Should be False'\n assert validateEyr(2031) == False, 'Should be False'\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef validatePid(pidField):\n if len(pidField) != 9:\n return False\n else:\n validChrs = [str(x) for x in range(0, 10)]\n return all([(c in validChrs) for c in pidField])\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_validateEyr():\n assert validateEyr(2020) == True, 'Should be True'\n assert validateEyr(2030) == True, 'Should be True'\n assert validateEyr(2025) == True, 'Should be True'\n assert validateEyr(2002) == False, 'Should be False'\n assert validateEyr(2031) == False, 'Should be False'\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,524 |
02564e271b7e929d3ce4de7683efdc6713c5928e
|
import time
import json
import queue
import base64
import struct
import urllib.request as ur
from urllib.error import URLError
from enum import IntEnum
from typing import Callable, Dict
from modi.module.input_module.button import Button
from modi.module.input_module.dial import Dial
from modi.module.input_module.env import Env
from modi.module.input_module.gyro import Gyro
from modi.module.input_module.ir import Ir
from modi.module.input_module.mic import Mic
from modi.module.input_module.ultrasonic import Ultrasonic
from modi.module.output_module.display import Display
from modi.module.output_module.led import Led
from modi.module.output_module.motor import Motor
from modi.module.output_module.speaker import Speaker
from modi.module.module import Module
from modi.util.msgutil import unpack_data as up
class ExeTask:
"""
:param queue send_q: Inter-process queue for writing serial
message.
:param queue recv_q: Inter-process queue for parsing json message.
:param dict() module_ids: dict() of module_id : ['timestamp', 'uuid'].
:param list() modules: list() of module instance.
"""
# variables shared across all class instances
__module_categories = ["network", "input", "output"]
__module_types = {
"network": ["usb", "usb/wifi/ble"],
"input": ["env", "gyro", "mic", "button", "dial", "ultrasonic", "ir"],
"output": ["display", "motor", "led", "speaker"],
}
def __init__(self, modules, module_ids, topology_data,
recv_q, send_q, init_event, nb_modules, firmware_updater):
self._modules = modules
self._module_ids = module_ids
self._topology_data = topology_data
self._recv_q = recv_q
self._send_q = send_q
self._init_event = init_event
self._nb_modules = nb_modules
self.firmware_updater = firmware_updater
# Check if a user has been notified when firmware is outdated
self.firmware_update_message_flag = False
self.__init_modules()
print('Start initializing connected MODI modules')
def run(self, delay: float):
""" Run in ExecutorThread
:param delay: time value to wait in seconds
:type delay: float
"""
time.sleep(delay)
try:
raw_message = self._recv_q.get_nowait()
message = json.loads(raw_message)
except queue.Empty:
pass
except json.decoder.JSONDecodeError:
print('current json message:', raw_message)
else:
self.__command_handler(message["c"])(message)
def __command_handler(self,
command: int) -> Callable[[Dict[str, int]], None]:
""" Execute task based on command message
:param command: command code
:type command: int
        :return: a function that corresponds to the command code
:rtype: Callable[[Dict[str, int]], None]
"""
return {
0x00: self.__update_health,
0x0A: self.__update_warning,
0x0C: self.__update_firmware_state,
0x05: self.__update_modules,
0x07: self.__update_topology,
0x1F: self.__update_property,
}.get(command, lambda _: None)
def __update_firmware_state(self, message):
byte_data = message["b"]
message_decoded = bytearray(base64.b64decode(byte_data))
stream_state = message_decoded[4]
# TODO: Remove this if and elif branches
if stream_state == self.firmware_updater.State.CRC_ERROR.value:
self.firmware_updater.update_response(response=True,
is_error_response=True)
elif stream_state == self.firmware_updater.State.CRC_COMPLETE.value:
self.firmware_updater.update_response(response=True)
elif stream_state == self.firmware_updater.State.ERASE_ERROR.value:
self.firmware_updater.update_response(response=True,
is_error_response=True)
elif stream_state == self.firmware_updater.State.ERASE_COMPLETE.value:
self.firmware_updater.update_response(response=True)
def __update_topology(self, message: Dict[str, int]) -> None:
"""Update the topology of the connected modules
:param message: Dictionary format message of the module
:return: None
"""
# print('topology_msg:', message)
# Setup prerequisites
src_id = message["s"]
byte_data = message["b"]
broadcast_id = 2 ** 16 - 1
topology_by_id = {}
message_decoded = bytearray(base64.b64decode(byte_data))
# print('topology_msg_dec:', message_decoded)
# UUID
src_uuid = self.__get_uuid_by_id(src_id)
topology_by_id['uuid'] = src_uuid
# RIGHT ID
right_id = message_decoded[1] << 8 | message_decoded[0]
topology_by_id['r'] = right_id if right_id != broadcast_id else None
# TOP ID
top_id = message_decoded[3] << 8 | message_decoded[2]
topology_by_id['t'] = top_id if top_id != broadcast_id else None
# LEFT ID
left_id = message_decoded[5] << 8 | message_decoded[4]
topology_by_id['l'] = left_id if left_id != broadcast_id else None
# BOTTOM ID
bottom_id = message_decoded[7] << 8 | message_decoded[6]
topology_by_id['b'] = bottom_id if bottom_id != broadcast_id else None
# Save topology data for current module
if not self._topology_data.get(src_id):
self._topology_data[src_id] = topology_by_id
else:
# If the topology data already exists, update it
for key in self._topology_data[src_id]:
if not self._topology_data[src_id][key]:
self._topology_data[src_id][key] = topology_by_id[key]
def __get_uuid_by_id(self, id_: int) -> int:
"""Find id of a module which has corresponding uuid
:param id_: ID of the module
:type id_: int
:return: UUID
:rtype: int
"""
for module in self._modules:
if module.id == id_:
return module.uuid
return None
def __update_health(self, message: Dict[str, int]) -> None:
""" Update information by health message
:param message: Dictionary format message of the module
:type message: Dictionary
:return: None
"""
# Record current time and uuid, timestamp, battery information
module_id = message["s"]
curr_time_ms = int(time.time() * 1000)
message_decoded = bytearray(base64.b64decode(message["b"]))
self._module_ids[module_id] = self._module_ids.get(module_id, dict())
self._module_ids[module_id]["timestamp"] = curr_time_ms
self._module_ids[module_id]["uuid"] = self._module_ids[module_id].get(
"uuid", str()
)
self._module_ids[module_id]["battery"] = int(message_decoded[3])
# Request uuid from network modules and other modules
if not self._module_ids[module_id]["uuid"]:
message_to_write = self.__request_uuid(
module_id, is_network_module=False)
self._send_q.put(message_to_write)
message_to_write = self.__request_uuid(
module_id, is_network_module=True)
self._send_q.put(message_to_write)
        # Disconnect modules that have sent no health message for more than one second
for module_id, module_info in list(self._module_ids.items()):
if curr_time_ms - module_info["timestamp"] > 1000:
for module in self._modules:
if module.uuid == module_info["uuid"]:
module.set_connection_state(connection_state=False)
def __update_warning(self, message: Dict[str, int]) -> None:
"""Update the warning message
:param message: Warning message in Dictionary format
:return: None
"""
# print('Warning message:', message)
warning_data = bytearray(base64.b64decode(message["b"]))
warning_type = warning_data[6]
# If warning shows current module works fine, return immediately
if not warning_type:
return
module_uuid = warning_data[:6]
module_uuid_res = 0
for i, v in enumerate(module_uuid):
module_uuid_res |= v << 8 * i
module_id = message["s"]
module_type = self.__get_type_from_uuid(module_uuid_res)
# No need to update Network module's STM firmware
if module_type == 'Network':
return
if warning_type == 1:
self.firmware_updater.check_to_update_firmware(module_id)
elif warning_type == 2:
# Note that more than one warning type 2 message can be received
if self.firmware_updater.update_in_progress:
self.firmware_updater.add_to_waitlist(module_id, module_type)
else:
self.firmware_updater.update_module(module_id, module_type)
else:
# TODO: Handle warning_type of 7 and 10
# print("Unsupported warning type:", warning_type)
pass
def __update_modules(self, message: Dict[str, str]) -> None:
""" Update module information
:param message: Dictionary format module info
:type message: Dictionary
:return: None
"""
# Set time variable for timestamp
curr_time_ms = int(time.time() * 1000)
# Record information by module id
module_id = message["s"]
self._module_ids[module_id] = self._module_ids.get(module_id, dict())
self._module_ids[module_id]["timestamp"] = curr_time_ms
self._module_ids[module_id]["uuid"] = self._module_ids[module_id].get(
"uuid", str()
)
# Extract uuid from message "b"
message_decoded = bytearray(base64.b64decode(message["b"]))
module_uuid_bytes = message_decoded[:4]
module_info_bytes = message_decoded[-4:]
module_info = (module_info_bytes[1] << 8) + module_info_bytes[0]
module_version_info = module_info_bytes[3] << 8 | module_info_bytes[2]
# Retrieve most recent skeleton version from the server
version_path = (
"https://download.luxrobo.com/modi-skeleton-mobile/version.txt"
)
version_info = None
try:
for line in ur.urlopen(version_path, timeout=1):
version_info = line.decode('utf-8').lstrip('v')
version_digits = [int(digit) for digit in version_info.split('.')]
""" Version number is formed by concatenating all three version bits
e.g. v2.2.4 -> 010 00010 00000100 -> 0100 0010 0000 0100
"""
latest_version = (
version_digits[0] << 13
| version_digits[1] << 8
| version_digits[2]
)
except URLError:
latest_version = module_version_info
module_category_idx = module_info >> 13
module_type_idx = (module_info >> 4) & 0x1FF
module_category = self.__module_categories[module_category_idx]
module_type = self.__module_types[module_category][module_type_idx]
module_uuid = self.__fit_module_uuid(
module_info,
(
(module_uuid_bytes[3] << 24)
+ (module_uuid_bytes[2] << 16)
+ (module_uuid_bytes[1] << 8)
+ module_uuid_bytes[0]
),
)
        module_uuid = up(message['b'], (6, 2))[0]  # re-read the uuid straight from the payload; this replaces the value assembled above
if module_category != 'network' and \
not self.firmware_update_message_flag and \
module_version_info < latest_version:
print("Your MODI module(s) is not up-to-date.")
print("You can update your MODI modules by calling "
"'update_module_firmware()'")
self.firmware_update_message_flag = True
self._module_ids[module_id]["uuid"] = module_uuid
# Handle re-connected modules
for module in self._modules:
if module.uuid == module_uuid and not module.is_connected:
module.set_connection_state(connection_state=True)
# When reconnected, turn-off module pnp state
pnp_off_message = self.__set_module_state(
0xFFF, Module.State.RUN, Module.State.PNP_OFF
)
self._send_q.put(pnp_off_message)
# Handle newly-connected modules
if not next(
(module for module in self._modules if module.uuid == module_uuid),
None
):
if module_category != "network":
module_template = self.__init_module(module_type)
module_instance = module_template(
module_id, module_uuid, self._send_q
)
self.__set_pnp(
module_id=module_instance.id,
module_pnp_state=Module.State.PNP_OFF
)
module_instance.version = module_version_info
module_instance.is_up_to_date = \
(module_version_info == latest_version)
self._modules.append(module_instance)
print(f"{type(module_instance).__name__} ({module_id}) "
f"has been connected!")
if self.__is_all_connected():
self._init_event.set()
def __is_all_connected(self) -> bool:
""" Determine whether all modules are connected
        :return: true if all modules are connected
:rtype: bool
"""
return self._nb_modules == len(self._modules)
def __init_module(self, module_type: str) -> Module:
""" Find module type for module initialize
:param module_type: Type of the module in string
:type module_type: str
:return: Module corresponding to the type
:rtype: Module
"""
module = {
"button": Button,
"dial": Dial,
"display": Display,
"env": Env,
"gyro": Gyro,
"ir": Ir,
"led": Led,
"mic": Mic,
"motor": Motor,
"speaker": Speaker,
"ultrasonic": Ultrasonic,
}.get(module_type)
return module
def __update_property(self, message: Dict[str, int]) -> None:
""" Update module property
:param message: Dictionary format message
:type message: Dictionary
:return: None
"""
# Do not update reserved property
property_number = message["d"]
if property_number == 0 or property_number == 1:
return
# Decode message of module id and module property for update property
for module in self._modules:
if module.id == message["s"]:
message_decoded = bytearray(base64.b64decode(message["b"]))
property_type = module.PropertyType(property_number)
module.update_property(
property_type,
round(struct.unpack("f", bytes(
message_decoded[:4]))[0], 2),
)
def __set_pnp(self, module_id: int, module_pnp_state: IntEnum) -> None:
""" Generate module pnp on/off command
:param module_id: ID of the target module
:type module_id: int
:param module_pnp_state: Pnp state value
:type module_pnp_state: IntEnum
:return: None
"""
# If no module_id is specified, it will broadcast incoming pnp state
if module_id is None:
for curr_module_id in self._module_ids:
pnp_message = self.__set_module_state(
curr_module_id, Module.State.RUN, module_pnp_state
)
self._send_q.put(pnp_message)
# Otherwise, it sets pnp state of the given module
else:
pnp_message = self.__set_module_state(
module_id, Module.State.RUN, module_pnp_state
)
self._send_q.put(pnp_message)
def __fit_module_uuid(self, module_info: int, module_uuid: int) -> int:
""" Generate uuid using bitwise operation
:param module_info: Module info
:type module_info: int
:param module_uuid: Module uuid
:type module_uuid: int
:return: Fitted uuid
:rtype: int
"""
sizeof_module_uuid = 0
while (module_uuid >> sizeof_module_uuid) > 0:
sizeof_module_uuid += 1
sizeof_module_uuid += sizeof_module_uuid % 4
return (module_info << sizeof_module_uuid) | module_uuid
def __set_module_state(self, destination_id: int, module_state: IntEnum,
pnp_state: IntEnum) -> str:
""" Generate message for set module state and pnp state
:param destination_id: Id to target destination
:type destination_id: int
:param module_state: State value of the module
:type module_state: int
:param pnp_state: Pnp state value
:type pnp_state: IntEnum
:return: json serialized message
:rtype: str
"""
message = dict()
message["c"] = 0x09
message["s"] = 0
message["d"] = destination_id
state_bytes = bytearray(2)
state_bytes[0] = module_state
state_bytes[1] = pnp_state
message["b"] = base64.b64encode(bytes(state_bytes)).decode("utf-8")
message["l"] = 2
return json.dumps(message, separators=(",", ":"))
def __init_modules(self) -> None:
""" Initialize module on first run
:return: None
"""
BROADCAST_ID = 0xFFF
# Reboot module
reboot_message = self.__set_module_state(
BROADCAST_ID, Module.State.REBOOT, Module.State.PNP_OFF
)
self._send_q.put(reboot_message)
# self.__delay()
# Command module pnp off
pnp_off_message = self.__set_module_state(
BROADCAST_ID, Module.State.RUN, Module.State.PNP_OFF
)
self._send_q.put(pnp_off_message)
# self.__delay()
# Command module uuid
request_uuid_message = self.__request_uuid(BROADCAST_ID)
self._send_q.put(request_uuid_message)
# self.__delay()
# Request topology data
self.request_topology()
# self.__delay()
def __delay(self) -> None:
""" Wait for delay
:return: None
"""
time.sleep(0.5)
def __request_uuid(self, source_id: int,
is_network_module: bool = False) -> str:
""" Generate broadcasting message for request uuid
:param source_id: Id of the source
:type source_id: int
:param is_network_module: true if network module
:type is_network_module: bool
:return: json serialized message
:rtype: str
"""
BROADCAST_ID = 0xFFF
message = dict()
message["c"] = 0x28 if is_network_module else 0x08
message["s"] = source_id
message["d"] = BROADCAST_ID
id_bytes = bytearray(8)
id_bytes[0] = 0xFF
id_bytes[1] = 0x0F
message["b"] = base64.b64encode(bytes(id_bytes)).decode("utf-8")
message["l"] = 8
return json.dumps(message, separators=(",", ":"))
def request_topology(self, cmd: int = 0x07,
module_id: int = 0xFFF) -> None:
"""Request module topology
        :return: None
"""
message = dict()
message["c"] = cmd
message["s"] = 0
message["d"] = module_id
direction_data = bytearray(8)
message["b"] = base64.b64encode(bytes(direction_data)).decode("utf-8")
message["l"] = 8
self._send_q.put(json.dumps(message, separators=(",", ":")))
def update_firmware(self) -> None:
""" Remove firmware of MODI modules
:return: None
"""
BROADCAST_ID = 0xFFF
firmware_update_message = self.__set_module_state(
BROADCAST_ID, Module.State.UPDATE_FIRMWARE, Module.State.PNP_OFF
)
self._send_q.put(firmware_update_message)
self.__delay()
def update_firmware_ready(self, module_id: int) -> None:
""" Check if modules with no firmware are ready to update its firmware
:param module_id: Id of the target module
:type module_id: int
:return: None
"""
firmware_update_ready_message = self.__set_module_state(
module_id, Module.State.UPDATE_FIRMWARE_READY, Module.State.PNP_OFF
)
self._send_q.put(firmware_update_ready_message)
self.__delay()
def __get_type_from_uuid(self, uuid):
if uuid is None:
return 'Network'
        hexadecimal = hex(uuid)[2:]  # drop the '0x' prefix (lstrip strips characters, not a prefix)
type_indicator = str(hexadecimal)[:4]
module_type = {
# Input modules
'2000': 'env',
'2010': 'gyro',
'2020': 'mic',
'2030': 'button',
'2040': 'dial',
'2050': 'ultrasonic',
'2060': 'ir',
# Output modules
'4000': 'display',
'4010': 'motor',
'4020': 'led',
'4030': 'speaker',
}.get(type_indicator)
return 'Network' if module_type is None else module_type
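

# Minimal usage sketch (illustrative only): the local queues, the zero module
# count and the missing firmware updater are placeholder assumptions; in the
# real library this object is constructed and driven by modi's executor
# machinery together with a proper firmware updater.
if __name__ == "__main__":
    import threading

    send_q, recv_q = queue.Queue(), queue.Queue()
    task = ExeTask(
        modules=[], module_ids={}, topology_data={},
        recv_q=recv_q, send_q=send_q,
        init_event=threading.Event(), nb_modules=0,
        firmware_updater=None,  # placeholder; warnings would need a real updater
    )
    task.run(delay=0.1)  # recv_q is empty, so this pass is a no-op
    # __init_modules queued the reboot, pnp-off, uuid-request and topology messages
    print(send_q.qsize(), "setup messages queued")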
message[\"l\"] = 8\n\n return json.dumps(message, separators=(\",\", \":\"))\n\n def request_topology(self, cmd: int = 0x07,\n module_id: int = 0xFFF) -> None:\n \"\"\"Request module topology\n\n :return: json serialized topology request message\n :rtype: str\n \"\"\"\n message = dict()\n message[\"c\"] = cmd\n message[\"s\"] = 0\n message[\"d\"] = module_id\n\n direction_data = bytearray(8)\n message[\"b\"] = base64.b64encode(bytes(direction_data)).decode(\"utf-8\")\n message[\"l\"] = 8\n\n self._send_q.put(json.dumps(message, separators=(\",\", \":\")))\n\n def update_firmware(self) -> None:\n \"\"\" Remove firmware of MODI modules\n\n :return: None\n \"\"\"\n\n BROADCAST_ID = 0xFFF\n firmware_update_message = self.__set_module_state(\n BROADCAST_ID, Module.State.UPDATE_FIRMWARE, Module.State.PNP_OFF\n )\n self._send_q.put(firmware_update_message)\n self.__delay()\n\n def update_firmware_ready(self, module_id: int) -> None:\n \"\"\" Check if modules with no firmware are ready to update its firmware\n\n :param module_id: Id of the target module\n :type module_id: int\n :return: None\n \"\"\"\n\n firmware_update_ready_message = self.__set_module_state(\n module_id, Module.State.UPDATE_FIRMWARE_READY, Module.State.PNP_OFF\n )\n self._send_q.put(firmware_update_ready_message)\n self.__delay()\n\n def __get_type_from_uuid(self, uuid):\n if uuid is None:\n return 'Network'\n\n hexadecimal = hex(uuid).lstrip(\"0x\")\n type_indicator = str(hexadecimal)[:4]\n module_type = {\n # Input modules\n '2000': 'env',\n '2010': 'gyro',\n '2020': 'mic',\n '2030': 'button',\n '2040': 'dial',\n '2050': 'ultrasonic',\n '2060': 'ir',\n\n # Output modules\n '4000': 'display',\n '4010': 'motor',\n '4020': 'led',\n '4030': 'speaker',\n }.get(type_indicator)\n return 'Network' if module_type is None else module_type\n",
"import time\nimport json\nimport queue\nimport base64\nimport struct\nimport urllib.request as ur\nfrom urllib.error import URLError\nfrom enum import IntEnum\nfrom typing import Callable, Dict\nfrom modi.module.input_module.button import Button\nfrom modi.module.input_module.dial import Dial\nfrom modi.module.input_module.env import Env\nfrom modi.module.input_module.gyro import Gyro\nfrom modi.module.input_module.ir import Ir\nfrom modi.module.input_module.mic import Mic\nfrom modi.module.input_module.ultrasonic import Ultrasonic\nfrom modi.module.output_module.display import Display\nfrom modi.module.output_module.led import Led\nfrom modi.module.output_module.motor import Motor\nfrom modi.module.output_module.speaker import Speaker\nfrom modi.module.module import Module\nfrom modi.util.msgutil import unpack_data as up\n\n\nclass ExeTask:\n \"\"\"\n :param queue send_q: Inter-process queue for writing serial\n message.\n :param queue recv_q: Inter-process queue for parsing json message.\n :param dict() module_ids: dict() of module_id : ['timestamp', 'uuid'].\n :param list() modules: list() of module instance.\n \"\"\"\n __module_categories = ['network', 'input', 'output']\n __module_types = {'network': ['usb', 'usb/wifi/ble'], 'input': ['env',\n 'gyro', 'mic', 'button', 'dial', 'ultrasonic', 'ir'], 'output': [\n 'display', 'motor', 'led', 'speaker']}\n\n def __init__(self, modules, module_ids, topology_data, recv_q, send_q,\n init_event, nb_modules, firmware_updater):\n self._modules = modules\n self._module_ids = module_ids\n self._topology_data = topology_data\n self._recv_q = recv_q\n self._send_q = send_q\n self._init_event = init_event\n self._nb_modules = nb_modules\n self.firmware_updater = firmware_updater\n self.firmware_update_message_flag = False\n self.__init_modules()\n print('Start initializing connected MODI modules')\n\n def run(self, delay: float):\n \"\"\" Run in ExecutorThread\n\n :param delay: time value to wait in seconds\n :type delay: float\n \"\"\"\n time.sleep(delay)\n try:\n raw_message = self._recv_q.get_nowait()\n message = json.loads(raw_message)\n except queue.Empty:\n pass\n except json.decoder.JSONDecodeError:\n print('current json message:', raw_message)\n else:\n self.__command_handler(message['c'])(message)\n\n def __command_handler(self, command: int) ->Callable[[Dict[str, int]], None\n ]:\n \"\"\" Execute task based on command message\n\n :param command: command code\n :type command: int\n :return: a function the corresponds to the command code\n :rtype: Callable[[Dict[str, int]], None]\n \"\"\"\n return {(0): self.__update_health, (10): self.__update_warning, (12\n ): self.__update_firmware_state, (5): self.__update_modules, (7\n ): self.__update_topology, (31): self.__update_property}.get(\n command, lambda _: None)\n\n def __update_firmware_state(self, message):\n byte_data = message['b']\n message_decoded = bytearray(base64.b64decode(byte_data))\n stream_state = message_decoded[4]\n if stream_state == self.firmware_updater.State.CRC_ERROR.value:\n self.firmware_updater.update_response(response=True,\n is_error_response=True)\n elif stream_state == self.firmware_updater.State.CRC_COMPLETE.value:\n self.firmware_updater.update_response(response=True)\n elif stream_state == self.firmware_updater.State.ERASE_ERROR.value:\n self.firmware_updater.update_response(response=True,\n is_error_response=True)\n elif stream_state == self.firmware_updater.State.ERASE_COMPLETE.value:\n self.firmware_updater.update_response(response=True)\n\n def 
__update_topology(self, message: Dict[str, int]) ->None:\n \"\"\"Update the topology of the connected modules\n\n :param message: Dictionary format message of the module\n :return: None\n \"\"\"\n src_id = message['s']\n byte_data = message['b']\n broadcast_id = 2 ** 16 - 1\n topology_by_id = {}\n message_decoded = bytearray(base64.b64decode(byte_data))\n src_uuid = self.__get_uuid_by_id(src_id)\n topology_by_id['uuid'] = src_uuid\n right_id = message_decoded[1] << 8 | message_decoded[0]\n topology_by_id['r'] = right_id if right_id != broadcast_id else None\n top_id = message_decoded[3] << 8 | message_decoded[2]\n topology_by_id['t'] = top_id if top_id != broadcast_id else None\n left_id = message_decoded[5] << 8 | message_decoded[4]\n topology_by_id['l'] = left_id if left_id != broadcast_id else None\n bottom_id = message_decoded[7] << 8 | message_decoded[6]\n topology_by_id['b'] = bottom_id if bottom_id != broadcast_id else None\n if not self._topology_data.get(src_id):\n self._topology_data[src_id] = topology_by_id\n else:\n for key in self._topology_data[src_id]:\n if not self._topology_data[src_id][key]:\n self._topology_data[src_id][key] = topology_by_id[key]\n\n def __get_uuid_by_id(self, id_: int) ->int:\n \"\"\"Find id of a module which has corresponding uuid\n\n :param id_: ID of the module\n :type id_: int\n :return: UUID\n :rtype: int\n \"\"\"\n for module in self._modules:\n if module.id == id_:\n return module.uuid\n return None\n\n def __update_health(self, message: Dict[str, int]) ->None:\n \"\"\" Update information by health message\n\n :param message: Dictionary format message of the module\n :type message: Dictionary\n :return: None\n \"\"\"\n module_id = message['s']\n curr_time_ms = int(time.time() * 1000)\n message_decoded = bytearray(base64.b64decode(message['b']))\n self._module_ids[module_id] = self._module_ids.get(module_id, dict())\n self._module_ids[module_id]['timestamp'] = curr_time_ms\n self._module_ids[module_id]['uuid'] = self._module_ids[module_id].get(\n 'uuid', str())\n self._module_ids[module_id]['battery'] = int(message_decoded[3])\n if not self._module_ids[module_id]['uuid']:\n message_to_write = self.__request_uuid(module_id,\n is_network_module=False)\n self._send_q.put(message_to_write)\n message_to_write = self.__request_uuid(module_id,\n is_network_module=True)\n self._send_q.put(message_to_write)\n for module_id, module_info in list(self._module_ids.items()):\n if curr_time_ms - module_info['timestamp'] > 1000:\n for module in self._modules:\n if module.uuid == module_info['uuid']:\n module.set_connection_state(connection_state=False)\n\n def __update_warning(self, message: Dict[str, int]) ->None:\n \"\"\"Update the warning message\n\n :param message: Warning message in Dictionary format\n :return: None\n \"\"\"\n warning_data = bytearray(base64.b64decode(message['b']))\n warning_type = warning_data[6]\n if not warning_type:\n return\n module_uuid = warning_data[:6]\n module_uuid_res = 0\n for i, v in enumerate(module_uuid):\n module_uuid_res |= v << 8 * i\n module_id = message['s']\n module_type = self.__get_type_from_uuid(module_uuid_res)\n if module_type == 'Network':\n return\n if warning_type == 1:\n self.firmware_updater.check_to_update_firmware(module_id)\n elif warning_type == 2:\n if self.firmware_updater.update_in_progress:\n self.firmware_updater.add_to_waitlist(module_id, module_type)\n else:\n self.firmware_updater.update_module(module_id, module_type)\n else:\n pass\n\n def __update_modules(self, message: Dict[str, str]) ->None:\n 
\"\"\" Update module information\n\n :param message: Dictionary format module info\n :type message: Dictionary\n :return: None\n \"\"\"\n curr_time_ms = int(time.time() * 1000)\n module_id = message['s']\n self._module_ids[module_id] = self._module_ids.get(module_id, dict())\n self._module_ids[module_id]['timestamp'] = curr_time_ms\n self._module_ids[module_id]['uuid'] = self._module_ids[module_id].get(\n 'uuid', str())\n message_decoded = bytearray(base64.b64decode(message['b']))\n module_uuid_bytes = message_decoded[:4]\n module_info_bytes = message_decoded[-4:]\n module_info = (module_info_bytes[1] << 8) + module_info_bytes[0]\n module_version_info = module_info_bytes[3] << 8 | module_info_bytes[2]\n version_path = (\n 'https://download.luxrobo.com/modi-skeleton-mobile/version.txt')\n version_info = None\n try:\n for line in ur.urlopen(version_path, timeout=1):\n version_info = line.decode('utf-8').lstrip('v')\n version_digits = [int(digit) for digit in version_info.split('.')]\n \"\"\" Version number is formed by concatenating all three version bits\n e.g. v2.2.4 -> 010 00010 00000100 -> 0100 0010 0000 0100\n \"\"\"\n latest_version = version_digits[0] << 13 | version_digits[1\n ] << 8 | version_digits[2]\n except URLError:\n latest_version = module_version_info\n module_category_idx = module_info >> 13\n module_type_idx = module_info >> 4 & 511\n module_category = self.__module_categories[module_category_idx]\n module_type = self.__module_types[module_category][module_type_idx]\n module_uuid = self.__fit_module_uuid(module_info, (\n module_uuid_bytes[3] << 24) + (module_uuid_bytes[2] << 16) + (\n module_uuid_bytes[1] << 8) + module_uuid_bytes[0])\n module_uuid = up(message['b'], (6, 2))[0]\n if (module_category != 'network' and not self.\n firmware_update_message_flag and module_version_info <\n latest_version):\n print('Your MODI module(s) is not up-to-date.')\n print(\n \"You can update your MODI modules by calling 'update_module_firmware()'\"\n )\n self.firmware_update_message_flag = True\n self._module_ids[module_id]['uuid'] = module_uuid\n for module in self._modules:\n if module.uuid == module_uuid and not module.is_connected:\n module.set_connection_state(connection_state=True)\n pnp_off_message = self.__set_module_state(4095, Module.\n State.RUN, Module.State.PNP_OFF)\n self._send_q.put(pnp_off_message)\n if not next((module for module in self._modules if module.uuid ==\n module_uuid), None):\n if module_category != 'network':\n module_template = self.__init_module(module_type)\n module_instance = module_template(module_id, module_uuid,\n self._send_q)\n self.__set_pnp(module_id=module_instance.id,\n module_pnp_state=Module.State.PNP_OFF)\n module_instance.version = module_version_info\n module_instance.is_up_to_date = (module_version_info ==\n latest_version)\n self._modules.append(module_instance)\n print(\n f'{type(module_instance).__name__} ({module_id}) has been connected!'\n )\n if self.__is_all_connected():\n self._init_event.set()\n\n def __is_all_connected(self) ->bool:\n \"\"\" Determine whether all modules are connected\n\n :return: true is all modules are connected\n :rtype: bool\n \"\"\"\n return self._nb_modules == len(self._modules)\n\n def __init_module(self, module_type: str) ->Module:\n \"\"\" Find module type for module initialize\n\n :param module_type: Type of the module in string\n :type module_type: str\n :return: Module corresponding to the type\n :rtype: Module\n \"\"\"\n module = {'button': Button, 'dial': Dial, 'display': Display, 'env':\n Env, 
'gyro': Gyro, 'ir': Ir, 'led': Led, 'mic': Mic, 'motor':\n Motor, 'speaker': Speaker, 'ultrasonic': Ultrasonic}.get(\n module_type)\n return module\n\n def __update_property(self, message: Dict[str, int]) ->None:\n \"\"\" Update module property\n\n :param message: Dictionary format message\n :type message: Dictionary\n :return: None\n \"\"\"\n property_number = message['d']\n if property_number == 0 or property_number == 1:\n return\n for module in self._modules:\n if module.id == message['s']:\n message_decoded = bytearray(base64.b64decode(message['b']))\n property_type = module.PropertyType(property_number)\n module.update_property(property_type, round(struct.unpack(\n 'f', bytes(message_decoded[:4]))[0], 2))\n\n def __set_pnp(self, module_id: int, module_pnp_state: IntEnum) ->None:\n \"\"\" Generate module pnp on/off command\n\n :param module_id: ID of the target module\n :type module_id: int\n :param module_pnp_state: Pnp state value\n :type module_pnp_state: IntEnum\n :return: None\n \"\"\"\n if module_id is None:\n for curr_module_id in self._module_ids:\n pnp_message = self.__set_module_state(curr_module_id,\n Module.State.RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n else:\n pnp_message = self.__set_module_state(module_id, Module.State.\n RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n\n def __fit_module_uuid(self, module_info: int, module_uuid: int) ->int:\n \"\"\" Generate uuid using bitwise operation\n\n :param module_info: Module info\n :type module_info: int\n :param module_uuid: Module uuid\n :type module_uuid: int\n :return: Fitted uuid\n :rtype: int\n \"\"\"\n sizeof_module_uuid = 0\n while module_uuid >> sizeof_module_uuid > 0:\n sizeof_module_uuid += 1\n sizeof_module_uuid += sizeof_module_uuid % 4\n return module_info << sizeof_module_uuid | module_uuid\n\n def __set_module_state(self, destination_id: int, module_state: IntEnum,\n pnp_state: IntEnum) ->str:\n \"\"\" Generate message for set module state and pnp state\n\n :param destination_id: Id to target destination\n :type destination_id: int\n :param module_state: State value of the module\n :type module_state: int\n :param pnp_state: Pnp state value\n :type pnp_state: IntEnum\n :return: json serialized message\n :rtype: str\n \"\"\"\n message = dict()\n message['c'] = 9\n message['s'] = 0\n message['d'] = destination_id\n state_bytes = bytearray(2)\n state_bytes[0] = module_state\n state_bytes[1] = pnp_state\n message['b'] = base64.b64encode(bytes(state_bytes)).decode('utf-8')\n message['l'] = 2\n return json.dumps(message, separators=(',', ':'))\n\n def __init_modules(self) ->None:\n \"\"\" Initialize module on first run\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n reboot_message = self.__set_module_state(BROADCAST_ID, Module.State\n .REBOOT, Module.State.PNP_OFF)\n self._send_q.put(reboot_message)\n pnp_off_message = self.__set_module_state(BROADCAST_ID, Module.\n State.RUN, Module.State.PNP_OFF)\n self._send_q.put(pnp_off_message)\n request_uuid_message = self.__request_uuid(BROADCAST_ID)\n self._send_q.put(request_uuid_message)\n self.request_topology()\n\n def __delay(self) ->None:\n \"\"\" Wait for delay\n\n :return: None\n \"\"\"\n time.sleep(0.5)\n\n def __request_uuid(self, source_id: int, is_network_module: bool=False\n ) ->str:\n \"\"\" Generate broadcasting message for request uuid\n\n :param source_id: Id of the source\n :type source_id: int\n :param is_network_module: true if network module\n :type is_network_module: bool\n :return: json serialized message\n :rtype: str\n 
\"\"\"\n BROADCAST_ID = 4095\n message = dict()\n message['c'] = 40 if is_network_module else 8\n message['s'] = source_id\n message['d'] = BROADCAST_ID\n id_bytes = bytearray(8)\n id_bytes[0] = 255\n id_bytes[1] = 15\n message['b'] = base64.b64encode(bytes(id_bytes)).decode('utf-8')\n message['l'] = 8\n return json.dumps(message, separators=(',', ':'))\n\n def request_topology(self, cmd: int=7, module_id: int=4095) ->None:\n \"\"\"Request module topology\n\n :return: json serialized topology request message\n :rtype: str\n \"\"\"\n message = dict()\n message['c'] = cmd\n message['s'] = 0\n message['d'] = module_id\n direction_data = bytearray(8)\n message['b'] = base64.b64encode(bytes(direction_data)).decode('utf-8')\n message['l'] = 8\n self._send_q.put(json.dumps(message, separators=(',', ':')))\n\n def update_firmware(self) ->None:\n \"\"\" Remove firmware of MODI modules\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n firmware_update_message = self.__set_module_state(BROADCAST_ID,\n Module.State.UPDATE_FIRMWARE, Module.State.PNP_OFF)\n self._send_q.put(firmware_update_message)\n self.__delay()\n\n def update_firmware_ready(self, module_id: int) ->None:\n \"\"\" Check if modules with no firmware are ready to update its firmware\n\n :param module_id: Id of the target module\n :type module_id: int\n :return: None\n \"\"\"\n firmware_update_ready_message = self.__set_module_state(module_id,\n Module.State.UPDATE_FIRMWARE_READY, Module.State.PNP_OFF)\n self._send_q.put(firmware_update_ready_message)\n self.__delay()\n\n def __get_type_from_uuid(self, uuid):\n if uuid is None:\n return 'Network'\n hexadecimal = hex(uuid).lstrip('0x')\n type_indicator = str(hexadecimal)[:4]\n module_type = {'2000': 'env', '2010': 'gyro', '2020': 'mic', '2030':\n 'button', '2040': 'dial', '2050': 'ultrasonic', '2060': 'ir',\n '4000': 'display', '4010': 'motor', '4020': 'led', '4030':\n 'speaker'}.get(type_indicator)\n return 'Network' if module_type is None else module_type\n",
"<import token>\n\n\nclass ExeTask:\n \"\"\"\n :param queue send_q: Inter-process queue for writing serial\n message.\n :param queue recv_q: Inter-process queue for parsing json message.\n :param dict() module_ids: dict() of module_id : ['timestamp', 'uuid'].\n :param list() modules: list() of module instance.\n \"\"\"\n __module_categories = ['network', 'input', 'output']\n __module_types = {'network': ['usb', 'usb/wifi/ble'], 'input': ['env',\n 'gyro', 'mic', 'button', 'dial', 'ultrasonic', 'ir'], 'output': [\n 'display', 'motor', 'led', 'speaker']}\n\n def __init__(self, modules, module_ids, topology_data, recv_q, send_q,\n init_event, nb_modules, firmware_updater):\n self._modules = modules\n self._module_ids = module_ids\n self._topology_data = topology_data\n self._recv_q = recv_q\n self._send_q = send_q\n self._init_event = init_event\n self._nb_modules = nb_modules\n self.firmware_updater = firmware_updater\n self.firmware_update_message_flag = False\n self.__init_modules()\n print('Start initializing connected MODI modules')\n\n def run(self, delay: float):\n \"\"\" Run in ExecutorThread\n\n :param delay: time value to wait in seconds\n :type delay: float\n \"\"\"\n time.sleep(delay)\n try:\n raw_message = self._recv_q.get_nowait()\n message = json.loads(raw_message)\n except queue.Empty:\n pass\n except json.decoder.JSONDecodeError:\n print('current json message:', raw_message)\n else:\n self.__command_handler(message['c'])(message)\n\n def __command_handler(self, command: int) ->Callable[[Dict[str, int]], None\n ]:\n \"\"\" Execute task based on command message\n\n :param command: command code\n :type command: int\n :return: a function the corresponds to the command code\n :rtype: Callable[[Dict[str, int]], None]\n \"\"\"\n return {(0): self.__update_health, (10): self.__update_warning, (12\n ): self.__update_firmware_state, (5): self.__update_modules, (7\n ): self.__update_topology, (31): self.__update_property}.get(\n command, lambda _: None)\n\n def __update_firmware_state(self, message):\n byte_data = message['b']\n message_decoded = bytearray(base64.b64decode(byte_data))\n stream_state = message_decoded[4]\n if stream_state == self.firmware_updater.State.CRC_ERROR.value:\n self.firmware_updater.update_response(response=True,\n is_error_response=True)\n elif stream_state == self.firmware_updater.State.CRC_COMPLETE.value:\n self.firmware_updater.update_response(response=True)\n elif stream_state == self.firmware_updater.State.ERASE_ERROR.value:\n self.firmware_updater.update_response(response=True,\n is_error_response=True)\n elif stream_state == self.firmware_updater.State.ERASE_COMPLETE.value:\n self.firmware_updater.update_response(response=True)\n\n def __update_topology(self, message: Dict[str, int]) ->None:\n \"\"\"Update the topology of the connected modules\n\n :param message: Dictionary format message of the module\n :return: None\n \"\"\"\n src_id = message['s']\n byte_data = message['b']\n broadcast_id = 2 ** 16 - 1\n topology_by_id = {}\n message_decoded = bytearray(base64.b64decode(byte_data))\n src_uuid = self.__get_uuid_by_id(src_id)\n topology_by_id['uuid'] = src_uuid\n right_id = message_decoded[1] << 8 | message_decoded[0]\n topology_by_id['r'] = right_id if right_id != broadcast_id else None\n top_id = message_decoded[3] << 8 | message_decoded[2]\n topology_by_id['t'] = top_id if top_id != broadcast_id else None\n left_id = message_decoded[5] << 8 | message_decoded[4]\n topology_by_id['l'] = left_id if left_id != broadcast_id else None\n bottom_id = 
message_decoded[7] << 8 | message_decoded[6]\n topology_by_id['b'] = bottom_id if bottom_id != broadcast_id else None\n if not self._topology_data.get(src_id):\n self._topology_data[src_id] = topology_by_id\n else:\n for key in self._topology_data[src_id]:\n if not self._topology_data[src_id][key]:\n self._topology_data[src_id][key] = topology_by_id[key]\n\n def __get_uuid_by_id(self, id_: int) ->int:\n \"\"\"Find id of a module which has corresponding uuid\n\n :param id_: ID of the module\n :type id_: int\n :return: UUID\n :rtype: int\n \"\"\"\n for module in self._modules:\n if module.id == id_:\n return module.uuid\n return None\n\n def __update_health(self, message: Dict[str, int]) ->None:\n \"\"\" Update information by health message\n\n :param message: Dictionary format message of the module\n :type message: Dictionary\n :return: None\n \"\"\"\n module_id = message['s']\n curr_time_ms = int(time.time() * 1000)\n message_decoded = bytearray(base64.b64decode(message['b']))\n self._module_ids[module_id] = self._module_ids.get(module_id, dict())\n self._module_ids[module_id]['timestamp'] = curr_time_ms\n self._module_ids[module_id]['uuid'] = self._module_ids[module_id].get(\n 'uuid', str())\n self._module_ids[module_id]['battery'] = int(message_decoded[3])\n if not self._module_ids[module_id]['uuid']:\n message_to_write = self.__request_uuid(module_id,\n is_network_module=False)\n self._send_q.put(message_to_write)\n message_to_write = self.__request_uuid(module_id,\n is_network_module=True)\n self._send_q.put(message_to_write)\n for module_id, module_info in list(self._module_ids.items()):\n if curr_time_ms - module_info['timestamp'] > 1000:\n for module in self._modules:\n if module.uuid == module_info['uuid']:\n module.set_connection_state(connection_state=False)\n\n def __update_warning(self, message: Dict[str, int]) ->None:\n \"\"\"Update the warning message\n\n :param message: Warning message in Dictionary format\n :return: None\n \"\"\"\n warning_data = bytearray(base64.b64decode(message['b']))\n warning_type = warning_data[6]\n if not warning_type:\n return\n module_uuid = warning_data[:6]\n module_uuid_res = 0\n for i, v in enumerate(module_uuid):\n module_uuid_res |= v << 8 * i\n module_id = message['s']\n module_type = self.__get_type_from_uuid(module_uuid_res)\n if module_type == 'Network':\n return\n if warning_type == 1:\n self.firmware_updater.check_to_update_firmware(module_id)\n elif warning_type == 2:\n if self.firmware_updater.update_in_progress:\n self.firmware_updater.add_to_waitlist(module_id, module_type)\n else:\n self.firmware_updater.update_module(module_id, module_type)\n else:\n pass\n\n def __update_modules(self, message: Dict[str, str]) ->None:\n \"\"\" Update module information\n\n :param message: Dictionary format module info\n :type message: Dictionary\n :return: None\n \"\"\"\n curr_time_ms = int(time.time() * 1000)\n module_id = message['s']\n self._module_ids[module_id] = self._module_ids.get(module_id, dict())\n self._module_ids[module_id]['timestamp'] = curr_time_ms\n self._module_ids[module_id]['uuid'] = self._module_ids[module_id].get(\n 'uuid', str())\n message_decoded = bytearray(base64.b64decode(message['b']))\n module_uuid_bytes = message_decoded[:4]\n module_info_bytes = message_decoded[-4:]\n module_info = (module_info_bytes[1] << 8) + module_info_bytes[0]\n module_version_info = module_info_bytes[3] << 8 | module_info_bytes[2]\n version_path = (\n 'https://download.luxrobo.com/modi-skeleton-mobile/version.txt')\n version_info = None\n try:\n 
for line in ur.urlopen(version_path, timeout=1):\n version_info = line.decode('utf-8').lstrip('v')\n version_digits = [int(digit) for digit in version_info.split('.')]\n \"\"\" Version number is formed by concatenating all three version bits\n e.g. v2.2.4 -> 010 00010 00000100 -> 0100 0010 0000 0100\n \"\"\"\n latest_version = version_digits[0] << 13 | version_digits[1\n ] << 8 | version_digits[2]\n except URLError:\n latest_version = module_version_info\n module_category_idx = module_info >> 13\n module_type_idx = module_info >> 4 & 511\n module_category = self.__module_categories[module_category_idx]\n module_type = self.__module_types[module_category][module_type_idx]\n module_uuid = self.__fit_module_uuid(module_info, (\n module_uuid_bytes[3] << 24) + (module_uuid_bytes[2] << 16) + (\n module_uuid_bytes[1] << 8) + module_uuid_bytes[0])\n module_uuid = up(message['b'], (6, 2))[0]\n if (module_category != 'network' and not self.\n firmware_update_message_flag and module_version_info <\n latest_version):\n print('Your MODI module(s) is not up-to-date.')\n print(\n \"You can update your MODI modules by calling 'update_module_firmware()'\"\n )\n self.firmware_update_message_flag = True\n self._module_ids[module_id]['uuid'] = module_uuid\n for module in self._modules:\n if module.uuid == module_uuid and not module.is_connected:\n module.set_connection_state(connection_state=True)\n pnp_off_message = self.__set_module_state(4095, Module.\n State.RUN, Module.State.PNP_OFF)\n self._send_q.put(pnp_off_message)\n if not next((module for module in self._modules if module.uuid ==\n module_uuid), None):\n if module_category != 'network':\n module_template = self.__init_module(module_type)\n module_instance = module_template(module_id, module_uuid,\n self._send_q)\n self.__set_pnp(module_id=module_instance.id,\n module_pnp_state=Module.State.PNP_OFF)\n module_instance.version = module_version_info\n module_instance.is_up_to_date = (module_version_info ==\n latest_version)\n self._modules.append(module_instance)\n print(\n f'{type(module_instance).__name__} ({module_id}) has been connected!'\n )\n if self.__is_all_connected():\n self._init_event.set()\n\n def __is_all_connected(self) ->bool:\n \"\"\" Determine whether all modules are connected\n\n :return: true is all modules are connected\n :rtype: bool\n \"\"\"\n return self._nb_modules == len(self._modules)\n\n def __init_module(self, module_type: str) ->Module:\n \"\"\" Find module type for module initialize\n\n :param module_type: Type of the module in string\n :type module_type: str\n :return: Module corresponding to the type\n :rtype: Module\n \"\"\"\n module = {'button': Button, 'dial': Dial, 'display': Display, 'env':\n Env, 'gyro': Gyro, 'ir': Ir, 'led': Led, 'mic': Mic, 'motor':\n Motor, 'speaker': Speaker, 'ultrasonic': Ultrasonic}.get(\n module_type)\n return module\n\n def __update_property(self, message: Dict[str, int]) ->None:\n \"\"\" Update module property\n\n :param message: Dictionary format message\n :type message: Dictionary\n :return: None\n \"\"\"\n property_number = message['d']\n if property_number == 0 or property_number == 1:\n return\n for module in self._modules:\n if module.id == message['s']:\n message_decoded = bytearray(base64.b64decode(message['b']))\n property_type = module.PropertyType(property_number)\n module.update_property(property_type, round(struct.unpack(\n 'f', bytes(message_decoded[:4]))[0], 2))\n\n def __set_pnp(self, module_id: int, module_pnp_state: IntEnum) ->None:\n \"\"\" Generate module pnp on/off 
command\n\n :param module_id: ID of the target module\n :type module_id: int\n :param module_pnp_state: Pnp state value\n :type module_pnp_state: IntEnum\n :return: None\n \"\"\"\n if module_id is None:\n for curr_module_id in self._module_ids:\n pnp_message = self.__set_module_state(curr_module_id,\n Module.State.RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n else:\n pnp_message = self.__set_module_state(module_id, Module.State.\n RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n\n def __fit_module_uuid(self, module_info: int, module_uuid: int) ->int:\n \"\"\" Generate uuid using bitwise operation\n\n :param module_info: Module info\n :type module_info: int\n :param module_uuid: Module uuid\n :type module_uuid: int\n :return: Fitted uuid\n :rtype: int\n \"\"\"\n sizeof_module_uuid = 0\n while module_uuid >> sizeof_module_uuid > 0:\n sizeof_module_uuid += 1\n sizeof_module_uuid += sizeof_module_uuid % 4\n return module_info << sizeof_module_uuid | module_uuid\n\n def __set_module_state(self, destination_id: int, module_state: IntEnum,\n pnp_state: IntEnum) ->str:\n \"\"\" Generate message for set module state and pnp state\n\n :param destination_id: Id to target destination\n :type destination_id: int\n :param module_state: State value of the module\n :type module_state: int\n :param pnp_state: Pnp state value\n :type pnp_state: IntEnum\n :return: json serialized message\n :rtype: str\n \"\"\"\n message = dict()\n message['c'] = 9\n message['s'] = 0\n message['d'] = destination_id\n state_bytes = bytearray(2)\n state_bytes[0] = module_state\n state_bytes[1] = pnp_state\n message['b'] = base64.b64encode(bytes(state_bytes)).decode('utf-8')\n message['l'] = 2\n return json.dumps(message, separators=(',', ':'))\n\n def __init_modules(self) ->None:\n \"\"\" Initialize module on first run\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n reboot_message = self.__set_module_state(BROADCAST_ID, Module.State\n .REBOOT, Module.State.PNP_OFF)\n self._send_q.put(reboot_message)\n pnp_off_message = self.__set_module_state(BROADCAST_ID, Module.\n State.RUN, Module.State.PNP_OFF)\n self._send_q.put(pnp_off_message)\n request_uuid_message = self.__request_uuid(BROADCAST_ID)\n self._send_q.put(request_uuid_message)\n self.request_topology()\n\n def __delay(self) ->None:\n \"\"\" Wait for delay\n\n :return: None\n \"\"\"\n time.sleep(0.5)\n\n def __request_uuid(self, source_id: int, is_network_module: bool=False\n ) ->str:\n \"\"\" Generate broadcasting message for request uuid\n\n :param source_id: Id of the source\n :type source_id: int\n :param is_network_module: true if network module\n :type is_network_module: bool\n :return: json serialized message\n :rtype: str\n \"\"\"\n BROADCAST_ID = 4095\n message = dict()\n message['c'] = 40 if is_network_module else 8\n message['s'] = source_id\n message['d'] = BROADCAST_ID\n id_bytes = bytearray(8)\n id_bytes[0] = 255\n id_bytes[1] = 15\n message['b'] = base64.b64encode(bytes(id_bytes)).decode('utf-8')\n message['l'] = 8\n return json.dumps(message, separators=(',', ':'))\n\n def request_topology(self, cmd: int=7, module_id: int=4095) ->None:\n \"\"\"Request module topology\n\n :return: json serialized topology request message\n :rtype: str\n \"\"\"\n message = dict()\n message['c'] = cmd\n message['s'] = 0\n message['d'] = module_id\n direction_data = bytearray(8)\n message['b'] = base64.b64encode(bytes(direction_data)).decode('utf-8')\n message['l'] = 8\n self._send_q.put(json.dumps(message, separators=(',', ':')))\n\n def 
update_firmware(self) ->None:\n \"\"\" Remove firmware of MODI modules\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n firmware_update_message = self.__set_module_state(BROADCAST_ID,\n Module.State.UPDATE_FIRMWARE, Module.State.PNP_OFF)\n self._send_q.put(firmware_update_message)\n self.__delay()\n\n def update_firmware_ready(self, module_id: int) ->None:\n \"\"\" Check if modules with no firmware are ready to update its firmware\n\n :param module_id: Id of the target module\n :type module_id: int\n :return: None\n \"\"\"\n firmware_update_ready_message = self.__set_module_state(module_id,\n Module.State.UPDATE_FIRMWARE_READY, Module.State.PNP_OFF)\n self._send_q.put(firmware_update_ready_message)\n self.__delay()\n\n def __get_type_from_uuid(self, uuid):\n if uuid is None:\n return 'Network'\n hexadecimal = hex(uuid).lstrip('0x')\n type_indicator = str(hexadecimal)[:4]\n module_type = {'2000': 'env', '2010': 'gyro', '2020': 'mic', '2030':\n 'button', '2040': 'dial', '2050': 'ultrasonic', '2060': 'ir',\n '4000': 'display', '4010': 'motor', '4020': 'led', '4030':\n 'speaker'}.get(type_indicator)\n return 'Network' if module_type is None else module_type\n",
"<import token>\n\n\nclass ExeTask:\n <docstring token>\n __module_categories = ['network', 'input', 'output']\n __module_types = {'network': ['usb', 'usb/wifi/ble'], 'input': ['env',\n 'gyro', 'mic', 'button', 'dial', 'ultrasonic', 'ir'], 'output': [\n 'display', 'motor', 'led', 'speaker']}\n\n def __init__(self, modules, module_ids, topology_data, recv_q, send_q,\n init_event, nb_modules, firmware_updater):\n self._modules = modules\n self._module_ids = module_ids\n self._topology_data = topology_data\n self._recv_q = recv_q\n self._send_q = send_q\n self._init_event = init_event\n self._nb_modules = nb_modules\n self.firmware_updater = firmware_updater\n self.firmware_update_message_flag = False\n self.__init_modules()\n print('Start initializing connected MODI modules')\n\n def run(self, delay: float):\n \"\"\" Run in ExecutorThread\n\n :param delay: time value to wait in seconds\n :type delay: float\n \"\"\"\n time.sleep(delay)\n try:\n raw_message = self._recv_q.get_nowait()\n message = json.loads(raw_message)\n except queue.Empty:\n pass\n except json.decoder.JSONDecodeError:\n print('current json message:', raw_message)\n else:\n self.__command_handler(message['c'])(message)\n\n def __command_handler(self, command: int) ->Callable[[Dict[str, int]], None\n ]:\n \"\"\" Execute task based on command message\n\n :param command: command code\n :type command: int\n :return: a function the corresponds to the command code\n :rtype: Callable[[Dict[str, int]], None]\n \"\"\"\n return {(0): self.__update_health, (10): self.__update_warning, (12\n ): self.__update_firmware_state, (5): self.__update_modules, (7\n ): self.__update_topology, (31): self.__update_property}.get(\n command, lambda _: None)\n\n def __update_firmware_state(self, message):\n byte_data = message['b']\n message_decoded = bytearray(base64.b64decode(byte_data))\n stream_state = message_decoded[4]\n if stream_state == self.firmware_updater.State.CRC_ERROR.value:\n self.firmware_updater.update_response(response=True,\n is_error_response=True)\n elif stream_state == self.firmware_updater.State.CRC_COMPLETE.value:\n self.firmware_updater.update_response(response=True)\n elif stream_state == self.firmware_updater.State.ERASE_ERROR.value:\n self.firmware_updater.update_response(response=True,\n is_error_response=True)\n elif stream_state == self.firmware_updater.State.ERASE_COMPLETE.value:\n self.firmware_updater.update_response(response=True)\n\n def __update_topology(self, message: Dict[str, int]) ->None:\n \"\"\"Update the topology of the connected modules\n\n :param message: Dictionary format message of the module\n :return: None\n \"\"\"\n src_id = message['s']\n byte_data = message['b']\n broadcast_id = 2 ** 16 - 1\n topology_by_id = {}\n message_decoded = bytearray(base64.b64decode(byte_data))\n src_uuid = self.__get_uuid_by_id(src_id)\n topology_by_id['uuid'] = src_uuid\n right_id = message_decoded[1] << 8 | message_decoded[0]\n topology_by_id['r'] = right_id if right_id != broadcast_id else None\n top_id = message_decoded[3] << 8 | message_decoded[2]\n topology_by_id['t'] = top_id if top_id != broadcast_id else None\n left_id = message_decoded[5] << 8 | message_decoded[4]\n topology_by_id['l'] = left_id if left_id != broadcast_id else None\n bottom_id = message_decoded[7] << 8 | message_decoded[6]\n topology_by_id['b'] = bottom_id if bottom_id != broadcast_id else None\n if not self._topology_data.get(src_id):\n self._topology_data[src_id] = topology_by_id\n else:\n for key in self._topology_data[src_id]:\n if not 
self._topology_data[src_id][key]:\n self._topology_data[src_id][key] = topology_by_id[key]\n\n def __get_uuid_by_id(self, id_: int) ->int:\n \"\"\"Find id of a module which has corresponding uuid\n\n :param id_: ID of the module\n :type id_: int\n :return: UUID\n :rtype: int\n \"\"\"\n for module in self._modules:\n if module.id == id_:\n return module.uuid\n return None\n\n def __update_health(self, message: Dict[str, int]) ->None:\n \"\"\" Update information by health message\n\n :param message: Dictionary format message of the module\n :type message: Dictionary\n :return: None\n \"\"\"\n module_id = message['s']\n curr_time_ms = int(time.time() * 1000)\n message_decoded = bytearray(base64.b64decode(message['b']))\n self._module_ids[module_id] = self._module_ids.get(module_id, dict())\n self._module_ids[module_id]['timestamp'] = curr_time_ms\n self._module_ids[module_id]['uuid'] = self._module_ids[module_id].get(\n 'uuid', str())\n self._module_ids[module_id]['battery'] = int(message_decoded[3])\n if not self._module_ids[module_id]['uuid']:\n message_to_write = self.__request_uuid(module_id,\n is_network_module=False)\n self._send_q.put(message_to_write)\n message_to_write = self.__request_uuid(module_id,\n is_network_module=True)\n self._send_q.put(message_to_write)\n for module_id, module_info in list(self._module_ids.items()):\n if curr_time_ms - module_info['timestamp'] > 1000:\n for module in self._modules:\n if module.uuid == module_info['uuid']:\n module.set_connection_state(connection_state=False)\n\n def __update_warning(self, message: Dict[str, int]) ->None:\n \"\"\"Update the warning message\n\n :param message: Warning message in Dictionary format\n :return: None\n \"\"\"\n warning_data = bytearray(base64.b64decode(message['b']))\n warning_type = warning_data[6]\n if not warning_type:\n return\n module_uuid = warning_data[:6]\n module_uuid_res = 0\n for i, v in enumerate(module_uuid):\n module_uuid_res |= v << 8 * i\n module_id = message['s']\n module_type = self.__get_type_from_uuid(module_uuid_res)\n if module_type == 'Network':\n return\n if warning_type == 1:\n self.firmware_updater.check_to_update_firmware(module_id)\n elif warning_type == 2:\n if self.firmware_updater.update_in_progress:\n self.firmware_updater.add_to_waitlist(module_id, module_type)\n else:\n self.firmware_updater.update_module(module_id, module_type)\n else:\n pass\n\n def __update_modules(self, message: Dict[str, str]) ->None:\n \"\"\" Update module information\n\n :param message: Dictionary format module info\n :type message: Dictionary\n :return: None\n \"\"\"\n curr_time_ms = int(time.time() * 1000)\n module_id = message['s']\n self._module_ids[module_id] = self._module_ids.get(module_id, dict())\n self._module_ids[module_id]['timestamp'] = curr_time_ms\n self._module_ids[module_id]['uuid'] = self._module_ids[module_id].get(\n 'uuid', str())\n message_decoded = bytearray(base64.b64decode(message['b']))\n module_uuid_bytes = message_decoded[:4]\n module_info_bytes = message_decoded[-4:]\n module_info = (module_info_bytes[1] << 8) + module_info_bytes[0]\n module_version_info = module_info_bytes[3] << 8 | module_info_bytes[2]\n version_path = (\n 'https://download.luxrobo.com/modi-skeleton-mobile/version.txt')\n version_info = None\n try:\n for line in ur.urlopen(version_path, timeout=1):\n version_info = line.decode('utf-8').lstrip('v')\n version_digits = [int(digit) for digit in version_info.split('.')]\n \"\"\" Version number is formed by concatenating all three version bits\n e.g. 
v2.2.4 -> 010 00010 00000100 -> 0100 0010 0000 0100\n \"\"\"\n latest_version = version_digits[0] << 13 | version_digits[1\n ] << 8 | version_digits[2]\n except URLError:\n latest_version = module_version_info\n module_category_idx = module_info >> 13\n module_type_idx = module_info >> 4 & 511\n module_category = self.__module_categories[module_category_idx]\n module_type = self.__module_types[module_category][module_type_idx]\n module_uuid = self.__fit_module_uuid(module_info, (\n module_uuid_bytes[3] << 24) + (module_uuid_bytes[2] << 16) + (\n module_uuid_bytes[1] << 8) + module_uuid_bytes[0])\n module_uuid = up(message['b'], (6, 2))[0]\n if (module_category != 'network' and not self.\n firmware_update_message_flag and module_version_info <\n latest_version):\n print('Your MODI module(s) is not up-to-date.')\n print(\n \"You can update your MODI modules by calling 'update_module_firmware()'\"\n )\n self.firmware_update_message_flag = True\n self._module_ids[module_id]['uuid'] = module_uuid\n for module in self._modules:\n if module.uuid == module_uuid and not module.is_connected:\n module.set_connection_state(connection_state=True)\n pnp_off_message = self.__set_module_state(4095, Module.\n State.RUN, Module.State.PNP_OFF)\n self._send_q.put(pnp_off_message)\n if not next((module for module in self._modules if module.uuid ==\n module_uuid), None):\n if module_category != 'network':\n module_template = self.__init_module(module_type)\n module_instance = module_template(module_id, module_uuid,\n self._send_q)\n self.__set_pnp(module_id=module_instance.id,\n module_pnp_state=Module.State.PNP_OFF)\n module_instance.version = module_version_info\n module_instance.is_up_to_date = (module_version_info ==\n latest_version)\n self._modules.append(module_instance)\n print(\n f'{type(module_instance).__name__} ({module_id}) has been connected!'\n )\n if self.__is_all_connected():\n self._init_event.set()\n\n def __is_all_connected(self) ->bool:\n \"\"\" Determine whether all modules are connected\n\n :return: true is all modules are connected\n :rtype: bool\n \"\"\"\n return self._nb_modules == len(self._modules)\n\n def __init_module(self, module_type: str) ->Module:\n \"\"\" Find module type for module initialize\n\n :param module_type: Type of the module in string\n :type module_type: str\n :return: Module corresponding to the type\n :rtype: Module\n \"\"\"\n module = {'button': Button, 'dial': Dial, 'display': Display, 'env':\n Env, 'gyro': Gyro, 'ir': Ir, 'led': Led, 'mic': Mic, 'motor':\n Motor, 'speaker': Speaker, 'ultrasonic': Ultrasonic}.get(\n module_type)\n return module\n\n def __update_property(self, message: Dict[str, int]) ->None:\n \"\"\" Update module property\n\n :param message: Dictionary format message\n :type message: Dictionary\n :return: None\n \"\"\"\n property_number = message['d']\n if property_number == 0 or property_number == 1:\n return\n for module in self._modules:\n if module.id == message['s']:\n message_decoded = bytearray(base64.b64decode(message['b']))\n property_type = module.PropertyType(property_number)\n module.update_property(property_type, round(struct.unpack(\n 'f', bytes(message_decoded[:4]))[0], 2))\n\n def __set_pnp(self, module_id: int, module_pnp_state: IntEnum) ->None:\n \"\"\" Generate module pnp on/off command\n\n :param module_id: ID of the target module\n :type module_id: int\n :param module_pnp_state: Pnp state value\n :type module_pnp_state: IntEnum\n :return: None\n \"\"\"\n if module_id is None:\n for curr_module_id in self._module_ids:\n 
pnp_message = self.__set_module_state(curr_module_id,\n Module.State.RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n else:\n pnp_message = self.__set_module_state(module_id, Module.State.\n RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n\n def __fit_module_uuid(self, module_info: int, module_uuid: int) ->int:\n \"\"\" Generate uuid using bitwise operation\n\n :param module_info: Module info\n :type module_info: int\n :param module_uuid: Module uuid\n :type module_uuid: int\n :return: Fitted uuid\n :rtype: int\n \"\"\"\n sizeof_module_uuid = 0\n while module_uuid >> sizeof_module_uuid > 0:\n sizeof_module_uuid += 1\n sizeof_module_uuid += sizeof_module_uuid % 4\n return module_info << sizeof_module_uuid | module_uuid\n\n def __set_module_state(self, destination_id: int, module_state: IntEnum,\n pnp_state: IntEnum) ->str:\n \"\"\" Generate message for set module state and pnp state\n\n :param destination_id: Id to target destination\n :type destination_id: int\n :param module_state: State value of the module\n :type module_state: int\n :param pnp_state: Pnp state value\n :type pnp_state: IntEnum\n :return: json serialized message\n :rtype: str\n \"\"\"\n message = dict()\n message['c'] = 9\n message['s'] = 0\n message['d'] = destination_id\n state_bytes = bytearray(2)\n state_bytes[0] = module_state\n state_bytes[1] = pnp_state\n message['b'] = base64.b64encode(bytes(state_bytes)).decode('utf-8')\n message['l'] = 2\n return json.dumps(message, separators=(',', ':'))\n\n def __init_modules(self) ->None:\n \"\"\" Initialize module on first run\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n reboot_message = self.__set_module_state(BROADCAST_ID, Module.State\n .REBOOT, Module.State.PNP_OFF)\n self._send_q.put(reboot_message)\n pnp_off_message = self.__set_module_state(BROADCAST_ID, Module.\n State.RUN, Module.State.PNP_OFF)\n self._send_q.put(pnp_off_message)\n request_uuid_message = self.__request_uuid(BROADCAST_ID)\n self._send_q.put(request_uuid_message)\n self.request_topology()\n\n def __delay(self) ->None:\n \"\"\" Wait for delay\n\n :return: None\n \"\"\"\n time.sleep(0.5)\n\n def __request_uuid(self, source_id: int, is_network_module: bool=False\n ) ->str:\n \"\"\" Generate broadcasting message for request uuid\n\n :param source_id: Id of the source\n :type source_id: int\n :param is_network_module: true if network module\n :type is_network_module: bool\n :return: json serialized message\n :rtype: str\n \"\"\"\n BROADCAST_ID = 4095\n message = dict()\n message['c'] = 40 if is_network_module else 8\n message['s'] = source_id\n message['d'] = BROADCAST_ID\n id_bytes = bytearray(8)\n id_bytes[0] = 255\n id_bytes[1] = 15\n message['b'] = base64.b64encode(bytes(id_bytes)).decode('utf-8')\n message['l'] = 8\n return json.dumps(message, separators=(',', ':'))\n\n def request_topology(self, cmd: int=7, module_id: int=4095) ->None:\n \"\"\"Request module topology\n\n :return: json serialized topology request message\n :rtype: str\n \"\"\"\n message = dict()\n message['c'] = cmd\n message['s'] = 0\n message['d'] = module_id\n direction_data = bytearray(8)\n message['b'] = base64.b64encode(bytes(direction_data)).decode('utf-8')\n message['l'] = 8\n self._send_q.put(json.dumps(message, separators=(',', ':')))\n\n def update_firmware(self) ->None:\n \"\"\" Remove firmware of MODI modules\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n firmware_update_message = self.__set_module_state(BROADCAST_ID,\n Module.State.UPDATE_FIRMWARE, Module.State.PNP_OFF)\n 
self._send_q.put(firmware_update_message)\n self.__delay()\n\n def update_firmware_ready(self, module_id: int) ->None:\n \"\"\" Check if modules with no firmware are ready to update its firmware\n\n :param module_id: Id of the target module\n :type module_id: int\n :return: None\n \"\"\"\n firmware_update_ready_message = self.__set_module_state(module_id,\n Module.State.UPDATE_FIRMWARE_READY, Module.State.PNP_OFF)\n self._send_q.put(firmware_update_ready_message)\n self.__delay()\n\n def __get_type_from_uuid(self, uuid):\n if uuid is None:\n return 'Network'\n hexadecimal = hex(uuid).lstrip('0x')\n type_indicator = str(hexadecimal)[:4]\n module_type = {'2000': 'env', '2010': 'gyro', '2020': 'mic', '2030':\n 'button', '2040': 'dial', '2050': 'ultrasonic', '2060': 'ir',\n '4000': 'display', '4010': 'motor', '4020': 'led', '4030':\n 'speaker'}.get(type_indicator)\n return 'Network' if module_type is None else module_type\n",
"<import token>\n\n\nclass ExeTask:\n <docstring token>\n <assignment token>\n <assignment token>\n\n def __init__(self, modules, module_ids, topology_data, recv_q, send_q,\n init_event, nb_modules, firmware_updater):\n self._modules = modules\n self._module_ids = module_ids\n self._topology_data = topology_data\n self._recv_q = recv_q\n self._send_q = send_q\n self._init_event = init_event\n self._nb_modules = nb_modules\n self.firmware_updater = firmware_updater\n self.firmware_update_message_flag = False\n self.__init_modules()\n print('Start initializing connected MODI modules')\n\n def run(self, delay: float):\n \"\"\" Run in ExecutorThread\n\n :param delay: time value to wait in seconds\n :type delay: float\n \"\"\"\n time.sleep(delay)\n try:\n raw_message = self._recv_q.get_nowait()\n message = json.loads(raw_message)\n except queue.Empty:\n pass\n except json.decoder.JSONDecodeError:\n print('current json message:', raw_message)\n else:\n self.__command_handler(message['c'])(message)\n\n def __command_handler(self, command: int) ->Callable[[Dict[str, int]], None\n ]:\n \"\"\" Execute task based on command message\n\n :param command: command code\n :type command: int\n :return: a function the corresponds to the command code\n :rtype: Callable[[Dict[str, int]], None]\n \"\"\"\n return {(0): self.__update_health, (10): self.__update_warning, (12\n ): self.__update_firmware_state, (5): self.__update_modules, (7\n ): self.__update_topology, (31): self.__update_property}.get(\n command, lambda _: None)\n\n def __update_firmware_state(self, message):\n byte_data = message['b']\n message_decoded = bytearray(base64.b64decode(byte_data))\n stream_state = message_decoded[4]\n if stream_state == self.firmware_updater.State.CRC_ERROR.value:\n self.firmware_updater.update_response(response=True,\n is_error_response=True)\n elif stream_state == self.firmware_updater.State.CRC_COMPLETE.value:\n self.firmware_updater.update_response(response=True)\n elif stream_state == self.firmware_updater.State.ERASE_ERROR.value:\n self.firmware_updater.update_response(response=True,\n is_error_response=True)\n elif stream_state == self.firmware_updater.State.ERASE_COMPLETE.value:\n self.firmware_updater.update_response(response=True)\n\n def __update_topology(self, message: Dict[str, int]) ->None:\n \"\"\"Update the topology of the connected modules\n\n :param message: Dictionary format message of the module\n :return: None\n \"\"\"\n src_id = message['s']\n byte_data = message['b']\n broadcast_id = 2 ** 16 - 1\n topology_by_id = {}\n message_decoded = bytearray(base64.b64decode(byte_data))\n src_uuid = self.__get_uuid_by_id(src_id)\n topology_by_id['uuid'] = src_uuid\n right_id = message_decoded[1] << 8 | message_decoded[0]\n topology_by_id['r'] = right_id if right_id != broadcast_id else None\n top_id = message_decoded[3] << 8 | message_decoded[2]\n topology_by_id['t'] = top_id if top_id != broadcast_id else None\n left_id = message_decoded[5] << 8 | message_decoded[4]\n topology_by_id['l'] = left_id if left_id != broadcast_id else None\n bottom_id = message_decoded[7] << 8 | message_decoded[6]\n topology_by_id['b'] = bottom_id if bottom_id != broadcast_id else None\n if not self._topology_data.get(src_id):\n self._topology_data[src_id] = topology_by_id\n else:\n for key in self._topology_data[src_id]:\n if not self._topology_data[src_id][key]:\n self._topology_data[src_id][key] = topology_by_id[key]\n\n def __get_uuid_by_id(self, id_: int) ->int:\n \"\"\"Find id of a module which has corresponding uuid\n\n 
:param id_: ID of the module\n :type id_: int\n :return: UUID\n :rtype: int\n \"\"\"\n for module in self._modules:\n if module.id == id_:\n return module.uuid\n return None\n\n def __update_health(self, message: Dict[str, int]) ->None:\n \"\"\" Update information by health message\n\n :param message: Dictionary format message of the module\n :type message: Dictionary\n :return: None\n \"\"\"\n module_id = message['s']\n curr_time_ms = int(time.time() * 1000)\n message_decoded = bytearray(base64.b64decode(message['b']))\n self._module_ids[module_id] = self._module_ids.get(module_id, dict())\n self._module_ids[module_id]['timestamp'] = curr_time_ms\n self._module_ids[module_id]['uuid'] = self._module_ids[module_id].get(\n 'uuid', str())\n self._module_ids[module_id]['battery'] = int(message_decoded[3])\n if not self._module_ids[module_id]['uuid']:\n message_to_write = self.__request_uuid(module_id,\n is_network_module=False)\n self._send_q.put(message_to_write)\n message_to_write = self.__request_uuid(module_id,\n is_network_module=True)\n self._send_q.put(message_to_write)\n for module_id, module_info in list(self._module_ids.items()):\n if curr_time_ms - module_info['timestamp'] > 1000:\n for module in self._modules:\n if module.uuid == module_info['uuid']:\n module.set_connection_state(connection_state=False)\n\n def __update_warning(self, message: Dict[str, int]) ->None:\n \"\"\"Update the warning message\n\n :param message: Warning message in Dictionary format\n :return: None\n \"\"\"\n warning_data = bytearray(base64.b64decode(message['b']))\n warning_type = warning_data[6]\n if not warning_type:\n return\n module_uuid = warning_data[:6]\n module_uuid_res = 0\n for i, v in enumerate(module_uuid):\n module_uuid_res |= v << 8 * i\n module_id = message['s']\n module_type = self.__get_type_from_uuid(module_uuid_res)\n if module_type == 'Network':\n return\n if warning_type == 1:\n self.firmware_updater.check_to_update_firmware(module_id)\n elif warning_type == 2:\n if self.firmware_updater.update_in_progress:\n self.firmware_updater.add_to_waitlist(module_id, module_type)\n else:\n self.firmware_updater.update_module(module_id, module_type)\n else:\n pass\n\n def __update_modules(self, message: Dict[str, str]) ->None:\n \"\"\" Update module information\n\n :param message: Dictionary format module info\n :type message: Dictionary\n :return: None\n \"\"\"\n curr_time_ms = int(time.time() * 1000)\n module_id = message['s']\n self._module_ids[module_id] = self._module_ids.get(module_id, dict())\n self._module_ids[module_id]['timestamp'] = curr_time_ms\n self._module_ids[module_id]['uuid'] = self._module_ids[module_id].get(\n 'uuid', str())\n message_decoded = bytearray(base64.b64decode(message['b']))\n module_uuid_bytes = message_decoded[:4]\n module_info_bytes = message_decoded[-4:]\n module_info = (module_info_bytes[1] << 8) + module_info_bytes[0]\n module_version_info = module_info_bytes[3] << 8 | module_info_bytes[2]\n version_path = (\n 'https://download.luxrobo.com/modi-skeleton-mobile/version.txt')\n version_info = None\n try:\n for line in ur.urlopen(version_path, timeout=1):\n version_info = line.decode('utf-8').lstrip('v')\n version_digits = [int(digit) for digit in version_info.split('.')]\n \"\"\" Version number is formed by concatenating all three version bits\n e.g. 
v2.2.4 -> 010 00010 00000100 -> 0100 0010 0000 0100\n \"\"\"\n latest_version = version_digits[0] << 13 | version_digits[1\n ] << 8 | version_digits[2]\n except URLError:\n latest_version = module_version_info\n module_category_idx = module_info >> 13\n module_type_idx = module_info >> 4 & 511\n module_category = self.__module_categories[module_category_idx]\n module_type = self.__module_types[module_category][module_type_idx]\n module_uuid = self.__fit_module_uuid(module_info, (\n module_uuid_bytes[3] << 24) + (module_uuid_bytes[2] << 16) + (\n module_uuid_bytes[1] << 8) + module_uuid_bytes[0])\n module_uuid = up(message['b'], (6, 2))[0]\n if (module_category != 'network' and not self.\n firmware_update_message_flag and module_version_info <\n latest_version):\n print('Your MODI module(s) is not up-to-date.')\n print(\n \"You can update your MODI modules by calling 'update_module_firmware()'\"\n )\n self.firmware_update_message_flag = True\n self._module_ids[module_id]['uuid'] = module_uuid\n for module in self._modules:\n if module.uuid == module_uuid and not module.is_connected:\n module.set_connection_state(connection_state=True)\n pnp_off_message = self.__set_module_state(4095, Module.\n State.RUN, Module.State.PNP_OFF)\n self._send_q.put(pnp_off_message)\n if not next((module for module in self._modules if module.uuid ==\n module_uuid), None):\n if module_category != 'network':\n module_template = self.__init_module(module_type)\n module_instance = module_template(module_id, module_uuid,\n self._send_q)\n self.__set_pnp(module_id=module_instance.id,\n module_pnp_state=Module.State.PNP_OFF)\n module_instance.version = module_version_info\n module_instance.is_up_to_date = (module_version_info ==\n latest_version)\n self._modules.append(module_instance)\n print(\n f'{type(module_instance).__name__} ({module_id}) has been connected!'\n )\n if self.__is_all_connected():\n self._init_event.set()\n\n def __is_all_connected(self) ->bool:\n \"\"\" Determine whether all modules are connected\n\n :return: true is all modules are connected\n :rtype: bool\n \"\"\"\n return self._nb_modules == len(self._modules)\n\n def __init_module(self, module_type: str) ->Module:\n \"\"\" Find module type for module initialize\n\n :param module_type: Type of the module in string\n :type module_type: str\n :return: Module corresponding to the type\n :rtype: Module\n \"\"\"\n module = {'button': Button, 'dial': Dial, 'display': Display, 'env':\n Env, 'gyro': Gyro, 'ir': Ir, 'led': Led, 'mic': Mic, 'motor':\n Motor, 'speaker': Speaker, 'ultrasonic': Ultrasonic}.get(\n module_type)\n return module\n\n def __update_property(self, message: Dict[str, int]) ->None:\n \"\"\" Update module property\n\n :param message: Dictionary format message\n :type message: Dictionary\n :return: None\n \"\"\"\n property_number = message['d']\n if property_number == 0 or property_number == 1:\n return\n for module in self._modules:\n if module.id == message['s']:\n message_decoded = bytearray(base64.b64decode(message['b']))\n property_type = module.PropertyType(property_number)\n module.update_property(property_type, round(struct.unpack(\n 'f', bytes(message_decoded[:4]))[0], 2))\n\n def __set_pnp(self, module_id: int, module_pnp_state: IntEnum) ->None:\n \"\"\" Generate module pnp on/off command\n\n :param module_id: ID of the target module\n :type module_id: int\n :param module_pnp_state: Pnp state value\n :type module_pnp_state: IntEnum\n :return: None\n \"\"\"\n if module_id is None:\n for curr_module_id in self._module_ids:\n 
pnp_message = self.__set_module_state(curr_module_id,\n Module.State.RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n else:\n pnp_message = self.__set_module_state(module_id, Module.State.\n RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n\n def __fit_module_uuid(self, module_info: int, module_uuid: int) ->int:\n \"\"\" Generate uuid using bitwise operation\n\n :param module_info: Module info\n :type module_info: int\n :param module_uuid: Module uuid\n :type module_uuid: int\n :return: Fitted uuid\n :rtype: int\n \"\"\"\n sizeof_module_uuid = 0\n while module_uuid >> sizeof_module_uuid > 0:\n sizeof_module_uuid += 1\n sizeof_module_uuid += sizeof_module_uuid % 4\n return module_info << sizeof_module_uuid | module_uuid\n\n def __set_module_state(self, destination_id: int, module_state: IntEnum,\n pnp_state: IntEnum) ->str:\n \"\"\" Generate message for set module state and pnp state\n\n :param destination_id: Id to target destination\n :type destination_id: int\n :param module_state: State value of the module\n :type module_state: int\n :param pnp_state: Pnp state value\n :type pnp_state: IntEnum\n :return: json serialized message\n :rtype: str\n \"\"\"\n message = dict()\n message['c'] = 9\n message['s'] = 0\n message['d'] = destination_id\n state_bytes = bytearray(2)\n state_bytes[0] = module_state\n state_bytes[1] = pnp_state\n message['b'] = base64.b64encode(bytes(state_bytes)).decode('utf-8')\n message['l'] = 2\n return json.dumps(message, separators=(',', ':'))\n\n def __init_modules(self) ->None:\n \"\"\" Initialize module on first run\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n reboot_message = self.__set_module_state(BROADCAST_ID, Module.State\n .REBOOT, Module.State.PNP_OFF)\n self._send_q.put(reboot_message)\n pnp_off_message = self.__set_module_state(BROADCAST_ID, Module.\n State.RUN, Module.State.PNP_OFF)\n self._send_q.put(pnp_off_message)\n request_uuid_message = self.__request_uuid(BROADCAST_ID)\n self._send_q.put(request_uuid_message)\n self.request_topology()\n\n def __delay(self) ->None:\n \"\"\" Wait for delay\n\n :return: None\n \"\"\"\n time.sleep(0.5)\n\n def __request_uuid(self, source_id: int, is_network_module: bool=False\n ) ->str:\n \"\"\" Generate broadcasting message for request uuid\n\n :param source_id: Id of the source\n :type source_id: int\n :param is_network_module: true if network module\n :type is_network_module: bool\n :return: json serialized message\n :rtype: str\n \"\"\"\n BROADCAST_ID = 4095\n message = dict()\n message['c'] = 40 if is_network_module else 8\n message['s'] = source_id\n message['d'] = BROADCAST_ID\n id_bytes = bytearray(8)\n id_bytes[0] = 255\n id_bytes[1] = 15\n message['b'] = base64.b64encode(bytes(id_bytes)).decode('utf-8')\n message['l'] = 8\n return json.dumps(message, separators=(',', ':'))\n\n def request_topology(self, cmd: int=7, module_id: int=4095) ->None:\n \"\"\"Request module topology\n\n :return: json serialized topology request message\n :rtype: str\n \"\"\"\n message = dict()\n message['c'] = cmd\n message['s'] = 0\n message['d'] = module_id\n direction_data = bytearray(8)\n message['b'] = base64.b64encode(bytes(direction_data)).decode('utf-8')\n message['l'] = 8\n self._send_q.put(json.dumps(message, separators=(',', ':')))\n\n def update_firmware(self) ->None:\n \"\"\" Remove firmware of MODI modules\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n firmware_update_message = self.__set_module_state(BROADCAST_ID,\n Module.State.UPDATE_FIRMWARE, Module.State.PNP_OFF)\n 
self._send_q.put(firmware_update_message)\n self.__delay()\n\n def update_firmware_ready(self, module_id: int) ->None:\n \"\"\" Check if modules with no firmware are ready to update its firmware\n\n :param module_id: Id of the target module\n :type module_id: int\n :return: None\n \"\"\"\n firmware_update_ready_message = self.__set_module_state(module_id,\n Module.State.UPDATE_FIRMWARE_READY, Module.State.PNP_OFF)\n self._send_q.put(firmware_update_ready_message)\n self.__delay()\n\n def __get_type_from_uuid(self, uuid):\n if uuid is None:\n return 'Network'\n hexadecimal = hex(uuid).lstrip('0x')\n type_indicator = str(hexadecimal)[:4]\n module_type = {'2000': 'env', '2010': 'gyro', '2020': 'mic', '2030':\n 'button', '2040': 'dial', '2050': 'ultrasonic', '2060': 'ir',\n '4000': 'display', '4010': 'motor', '4020': 'led', '4030':\n 'speaker'}.get(type_indicator)\n return 'Network' if module_type is None else module_type\n",
"<import token>\n\n\nclass ExeTask:\n <docstring token>\n <assignment token>\n <assignment token>\n\n def __init__(self, modules, module_ids, topology_data, recv_q, send_q,\n init_event, nb_modules, firmware_updater):\n self._modules = modules\n self._module_ids = module_ids\n self._topology_data = topology_data\n self._recv_q = recv_q\n self._send_q = send_q\n self._init_event = init_event\n self._nb_modules = nb_modules\n self.firmware_updater = firmware_updater\n self.firmware_update_message_flag = False\n self.__init_modules()\n print('Start initializing connected MODI modules')\n\n def run(self, delay: float):\n \"\"\" Run in ExecutorThread\n\n :param delay: time value to wait in seconds\n :type delay: float\n \"\"\"\n time.sleep(delay)\n try:\n raw_message = self._recv_q.get_nowait()\n message = json.loads(raw_message)\n except queue.Empty:\n pass\n except json.decoder.JSONDecodeError:\n print('current json message:', raw_message)\n else:\n self.__command_handler(message['c'])(message)\n\n def __command_handler(self, command: int) ->Callable[[Dict[str, int]], None\n ]:\n \"\"\" Execute task based on command message\n\n :param command: command code\n :type command: int\n :return: a function the corresponds to the command code\n :rtype: Callable[[Dict[str, int]], None]\n \"\"\"\n return {(0): self.__update_health, (10): self.__update_warning, (12\n ): self.__update_firmware_state, (5): self.__update_modules, (7\n ): self.__update_topology, (31): self.__update_property}.get(\n command, lambda _: None)\n\n def __update_firmware_state(self, message):\n byte_data = message['b']\n message_decoded = bytearray(base64.b64decode(byte_data))\n stream_state = message_decoded[4]\n if stream_state == self.firmware_updater.State.CRC_ERROR.value:\n self.firmware_updater.update_response(response=True,\n is_error_response=True)\n elif stream_state == self.firmware_updater.State.CRC_COMPLETE.value:\n self.firmware_updater.update_response(response=True)\n elif stream_state == self.firmware_updater.State.ERASE_ERROR.value:\n self.firmware_updater.update_response(response=True,\n is_error_response=True)\n elif stream_state == self.firmware_updater.State.ERASE_COMPLETE.value:\n self.firmware_updater.update_response(response=True)\n\n def __update_topology(self, message: Dict[str, int]) ->None:\n \"\"\"Update the topology of the connected modules\n\n :param message: Dictionary format message of the module\n :return: None\n \"\"\"\n src_id = message['s']\n byte_data = message['b']\n broadcast_id = 2 ** 16 - 1\n topology_by_id = {}\n message_decoded = bytearray(base64.b64decode(byte_data))\n src_uuid = self.__get_uuid_by_id(src_id)\n topology_by_id['uuid'] = src_uuid\n right_id = message_decoded[1] << 8 | message_decoded[0]\n topology_by_id['r'] = right_id if right_id != broadcast_id else None\n top_id = message_decoded[3] << 8 | message_decoded[2]\n topology_by_id['t'] = top_id if top_id != broadcast_id else None\n left_id = message_decoded[5] << 8 | message_decoded[4]\n topology_by_id['l'] = left_id if left_id != broadcast_id else None\n bottom_id = message_decoded[7] << 8 | message_decoded[6]\n topology_by_id['b'] = bottom_id if bottom_id != broadcast_id else None\n if not self._topology_data.get(src_id):\n self._topology_data[src_id] = topology_by_id\n else:\n for key in self._topology_data[src_id]:\n if not self._topology_data[src_id][key]:\n self._topology_data[src_id][key] = topology_by_id[key]\n\n def __get_uuid_by_id(self, id_: int) ->int:\n \"\"\"Find id of a module which has corresponding uuid\n\n 
:param id_: ID of the module\n :type id_: int\n :return: UUID\n :rtype: int\n \"\"\"\n for module in self._modules:\n if module.id == id_:\n return module.uuid\n return None\n <function token>\n\n def __update_warning(self, message: Dict[str, int]) ->None:\n \"\"\"Update the warning message\n\n :param message: Warning message in Dictionary format\n :return: None\n \"\"\"\n warning_data = bytearray(base64.b64decode(message['b']))\n warning_type = warning_data[6]\n if not warning_type:\n return\n module_uuid = warning_data[:6]\n module_uuid_res = 0\n for i, v in enumerate(module_uuid):\n module_uuid_res |= v << 8 * i\n module_id = message['s']\n module_type = self.__get_type_from_uuid(module_uuid_res)\n if module_type == 'Network':\n return\n if warning_type == 1:\n self.firmware_updater.check_to_update_firmware(module_id)\n elif warning_type == 2:\n if self.firmware_updater.update_in_progress:\n self.firmware_updater.add_to_waitlist(module_id, module_type)\n else:\n self.firmware_updater.update_module(module_id, module_type)\n else:\n pass\n\n def __update_modules(self, message: Dict[str, str]) ->None:\n \"\"\" Update module information\n\n :param message: Dictionary format module info\n :type message: Dictionary\n :return: None\n \"\"\"\n curr_time_ms = int(time.time() * 1000)\n module_id = message['s']\n self._module_ids[module_id] = self._module_ids.get(module_id, dict())\n self._module_ids[module_id]['timestamp'] = curr_time_ms\n self._module_ids[module_id]['uuid'] = self._module_ids[module_id].get(\n 'uuid', str())\n message_decoded = bytearray(base64.b64decode(message['b']))\n module_uuid_bytes = message_decoded[:4]\n module_info_bytes = message_decoded[-4:]\n module_info = (module_info_bytes[1] << 8) + module_info_bytes[0]\n module_version_info = module_info_bytes[3] << 8 | module_info_bytes[2]\n version_path = (\n 'https://download.luxrobo.com/modi-skeleton-mobile/version.txt')\n version_info = None\n try:\n for line in ur.urlopen(version_path, timeout=1):\n version_info = line.decode('utf-8').lstrip('v')\n version_digits = [int(digit) for digit in version_info.split('.')]\n \"\"\" Version number is formed by concatenating all three version bits\n e.g. 
v2.2.4 -> 010 00010 00000100 -> 0100 0010 0000 0100\n \"\"\"\n latest_version = version_digits[0] << 13 | version_digits[1\n ] << 8 | version_digits[2]\n except URLError:\n latest_version = module_version_info\n module_category_idx = module_info >> 13\n module_type_idx = module_info >> 4 & 511\n module_category = self.__module_categories[module_category_idx]\n module_type = self.__module_types[module_category][module_type_idx]\n module_uuid = self.__fit_module_uuid(module_info, (\n module_uuid_bytes[3] << 24) + (module_uuid_bytes[2] << 16) + (\n module_uuid_bytes[1] << 8) + module_uuid_bytes[0])\n module_uuid = up(message['b'], (6, 2))[0]\n if (module_category != 'network' and not self.\n firmware_update_message_flag and module_version_info <\n latest_version):\n print('Your MODI module(s) is not up-to-date.')\n print(\n \"You can update your MODI modules by calling 'update_module_firmware()'\"\n )\n self.firmware_update_message_flag = True\n self._module_ids[module_id]['uuid'] = module_uuid\n for module in self._modules:\n if module.uuid == module_uuid and not module.is_connected:\n module.set_connection_state(connection_state=True)\n pnp_off_message = self.__set_module_state(4095, Module.\n State.RUN, Module.State.PNP_OFF)\n self._send_q.put(pnp_off_message)\n if not next((module for module in self._modules if module.uuid ==\n module_uuid), None):\n if module_category != 'network':\n module_template = self.__init_module(module_type)\n module_instance = module_template(module_id, module_uuid,\n self._send_q)\n self.__set_pnp(module_id=module_instance.id,\n module_pnp_state=Module.State.PNP_OFF)\n module_instance.version = module_version_info\n module_instance.is_up_to_date = (module_version_info ==\n latest_version)\n self._modules.append(module_instance)\n print(\n f'{type(module_instance).__name__} ({module_id}) has been connected!'\n )\n if self.__is_all_connected():\n self._init_event.set()\n\n def __is_all_connected(self) ->bool:\n \"\"\" Determine whether all modules are connected\n\n :return: true is all modules are connected\n :rtype: bool\n \"\"\"\n return self._nb_modules == len(self._modules)\n\n def __init_module(self, module_type: str) ->Module:\n \"\"\" Find module type for module initialize\n\n :param module_type: Type of the module in string\n :type module_type: str\n :return: Module corresponding to the type\n :rtype: Module\n \"\"\"\n module = {'button': Button, 'dial': Dial, 'display': Display, 'env':\n Env, 'gyro': Gyro, 'ir': Ir, 'led': Led, 'mic': Mic, 'motor':\n Motor, 'speaker': Speaker, 'ultrasonic': Ultrasonic}.get(\n module_type)\n return module\n\n def __update_property(self, message: Dict[str, int]) ->None:\n \"\"\" Update module property\n\n :param message: Dictionary format message\n :type message: Dictionary\n :return: None\n \"\"\"\n property_number = message['d']\n if property_number == 0 or property_number == 1:\n return\n for module in self._modules:\n if module.id == message['s']:\n message_decoded = bytearray(base64.b64decode(message['b']))\n property_type = module.PropertyType(property_number)\n module.update_property(property_type, round(struct.unpack(\n 'f', bytes(message_decoded[:4]))[0], 2))\n\n def __set_pnp(self, module_id: int, module_pnp_state: IntEnum) ->None:\n \"\"\" Generate module pnp on/off command\n\n :param module_id: ID of the target module\n :type module_id: int\n :param module_pnp_state: Pnp state value\n :type module_pnp_state: IntEnum\n :return: None\n \"\"\"\n if module_id is None:\n for curr_module_id in self._module_ids:\n 
pnp_message = self.__set_module_state(curr_module_id,\n Module.State.RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n else:\n pnp_message = self.__set_module_state(module_id, Module.State.\n RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n\n def __fit_module_uuid(self, module_info: int, module_uuid: int) ->int:\n \"\"\" Generate uuid using bitwise operation\n\n :param module_info: Module info\n :type module_info: int\n :param module_uuid: Module uuid\n :type module_uuid: int\n :return: Fitted uuid\n :rtype: int\n \"\"\"\n sizeof_module_uuid = 0\n while module_uuid >> sizeof_module_uuid > 0:\n sizeof_module_uuid += 1\n sizeof_module_uuid += sizeof_module_uuid % 4\n return module_info << sizeof_module_uuid | module_uuid\n\n def __set_module_state(self, destination_id: int, module_state: IntEnum,\n pnp_state: IntEnum) ->str:\n \"\"\" Generate message for set module state and pnp state\n\n :param destination_id: Id to target destination\n :type destination_id: int\n :param module_state: State value of the module\n :type module_state: int\n :param pnp_state: Pnp state value\n :type pnp_state: IntEnum\n :return: json serialized message\n :rtype: str\n \"\"\"\n message = dict()\n message['c'] = 9\n message['s'] = 0\n message['d'] = destination_id\n state_bytes = bytearray(2)\n state_bytes[0] = module_state\n state_bytes[1] = pnp_state\n message['b'] = base64.b64encode(bytes(state_bytes)).decode('utf-8')\n message['l'] = 2\n return json.dumps(message, separators=(',', ':'))\n\n def __init_modules(self) ->None:\n \"\"\" Initialize module on first run\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n reboot_message = self.__set_module_state(BROADCAST_ID, Module.State\n .REBOOT, Module.State.PNP_OFF)\n self._send_q.put(reboot_message)\n pnp_off_message = self.__set_module_state(BROADCAST_ID, Module.\n State.RUN, Module.State.PNP_OFF)\n self._send_q.put(pnp_off_message)\n request_uuid_message = self.__request_uuid(BROADCAST_ID)\n self._send_q.put(request_uuid_message)\n self.request_topology()\n\n def __delay(self) ->None:\n \"\"\" Wait for delay\n\n :return: None\n \"\"\"\n time.sleep(0.5)\n\n def __request_uuid(self, source_id: int, is_network_module: bool=False\n ) ->str:\n \"\"\" Generate broadcasting message for request uuid\n\n :param source_id: Id of the source\n :type source_id: int\n :param is_network_module: true if network module\n :type is_network_module: bool\n :return: json serialized message\n :rtype: str\n \"\"\"\n BROADCAST_ID = 4095\n message = dict()\n message['c'] = 40 if is_network_module else 8\n message['s'] = source_id\n message['d'] = BROADCAST_ID\n id_bytes = bytearray(8)\n id_bytes[0] = 255\n id_bytes[1] = 15\n message['b'] = base64.b64encode(bytes(id_bytes)).decode('utf-8')\n message['l'] = 8\n return json.dumps(message, separators=(',', ':'))\n\n def request_topology(self, cmd: int=7, module_id: int=4095) ->None:\n \"\"\"Request module topology\n\n :return: json serialized topology request message\n :rtype: str\n \"\"\"\n message = dict()\n message['c'] = cmd\n message['s'] = 0\n message['d'] = module_id\n direction_data = bytearray(8)\n message['b'] = base64.b64encode(bytes(direction_data)).decode('utf-8')\n message['l'] = 8\n self._send_q.put(json.dumps(message, separators=(',', ':')))\n\n def update_firmware(self) ->None:\n \"\"\" Remove firmware of MODI modules\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n firmware_update_message = self.__set_module_state(BROADCAST_ID,\n Module.State.UPDATE_FIRMWARE, Module.State.PNP_OFF)\n 
self._send_q.put(firmware_update_message)\n self.__delay()\n\n def update_firmware_ready(self, module_id: int) ->None:\n \"\"\" Check if modules with no firmware are ready to update its firmware\n\n :param module_id: Id of the target module\n :type module_id: int\n :return: None\n \"\"\"\n firmware_update_ready_message = self.__set_module_state(module_id,\n Module.State.UPDATE_FIRMWARE_READY, Module.State.PNP_OFF)\n self._send_q.put(firmware_update_ready_message)\n self.__delay()\n\n def __get_type_from_uuid(self, uuid):\n if uuid is None:\n return 'Network'\n hexadecimal = hex(uuid).lstrip('0x')\n type_indicator = str(hexadecimal)[:4]\n module_type = {'2000': 'env', '2010': 'gyro', '2020': 'mic', '2030':\n 'button', '2040': 'dial', '2050': 'ultrasonic', '2060': 'ir',\n '4000': 'display', '4010': 'motor', '4020': 'led', '4030':\n 'speaker'}.get(type_indicator)\n return 'Network' if module_type is None else module_type\n",
"<import token>\n\n\nclass ExeTask:\n <docstring token>\n <assignment token>\n <assignment token>\n\n def __init__(self, modules, module_ids, topology_data, recv_q, send_q,\n init_event, nb_modules, firmware_updater):\n self._modules = modules\n self._module_ids = module_ids\n self._topology_data = topology_data\n self._recv_q = recv_q\n self._send_q = send_q\n self._init_event = init_event\n self._nb_modules = nb_modules\n self.firmware_updater = firmware_updater\n self.firmware_update_message_flag = False\n self.__init_modules()\n print('Start initializing connected MODI modules')\n\n def run(self, delay: float):\n \"\"\" Run in ExecutorThread\n\n :param delay: time value to wait in seconds\n :type delay: float\n \"\"\"\n time.sleep(delay)\n try:\n raw_message = self._recv_q.get_nowait()\n message = json.loads(raw_message)\n except queue.Empty:\n pass\n except json.decoder.JSONDecodeError:\n print('current json message:', raw_message)\n else:\n self.__command_handler(message['c'])(message)\n\n def __command_handler(self, command: int) ->Callable[[Dict[str, int]], None\n ]:\n \"\"\" Execute task based on command message\n\n :param command: command code\n :type command: int\n :return: a function the corresponds to the command code\n :rtype: Callable[[Dict[str, int]], None]\n \"\"\"\n return {(0): self.__update_health, (10): self.__update_warning, (12\n ): self.__update_firmware_state, (5): self.__update_modules, (7\n ): self.__update_topology, (31): self.__update_property}.get(\n command, lambda _: None)\n <function token>\n\n def __update_topology(self, message: Dict[str, int]) ->None:\n \"\"\"Update the topology of the connected modules\n\n :param message: Dictionary format message of the module\n :return: None\n \"\"\"\n src_id = message['s']\n byte_data = message['b']\n broadcast_id = 2 ** 16 - 1\n topology_by_id = {}\n message_decoded = bytearray(base64.b64decode(byte_data))\n src_uuid = self.__get_uuid_by_id(src_id)\n topology_by_id['uuid'] = src_uuid\n right_id = message_decoded[1] << 8 | message_decoded[0]\n topology_by_id['r'] = right_id if right_id != broadcast_id else None\n top_id = message_decoded[3] << 8 | message_decoded[2]\n topology_by_id['t'] = top_id if top_id != broadcast_id else None\n left_id = message_decoded[5] << 8 | message_decoded[4]\n topology_by_id['l'] = left_id if left_id != broadcast_id else None\n bottom_id = message_decoded[7] << 8 | message_decoded[6]\n topology_by_id['b'] = bottom_id if bottom_id != broadcast_id else None\n if not self._topology_data.get(src_id):\n self._topology_data[src_id] = topology_by_id\n else:\n for key in self._topology_data[src_id]:\n if not self._topology_data[src_id][key]:\n self._topology_data[src_id][key] = topology_by_id[key]\n\n def __get_uuid_by_id(self, id_: int) ->int:\n \"\"\"Find id of a module which has corresponding uuid\n\n :param id_: ID of the module\n :type id_: int\n :return: UUID\n :rtype: int\n \"\"\"\n for module in self._modules:\n if module.id == id_:\n return module.uuid\n return None\n <function token>\n\n def __update_warning(self, message: Dict[str, int]) ->None:\n \"\"\"Update the warning message\n\n :param message: Warning message in Dictionary format\n :return: None\n \"\"\"\n warning_data = bytearray(base64.b64decode(message['b']))\n warning_type = warning_data[6]\n if not warning_type:\n return\n module_uuid = warning_data[:6]\n module_uuid_res = 0\n for i, v in enumerate(module_uuid):\n module_uuid_res |= v << 8 * i\n module_id = message['s']\n module_type = 
self.__get_type_from_uuid(module_uuid_res)\n if module_type == 'Network':\n return\n if warning_type == 1:\n self.firmware_updater.check_to_update_firmware(module_id)\n elif warning_type == 2:\n if self.firmware_updater.update_in_progress:\n self.firmware_updater.add_to_waitlist(module_id, module_type)\n else:\n self.firmware_updater.update_module(module_id, module_type)\n else:\n pass\n\n def __update_modules(self, message: Dict[str, str]) ->None:\n \"\"\" Update module information\n\n :param message: Dictionary format module info\n :type message: Dictionary\n :return: None\n \"\"\"\n curr_time_ms = int(time.time() * 1000)\n module_id = message['s']\n self._module_ids[module_id] = self._module_ids.get(module_id, dict())\n self._module_ids[module_id]['timestamp'] = curr_time_ms\n self._module_ids[module_id]['uuid'] = self._module_ids[module_id].get(\n 'uuid', str())\n message_decoded = bytearray(base64.b64decode(message['b']))\n module_uuid_bytes = message_decoded[:4]\n module_info_bytes = message_decoded[-4:]\n module_info = (module_info_bytes[1] << 8) + module_info_bytes[0]\n module_version_info = module_info_bytes[3] << 8 | module_info_bytes[2]\n version_path = (\n 'https://download.luxrobo.com/modi-skeleton-mobile/version.txt')\n version_info = None\n try:\n for line in ur.urlopen(version_path, timeout=1):\n version_info = line.decode('utf-8').lstrip('v')\n version_digits = [int(digit) for digit in version_info.split('.')]\n \"\"\" Version number is formed by concatenating all three version bits\n e.g. v2.2.4 -> 010 00010 00000100 -> 0100 0010 0000 0100\n \"\"\"\n latest_version = version_digits[0] << 13 | version_digits[1\n ] << 8 | version_digits[2]\n except URLError:\n latest_version = module_version_info\n module_category_idx = module_info >> 13\n module_type_idx = module_info >> 4 & 511\n module_category = self.__module_categories[module_category_idx]\n module_type = self.__module_types[module_category][module_type_idx]\n module_uuid = self.__fit_module_uuid(module_info, (\n module_uuid_bytes[3] << 24) + (module_uuid_bytes[2] << 16) + (\n module_uuid_bytes[1] << 8) + module_uuid_bytes[0])\n module_uuid = up(message['b'], (6, 2))[0]\n if (module_category != 'network' and not self.\n firmware_update_message_flag and module_version_info <\n latest_version):\n print('Your MODI module(s) is not up-to-date.')\n print(\n \"You can update your MODI modules by calling 'update_module_firmware()'\"\n )\n self.firmware_update_message_flag = True\n self._module_ids[module_id]['uuid'] = module_uuid\n for module in self._modules:\n if module.uuid == module_uuid and not module.is_connected:\n module.set_connection_state(connection_state=True)\n pnp_off_message = self.__set_module_state(4095, Module.\n State.RUN, Module.State.PNP_OFF)\n self._send_q.put(pnp_off_message)\n if not next((module for module in self._modules if module.uuid ==\n module_uuid), None):\n if module_category != 'network':\n module_template = self.__init_module(module_type)\n module_instance = module_template(module_id, module_uuid,\n self._send_q)\n self.__set_pnp(module_id=module_instance.id,\n module_pnp_state=Module.State.PNP_OFF)\n module_instance.version = module_version_info\n module_instance.is_up_to_date = (module_version_info ==\n latest_version)\n self._modules.append(module_instance)\n print(\n f'{type(module_instance).__name__} ({module_id}) has been connected!'\n )\n if self.__is_all_connected():\n self._init_event.set()\n\n def __is_all_connected(self) ->bool:\n \"\"\" Determine whether all modules are 
connected\n\n :return: true is all modules are connected\n :rtype: bool\n \"\"\"\n return self._nb_modules == len(self._modules)\n\n def __init_module(self, module_type: str) ->Module:\n \"\"\" Find module type for module initialize\n\n :param module_type: Type of the module in string\n :type module_type: str\n :return: Module corresponding to the type\n :rtype: Module\n \"\"\"\n module = {'button': Button, 'dial': Dial, 'display': Display, 'env':\n Env, 'gyro': Gyro, 'ir': Ir, 'led': Led, 'mic': Mic, 'motor':\n Motor, 'speaker': Speaker, 'ultrasonic': Ultrasonic}.get(\n module_type)\n return module\n\n def __update_property(self, message: Dict[str, int]) ->None:\n \"\"\" Update module property\n\n :param message: Dictionary format message\n :type message: Dictionary\n :return: None\n \"\"\"\n property_number = message['d']\n if property_number == 0 or property_number == 1:\n return\n for module in self._modules:\n if module.id == message['s']:\n message_decoded = bytearray(base64.b64decode(message['b']))\n property_type = module.PropertyType(property_number)\n module.update_property(property_type, round(struct.unpack(\n 'f', bytes(message_decoded[:4]))[0], 2))\n\n def __set_pnp(self, module_id: int, module_pnp_state: IntEnum) ->None:\n \"\"\" Generate module pnp on/off command\n\n :param module_id: ID of the target module\n :type module_id: int\n :param module_pnp_state: Pnp state value\n :type module_pnp_state: IntEnum\n :return: None\n \"\"\"\n if module_id is None:\n for curr_module_id in self._module_ids:\n pnp_message = self.__set_module_state(curr_module_id,\n Module.State.RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n else:\n pnp_message = self.__set_module_state(module_id, Module.State.\n RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n\n def __fit_module_uuid(self, module_info: int, module_uuid: int) ->int:\n \"\"\" Generate uuid using bitwise operation\n\n :param module_info: Module info\n :type module_info: int\n :param module_uuid: Module uuid\n :type module_uuid: int\n :return: Fitted uuid\n :rtype: int\n \"\"\"\n sizeof_module_uuid = 0\n while module_uuid >> sizeof_module_uuid > 0:\n sizeof_module_uuid += 1\n sizeof_module_uuid += sizeof_module_uuid % 4\n return module_info << sizeof_module_uuid | module_uuid\n\n def __set_module_state(self, destination_id: int, module_state: IntEnum,\n pnp_state: IntEnum) ->str:\n \"\"\" Generate message for set module state and pnp state\n\n :param destination_id: Id to target destination\n :type destination_id: int\n :param module_state: State value of the module\n :type module_state: int\n :param pnp_state: Pnp state value\n :type pnp_state: IntEnum\n :return: json serialized message\n :rtype: str\n \"\"\"\n message = dict()\n message['c'] = 9\n message['s'] = 0\n message['d'] = destination_id\n state_bytes = bytearray(2)\n state_bytes[0] = module_state\n state_bytes[1] = pnp_state\n message['b'] = base64.b64encode(bytes(state_bytes)).decode('utf-8')\n message['l'] = 2\n return json.dumps(message, separators=(',', ':'))\n\n def __init_modules(self) ->None:\n \"\"\" Initialize module on first run\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n reboot_message = self.__set_module_state(BROADCAST_ID, Module.State\n .REBOOT, Module.State.PNP_OFF)\n self._send_q.put(reboot_message)\n pnp_off_message = self.__set_module_state(BROADCAST_ID, Module.\n State.RUN, Module.State.PNP_OFF)\n self._send_q.put(pnp_off_message)\n request_uuid_message = self.__request_uuid(BROADCAST_ID)\n self._send_q.put(request_uuid_message)\n 
self.request_topology()\n\n def __delay(self) ->None:\n \"\"\" Wait for delay\n\n :return: None\n \"\"\"\n time.sleep(0.5)\n\n def __request_uuid(self, source_id: int, is_network_module: bool=False\n ) ->str:\n \"\"\" Generate broadcasting message for request uuid\n\n :param source_id: Id of the source\n :type source_id: int\n :param is_network_module: true if network module\n :type is_network_module: bool\n :return: json serialized message\n :rtype: str\n \"\"\"\n BROADCAST_ID = 4095\n message = dict()\n message['c'] = 40 if is_network_module else 8\n message['s'] = source_id\n message['d'] = BROADCAST_ID\n id_bytes = bytearray(8)\n id_bytes[0] = 255\n id_bytes[1] = 15\n message['b'] = base64.b64encode(bytes(id_bytes)).decode('utf-8')\n message['l'] = 8\n return json.dumps(message, separators=(',', ':'))\n\n def request_topology(self, cmd: int=7, module_id: int=4095) ->None:\n \"\"\"Request module topology\n\n :return: json serialized topology request message\n :rtype: str\n \"\"\"\n message = dict()\n message['c'] = cmd\n message['s'] = 0\n message['d'] = module_id\n direction_data = bytearray(8)\n message['b'] = base64.b64encode(bytes(direction_data)).decode('utf-8')\n message['l'] = 8\n self._send_q.put(json.dumps(message, separators=(',', ':')))\n\n def update_firmware(self) ->None:\n \"\"\" Remove firmware of MODI modules\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n firmware_update_message = self.__set_module_state(BROADCAST_ID,\n Module.State.UPDATE_FIRMWARE, Module.State.PNP_OFF)\n self._send_q.put(firmware_update_message)\n self.__delay()\n\n def update_firmware_ready(self, module_id: int) ->None:\n \"\"\" Check if modules with no firmware are ready to update its firmware\n\n :param module_id: Id of the target module\n :type module_id: int\n :return: None\n \"\"\"\n firmware_update_ready_message = self.__set_module_state(module_id,\n Module.State.UPDATE_FIRMWARE_READY, Module.State.PNP_OFF)\n self._send_q.put(firmware_update_ready_message)\n self.__delay()\n\n def __get_type_from_uuid(self, uuid):\n if uuid is None:\n return 'Network'\n hexadecimal = hex(uuid).lstrip('0x')\n type_indicator = str(hexadecimal)[:4]\n module_type = {'2000': 'env', '2010': 'gyro', '2020': 'mic', '2030':\n 'button', '2040': 'dial', '2050': 'ultrasonic', '2060': 'ir',\n '4000': 'display', '4010': 'motor', '4020': 'led', '4030':\n 'speaker'}.get(type_indicator)\n return 'Network' if module_type is None else module_type\n",
"<import token>\n\n\nclass ExeTask:\n <docstring token>\n <assignment token>\n <assignment token>\n\n def __init__(self, modules, module_ids, topology_data, recv_q, send_q,\n init_event, nb_modules, firmware_updater):\n self._modules = modules\n self._module_ids = module_ids\n self._topology_data = topology_data\n self._recv_q = recv_q\n self._send_q = send_q\n self._init_event = init_event\n self._nb_modules = nb_modules\n self.firmware_updater = firmware_updater\n self.firmware_update_message_flag = False\n self.__init_modules()\n print('Start initializing connected MODI modules')\n\n def run(self, delay: float):\n \"\"\" Run in ExecutorThread\n\n :param delay: time value to wait in seconds\n :type delay: float\n \"\"\"\n time.sleep(delay)\n try:\n raw_message = self._recv_q.get_nowait()\n message = json.loads(raw_message)\n except queue.Empty:\n pass\n except json.decoder.JSONDecodeError:\n print('current json message:', raw_message)\n else:\n self.__command_handler(message['c'])(message)\n\n def __command_handler(self, command: int) ->Callable[[Dict[str, int]], None\n ]:\n \"\"\" Execute task based on command message\n\n :param command: command code\n :type command: int\n :return: a function the corresponds to the command code\n :rtype: Callable[[Dict[str, int]], None]\n \"\"\"\n return {(0): self.__update_health, (10): self.__update_warning, (12\n ): self.__update_firmware_state, (5): self.__update_modules, (7\n ): self.__update_topology, (31): self.__update_property}.get(\n command, lambda _: None)\n <function token>\n\n def __update_topology(self, message: Dict[str, int]) ->None:\n \"\"\"Update the topology of the connected modules\n\n :param message: Dictionary format message of the module\n :return: None\n \"\"\"\n src_id = message['s']\n byte_data = message['b']\n broadcast_id = 2 ** 16 - 1\n topology_by_id = {}\n message_decoded = bytearray(base64.b64decode(byte_data))\n src_uuid = self.__get_uuid_by_id(src_id)\n topology_by_id['uuid'] = src_uuid\n right_id = message_decoded[1] << 8 | message_decoded[0]\n topology_by_id['r'] = right_id if right_id != broadcast_id else None\n top_id = message_decoded[3] << 8 | message_decoded[2]\n topology_by_id['t'] = top_id if top_id != broadcast_id else None\n left_id = message_decoded[5] << 8 | message_decoded[4]\n topology_by_id['l'] = left_id if left_id != broadcast_id else None\n bottom_id = message_decoded[7] << 8 | message_decoded[6]\n topology_by_id['b'] = bottom_id if bottom_id != broadcast_id else None\n if not self._topology_data.get(src_id):\n self._topology_data[src_id] = topology_by_id\n else:\n for key in self._topology_data[src_id]:\n if not self._topology_data[src_id][key]:\n self._topology_data[src_id][key] = topology_by_id[key]\n <function token>\n <function token>\n\n def __update_warning(self, message: Dict[str, int]) ->None:\n \"\"\"Update the warning message\n\n :param message: Warning message in Dictionary format\n :return: None\n \"\"\"\n warning_data = bytearray(base64.b64decode(message['b']))\n warning_type = warning_data[6]\n if not warning_type:\n return\n module_uuid = warning_data[:6]\n module_uuid_res = 0\n for i, v in enumerate(module_uuid):\n module_uuid_res |= v << 8 * i\n module_id = message['s']\n module_type = self.__get_type_from_uuid(module_uuid_res)\n if module_type == 'Network':\n return\n if warning_type == 1:\n self.firmware_updater.check_to_update_firmware(module_id)\n elif warning_type == 2:\n if self.firmware_updater.update_in_progress:\n self.firmware_updater.add_to_waitlist(module_id, 
module_type)\n else:\n self.firmware_updater.update_module(module_id, module_type)\n else:\n pass\n\n def __update_modules(self, message: Dict[str, str]) ->None:\n \"\"\" Update module information\n\n :param message: Dictionary format module info\n :type message: Dictionary\n :return: None\n \"\"\"\n curr_time_ms = int(time.time() * 1000)\n module_id = message['s']\n self._module_ids[module_id] = self._module_ids.get(module_id, dict())\n self._module_ids[module_id]['timestamp'] = curr_time_ms\n self._module_ids[module_id]['uuid'] = self._module_ids[module_id].get(\n 'uuid', str())\n message_decoded = bytearray(base64.b64decode(message['b']))\n module_uuid_bytes = message_decoded[:4]\n module_info_bytes = message_decoded[-4:]\n module_info = (module_info_bytes[1] << 8) + module_info_bytes[0]\n module_version_info = module_info_bytes[3] << 8 | module_info_bytes[2]\n version_path = (\n 'https://download.luxrobo.com/modi-skeleton-mobile/version.txt')\n version_info = None\n try:\n for line in ur.urlopen(version_path, timeout=1):\n version_info = line.decode('utf-8').lstrip('v')\n version_digits = [int(digit) for digit in version_info.split('.')]\n \"\"\" Version number is formed by concatenating all three version bits\n e.g. v2.2.4 -> 010 00010 00000100 -> 0100 0010 0000 0100\n \"\"\"\n latest_version = version_digits[0] << 13 | version_digits[1\n ] << 8 | version_digits[2]\n except URLError:\n latest_version = module_version_info\n module_category_idx = module_info >> 13\n module_type_idx = module_info >> 4 & 511\n module_category = self.__module_categories[module_category_idx]\n module_type = self.__module_types[module_category][module_type_idx]\n module_uuid = self.__fit_module_uuid(module_info, (\n module_uuid_bytes[3] << 24) + (module_uuid_bytes[2] << 16) + (\n module_uuid_bytes[1] << 8) + module_uuid_bytes[0])\n module_uuid = up(message['b'], (6, 2))[0]\n if (module_category != 'network' and not self.\n firmware_update_message_flag and module_version_info <\n latest_version):\n print('Your MODI module(s) is not up-to-date.')\n print(\n \"You can update your MODI modules by calling 'update_module_firmware()'\"\n )\n self.firmware_update_message_flag = True\n self._module_ids[module_id]['uuid'] = module_uuid\n for module in self._modules:\n if module.uuid == module_uuid and not module.is_connected:\n module.set_connection_state(connection_state=True)\n pnp_off_message = self.__set_module_state(4095, Module.\n State.RUN, Module.State.PNP_OFF)\n self._send_q.put(pnp_off_message)\n if not next((module for module in self._modules if module.uuid ==\n module_uuid), None):\n if module_category != 'network':\n module_template = self.__init_module(module_type)\n module_instance = module_template(module_id, module_uuid,\n self._send_q)\n self.__set_pnp(module_id=module_instance.id,\n module_pnp_state=Module.State.PNP_OFF)\n module_instance.version = module_version_info\n module_instance.is_up_to_date = (module_version_info ==\n latest_version)\n self._modules.append(module_instance)\n print(\n f'{type(module_instance).__name__} ({module_id}) has been connected!'\n )\n if self.__is_all_connected():\n self._init_event.set()\n\n def __is_all_connected(self) ->bool:\n \"\"\" Determine whether all modules are connected\n\n :return: true is all modules are connected\n :rtype: bool\n \"\"\"\n return self._nb_modules == len(self._modules)\n\n def __init_module(self, module_type: str) ->Module:\n \"\"\" Find module type for module initialize\n\n :param module_type: Type of the module in string\n :type 
module_type: str\n :return: Module corresponding to the type\n :rtype: Module\n \"\"\"\n module = {'button': Button, 'dial': Dial, 'display': Display, 'env':\n Env, 'gyro': Gyro, 'ir': Ir, 'led': Led, 'mic': Mic, 'motor':\n Motor, 'speaker': Speaker, 'ultrasonic': Ultrasonic}.get(\n module_type)\n return module\n\n def __update_property(self, message: Dict[str, int]) ->None:\n \"\"\" Update module property\n\n :param message: Dictionary format message\n :type message: Dictionary\n :return: None\n \"\"\"\n property_number = message['d']\n if property_number == 0 or property_number == 1:\n return\n for module in self._modules:\n if module.id == message['s']:\n message_decoded = bytearray(base64.b64decode(message['b']))\n property_type = module.PropertyType(property_number)\n module.update_property(property_type, round(struct.unpack(\n 'f', bytes(message_decoded[:4]))[0], 2))\n\n def __set_pnp(self, module_id: int, module_pnp_state: IntEnum) ->None:\n \"\"\" Generate module pnp on/off command\n\n :param module_id: ID of the target module\n :type module_id: int\n :param module_pnp_state: Pnp state value\n :type module_pnp_state: IntEnum\n :return: None\n \"\"\"\n if module_id is None:\n for curr_module_id in self._module_ids:\n pnp_message = self.__set_module_state(curr_module_id,\n Module.State.RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n else:\n pnp_message = self.__set_module_state(module_id, Module.State.\n RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n\n def __fit_module_uuid(self, module_info: int, module_uuid: int) ->int:\n \"\"\" Generate uuid using bitwise operation\n\n :param module_info: Module info\n :type module_info: int\n :param module_uuid: Module uuid\n :type module_uuid: int\n :return: Fitted uuid\n :rtype: int\n \"\"\"\n sizeof_module_uuid = 0\n while module_uuid >> sizeof_module_uuid > 0:\n sizeof_module_uuid += 1\n sizeof_module_uuid += sizeof_module_uuid % 4\n return module_info << sizeof_module_uuid | module_uuid\n\n def __set_module_state(self, destination_id: int, module_state: IntEnum,\n pnp_state: IntEnum) ->str:\n \"\"\" Generate message for set module state and pnp state\n\n :param destination_id: Id to target destination\n :type destination_id: int\n :param module_state: State value of the module\n :type module_state: int\n :param pnp_state: Pnp state value\n :type pnp_state: IntEnum\n :return: json serialized message\n :rtype: str\n \"\"\"\n message = dict()\n message['c'] = 9\n message['s'] = 0\n message['d'] = destination_id\n state_bytes = bytearray(2)\n state_bytes[0] = module_state\n state_bytes[1] = pnp_state\n message['b'] = base64.b64encode(bytes(state_bytes)).decode('utf-8')\n message['l'] = 2\n return json.dumps(message, separators=(',', ':'))\n\n def __init_modules(self) ->None:\n \"\"\" Initialize module on first run\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n reboot_message = self.__set_module_state(BROADCAST_ID, Module.State\n .REBOOT, Module.State.PNP_OFF)\n self._send_q.put(reboot_message)\n pnp_off_message = self.__set_module_state(BROADCAST_ID, Module.\n State.RUN, Module.State.PNP_OFF)\n self._send_q.put(pnp_off_message)\n request_uuid_message = self.__request_uuid(BROADCAST_ID)\n self._send_q.put(request_uuid_message)\n self.request_topology()\n\n def __delay(self) ->None:\n \"\"\" Wait for delay\n\n :return: None\n \"\"\"\n time.sleep(0.5)\n\n def __request_uuid(self, source_id: int, is_network_module: bool=False\n ) ->str:\n \"\"\" Generate broadcasting message for request uuid\n\n :param source_id: Id of the 
source\n :type source_id: int\n :param is_network_module: true if network module\n :type is_network_module: bool\n :return: json serialized message\n :rtype: str\n \"\"\"\n BROADCAST_ID = 4095\n message = dict()\n message['c'] = 40 if is_network_module else 8\n message['s'] = source_id\n message['d'] = BROADCAST_ID\n id_bytes = bytearray(8)\n id_bytes[0] = 255\n id_bytes[1] = 15\n message['b'] = base64.b64encode(bytes(id_bytes)).decode('utf-8')\n message['l'] = 8\n return json.dumps(message, separators=(',', ':'))\n\n def request_topology(self, cmd: int=7, module_id: int=4095) ->None:\n \"\"\"Request module topology\n\n :return: json serialized topology request message\n :rtype: str\n \"\"\"\n message = dict()\n message['c'] = cmd\n message['s'] = 0\n message['d'] = module_id\n direction_data = bytearray(8)\n message['b'] = base64.b64encode(bytes(direction_data)).decode('utf-8')\n message['l'] = 8\n self._send_q.put(json.dumps(message, separators=(',', ':')))\n\n def update_firmware(self) ->None:\n \"\"\" Remove firmware of MODI modules\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n firmware_update_message = self.__set_module_state(BROADCAST_ID,\n Module.State.UPDATE_FIRMWARE, Module.State.PNP_OFF)\n self._send_q.put(firmware_update_message)\n self.__delay()\n\n def update_firmware_ready(self, module_id: int) ->None:\n \"\"\" Check if modules with no firmware are ready to update its firmware\n\n :param module_id: Id of the target module\n :type module_id: int\n :return: None\n \"\"\"\n firmware_update_ready_message = self.__set_module_state(module_id,\n Module.State.UPDATE_FIRMWARE_READY, Module.State.PNP_OFF)\n self._send_q.put(firmware_update_ready_message)\n self.__delay()\n\n def __get_type_from_uuid(self, uuid):\n if uuid is None:\n return 'Network'\n hexadecimal = hex(uuid).lstrip('0x')\n type_indicator = str(hexadecimal)[:4]\n module_type = {'2000': 'env', '2010': 'gyro', '2020': 'mic', '2030':\n 'button', '2040': 'dial', '2050': 'ultrasonic', '2060': 'ir',\n '4000': 'display', '4010': 'motor', '4020': 'led', '4030':\n 'speaker'}.get(type_indicator)\n return 'Network' if module_type is None else module_type\n",
"<import token>\n\n\nclass ExeTask:\n <docstring token>\n <assignment token>\n <assignment token>\n\n def __init__(self, modules, module_ids, topology_data, recv_q, send_q,\n init_event, nb_modules, firmware_updater):\n self._modules = modules\n self._module_ids = module_ids\n self._topology_data = topology_data\n self._recv_q = recv_q\n self._send_q = send_q\n self._init_event = init_event\n self._nb_modules = nb_modules\n self.firmware_updater = firmware_updater\n self.firmware_update_message_flag = False\n self.__init_modules()\n print('Start initializing connected MODI modules')\n\n def run(self, delay: float):\n \"\"\" Run in ExecutorThread\n\n :param delay: time value to wait in seconds\n :type delay: float\n \"\"\"\n time.sleep(delay)\n try:\n raw_message = self._recv_q.get_nowait()\n message = json.loads(raw_message)\n except queue.Empty:\n pass\n except json.decoder.JSONDecodeError:\n print('current json message:', raw_message)\n else:\n self.__command_handler(message['c'])(message)\n <function token>\n <function token>\n\n def __update_topology(self, message: Dict[str, int]) ->None:\n \"\"\"Update the topology of the connected modules\n\n :param message: Dictionary format message of the module\n :return: None\n \"\"\"\n src_id = message['s']\n byte_data = message['b']\n broadcast_id = 2 ** 16 - 1\n topology_by_id = {}\n message_decoded = bytearray(base64.b64decode(byte_data))\n src_uuid = self.__get_uuid_by_id(src_id)\n topology_by_id['uuid'] = src_uuid\n right_id = message_decoded[1] << 8 | message_decoded[0]\n topology_by_id['r'] = right_id if right_id != broadcast_id else None\n top_id = message_decoded[3] << 8 | message_decoded[2]\n topology_by_id['t'] = top_id if top_id != broadcast_id else None\n left_id = message_decoded[5] << 8 | message_decoded[4]\n topology_by_id['l'] = left_id if left_id != broadcast_id else None\n bottom_id = message_decoded[7] << 8 | message_decoded[6]\n topology_by_id['b'] = bottom_id if bottom_id != broadcast_id else None\n if not self._topology_data.get(src_id):\n self._topology_data[src_id] = topology_by_id\n else:\n for key in self._topology_data[src_id]:\n if not self._topology_data[src_id][key]:\n self._topology_data[src_id][key] = topology_by_id[key]\n <function token>\n <function token>\n\n def __update_warning(self, message: Dict[str, int]) ->None:\n \"\"\"Update the warning message\n\n :param message: Warning message in Dictionary format\n :return: None\n \"\"\"\n warning_data = bytearray(base64.b64decode(message['b']))\n warning_type = warning_data[6]\n if not warning_type:\n return\n module_uuid = warning_data[:6]\n module_uuid_res = 0\n for i, v in enumerate(module_uuid):\n module_uuid_res |= v << 8 * i\n module_id = message['s']\n module_type = self.__get_type_from_uuid(module_uuid_res)\n if module_type == 'Network':\n return\n if warning_type == 1:\n self.firmware_updater.check_to_update_firmware(module_id)\n elif warning_type == 2:\n if self.firmware_updater.update_in_progress:\n self.firmware_updater.add_to_waitlist(module_id, module_type)\n else:\n self.firmware_updater.update_module(module_id, module_type)\n else:\n pass\n\n def __update_modules(self, message: Dict[str, str]) ->None:\n \"\"\" Update module information\n\n :param message: Dictionary format module info\n :type message: Dictionary\n :return: None\n \"\"\"\n curr_time_ms = int(time.time() * 1000)\n module_id = message['s']\n self._module_ids[module_id] = self._module_ids.get(module_id, dict())\n self._module_ids[module_id]['timestamp'] = curr_time_ms\n 
self._module_ids[module_id]['uuid'] = self._module_ids[module_id].get(\n 'uuid', str())\n message_decoded = bytearray(base64.b64decode(message['b']))\n module_uuid_bytes = message_decoded[:4]\n module_info_bytes = message_decoded[-4:]\n module_info = (module_info_bytes[1] << 8) + module_info_bytes[0]\n module_version_info = module_info_bytes[3] << 8 | module_info_bytes[2]\n version_path = (\n 'https://download.luxrobo.com/modi-skeleton-mobile/version.txt')\n version_info = None\n try:\n for line in ur.urlopen(version_path, timeout=1):\n version_info = line.decode('utf-8').lstrip('v')\n version_digits = [int(digit) for digit in version_info.split('.')]\n \"\"\" Version number is formed by concatenating all three version bits\n e.g. v2.2.4 -> 010 00010 00000100 -> 0100 0010 0000 0100\n \"\"\"\n latest_version = version_digits[0] << 13 | version_digits[1\n ] << 8 | version_digits[2]\n except URLError:\n latest_version = module_version_info\n module_category_idx = module_info >> 13\n module_type_idx = module_info >> 4 & 511\n module_category = self.__module_categories[module_category_idx]\n module_type = self.__module_types[module_category][module_type_idx]\n module_uuid = self.__fit_module_uuid(module_info, (\n module_uuid_bytes[3] << 24) + (module_uuid_bytes[2] << 16) + (\n module_uuid_bytes[1] << 8) + module_uuid_bytes[0])\n module_uuid = up(message['b'], (6, 2))[0]\n if (module_category != 'network' and not self.\n firmware_update_message_flag and module_version_info <\n latest_version):\n print('Your MODI module(s) is not up-to-date.')\n print(\n \"You can update your MODI modules by calling 'update_module_firmware()'\"\n )\n self.firmware_update_message_flag = True\n self._module_ids[module_id]['uuid'] = module_uuid\n for module in self._modules:\n if module.uuid == module_uuid and not module.is_connected:\n module.set_connection_state(connection_state=True)\n pnp_off_message = self.__set_module_state(4095, Module.\n State.RUN, Module.State.PNP_OFF)\n self._send_q.put(pnp_off_message)\n if not next((module for module in self._modules if module.uuid ==\n module_uuid), None):\n if module_category != 'network':\n module_template = self.__init_module(module_type)\n module_instance = module_template(module_id, module_uuid,\n self._send_q)\n self.__set_pnp(module_id=module_instance.id,\n module_pnp_state=Module.State.PNP_OFF)\n module_instance.version = module_version_info\n module_instance.is_up_to_date = (module_version_info ==\n latest_version)\n self._modules.append(module_instance)\n print(\n f'{type(module_instance).__name__} ({module_id}) has been connected!'\n )\n if self.__is_all_connected():\n self._init_event.set()\n\n def __is_all_connected(self) ->bool:\n \"\"\" Determine whether all modules are connected\n\n :return: true is all modules are connected\n :rtype: bool\n \"\"\"\n return self._nb_modules == len(self._modules)\n\n def __init_module(self, module_type: str) ->Module:\n \"\"\" Find module type for module initialize\n\n :param module_type: Type of the module in string\n :type module_type: str\n :return: Module corresponding to the type\n :rtype: Module\n \"\"\"\n module = {'button': Button, 'dial': Dial, 'display': Display, 'env':\n Env, 'gyro': Gyro, 'ir': Ir, 'led': Led, 'mic': Mic, 'motor':\n Motor, 'speaker': Speaker, 'ultrasonic': Ultrasonic}.get(\n module_type)\n return module\n\n def __update_property(self, message: Dict[str, int]) ->None:\n \"\"\" Update module property\n\n :param message: Dictionary format message\n :type message: Dictionary\n :return: None\n 
\"\"\"\n property_number = message['d']\n if property_number == 0 or property_number == 1:\n return\n for module in self._modules:\n if module.id == message['s']:\n message_decoded = bytearray(base64.b64decode(message['b']))\n property_type = module.PropertyType(property_number)\n module.update_property(property_type, round(struct.unpack(\n 'f', bytes(message_decoded[:4]))[0], 2))\n\n def __set_pnp(self, module_id: int, module_pnp_state: IntEnum) ->None:\n \"\"\" Generate module pnp on/off command\n\n :param module_id: ID of the target module\n :type module_id: int\n :param module_pnp_state: Pnp state value\n :type module_pnp_state: IntEnum\n :return: None\n \"\"\"\n if module_id is None:\n for curr_module_id in self._module_ids:\n pnp_message = self.__set_module_state(curr_module_id,\n Module.State.RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n else:\n pnp_message = self.__set_module_state(module_id, Module.State.\n RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n\n def __fit_module_uuid(self, module_info: int, module_uuid: int) ->int:\n \"\"\" Generate uuid using bitwise operation\n\n :param module_info: Module info\n :type module_info: int\n :param module_uuid: Module uuid\n :type module_uuid: int\n :return: Fitted uuid\n :rtype: int\n \"\"\"\n sizeof_module_uuid = 0\n while module_uuid >> sizeof_module_uuid > 0:\n sizeof_module_uuid += 1\n sizeof_module_uuid += sizeof_module_uuid % 4\n return module_info << sizeof_module_uuid | module_uuid\n\n def __set_module_state(self, destination_id: int, module_state: IntEnum,\n pnp_state: IntEnum) ->str:\n \"\"\" Generate message for set module state and pnp state\n\n :param destination_id: Id to target destination\n :type destination_id: int\n :param module_state: State value of the module\n :type module_state: int\n :param pnp_state: Pnp state value\n :type pnp_state: IntEnum\n :return: json serialized message\n :rtype: str\n \"\"\"\n message = dict()\n message['c'] = 9\n message['s'] = 0\n message['d'] = destination_id\n state_bytes = bytearray(2)\n state_bytes[0] = module_state\n state_bytes[1] = pnp_state\n message['b'] = base64.b64encode(bytes(state_bytes)).decode('utf-8')\n message['l'] = 2\n return json.dumps(message, separators=(',', ':'))\n\n def __init_modules(self) ->None:\n \"\"\" Initialize module on first run\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n reboot_message = self.__set_module_state(BROADCAST_ID, Module.State\n .REBOOT, Module.State.PNP_OFF)\n self._send_q.put(reboot_message)\n pnp_off_message = self.__set_module_state(BROADCAST_ID, Module.\n State.RUN, Module.State.PNP_OFF)\n self._send_q.put(pnp_off_message)\n request_uuid_message = self.__request_uuid(BROADCAST_ID)\n self._send_q.put(request_uuid_message)\n self.request_topology()\n\n def __delay(self) ->None:\n \"\"\" Wait for delay\n\n :return: None\n \"\"\"\n time.sleep(0.5)\n\n def __request_uuid(self, source_id: int, is_network_module: bool=False\n ) ->str:\n \"\"\" Generate broadcasting message for request uuid\n\n :param source_id: Id of the source\n :type source_id: int\n :param is_network_module: true if network module\n :type is_network_module: bool\n :return: json serialized message\n :rtype: str\n \"\"\"\n BROADCAST_ID = 4095\n message = dict()\n message['c'] = 40 if is_network_module else 8\n message['s'] = source_id\n message['d'] = BROADCAST_ID\n id_bytes = bytearray(8)\n id_bytes[0] = 255\n id_bytes[1] = 15\n message['b'] = base64.b64encode(bytes(id_bytes)).decode('utf-8')\n message['l'] = 8\n return json.dumps(message, 
separators=(',', ':'))\n\n def request_topology(self, cmd: int=7, module_id: int=4095) ->None:\n \"\"\"Request module topology\n\n :return: json serialized topology request message\n :rtype: str\n \"\"\"\n message = dict()\n message['c'] = cmd\n message['s'] = 0\n message['d'] = module_id\n direction_data = bytearray(8)\n message['b'] = base64.b64encode(bytes(direction_data)).decode('utf-8')\n message['l'] = 8\n self._send_q.put(json.dumps(message, separators=(',', ':')))\n\n def update_firmware(self) ->None:\n \"\"\" Remove firmware of MODI modules\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n firmware_update_message = self.__set_module_state(BROADCAST_ID,\n Module.State.UPDATE_FIRMWARE, Module.State.PNP_OFF)\n self._send_q.put(firmware_update_message)\n self.__delay()\n\n def update_firmware_ready(self, module_id: int) ->None:\n \"\"\" Check if modules with no firmware are ready to update its firmware\n\n :param module_id: Id of the target module\n :type module_id: int\n :return: None\n \"\"\"\n firmware_update_ready_message = self.__set_module_state(module_id,\n Module.State.UPDATE_FIRMWARE_READY, Module.State.PNP_OFF)\n self._send_q.put(firmware_update_ready_message)\n self.__delay()\n\n def __get_type_from_uuid(self, uuid):\n if uuid is None:\n return 'Network'\n hexadecimal = hex(uuid).lstrip('0x')\n type_indicator = str(hexadecimal)[:4]\n module_type = {'2000': 'env', '2010': 'gyro', '2020': 'mic', '2030':\n 'button', '2040': 'dial', '2050': 'ultrasonic', '2060': 'ir',\n '4000': 'display', '4010': 'motor', '4020': 'led', '4030':\n 'speaker'}.get(type_indicator)\n return 'Network' if module_type is None else module_type\n",
"<import token>\n\n\nclass ExeTask:\n <docstring token>\n <assignment token>\n <assignment token>\n\n def __init__(self, modules, module_ids, topology_data, recv_q, send_q,\n init_event, nb_modules, firmware_updater):\n self._modules = modules\n self._module_ids = module_ids\n self._topology_data = topology_data\n self._recv_q = recv_q\n self._send_q = send_q\n self._init_event = init_event\n self._nb_modules = nb_modules\n self.firmware_updater = firmware_updater\n self.firmware_update_message_flag = False\n self.__init_modules()\n print('Start initializing connected MODI modules')\n\n def run(self, delay: float):\n \"\"\" Run in ExecutorThread\n\n :param delay: time value to wait in seconds\n :type delay: float\n \"\"\"\n time.sleep(delay)\n try:\n raw_message = self._recv_q.get_nowait()\n message = json.loads(raw_message)\n except queue.Empty:\n pass\n except json.decoder.JSONDecodeError:\n print('current json message:', raw_message)\n else:\n self.__command_handler(message['c'])(message)\n <function token>\n <function token>\n\n def __update_topology(self, message: Dict[str, int]) ->None:\n \"\"\"Update the topology of the connected modules\n\n :param message: Dictionary format message of the module\n :return: None\n \"\"\"\n src_id = message['s']\n byte_data = message['b']\n broadcast_id = 2 ** 16 - 1\n topology_by_id = {}\n message_decoded = bytearray(base64.b64decode(byte_data))\n src_uuid = self.__get_uuid_by_id(src_id)\n topology_by_id['uuid'] = src_uuid\n right_id = message_decoded[1] << 8 | message_decoded[0]\n topology_by_id['r'] = right_id if right_id != broadcast_id else None\n top_id = message_decoded[3] << 8 | message_decoded[2]\n topology_by_id['t'] = top_id if top_id != broadcast_id else None\n left_id = message_decoded[5] << 8 | message_decoded[4]\n topology_by_id['l'] = left_id if left_id != broadcast_id else None\n bottom_id = message_decoded[7] << 8 | message_decoded[6]\n topology_by_id['b'] = bottom_id if bottom_id != broadcast_id else None\n if not self._topology_data.get(src_id):\n self._topology_data[src_id] = topology_by_id\n else:\n for key in self._topology_data[src_id]:\n if not self._topology_data[src_id][key]:\n self._topology_data[src_id][key] = topology_by_id[key]\n <function token>\n <function token>\n\n def __update_warning(self, message: Dict[str, int]) ->None:\n \"\"\"Update the warning message\n\n :param message: Warning message in Dictionary format\n :return: None\n \"\"\"\n warning_data = bytearray(base64.b64decode(message['b']))\n warning_type = warning_data[6]\n if not warning_type:\n return\n module_uuid = warning_data[:6]\n module_uuid_res = 0\n for i, v in enumerate(module_uuid):\n module_uuid_res |= v << 8 * i\n module_id = message['s']\n module_type = self.__get_type_from_uuid(module_uuid_res)\n if module_type == 'Network':\n return\n if warning_type == 1:\n self.firmware_updater.check_to_update_firmware(module_id)\n elif warning_type == 2:\n if self.firmware_updater.update_in_progress:\n self.firmware_updater.add_to_waitlist(module_id, module_type)\n else:\n self.firmware_updater.update_module(module_id, module_type)\n else:\n pass\n\n def __update_modules(self, message: Dict[str, str]) ->None:\n \"\"\" Update module information\n\n :param message: Dictionary format module info\n :type message: Dictionary\n :return: None\n \"\"\"\n curr_time_ms = int(time.time() * 1000)\n module_id = message['s']\n self._module_ids[module_id] = self._module_ids.get(module_id, dict())\n self._module_ids[module_id]['timestamp'] = curr_time_ms\n 
self._module_ids[module_id]['uuid'] = self._module_ids[module_id].get(\n 'uuid', str())\n message_decoded = bytearray(base64.b64decode(message['b']))\n module_uuid_bytes = message_decoded[:4]\n module_info_bytes = message_decoded[-4:]\n module_info = (module_info_bytes[1] << 8) + module_info_bytes[0]\n module_version_info = module_info_bytes[3] << 8 | module_info_bytes[2]\n version_path = (\n 'https://download.luxrobo.com/modi-skeleton-mobile/version.txt')\n version_info = None\n try:\n for line in ur.urlopen(version_path, timeout=1):\n version_info = line.decode('utf-8').lstrip('v')\n version_digits = [int(digit) for digit in version_info.split('.')]\n \"\"\" Version number is formed by concatenating all three version bits\n e.g. v2.2.4 -> 010 00010 00000100 -> 0100 0010 0000 0100\n \"\"\"\n latest_version = version_digits[0] << 13 | version_digits[1\n ] << 8 | version_digits[2]\n except URLError:\n latest_version = module_version_info\n module_category_idx = module_info >> 13\n module_type_idx = module_info >> 4 & 511\n module_category = self.__module_categories[module_category_idx]\n module_type = self.__module_types[module_category][module_type_idx]\n module_uuid = self.__fit_module_uuid(module_info, (\n module_uuid_bytes[3] << 24) + (module_uuid_bytes[2] << 16) + (\n module_uuid_bytes[1] << 8) + module_uuid_bytes[0])\n module_uuid = up(message['b'], (6, 2))[0]\n if (module_category != 'network' and not self.\n firmware_update_message_flag and module_version_info <\n latest_version):\n print('Your MODI module(s) is not up-to-date.')\n print(\n \"You can update your MODI modules by calling 'update_module_firmware()'\"\n )\n self.firmware_update_message_flag = True\n self._module_ids[module_id]['uuid'] = module_uuid\n for module in self._modules:\n if module.uuid == module_uuid and not module.is_connected:\n module.set_connection_state(connection_state=True)\n pnp_off_message = self.__set_module_state(4095, Module.\n State.RUN, Module.State.PNP_OFF)\n self._send_q.put(pnp_off_message)\n if not next((module for module in self._modules if module.uuid ==\n module_uuid), None):\n if module_category != 'network':\n module_template = self.__init_module(module_type)\n module_instance = module_template(module_id, module_uuid,\n self._send_q)\n self.__set_pnp(module_id=module_instance.id,\n module_pnp_state=Module.State.PNP_OFF)\n module_instance.version = module_version_info\n module_instance.is_up_to_date = (module_version_info ==\n latest_version)\n self._modules.append(module_instance)\n print(\n f'{type(module_instance).__name__} ({module_id}) has been connected!'\n )\n if self.__is_all_connected():\n self._init_event.set()\n\n def __is_all_connected(self) ->bool:\n \"\"\" Determine whether all modules are connected\n\n :return: true is all modules are connected\n :rtype: bool\n \"\"\"\n return self._nb_modules == len(self._modules)\n\n def __init_module(self, module_type: str) ->Module:\n \"\"\" Find module type for module initialize\n\n :param module_type: Type of the module in string\n :type module_type: str\n :return: Module corresponding to the type\n :rtype: Module\n \"\"\"\n module = {'button': Button, 'dial': Dial, 'display': Display, 'env':\n Env, 'gyro': Gyro, 'ir': Ir, 'led': Led, 'mic': Mic, 'motor':\n Motor, 'speaker': Speaker, 'ultrasonic': Ultrasonic}.get(\n module_type)\n return module\n\n def __update_property(self, message: Dict[str, int]) ->None:\n \"\"\" Update module property\n\n :param message: Dictionary format message\n :type message: Dictionary\n :return: None\n 
\"\"\"\n property_number = message['d']\n if property_number == 0 or property_number == 1:\n return\n for module in self._modules:\n if module.id == message['s']:\n message_decoded = bytearray(base64.b64decode(message['b']))\n property_type = module.PropertyType(property_number)\n module.update_property(property_type, round(struct.unpack(\n 'f', bytes(message_decoded[:4]))[0], 2))\n\n def __set_pnp(self, module_id: int, module_pnp_state: IntEnum) ->None:\n \"\"\" Generate module pnp on/off command\n\n :param module_id: ID of the target module\n :type module_id: int\n :param module_pnp_state: Pnp state value\n :type module_pnp_state: IntEnum\n :return: None\n \"\"\"\n if module_id is None:\n for curr_module_id in self._module_ids:\n pnp_message = self.__set_module_state(curr_module_id,\n Module.State.RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n else:\n pnp_message = self.__set_module_state(module_id, Module.State.\n RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n\n def __fit_module_uuid(self, module_info: int, module_uuid: int) ->int:\n \"\"\" Generate uuid using bitwise operation\n\n :param module_info: Module info\n :type module_info: int\n :param module_uuid: Module uuid\n :type module_uuid: int\n :return: Fitted uuid\n :rtype: int\n \"\"\"\n sizeof_module_uuid = 0\n while module_uuid >> sizeof_module_uuid > 0:\n sizeof_module_uuid += 1\n sizeof_module_uuid += sizeof_module_uuid % 4\n return module_info << sizeof_module_uuid | module_uuid\n\n def __set_module_state(self, destination_id: int, module_state: IntEnum,\n pnp_state: IntEnum) ->str:\n \"\"\" Generate message for set module state and pnp state\n\n :param destination_id: Id to target destination\n :type destination_id: int\n :param module_state: State value of the module\n :type module_state: int\n :param pnp_state: Pnp state value\n :type pnp_state: IntEnum\n :return: json serialized message\n :rtype: str\n \"\"\"\n message = dict()\n message['c'] = 9\n message['s'] = 0\n message['d'] = destination_id\n state_bytes = bytearray(2)\n state_bytes[0] = module_state\n state_bytes[1] = pnp_state\n message['b'] = base64.b64encode(bytes(state_bytes)).decode('utf-8')\n message['l'] = 2\n return json.dumps(message, separators=(',', ':'))\n\n def __init_modules(self) ->None:\n \"\"\" Initialize module on first run\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n reboot_message = self.__set_module_state(BROADCAST_ID, Module.State\n .REBOOT, Module.State.PNP_OFF)\n self._send_q.put(reboot_message)\n pnp_off_message = self.__set_module_state(BROADCAST_ID, Module.\n State.RUN, Module.State.PNP_OFF)\n self._send_q.put(pnp_off_message)\n request_uuid_message = self.__request_uuid(BROADCAST_ID)\n self._send_q.put(request_uuid_message)\n self.request_topology()\n\n def __delay(self) ->None:\n \"\"\" Wait for delay\n\n :return: None\n \"\"\"\n time.sleep(0.5)\n\n def __request_uuid(self, source_id: int, is_network_module: bool=False\n ) ->str:\n \"\"\" Generate broadcasting message for request uuid\n\n :param source_id: Id of the source\n :type source_id: int\n :param is_network_module: true if network module\n :type is_network_module: bool\n :return: json serialized message\n :rtype: str\n \"\"\"\n BROADCAST_ID = 4095\n message = dict()\n message['c'] = 40 if is_network_module else 8\n message['s'] = source_id\n message['d'] = BROADCAST_ID\n id_bytes = bytearray(8)\n id_bytes[0] = 255\n id_bytes[1] = 15\n message['b'] = base64.b64encode(bytes(id_bytes)).decode('utf-8')\n message['l'] = 8\n return json.dumps(message, 
separators=(',', ':'))\n <function token>\n\n def update_firmware(self) ->None:\n \"\"\" Remove firmware of MODI modules\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n firmware_update_message = self.__set_module_state(BROADCAST_ID,\n Module.State.UPDATE_FIRMWARE, Module.State.PNP_OFF)\n self._send_q.put(firmware_update_message)\n self.__delay()\n\n def update_firmware_ready(self, module_id: int) ->None:\n \"\"\" Check if modules with no firmware are ready to update its firmware\n\n :param module_id: Id of the target module\n :type module_id: int\n :return: None\n \"\"\"\n firmware_update_ready_message = self.__set_module_state(module_id,\n Module.State.UPDATE_FIRMWARE_READY, Module.State.PNP_OFF)\n self._send_q.put(firmware_update_ready_message)\n self.__delay()\n\n def __get_type_from_uuid(self, uuid):\n if uuid is None:\n return 'Network'\n hexadecimal = hex(uuid).lstrip('0x')\n type_indicator = str(hexadecimal)[:4]\n module_type = {'2000': 'env', '2010': 'gyro', '2020': 'mic', '2030':\n 'button', '2040': 'dial', '2050': 'ultrasonic', '2060': 'ir',\n '4000': 'display', '4010': 'motor', '4020': 'led', '4030':\n 'speaker'}.get(type_indicator)\n return 'Network' if module_type is None else module_type\n",
"<import token>\n\n\nclass ExeTask:\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n\n def run(self, delay: float):\n \"\"\" Run in ExecutorThread\n\n :param delay: time value to wait in seconds\n :type delay: float\n \"\"\"\n time.sleep(delay)\n try:\n raw_message = self._recv_q.get_nowait()\n message = json.loads(raw_message)\n except queue.Empty:\n pass\n except json.decoder.JSONDecodeError:\n print('current json message:', raw_message)\n else:\n self.__command_handler(message['c'])(message)\n <function token>\n <function token>\n\n def __update_topology(self, message: Dict[str, int]) ->None:\n \"\"\"Update the topology of the connected modules\n\n :param message: Dictionary format message of the module\n :return: None\n \"\"\"\n src_id = message['s']\n byte_data = message['b']\n broadcast_id = 2 ** 16 - 1\n topology_by_id = {}\n message_decoded = bytearray(base64.b64decode(byte_data))\n src_uuid = self.__get_uuid_by_id(src_id)\n topology_by_id['uuid'] = src_uuid\n right_id = message_decoded[1] << 8 | message_decoded[0]\n topology_by_id['r'] = right_id if right_id != broadcast_id else None\n top_id = message_decoded[3] << 8 | message_decoded[2]\n topology_by_id['t'] = top_id if top_id != broadcast_id else None\n left_id = message_decoded[5] << 8 | message_decoded[4]\n topology_by_id['l'] = left_id if left_id != broadcast_id else None\n bottom_id = message_decoded[7] << 8 | message_decoded[6]\n topology_by_id['b'] = bottom_id if bottom_id != broadcast_id else None\n if not self._topology_data.get(src_id):\n self._topology_data[src_id] = topology_by_id\n else:\n for key in self._topology_data[src_id]:\n if not self._topology_data[src_id][key]:\n self._topology_data[src_id][key] = topology_by_id[key]\n <function token>\n <function token>\n\n def __update_warning(self, message: Dict[str, int]) ->None:\n \"\"\"Update the warning message\n\n :param message: Warning message in Dictionary format\n :return: None\n \"\"\"\n warning_data = bytearray(base64.b64decode(message['b']))\n warning_type = warning_data[6]\n if not warning_type:\n return\n module_uuid = warning_data[:6]\n module_uuid_res = 0\n for i, v in enumerate(module_uuid):\n module_uuid_res |= v << 8 * i\n module_id = message['s']\n module_type = self.__get_type_from_uuid(module_uuid_res)\n if module_type == 'Network':\n return\n if warning_type == 1:\n self.firmware_updater.check_to_update_firmware(module_id)\n elif warning_type == 2:\n if self.firmware_updater.update_in_progress:\n self.firmware_updater.add_to_waitlist(module_id, module_type)\n else:\n self.firmware_updater.update_module(module_id, module_type)\n else:\n pass\n\n def __update_modules(self, message: Dict[str, str]) ->None:\n \"\"\" Update module information\n\n :param message: Dictionary format module info\n :type message: Dictionary\n :return: None\n \"\"\"\n curr_time_ms = int(time.time() * 1000)\n module_id = message['s']\n self._module_ids[module_id] = self._module_ids.get(module_id, dict())\n self._module_ids[module_id]['timestamp'] = curr_time_ms\n self._module_ids[module_id]['uuid'] = self._module_ids[module_id].get(\n 'uuid', str())\n message_decoded = bytearray(base64.b64decode(message['b']))\n module_uuid_bytes = message_decoded[:4]\n module_info_bytes = message_decoded[-4:]\n module_info = (module_info_bytes[1] << 8) + module_info_bytes[0]\n module_version_info = module_info_bytes[3] << 8 | module_info_bytes[2]\n version_path = (\n 'https://download.luxrobo.com/modi-skeleton-mobile/version.txt')\n version_info = None\n 
try:\n for line in ur.urlopen(version_path, timeout=1):\n version_info = line.decode('utf-8').lstrip('v')\n version_digits = [int(digit) for digit in version_info.split('.')]\n \"\"\" Version number is formed by concatenating all three version bits\n e.g. v2.2.4 -> 010 00010 00000100 -> 0100 0010 0000 0100\n \"\"\"\n latest_version = version_digits[0] << 13 | version_digits[1\n ] << 8 | version_digits[2]\n except URLError:\n latest_version = module_version_info\n module_category_idx = module_info >> 13\n module_type_idx = module_info >> 4 & 511\n module_category = self.__module_categories[module_category_idx]\n module_type = self.__module_types[module_category][module_type_idx]\n module_uuid = self.__fit_module_uuid(module_info, (\n module_uuid_bytes[3] << 24) + (module_uuid_bytes[2] << 16) + (\n module_uuid_bytes[1] << 8) + module_uuid_bytes[0])\n module_uuid = up(message['b'], (6, 2))[0]\n if (module_category != 'network' and not self.\n firmware_update_message_flag and module_version_info <\n latest_version):\n print('Your MODI module(s) is not up-to-date.')\n print(\n \"You can update your MODI modules by calling 'update_module_firmware()'\"\n )\n self.firmware_update_message_flag = True\n self._module_ids[module_id]['uuid'] = module_uuid\n for module in self._modules:\n if module.uuid == module_uuid and not module.is_connected:\n module.set_connection_state(connection_state=True)\n pnp_off_message = self.__set_module_state(4095, Module.\n State.RUN, Module.State.PNP_OFF)\n self._send_q.put(pnp_off_message)\n if not next((module for module in self._modules if module.uuid ==\n module_uuid), None):\n if module_category != 'network':\n module_template = self.__init_module(module_type)\n module_instance = module_template(module_id, module_uuid,\n self._send_q)\n self.__set_pnp(module_id=module_instance.id,\n module_pnp_state=Module.State.PNP_OFF)\n module_instance.version = module_version_info\n module_instance.is_up_to_date = (module_version_info ==\n latest_version)\n self._modules.append(module_instance)\n print(\n f'{type(module_instance).__name__} ({module_id}) has been connected!'\n )\n if self.__is_all_connected():\n self._init_event.set()\n\n def __is_all_connected(self) ->bool:\n \"\"\" Determine whether all modules are connected\n\n :return: true is all modules are connected\n :rtype: bool\n \"\"\"\n return self._nb_modules == len(self._modules)\n\n def __init_module(self, module_type: str) ->Module:\n \"\"\" Find module type for module initialize\n\n :param module_type: Type of the module in string\n :type module_type: str\n :return: Module corresponding to the type\n :rtype: Module\n \"\"\"\n module = {'button': Button, 'dial': Dial, 'display': Display, 'env':\n Env, 'gyro': Gyro, 'ir': Ir, 'led': Led, 'mic': Mic, 'motor':\n Motor, 'speaker': Speaker, 'ultrasonic': Ultrasonic}.get(\n module_type)\n return module\n\n def __update_property(self, message: Dict[str, int]) ->None:\n \"\"\" Update module property\n\n :param message: Dictionary format message\n :type message: Dictionary\n :return: None\n \"\"\"\n property_number = message['d']\n if property_number == 0 or property_number == 1:\n return\n for module in self._modules:\n if module.id == message['s']:\n message_decoded = bytearray(base64.b64decode(message['b']))\n property_type = module.PropertyType(property_number)\n module.update_property(property_type, round(struct.unpack(\n 'f', bytes(message_decoded[:4]))[0], 2))\n\n def __set_pnp(self, module_id: int, module_pnp_state: IntEnum) ->None:\n \"\"\" Generate module pnp 
on/off command\n\n :param module_id: ID of the target module\n :type module_id: int\n :param module_pnp_state: Pnp state value\n :type module_pnp_state: IntEnum\n :return: None\n \"\"\"\n if module_id is None:\n for curr_module_id in self._module_ids:\n pnp_message = self.__set_module_state(curr_module_id,\n Module.State.RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n else:\n pnp_message = self.__set_module_state(module_id, Module.State.\n RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n\n def __fit_module_uuid(self, module_info: int, module_uuid: int) ->int:\n \"\"\" Generate uuid using bitwise operation\n\n :param module_info: Module info\n :type module_info: int\n :param module_uuid: Module uuid\n :type module_uuid: int\n :return: Fitted uuid\n :rtype: int\n \"\"\"\n sizeof_module_uuid = 0\n while module_uuid >> sizeof_module_uuid > 0:\n sizeof_module_uuid += 1\n sizeof_module_uuid += sizeof_module_uuid % 4\n return module_info << sizeof_module_uuid | module_uuid\n\n def __set_module_state(self, destination_id: int, module_state: IntEnum,\n pnp_state: IntEnum) ->str:\n \"\"\" Generate message for set module state and pnp state\n\n :param destination_id: Id to target destination\n :type destination_id: int\n :param module_state: State value of the module\n :type module_state: int\n :param pnp_state: Pnp state value\n :type pnp_state: IntEnum\n :return: json serialized message\n :rtype: str\n \"\"\"\n message = dict()\n message['c'] = 9\n message['s'] = 0\n message['d'] = destination_id\n state_bytes = bytearray(2)\n state_bytes[0] = module_state\n state_bytes[1] = pnp_state\n message['b'] = base64.b64encode(bytes(state_bytes)).decode('utf-8')\n message['l'] = 2\n return json.dumps(message, separators=(',', ':'))\n\n def __init_modules(self) ->None:\n \"\"\" Initialize module on first run\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n reboot_message = self.__set_module_state(BROADCAST_ID, Module.State\n .REBOOT, Module.State.PNP_OFF)\n self._send_q.put(reboot_message)\n pnp_off_message = self.__set_module_state(BROADCAST_ID, Module.\n State.RUN, Module.State.PNP_OFF)\n self._send_q.put(pnp_off_message)\n request_uuid_message = self.__request_uuid(BROADCAST_ID)\n self._send_q.put(request_uuid_message)\n self.request_topology()\n\n def __delay(self) ->None:\n \"\"\" Wait for delay\n\n :return: None\n \"\"\"\n time.sleep(0.5)\n\n def __request_uuid(self, source_id: int, is_network_module: bool=False\n ) ->str:\n \"\"\" Generate broadcasting message for request uuid\n\n :param source_id: Id of the source\n :type source_id: int\n :param is_network_module: true if network module\n :type is_network_module: bool\n :return: json serialized message\n :rtype: str\n \"\"\"\n BROADCAST_ID = 4095\n message = dict()\n message['c'] = 40 if is_network_module else 8\n message['s'] = source_id\n message['d'] = BROADCAST_ID\n id_bytes = bytearray(8)\n id_bytes[0] = 255\n id_bytes[1] = 15\n message['b'] = base64.b64encode(bytes(id_bytes)).decode('utf-8')\n message['l'] = 8\n return json.dumps(message, separators=(',', ':'))\n <function token>\n\n def update_firmware(self) ->None:\n \"\"\" Remove firmware of MODI modules\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n firmware_update_message = self.__set_module_state(BROADCAST_ID,\n Module.State.UPDATE_FIRMWARE, Module.State.PNP_OFF)\n self._send_q.put(firmware_update_message)\n self.__delay()\n\n def update_firmware_ready(self, module_id: int) ->None:\n \"\"\" Check if modules with no firmware are ready to update its firmware\n\n 
:param module_id: Id of the target module\n :type module_id: int\n :return: None\n \"\"\"\n firmware_update_ready_message = self.__set_module_state(module_id,\n Module.State.UPDATE_FIRMWARE_READY, Module.State.PNP_OFF)\n self._send_q.put(firmware_update_ready_message)\n self.__delay()\n\n def __get_type_from_uuid(self, uuid):\n if uuid is None:\n return 'Network'\n hexadecimal = hex(uuid).lstrip('0x')\n type_indicator = str(hexadecimal)[:4]\n module_type = {'2000': 'env', '2010': 'gyro', '2020': 'mic', '2030':\n 'button', '2040': 'dial', '2050': 'ultrasonic', '2060': 'ir',\n '4000': 'display', '4010': 'motor', '4020': 'led', '4030':\n 'speaker'}.get(type_indicator)\n return 'Network' if module_type is None else module_type\n",
"<import token>\n\n\nclass ExeTask:\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n\n def run(self, delay: float):\n \"\"\" Run in ExecutorThread\n\n :param delay: time value to wait in seconds\n :type delay: float\n \"\"\"\n time.sleep(delay)\n try:\n raw_message = self._recv_q.get_nowait()\n message = json.loads(raw_message)\n except queue.Empty:\n pass\n except json.decoder.JSONDecodeError:\n print('current json message:', raw_message)\n else:\n self.__command_handler(message['c'])(message)\n <function token>\n <function token>\n\n def __update_topology(self, message: Dict[str, int]) ->None:\n \"\"\"Update the topology of the connected modules\n\n :param message: Dictionary format message of the module\n :return: None\n \"\"\"\n src_id = message['s']\n byte_data = message['b']\n broadcast_id = 2 ** 16 - 1\n topology_by_id = {}\n message_decoded = bytearray(base64.b64decode(byte_data))\n src_uuid = self.__get_uuid_by_id(src_id)\n topology_by_id['uuid'] = src_uuid\n right_id = message_decoded[1] << 8 | message_decoded[0]\n topology_by_id['r'] = right_id if right_id != broadcast_id else None\n top_id = message_decoded[3] << 8 | message_decoded[2]\n topology_by_id['t'] = top_id if top_id != broadcast_id else None\n left_id = message_decoded[5] << 8 | message_decoded[4]\n topology_by_id['l'] = left_id if left_id != broadcast_id else None\n bottom_id = message_decoded[7] << 8 | message_decoded[6]\n topology_by_id['b'] = bottom_id if bottom_id != broadcast_id else None\n if not self._topology_data.get(src_id):\n self._topology_data[src_id] = topology_by_id\n else:\n for key in self._topology_data[src_id]:\n if not self._topology_data[src_id][key]:\n self._topology_data[src_id][key] = topology_by_id[key]\n <function token>\n <function token>\n\n def __update_warning(self, message: Dict[str, int]) ->None:\n \"\"\"Update the warning message\n\n :param message: Warning message in Dictionary format\n :return: None\n \"\"\"\n warning_data = bytearray(base64.b64decode(message['b']))\n warning_type = warning_data[6]\n if not warning_type:\n return\n module_uuid = warning_data[:6]\n module_uuid_res = 0\n for i, v in enumerate(module_uuid):\n module_uuid_res |= v << 8 * i\n module_id = message['s']\n module_type = self.__get_type_from_uuid(module_uuid_res)\n if module_type == 'Network':\n return\n if warning_type == 1:\n self.firmware_updater.check_to_update_firmware(module_id)\n elif warning_type == 2:\n if self.firmware_updater.update_in_progress:\n self.firmware_updater.add_to_waitlist(module_id, module_type)\n else:\n self.firmware_updater.update_module(module_id, module_type)\n else:\n pass\n\n def __update_modules(self, message: Dict[str, str]) ->None:\n \"\"\" Update module information\n\n :param message: Dictionary format module info\n :type message: Dictionary\n :return: None\n \"\"\"\n curr_time_ms = int(time.time() * 1000)\n module_id = message['s']\n self._module_ids[module_id] = self._module_ids.get(module_id, dict())\n self._module_ids[module_id]['timestamp'] = curr_time_ms\n self._module_ids[module_id]['uuid'] = self._module_ids[module_id].get(\n 'uuid', str())\n message_decoded = bytearray(base64.b64decode(message['b']))\n module_uuid_bytes = message_decoded[:4]\n module_info_bytes = message_decoded[-4:]\n module_info = (module_info_bytes[1] << 8) + module_info_bytes[0]\n module_version_info = module_info_bytes[3] << 8 | module_info_bytes[2]\n version_path = (\n 'https://download.luxrobo.com/modi-skeleton-mobile/version.txt')\n version_info = None\n 
try:\n for line in ur.urlopen(version_path, timeout=1):\n version_info = line.decode('utf-8').lstrip('v')\n version_digits = [int(digit) for digit in version_info.split('.')]\n \"\"\" Version number is formed by concatenating all three version bits\n e.g. v2.2.4 -> 010 00010 00000100 -> 0100 0010 0000 0100\n \"\"\"\n latest_version = version_digits[0] << 13 | version_digits[1\n ] << 8 | version_digits[2]\n except URLError:\n latest_version = module_version_info\n module_category_idx = module_info >> 13\n module_type_idx = module_info >> 4 & 511\n module_category = self.__module_categories[module_category_idx]\n module_type = self.__module_types[module_category][module_type_idx]\n module_uuid = self.__fit_module_uuid(module_info, (\n module_uuid_bytes[3] << 24) + (module_uuid_bytes[2] << 16) + (\n module_uuid_bytes[1] << 8) + module_uuid_bytes[0])\n module_uuid = up(message['b'], (6, 2))[0]\n if (module_category != 'network' and not self.\n firmware_update_message_flag and module_version_info <\n latest_version):\n print('Your MODI module(s) is not up-to-date.')\n print(\n \"You can update your MODI modules by calling 'update_module_firmware()'\"\n )\n self.firmware_update_message_flag = True\n self._module_ids[module_id]['uuid'] = module_uuid\n for module in self._modules:\n if module.uuid == module_uuid and not module.is_connected:\n module.set_connection_state(connection_state=True)\n pnp_off_message = self.__set_module_state(4095, Module.\n State.RUN, Module.State.PNP_OFF)\n self._send_q.put(pnp_off_message)\n if not next((module for module in self._modules if module.uuid ==\n module_uuid), None):\n if module_category != 'network':\n module_template = self.__init_module(module_type)\n module_instance = module_template(module_id, module_uuid,\n self._send_q)\n self.__set_pnp(module_id=module_instance.id,\n module_pnp_state=Module.State.PNP_OFF)\n module_instance.version = module_version_info\n module_instance.is_up_to_date = (module_version_info ==\n latest_version)\n self._modules.append(module_instance)\n print(\n f'{type(module_instance).__name__} ({module_id}) has been connected!'\n )\n if self.__is_all_connected():\n self._init_event.set()\n\n def __is_all_connected(self) ->bool:\n \"\"\" Determine whether all modules are connected\n\n :return: true is all modules are connected\n :rtype: bool\n \"\"\"\n return self._nb_modules == len(self._modules)\n\n def __init_module(self, module_type: str) ->Module:\n \"\"\" Find module type for module initialize\n\n :param module_type: Type of the module in string\n :type module_type: str\n :return: Module corresponding to the type\n :rtype: Module\n \"\"\"\n module = {'button': Button, 'dial': Dial, 'display': Display, 'env':\n Env, 'gyro': Gyro, 'ir': Ir, 'led': Led, 'mic': Mic, 'motor':\n Motor, 'speaker': Speaker, 'ultrasonic': Ultrasonic}.get(\n module_type)\n return module\n\n def __update_property(self, message: Dict[str, int]) ->None:\n \"\"\" Update module property\n\n :param message: Dictionary format message\n :type message: Dictionary\n :return: None\n \"\"\"\n property_number = message['d']\n if property_number == 0 or property_number == 1:\n return\n for module in self._modules:\n if module.id == message['s']:\n message_decoded = bytearray(base64.b64decode(message['b']))\n property_type = module.PropertyType(property_number)\n module.update_property(property_type, round(struct.unpack(\n 'f', bytes(message_decoded[:4]))[0], 2))\n\n def __set_pnp(self, module_id: int, module_pnp_state: IntEnum) ->None:\n \"\"\" Generate module pnp 
on/off command\n\n :param module_id: ID of the target module\n :type module_id: int\n :param module_pnp_state: Pnp state value\n :type module_pnp_state: IntEnum\n :return: None\n \"\"\"\n if module_id is None:\n for curr_module_id in self._module_ids:\n pnp_message = self.__set_module_state(curr_module_id,\n Module.State.RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n else:\n pnp_message = self.__set_module_state(module_id, Module.State.\n RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n <function token>\n\n def __set_module_state(self, destination_id: int, module_state: IntEnum,\n pnp_state: IntEnum) ->str:\n \"\"\" Generate message for set module state and pnp state\n\n :param destination_id: Id to target destination\n :type destination_id: int\n :param module_state: State value of the module\n :type module_state: int\n :param pnp_state: Pnp state value\n :type pnp_state: IntEnum\n :return: json serialized message\n :rtype: str\n \"\"\"\n message = dict()\n message['c'] = 9\n message['s'] = 0\n message['d'] = destination_id\n state_bytes = bytearray(2)\n state_bytes[0] = module_state\n state_bytes[1] = pnp_state\n message['b'] = base64.b64encode(bytes(state_bytes)).decode('utf-8')\n message['l'] = 2\n return json.dumps(message, separators=(',', ':'))\n\n def __init_modules(self) ->None:\n \"\"\" Initialize module on first run\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n reboot_message = self.__set_module_state(BROADCAST_ID, Module.State\n .REBOOT, Module.State.PNP_OFF)\n self._send_q.put(reboot_message)\n pnp_off_message = self.__set_module_state(BROADCAST_ID, Module.\n State.RUN, Module.State.PNP_OFF)\n self._send_q.put(pnp_off_message)\n request_uuid_message = self.__request_uuid(BROADCAST_ID)\n self._send_q.put(request_uuid_message)\n self.request_topology()\n\n def __delay(self) ->None:\n \"\"\" Wait for delay\n\n :return: None\n \"\"\"\n time.sleep(0.5)\n\n def __request_uuid(self, source_id: int, is_network_module: bool=False\n ) ->str:\n \"\"\" Generate broadcasting message for request uuid\n\n :param source_id: Id of the source\n :type source_id: int\n :param is_network_module: true if network module\n :type is_network_module: bool\n :return: json serialized message\n :rtype: str\n \"\"\"\n BROADCAST_ID = 4095\n message = dict()\n message['c'] = 40 if is_network_module else 8\n message['s'] = source_id\n message['d'] = BROADCAST_ID\n id_bytes = bytearray(8)\n id_bytes[0] = 255\n id_bytes[1] = 15\n message['b'] = base64.b64encode(bytes(id_bytes)).decode('utf-8')\n message['l'] = 8\n return json.dumps(message, separators=(',', ':'))\n <function token>\n\n def update_firmware(self) ->None:\n \"\"\" Remove firmware of MODI modules\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n firmware_update_message = self.__set_module_state(BROADCAST_ID,\n Module.State.UPDATE_FIRMWARE, Module.State.PNP_OFF)\n self._send_q.put(firmware_update_message)\n self.__delay()\n\n def update_firmware_ready(self, module_id: int) ->None:\n \"\"\" Check if modules with no firmware are ready to update its firmware\n\n :param module_id: Id of the target module\n :type module_id: int\n :return: None\n \"\"\"\n firmware_update_ready_message = self.__set_module_state(module_id,\n Module.State.UPDATE_FIRMWARE_READY, Module.State.PNP_OFF)\n self._send_q.put(firmware_update_ready_message)\n self.__delay()\n\n def __get_type_from_uuid(self, uuid):\n if uuid is None:\n return 'Network'\n hexadecimal = hex(uuid).lstrip('0x')\n type_indicator = str(hexadecimal)[:4]\n module_type = {'2000': 'env', 
'2010': 'gyro', '2020': 'mic', '2030':\n 'button', '2040': 'dial', '2050': 'ultrasonic', '2060': 'ir',\n '4000': 'display', '4010': 'motor', '4020': 'led', '4030':\n 'speaker'}.get(type_indicator)\n return 'Network' if module_type is None else module_type\n",
"<import token>\n\n\nclass ExeTask:\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n\n def run(self, delay: float):\n \"\"\" Run in ExecutorThread\n\n :param delay: time value to wait in seconds\n :type delay: float\n \"\"\"\n time.sleep(delay)\n try:\n raw_message = self._recv_q.get_nowait()\n message = json.loads(raw_message)\n except queue.Empty:\n pass\n except json.decoder.JSONDecodeError:\n print('current json message:', raw_message)\n else:\n self.__command_handler(message['c'])(message)\n <function token>\n <function token>\n\n def __update_topology(self, message: Dict[str, int]) ->None:\n \"\"\"Update the topology of the connected modules\n\n :param message: Dictionary format message of the module\n :return: None\n \"\"\"\n src_id = message['s']\n byte_data = message['b']\n broadcast_id = 2 ** 16 - 1\n topology_by_id = {}\n message_decoded = bytearray(base64.b64decode(byte_data))\n src_uuid = self.__get_uuid_by_id(src_id)\n topology_by_id['uuid'] = src_uuid\n right_id = message_decoded[1] << 8 | message_decoded[0]\n topology_by_id['r'] = right_id if right_id != broadcast_id else None\n top_id = message_decoded[3] << 8 | message_decoded[2]\n topology_by_id['t'] = top_id if top_id != broadcast_id else None\n left_id = message_decoded[5] << 8 | message_decoded[4]\n topology_by_id['l'] = left_id if left_id != broadcast_id else None\n bottom_id = message_decoded[7] << 8 | message_decoded[6]\n topology_by_id['b'] = bottom_id if bottom_id != broadcast_id else None\n if not self._topology_data.get(src_id):\n self._topology_data[src_id] = topology_by_id\n else:\n for key in self._topology_data[src_id]:\n if not self._topology_data[src_id][key]:\n self._topology_data[src_id][key] = topology_by_id[key]\n <function token>\n <function token>\n\n def __update_warning(self, message: Dict[str, int]) ->None:\n \"\"\"Update the warning message\n\n :param message: Warning message in Dictionary format\n :return: None\n \"\"\"\n warning_data = bytearray(base64.b64decode(message['b']))\n warning_type = warning_data[6]\n if not warning_type:\n return\n module_uuid = warning_data[:6]\n module_uuid_res = 0\n for i, v in enumerate(module_uuid):\n module_uuid_res |= v << 8 * i\n module_id = message['s']\n module_type = self.__get_type_from_uuid(module_uuid_res)\n if module_type == 'Network':\n return\n if warning_type == 1:\n self.firmware_updater.check_to_update_firmware(module_id)\n elif warning_type == 2:\n if self.firmware_updater.update_in_progress:\n self.firmware_updater.add_to_waitlist(module_id, module_type)\n else:\n self.firmware_updater.update_module(module_id, module_type)\n else:\n pass\n\n def __update_modules(self, message: Dict[str, str]) ->None:\n \"\"\" Update module information\n\n :param message: Dictionary format module info\n :type message: Dictionary\n :return: None\n \"\"\"\n curr_time_ms = int(time.time() * 1000)\n module_id = message['s']\n self._module_ids[module_id] = self._module_ids.get(module_id, dict())\n self._module_ids[module_id]['timestamp'] = curr_time_ms\n self._module_ids[module_id]['uuid'] = self._module_ids[module_id].get(\n 'uuid', str())\n message_decoded = bytearray(base64.b64decode(message['b']))\n module_uuid_bytes = message_decoded[:4]\n module_info_bytes = message_decoded[-4:]\n module_info = (module_info_bytes[1] << 8) + module_info_bytes[0]\n module_version_info = module_info_bytes[3] << 8 | module_info_bytes[2]\n version_path = (\n 'https://download.luxrobo.com/modi-skeleton-mobile/version.txt')\n version_info = None\n 
try:\n for line in ur.urlopen(version_path, timeout=1):\n version_info = line.decode('utf-8').lstrip('v')\n version_digits = [int(digit) for digit in version_info.split('.')]\n \"\"\" Version number is formed by concatenating all three version bits\n e.g. v2.2.4 -> 010 00010 00000100 -> 0100 0010 0000 0100\n \"\"\"\n latest_version = version_digits[0] << 13 | version_digits[1\n ] << 8 | version_digits[2]\n except URLError:\n latest_version = module_version_info\n module_category_idx = module_info >> 13\n module_type_idx = module_info >> 4 & 511\n module_category = self.__module_categories[module_category_idx]\n module_type = self.__module_types[module_category][module_type_idx]\n module_uuid = self.__fit_module_uuid(module_info, (\n module_uuid_bytes[3] << 24) + (module_uuid_bytes[2] << 16) + (\n module_uuid_bytes[1] << 8) + module_uuid_bytes[0])\n module_uuid = up(message['b'], (6, 2))[0]\n if (module_category != 'network' and not self.\n firmware_update_message_flag and module_version_info <\n latest_version):\n print('Your MODI module(s) is not up-to-date.')\n print(\n \"You can update your MODI modules by calling 'update_module_firmware()'\"\n )\n self.firmware_update_message_flag = True\n self._module_ids[module_id]['uuid'] = module_uuid\n for module in self._modules:\n if module.uuid == module_uuid and not module.is_connected:\n module.set_connection_state(connection_state=True)\n pnp_off_message = self.__set_module_state(4095, Module.\n State.RUN, Module.State.PNP_OFF)\n self._send_q.put(pnp_off_message)\n if not next((module for module in self._modules if module.uuid ==\n module_uuid), None):\n if module_category != 'network':\n module_template = self.__init_module(module_type)\n module_instance = module_template(module_id, module_uuid,\n self._send_q)\n self.__set_pnp(module_id=module_instance.id,\n module_pnp_state=Module.State.PNP_OFF)\n module_instance.version = module_version_info\n module_instance.is_up_to_date = (module_version_info ==\n latest_version)\n self._modules.append(module_instance)\n print(\n f'{type(module_instance).__name__} ({module_id}) has been connected!'\n )\n if self.__is_all_connected():\n self._init_event.set()\n\n def __is_all_connected(self) ->bool:\n \"\"\" Determine whether all modules are connected\n\n :return: true is all modules are connected\n :rtype: bool\n \"\"\"\n return self._nb_modules == len(self._modules)\n\n def __init_module(self, module_type: str) ->Module:\n \"\"\" Find module type for module initialize\n\n :param module_type: Type of the module in string\n :type module_type: str\n :return: Module corresponding to the type\n :rtype: Module\n \"\"\"\n module = {'button': Button, 'dial': Dial, 'display': Display, 'env':\n Env, 'gyro': Gyro, 'ir': Ir, 'led': Led, 'mic': Mic, 'motor':\n Motor, 'speaker': Speaker, 'ultrasonic': Ultrasonic}.get(\n module_type)\n return module\n\n def __update_property(self, message: Dict[str, int]) ->None:\n \"\"\" Update module property\n\n :param message: Dictionary format message\n :type message: Dictionary\n :return: None\n \"\"\"\n property_number = message['d']\n if property_number == 0 or property_number == 1:\n return\n for module in self._modules:\n if module.id == message['s']:\n message_decoded = bytearray(base64.b64decode(message['b']))\n property_type = module.PropertyType(property_number)\n module.update_property(property_type, round(struct.unpack(\n 'f', bytes(message_decoded[:4]))[0], 2))\n\n def __set_pnp(self, module_id: int, module_pnp_state: IntEnum) ->None:\n \"\"\" Generate module pnp 
on/off command\n\n :param module_id: ID of the target module\n :type module_id: int\n :param module_pnp_state: Pnp state value\n :type module_pnp_state: IntEnum\n :return: None\n \"\"\"\n if module_id is None:\n for curr_module_id in self._module_ids:\n pnp_message = self.__set_module_state(curr_module_id,\n Module.State.RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n else:\n pnp_message = self.__set_module_state(module_id, Module.State.\n RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n <function token>\n\n def __set_module_state(self, destination_id: int, module_state: IntEnum,\n pnp_state: IntEnum) ->str:\n \"\"\" Generate message for set module state and pnp state\n\n :param destination_id: Id to target destination\n :type destination_id: int\n :param module_state: State value of the module\n :type module_state: int\n :param pnp_state: Pnp state value\n :type pnp_state: IntEnum\n :return: json serialized message\n :rtype: str\n \"\"\"\n message = dict()\n message['c'] = 9\n message['s'] = 0\n message['d'] = destination_id\n state_bytes = bytearray(2)\n state_bytes[0] = module_state\n state_bytes[1] = pnp_state\n message['b'] = base64.b64encode(bytes(state_bytes)).decode('utf-8')\n message['l'] = 2\n return json.dumps(message, separators=(',', ':'))\n\n def __init_modules(self) ->None:\n \"\"\" Initialize module on first run\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n reboot_message = self.__set_module_state(BROADCAST_ID, Module.State\n .REBOOT, Module.State.PNP_OFF)\n self._send_q.put(reboot_message)\n pnp_off_message = self.__set_module_state(BROADCAST_ID, Module.\n State.RUN, Module.State.PNP_OFF)\n self._send_q.put(pnp_off_message)\n request_uuid_message = self.__request_uuid(BROADCAST_ID)\n self._send_q.put(request_uuid_message)\n self.request_topology()\n\n def __delay(self) ->None:\n \"\"\" Wait for delay\n\n :return: None\n \"\"\"\n time.sleep(0.5)\n\n def __request_uuid(self, source_id: int, is_network_module: bool=False\n ) ->str:\n \"\"\" Generate broadcasting message for request uuid\n\n :param source_id: Id of the source\n :type source_id: int\n :param is_network_module: true if network module\n :type is_network_module: bool\n :return: json serialized message\n :rtype: str\n \"\"\"\n BROADCAST_ID = 4095\n message = dict()\n message['c'] = 40 if is_network_module else 8\n message['s'] = source_id\n message['d'] = BROADCAST_ID\n id_bytes = bytearray(8)\n id_bytes[0] = 255\n id_bytes[1] = 15\n message['b'] = base64.b64encode(bytes(id_bytes)).decode('utf-8')\n message['l'] = 8\n return json.dumps(message, separators=(',', ':'))\n <function token>\n\n def update_firmware(self) ->None:\n \"\"\" Remove firmware of MODI modules\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n firmware_update_message = self.__set_module_state(BROADCAST_ID,\n Module.State.UPDATE_FIRMWARE, Module.State.PNP_OFF)\n self._send_q.put(firmware_update_message)\n self.__delay()\n\n def update_firmware_ready(self, module_id: int) ->None:\n \"\"\" Check if modules with no firmware are ready to update its firmware\n\n :param module_id: Id of the target module\n :type module_id: int\n :return: None\n \"\"\"\n firmware_update_ready_message = self.__set_module_state(module_id,\n Module.State.UPDATE_FIRMWARE_READY, Module.State.PNP_OFF)\n self._send_q.put(firmware_update_ready_message)\n self.__delay()\n <function token>\n",
"<import token>\n\n\nclass ExeTask:\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n\n def run(self, delay: float):\n \"\"\" Run in ExecutorThread\n\n :param delay: time value to wait in seconds\n :type delay: float\n \"\"\"\n time.sleep(delay)\n try:\n raw_message = self._recv_q.get_nowait()\n message = json.loads(raw_message)\n except queue.Empty:\n pass\n except json.decoder.JSONDecodeError:\n print('current json message:', raw_message)\n else:\n self.__command_handler(message['c'])(message)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __update_warning(self, message: Dict[str, int]) ->None:\n \"\"\"Update the warning message\n\n :param message: Warning message in Dictionary format\n :return: None\n \"\"\"\n warning_data = bytearray(base64.b64decode(message['b']))\n warning_type = warning_data[6]\n if not warning_type:\n return\n module_uuid = warning_data[:6]\n module_uuid_res = 0\n for i, v in enumerate(module_uuid):\n module_uuid_res |= v << 8 * i\n module_id = message['s']\n module_type = self.__get_type_from_uuid(module_uuid_res)\n if module_type == 'Network':\n return\n if warning_type == 1:\n self.firmware_updater.check_to_update_firmware(module_id)\n elif warning_type == 2:\n if self.firmware_updater.update_in_progress:\n self.firmware_updater.add_to_waitlist(module_id, module_type)\n else:\n self.firmware_updater.update_module(module_id, module_type)\n else:\n pass\n\n def __update_modules(self, message: Dict[str, str]) ->None:\n \"\"\" Update module information\n\n :param message: Dictionary format module info\n :type message: Dictionary\n :return: None\n \"\"\"\n curr_time_ms = int(time.time() * 1000)\n module_id = message['s']\n self._module_ids[module_id] = self._module_ids.get(module_id, dict())\n self._module_ids[module_id]['timestamp'] = curr_time_ms\n self._module_ids[module_id]['uuid'] = self._module_ids[module_id].get(\n 'uuid', str())\n message_decoded = bytearray(base64.b64decode(message['b']))\n module_uuid_bytes = message_decoded[:4]\n module_info_bytes = message_decoded[-4:]\n module_info = (module_info_bytes[1] << 8) + module_info_bytes[0]\n module_version_info = module_info_bytes[3] << 8 | module_info_bytes[2]\n version_path = (\n 'https://download.luxrobo.com/modi-skeleton-mobile/version.txt')\n version_info = None\n try:\n for line in ur.urlopen(version_path, timeout=1):\n version_info = line.decode('utf-8').lstrip('v')\n version_digits = [int(digit) for digit in version_info.split('.')]\n \"\"\" Version number is formed by concatenating all three version bits\n e.g. 
v2.2.4 -> 010 00010 00000100 -> 0100 0010 0000 0100\n \"\"\"\n latest_version = version_digits[0] << 13 | version_digits[1\n ] << 8 | version_digits[2]\n except URLError:\n latest_version = module_version_info\n module_category_idx = module_info >> 13\n module_type_idx = module_info >> 4 & 511\n module_category = self.__module_categories[module_category_idx]\n module_type = self.__module_types[module_category][module_type_idx]\n module_uuid = self.__fit_module_uuid(module_info, (\n module_uuid_bytes[3] << 24) + (module_uuid_bytes[2] << 16) + (\n module_uuid_bytes[1] << 8) + module_uuid_bytes[0])\n module_uuid = up(message['b'], (6, 2))[0]\n if (module_category != 'network' and not self.\n firmware_update_message_flag and module_version_info <\n latest_version):\n print('Your MODI module(s) is not up-to-date.')\n print(\n \"You can update your MODI modules by calling 'update_module_firmware()'\"\n )\n self.firmware_update_message_flag = True\n self._module_ids[module_id]['uuid'] = module_uuid\n for module in self._modules:\n if module.uuid == module_uuid and not module.is_connected:\n module.set_connection_state(connection_state=True)\n pnp_off_message = self.__set_module_state(4095, Module.\n State.RUN, Module.State.PNP_OFF)\n self._send_q.put(pnp_off_message)\n if not next((module for module in self._modules if module.uuid ==\n module_uuid), None):\n if module_category != 'network':\n module_template = self.__init_module(module_type)\n module_instance = module_template(module_id, module_uuid,\n self._send_q)\n self.__set_pnp(module_id=module_instance.id,\n module_pnp_state=Module.State.PNP_OFF)\n module_instance.version = module_version_info\n module_instance.is_up_to_date = (module_version_info ==\n latest_version)\n self._modules.append(module_instance)\n print(\n f'{type(module_instance).__name__} ({module_id}) has been connected!'\n )\n if self.__is_all_connected():\n self._init_event.set()\n\n def __is_all_connected(self) ->bool:\n \"\"\" Determine whether all modules are connected\n\n :return: true is all modules are connected\n :rtype: bool\n \"\"\"\n return self._nb_modules == len(self._modules)\n\n def __init_module(self, module_type: str) ->Module:\n \"\"\" Find module type for module initialize\n\n :param module_type: Type of the module in string\n :type module_type: str\n :return: Module corresponding to the type\n :rtype: Module\n \"\"\"\n module = {'button': Button, 'dial': Dial, 'display': Display, 'env':\n Env, 'gyro': Gyro, 'ir': Ir, 'led': Led, 'mic': Mic, 'motor':\n Motor, 'speaker': Speaker, 'ultrasonic': Ultrasonic}.get(\n module_type)\n return module\n\n def __update_property(self, message: Dict[str, int]) ->None:\n \"\"\" Update module property\n\n :param message: Dictionary format message\n :type message: Dictionary\n :return: None\n \"\"\"\n property_number = message['d']\n if property_number == 0 or property_number == 1:\n return\n for module in self._modules:\n if module.id == message['s']:\n message_decoded = bytearray(base64.b64decode(message['b']))\n property_type = module.PropertyType(property_number)\n module.update_property(property_type, round(struct.unpack(\n 'f', bytes(message_decoded[:4]))[0], 2))\n\n def __set_pnp(self, module_id: int, module_pnp_state: IntEnum) ->None:\n \"\"\" Generate module pnp on/off command\n\n :param module_id: ID of the target module\n :type module_id: int\n :param module_pnp_state: Pnp state value\n :type module_pnp_state: IntEnum\n :return: None\n \"\"\"\n if module_id is None:\n for curr_module_id in self._module_ids:\n 
pnp_message = self.__set_module_state(curr_module_id,\n Module.State.RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n else:\n pnp_message = self.__set_module_state(module_id, Module.State.\n RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n <function token>\n\n def __set_module_state(self, destination_id: int, module_state: IntEnum,\n pnp_state: IntEnum) ->str:\n \"\"\" Generate message for set module state and pnp state\n\n :param destination_id: Id to target destination\n :type destination_id: int\n :param module_state: State value of the module\n :type module_state: int\n :param pnp_state: Pnp state value\n :type pnp_state: IntEnum\n :return: json serialized message\n :rtype: str\n \"\"\"\n message = dict()\n message['c'] = 9\n message['s'] = 0\n message['d'] = destination_id\n state_bytes = bytearray(2)\n state_bytes[0] = module_state\n state_bytes[1] = pnp_state\n message['b'] = base64.b64encode(bytes(state_bytes)).decode('utf-8')\n message['l'] = 2\n return json.dumps(message, separators=(',', ':'))\n\n def __init_modules(self) ->None:\n \"\"\" Initialize module on first run\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n reboot_message = self.__set_module_state(BROADCAST_ID, Module.State\n .REBOOT, Module.State.PNP_OFF)\n self._send_q.put(reboot_message)\n pnp_off_message = self.__set_module_state(BROADCAST_ID, Module.\n State.RUN, Module.State.PNP_OFF)\n self._send_q.put(pnp_off_message)\n request_uuid_message = self.__request_uuid(BROADCAST_ID)\n self._send_q.put(request_uuid_message)\n self.request_topology()\n\n def __delay(self) ->None:\n \"\"\" Wait for delay\n\n :return: None\n \"\"\"\n time.sleep(0.5)\n\n def __request_uuid(self, source_id: int, is_network_module: bool=False\n ) ->str:\n \"\"\" Generate broadcasting message for request uuid\n\n :param source_id: Id of the source\n :type source_id: int\n :param is_network_module: true if network module\n :type is_network_module: bool\n :return: json serialized message\n :rtype: str\n \"\"\"\n BROADCAST_ID = 4095\n message = dict()\n message['c'] = 40 if is_network_module else 8\n message['s'] = source_id\n message['d'] = BROADCAST_ID\n id_bytes = bytearray(8)\n id_bytes[0] = 255\n id_bytes[1] = 15\n message['b'] = base64.b64encode(bytes(id_bytes)).decode('utf-8')\n message['l'] = 8\n return json.dumps(message, separators=(',', ':'))\n <function token>\n\n def update_firmware(self) ->None:\n \"\"\" Remove firmware of MODI modules\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n firmware_update_message = self.__set_module_state(BROADCAST_ID,\n Module.State.UPDATE_FIRMWARE, Module.State.PNP_OFF)\n self._send_q.put(firmware_update_message)\n self.__delay()\n\n def update_firmware_ready(self, module_id: int) ->None:\n \"\"\" Check if modules with no firmware are ready to update its firmware\n\n :param module_id: Id of the target module\n :type module_id: int\n :return: None\n \"\"\"\n firmware_update_ready_message = self.__set_module_state(module_id,\n Module.State.UPDATE_FIRMWARE_READY, Module.State.PNP_OFF)\n self._send_q.put(firmware_update_ready_message)\n self.__delay()\n <function token>\n",
"<import token>\n\n\nclass ExeTask:\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n\n def run(self, delay: float):\n \"\"\" Run in ExecutorThread\n\n :param delay: time value to wait in seconds\n :type delay: float\n \"\"\"\n time.sleep(delay)\n try:\n raw_message = self._recv_q.get_nowait()\n message = json.loads(raw_message)\n except queue.Empty:\n pass\n except json.decoder.JSONDecodeError:\n print('current json message:', raw_message)\n else:\n self.__command_handler(message['c'])(message)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __update_warning(self, message: Dict[str, int]) ->None:\n \"\"\"Update the warning message\n\n :param message: Warning message in Dictionary format\n :return: None\n \"\"\"\n warning_data = bytearray(base64.b64decode(message['b']))\n warning_type = warning_data[6]\n if not warning_type:\n return\n module_uuid = warning_data[:6]\n module_uuid_res = 0\n for i, v in enumerate(module_uuid):\n module_uuid_res |= v << 8 * i\n module_id = message['s']\n module_type = self.__get_type_from_uuid(module_uuid_res)\n if module_type == 'Network':\n return\n if warning_type == 1:\n self.firmware_updater.check_to_update_firmware(module_id)\n elif warning_type == 2:\n if self.firmware_updater.update_in_progress:\n self.firmware_updater.add_to_waitlist(module_id, module_type)\n else:\n self.firmware_updater.update_module(module_id, module_type)\n else:\n pass\n\n def __update_modules(self, message: Dict[str, str]) ->None:\n \"\"\" Update module information\n\n :param message: Dictionary format module info\n :type message: Dictionary\n :return: None\n \"\"\"\n curr_time_ms = int(time.time() * 1000)\n module_id = message['s']\n self._module_ids[module_id] = self._module_ids.get(module_id, dict())\n self._module_ids[module_id]['timestamp'] = curr_time_ms\n self._module_ids[module_id]['uuid'] = self._module_ids[module_id].get(\n 'uuid', str())\n message_decoded = bytearray(base64.b64decode(message['b']))\n module_uuid_bytes = message_decoded[:4]\n module_info_bytes = message_decoded[-4:]\n module_info = (module_info_bytes[1] << 8) + module_info_bytes[0]\n module_version_info = module_info_bytes[3] << 8 | module_info_bytes[2]\n version_path = (\n 'https://download.luxrobo.com/modi-skeleton-mobile/version.txt')\n version_info = None\n try:\n for line in ur.urlopen(version_path, timeout=1):\n version_info = line.decode('utf-8').lstrip('v')\n version_digits = [int(digit) for digit in version_info.split('.')]\n \"\"\" Version number is formed by concatenating all three version bits\n e.g. 
v2.2.4 -> 010 00010 00000100 -> 0100 0010 0000 0100\n \"\"\"\n latest_version = version_digits[0] << 13 | version_digits[1\n ] << 8 | version_digits[2]\n except URLError:\n latest_version = module_version_info\n module_category_idx = module_info >> 13\n module_type_idx = module_info >> 4 & 511\n module_category = self.__module_categories[module_category_idx]\n module_type = self.__module_types[module_category][module_type_idx]\n module_uuid = self.__fit_module_uuid(module_info, (\n module_uuid_bytes[3] << 24) + (module_uuid_bytes[2] << 16) + (\n module_uuid_bytes[1] << 8) + module_uuid_bytes[0])\n module_uuid = up(message['b'], (6, 2))[0]\n if (module_category != 'network' and not self.\n firmware_update_message_flag and module_version_info <\n latest_version):\n print('Your MODI module(s) is not up-to-date.')\n print(\n \"You can update your MODI modules by calling 'update_module_firmware()'\"\n )\n self.firmware_update_message_flag = True\n self._module_ids[module_id]['uuid'] = module_uuid\n for module in self._modules:\n if module.uuid == module_uuid and not module.is_connected:\n module.set_connection_state(connection_state=True)\n pnp_off_message = self.__set_module_state(4095, Module.\n State.RUN, Module.State.PNP_OFF)\n self._send_q.put(pnp_off_message)\n if not next((module for module in self._modules if module.uuid ==\n module_uuid), None):\n if module_category != 'network':\n module_template = self.__init_module(module_type)\n module_instance = module_template(module_id, module_uuid,\n self._send_q)\n self.__set_pnp(module_id=module_instance.id,\n module_pnp_state=Module.State.PNP_OFF)\n module_instance.version = module_version_info\n module_instance.is_up_to_date = (module_version_info ==\n latest_version)\n self._modules.append(module_instance)\n print(\n f'{type(module_instance).__name__} ({module_id}) has been connected!'\n )\n if self.__is_all_connected():\n self._init_event.set()\n\n def __is_all_connected(self) ->bool:\n \"\"\" Determine whether all modules are connected\n\n :return: true is all modules are connected\n :rtype: bool\n \"\"\"\n return self._nb_modules == len(self._modules)\n\n def __init_module(self, module_type: str) ->Module:\n \"\"\" Find module type for module initialize\n\n :param module_type: Type of the module in string\n :type module_type: str\n :return: Module corresponding to the type\n :rtype: Module\n \"\"\"\n module = {'button': Button, 'dial': Dial, 'display': Display, 'env':\n Env, 'gyro': Gyro, 'ir': Ir, 'led': Led, 'mic': Mic, 'motor':\n Motor, 'speaker': Speaker, 'ultrasonic': Ultrasonic}.get(\n module_type)\n return module\n\n def __update_property(self, message: Dict[str, int]) ->None:\n \"\"\" Update module property\n\n :param message: Dictionary format message\n :type message: Dictionary\n :return: None\n \"\"\"\n property_number = message['d']\n if property_number == 0 or property_number == 1:\n return\n for module in self._modules:\n if module.id == message['s']:\n message_decoded = bytearray(base64.b64decode(message['b']))\n property_type = module.PropertyType(property_number)\n module.update_property(property_type, round(struct.unpack(\n 'f', bytes(message_decoded[:4]))[0], 2))\n\n def __set_pnp(self, module_id: int, module_pnp_state: IntEnum) ->None:\n \"\"\" Generate module pnp on/off command\n\n :param module_id: ID of the target module\n :type module_id: int\n :param module_pnp_state: Pnp state value\n :type module_pnp_state: IntEnum\n :return: None\n \"\"\"\n if module_id is None:\n for curr_module_id in self._module_ids:\n 
pnp_message = self.__set_module_state(curr_module_id,\n Module.State.RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n else:\n pnp_message = self.__set_module_state(module_id, Module.State.\n RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n <function token>\n\n def __set_module_state(self, destination_id: int, module_state: IntEnum,\n pnp_state: IntEnum) ->str:\n \"\"\" Generate message for set module state and pnp state\n\n :param destination_id: Id to target destination\n :type destination_id: int\n :param module_state: State value of the module\n :type module_state: int\n :param pnp_state: Pnp state value\n :type pnp_state: IntEnum\n :return: json serialized message\n :rtype: str\n \"\"\"\n message = dict()\n message['c'] = 9\n message['s'] = 0\n message['d'] = destination_id\n state_bytes = bytearray(2)\n state_bytes[0] = module_state\n state_bytes[1] = pnp_state\n message['b'] = base64.b64encode(bytes(state_bytes)).decode('utf-8')\n message['l'] = 2\n return json.dumps(message, separators=(',', ':'))\n\n def __init_modules(self) ->None:\n \"\"\" Initialize module on first run\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n reboot_message = self.__set_module_state(BROADCAST_ID, Module.State\n .REBOOT, Module.State.PNP_OFF)\n self._send_q.put(reboot_message)\n pnp_off_message = self.__set_module_state(BROADCAST_ID, Module.\n State.RUN, Module.State.PNP_OFF)\n self._send_q.put(pnp_off_message)\n request_uuid_message = self.__request_uuid(BROADCAST_ID)\n self._send_q.put(request_uuid_message)\n self.request_topology()\n\n def __delay(self) ->None:\n \"\"\" Wait for delay\n\n :return: None\n \"\"\"\n time.sleep(0.5)\n <function token>\n <function token>\n\n def update_firmware(self) ->None:\n \"\"\" Remove firmware of MODI modules\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n firmware_update_message = self.__set_module_state(BROADCAST_ID,\n Module.State.UPDATE_FIRMWARE, Module.State.PNP_OFF)\n self._send_q.put(firmware_update_message)\n self.__delay()\n\n def update_firmware_ready(self, module_id: int) ->None:\n \"\"\" Check if modules with no firmware are ready to update its firmware\n\n :param module_id: Id of the target module\n :type module_id: int\n :return: None\n \"\"\"\n firmware_update_ready_message = self.__set_module_state(module_id,\n Module.State.UPDATE_FIRMWARE_READY, Module.State.PNP_OFF)\n self._send_q.put(firmware_update_ready_message)\n self.__delay()\n <function token>\n",
"<import token>\n\n\nclass ExeTask:\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n\n def run(self, delay: float):\n \"\"\" Run in ExecutorThread\n\n :param delay: time value to wait in seconds\n :type delay: float\n \"\"\"\n time.sleep(delay)\n try:\n raw_message = self._recv_q.get_nowait()\n message = json.loads(raw_message)\n except queue.Empty:\n pass\n except json.decoder.JSONDecodeError:\n print('current json message:', raw_message)\n else:\n self.__command_handler(message['c'])(message)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __update_warning(self, message: Dict[str, int]) ->None:\n \"\"\"Update the warning message\n\n :param message: Warning message in Dictionary format\n :return: None\n \"\"\"\n warning_data = bytearray(base64.b64decode(message['b']))\n warning_type = warning_data[6]\n if not warning_type:\n return\n module_uuid = warning_data[:6]\n module_uuid_res = 0\n for i, v in enumerate(module_uuid):\n module_uuid_res |= v << 8 * i\n module_id = message['s']\n module_type = self.__get_type_from_uuid(module_uuid_res)\n if module_type == 'Network':\n return\n if warning_type == 1:\n self.firmware_updater.check_to_update_firmware(module_id)\n elif warning_type == 2:\n if self.firmware_updater.update_in_progress:\n self.firmware_updater.add_to_waitlist(module_id, module_type)\n else:\n self.firmware_updater.update_module(module_id, module_type)\n else:\n pass\n\n def __update_modules(self, message: Dict[str, str]) ->None:\n \"\"\" Update module information\n\n :param message: Dictionary format module info\n :type message: Dictionary\n :return: None\n \"\"\"\n curr_time_ms = int(time.time() * 1000)\n module_id = message['s']\n self._module_ids[module_id] = self._module_ids.get(module_id, dict())\n self._module_ids[module_id]['timestamp'] = curr_time_ms\n self._module_ids[module_id]['uuid'] = self._module_ids[module_id].get(\n 'uuid', str())\n message_decoded = bytearray(base64.b64decode(message['b']))\n module_uuid_bytes = message_decoded[:4]\n module_info_bytes = message_decoded[-4:]\n module_info = (module_info_bytes[1] << 8) + module_info_bytes[0]\n module_version_info = module_info_bytes[3] << 8 | module_info_bytes[2]\n version_path = (\n 'https://download.luxrobo.com/modi-skeleton-mobile/version.txt')\n version_info = None\n try:\n for line in ur.urlopen(version_path, timeout=1):\n version_info = line.decode('utf-8').lstrip('v')\n version_digits = [int(digit) for digit in version_info.split('.')]\n \"\"\" Version number is formed by concatenating all three version bits\n e.g. 
v2.2.4 -> 010 00010 00000100 -> 0100 0010 0000 0100\n \"\"\"\n latest_version = version_digits[0] << 13 | version_digits[1\n ] << 8 | version_digits[2]\n except URLError:\n latest_version = module_version_info\n module_category_idx = module_info >> 13\n module_type_idx = module_info >> 4 & 511\n module_category = self.__module_categories[module_category_idx]\n module_type = self.__module_types[module_category][module_type_idx]\n module_uuid = self.__fit_module_uuid(module_info, (\n module_uuid_bytes[3] << 24) + (module_uuid_bytes[2] << 16) + (\n module_uuid_bytes[1] << 8) + module_uuid_bytes[0])\n module_uuid = up(message['b'], (6, 2))[0]\n if (module_category != 'network' and not self.\n firmware_update_message_flag and module_version_info <\n latest_version):\n print('Your MODI module(s) is not up-to-date.')\n print(\n \"You can update your MODI modules by calling 'update_module_firmware()'\"\n )\n self.firmware_update_message_flag = True\n self._module_ids[module_id]['uuid'] = module_uuid\n for module in self._modules:\n if module.uuid == module_uuid and not module.is_connected:\n module.set_connection_state(connection_state=True)\n pnp_off_message = self.__set_module_state(4095, Module.\n State.RUN, Module.State.PNP_OFF)\n self._send_q.put(pnp_off_message)\n if not next((module for module in self._modules if module.uuid ==\n module_uuid), None):\n if module_category != 'network':\n module_template = self.__init_module(module_type)\n module_instance = module_template(module_id, module_uuid,\n self._send_q)\n self.__set_pnp(module_id=module_instance.id,\n module_pnp_state=Module.State.PNP_OFF)\n module_instance.version = module_version_info\n module_instance.is_up_to_date = (module_version_info ==\n latest_version)\n self._modules.append(module_instance)\n print(\n f'{type(module_instance).__name__} ({module_id}) has been connected!'\n )\n if self.__is_all_connected():\n self._init_event.set()\n\n def __is_all_connected(self) ->bool:\n \"\"\" Determine whether all modules are connected\n\n :return: true is all modules are connected\n :rtype: bool\n \"\"\"\n return self._nb_modules == len(self._modules)\n\n def __init_module(self, module_type: str) ->Module:\n \"\"\" Find module type for module initialize\n\n :param module_type: Type of the module in string\n :type module_type: str\n :return: Module corresponding to the type\n :rtype: Module\n \"\"\"\n module = {'button': Button, 'dial': Dial, 'display': Display, 'env':\n Env, 'gyro': Gyro, 'ir': Ir, 'led': Led, 'mic': Mic, 'motor':\n Motor, 'speaker': Speaker, 'ultrasonic': Ultrasonic}.get(\n module_type)\n return module\n\n def __update_property(self, message: Dict[str, int]) ->None:\n \"\"\" Update module property\n\n :param message: Dictionary format message\n :type message: Dictionary\n :return: None\n \"\"\"\n property_number = message['d']\n if property_number == 0 or property_number == 1:\n return\n for module in self._modules:\n if module.id == message['s']:\n message_decoded = bytearray(base64.b64decode(message['b']))\n property_type = module.PropertyType(property_number)\n module.update_property(property_type, round(struct.unpack(\n 'f', bytes(message_decoded[:4]))[0], 2))\n\n def __set_pnp(self, module_id: int, module_pnp_state: IntEnum) ->None:\n \"\"\" Generate module pnp on/off command\n\n :param module_id: ID of the target module\n :type module_id: int\n :param module_pnp_state: Pnp state value\n :type module_pnp_state: IntEnum\n :return: None\n \"\"\"\n if module_id is None:\n for curr_module_id in self._module_ids:\n 
pnp_message = self.__set_module_state(curr_module_id,\n Module.State.RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n else:\n pnp_message = self.__set_module_state(module_id, Module.State.\n RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n <function token>\n\n def __set_module_state(self, destination_id: int, module_state: IntEnum,\n pnp_state: IntEnum) ->str:\n \"\"\" Generate message for set module state and pnp state\n\n :param destination_id: Id to target destination\n :type destination_id: int\n :param module_state: State value of the module\n :type module_state: int\n :param pnp_state: Pnp state value\n :type pnp_state: IntEnum\n :return: json serialized message\n :rtype: str\n \"\"\"\n message = dict()\n message['c'] = 9\n message['s'] = 0\n message['d'] = destination_id\n state_bytes = bytearray(2)\n state_bytes[0] = module_state\n state_bytes[1] = pnp_state\n message['b'] = base64.b64encode(bytes(state_bytes)).decode('utf-8')\n message['l'] = 2\n return json.dumps(message, separators=(',', ':'))\n\n def __init_modules(self) ->None:\n \"\"\" Initialize module on first run\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n reboot_message = self.__set_module_state(BROADCAST_ID, Module.State\n .REBOOT, Module.State.PNP_OFF)\n self._send_q.put(reboot_message)\n pnp_off_message = self.__set_module_state(BROADCAST_ID, Module.\n State.RUN, Module.State.PNP_OFF)\n self._send_q.put(pnp_off_message)\n request_uuid_message = self.__request_uuid(BROADCAST_ID)\n self._send_q.put(request_uuid_message)\n self.request_topology()\n <function token>\n <function token>\n <function token>\n\n def update_firmware(self) ->None:\n \"\"\" Remove firmware of MODI modules\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n firmware_update_message = self.__set_module_state(BROADCAST_ID,\n Module.State.UPDATE_FIRMWARE, Module.State.PNP_OFF)\n self._send_q.put(firmware_update_message)\n self.__delay()\n\n def update_firmware_ready(self, module_id: int) ->None:\n \"\"\" Check if modules with no firmware are ready to update its firmware\n\n :param module_id: Id of the target module\n :type module_id: int\n :return: None\n \"\"\"\n firmware_update_ready_message = self.__set_module_state(module_id,\n Module.State.UPDATE_FIRMWARE_READY, Module.State.PNP_OFF)\n self._send_q.put(firmware_update_ready_message)\n self.__delay()\n <function token>\n",
"<import token>\n\n\nclass ExeTask:\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n\n def run(self, delay: float):\n \"\"\" Run in ExecutorThread\n\n :param delay: time value to wait in seconds\n :type delay: float\n \"\"\"\n time.sleep(delay)\n try:\n raw_message = self._recv_q.get_nowait()\n message = json.loads(raw_message)\n except queue.Empty:\n pass\n except json.decoder.JSONDecodeError:\n print('current json message:', raw_message)\n else:\n self.__command_handler(message['c'])(message)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __update_warning(self, message: Dict[str, int]) ->None:\n \"\"\"Update the warning message\n\n :param message: Warning message in Dictionary format\n :return: None\n \"\"\"\n warning_data = bytearray(base64.b64decode(message['b']))\n warning_type = warning_data[6]\n if not warning_type:\n return\n module_uuid = warning_data[:6]\n module_uuid_res = 0\n for i, v in enumerate(module_uuid):\n module_uuid_res |= v << 8 * i\n module_id = message['s']\n module_type = self.__get_type_from_uuid(module_uuid_res)\n if module_type == 'Network':\n return\n if warning_type == 1:\n self.firmware_updater.check_to_update_firmware(module_id)\n elif warning_type == 2:\n if self.firmware_updater.update_in_progress:\n self.firmware_updater.add_to_waitlist(module_id, module_type)\n else:\n self.firmware_updater.update_module(module_id, module_type)\n else:\n pass\n\n def __update_modules(self, message: Dict[str, str]) ->None:\n \"\"\" Update module information\n\n :param message: Dictionary format module info\n :type message: Dictionary\n :return: None\n \"\"\"\n curr_time_ms = int(time.time() * 1000)\n module_id = message['s']\n self._module_ids[module_id] = self._module_ids.get(module_id, dict())\n self._module_ids[module_id]['timestamp'] = curr_time_ms\n self._module_ids[module_id]['uuid'] = self._module_ids[module_id].get(\n 'uuid', str())\n message_decoded = bytearray(base64.b64decode(message['b']))\n module_uuid_bytes = message_decoded[:4]\n module_info_bytes = message_decoded[-4:]\n module_info = (module_info_bytes[1] << 8) + module_info_bytes[0]\n module_version_info = module_info_bytes[3] << 8 | module_info_bytes[2]\n version_path = (\n 'https://download.luxrobo.com/modi-skeleton-mobile/version.txt')\n version_info = None\n try:\n for line in ur.urlopen(version_path, timeout=1):\n version_info = line.decode('utf-8').lstrip('v')\n version_digits = [int(digit) for digit in version_info.split('.')]\n \"\"\" Version number is formed by concatenating all three version bits\n e.g. 
v2.2.4 -> 010 00010 00000100 -> 0100 0010 0000 0100\n \"\"\"\n latest_version = version_digits[0] << 13 | version_digits[1\n ] << 8 | version_digits[2]\n except URLError:\n latest_version = module_version_info\n module_category_idx = module_info >> 13\n module_type_idx = module_info >> 4 & 511\n module_category = self.__module_categories[module_category_idx]\n module_type = self.__module_types[module_category][module_type_idx]\n module_uuid = self.__fit_module_uuid(module_info, (\n module_uuid_bytes[3] << 24) + (module_uuid_bytes[2] << 16) + (\n module_uuid_bytes[1] << 8) + module_uuid_bytes[0])\n module_uuid = up(message['b'], (6, 2))[0]\n if (module_category != 'network' and not self.\n firmware_update_message_flag and module_version_info <\n latest_version):\n print('Your MODI module(s) is not up-to-date.')\n print(\n \"You can update your MODI modules by calling 'update_module_firmware()'\"\n )\n self.firmware_update_message_flag = True\n self._module_ids[module_id]['uuid'] = module_uuid\n for module in self._modules:\n if module.uuid == module_uuid and not module.is_connected:\n module.set_connection_state(connection_state=True)\n pnp_off_message = self.__set_module_state(4095, Module.\n State.RUN, Module.State.PNP_OFF)\n self._send_q.put(pnp_off_message)\n if not next((module for module in self._modules if module.uuid ==\n module_uuid), None):\n if module_category != 'network':\n module_template = self.__init_module(module_type)\n module_instance = module_template(module_id, module_uuid,\n self._send_q)\n self.__set_pnp(module_id=module_instance.id,\n module_pnp_state=Module.State.PNP_OFF)\n module_instance.version = module_version_info\n module_instance.is_up_to_date = (module_version_info ==\n latest_version)\n self._modules.append(module_instance)\n print(\n f'{type(module_instance).__name__} ({module_id}) has been connected!'\n )\n if self.__is_all_connected():\n self._init_event.set()\n\n def __is_all_connected(self) ->bool:\n \"\"\" Determine whether all modules are connected\n\n :return: true is all modules are connected\n :rtype: bool\n \"\"\"\n return self._nb_modules == len(self._modules)\n\n def __init_module(self, module_type: str) ->Module:\n \"\"\" Find module type for module initialize\n\n :param module_type: Type of the module in string\n :type module_type: str\n :return: Module corresponding to the type\n :rtype: Module\n \"\"\"\n module = {'button': Button, 'dial': Dial, 'display': Display, 'env':\n Env, 'gyro': Gyro, 'ir': Ir, 'led': Led, 'mic': Mic, 'motor':\n Motor, 'speaker': Speaker, 'ultrasonic': Ultrasonic}.get(\n module_type)\n return module\n\n def __update_property(self, message: Dict[str, int]) ->None:\n \"\"\" Update module property\n\n :param message: Dictionary format message\n :type message: Dictionary\n :return: None\n \"\"\"\n property_number = message['d']\n if property_number == 0 or property_number == 1:\n return\n for module in self._modules:\n if module.id == message['s']:\n message_decoded = bytearray(base64.b64decode(message['b']))\n property_type = module.PropertyType(property_number)\n module.update_property(property_type, round(struct.unpack(\n 'f', bytes(message_decoded[:4]))[0], 2))\n\n def __set_pnp(self, module_id: int, module_pnp_state: IntEnum) ->None:\n \"\"\" Generate module pnp on/off command\n\n :param module_id: ID of the target module\n :type module_id: int\n :param module_pnp_state: Pnp state value\n :type module_pnp_state: IntEnum\n :return: None\n \"\"\"\n if module_id is None:\n for curr_module_id in self._module_ids:\n 
pnp_message = self.__set_module_state(curr_module_id,\n Module.State.RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n else:\n pnp_message = self.__set_module_state(module_id, Module.State.\n RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n <function token>\n\n def __set_module_state(self, destination_id: int, module_state: IntEnum,\n pnp_state: IntEnum) ->str:\n \"\"\" Generate message for set module state and pnp state\n\n :param destination_id: Id to target destination\n :type destination_id: int\n :param module_state: State value of the module\n :type module_state: int\n :param pnp_state: Pnp state value\n :type pnp_state: IntEnum\n :return: json serialized message\n :rtype: str\n \"\"\"\n message = dict()\n message['c'] = 9\n message['s'] = 0\n message['d'] = destination_id\n state_bytes = bytearray(2)\n state_bytes[0] = module_state\n state_bytes[1] = pnp_state\n message['b'] = base64.b64encode(bytes(state_bytes)).decode('utf-8')\n message['l'] = 2\n return json.dumps(message, separators=(',', ':'))\n\n def __init_modules(self) ->None:\n \"\"\" Initialize module on first run\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n reboot_message = self.__set_module_state(BROADCAST_ID, Module.State\n .REBOOT, Module.State.PNP_OFF)\n self._send_q.put(reboot_message)\n pnp_off_message = self.__set_module_state(BROADCAST_ID, Module.\n State.RUN, Module.State.PNP_OFF)\n self._send_q.put(pnp_off_message)\n request_uuid_message = self.__request_uuid(BROADCAST_ID)\n self._send_q.put(request_uuid_message)\n self.request_topology()\n <function token>\n <function token>\n <function token>\n\n def update_firmware(self) ->None:\n \"\"\" Remove firmware of MODI modules\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n firmware_update_message = self.__set_module_state(BROADCAST_ID,\n Module.State.UPDATE_FIRMWARE, Module.State.PNP_OFF)\n self._send_q.put(firmware_update_message)\n self.__delay()\n <function token>\n <function token>\n",
"<import token>\n\n\nclass ExeTask:\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n\n def run(self, delay: float):\n \"\"\" Run in ExecutorThread\n\n :param delay: time value to wait in seconds\n :type delay: float\n \"\"\"\n time.sleep(delay)\n try:\n raw_message = self._recv_q.get_nowait()\n message = json.loads(raw_message)\n except queue.Empty:\n pass\n except json.decoder.JSONDecodeError:\n print('current json message:', raw_message)\n else:\n self.__command_handler(message['c'])(message)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __update_modules(self, message: Dict[str, str]) ->None:\n \"\"\" Update module information\n\n :param message: Dictionary format module info\n :type message: Dictionary\n :return: None\n \"\"\"\n curr_time_ms = int(time.time() * 1000)\n module_id = message['s']\n self._module_ids[module_id] = self._module_ids.get(module_id, dict())\n self._module_ids[module_id]['timestamp'] = curr_time_ms\n self._module_ids[module_id]['uuid'] = self._module_ids[module_id].get(\n 'uuid', str())\n message_decoded = bytearray(base64.b64decode(message['b']))\n module_uuid_bytes = message_decoded[:4]\n module_info_bytes = message_decoded[-4:]\n module_info = (module_info_bytes[1] << 8) + module_info_bytes[0]\n module_version_info = module_info_bytes[3] << 8 | module_info_bytes[2]\n version_path = (\n 'https://download.luxrobo.com/modi-skeleton-mobile/version.txt')\n version_info = None\n try:\n for line in ur.urlopen(version_path, timeout=1):\n version_info = line.decode('utf-8').lstrip('v')\n version_digits = [int(digit) for digit in version_info.split('.')]\n \"\"\" Version number is formed by concatenating all three version bits\n e.g. 
v2.2.4 -> 010 00010 00000100 -> 0100 0010 0000 0100\n \"\"\"\n latest_version = version_digits[0] << 13 | version_digits[1\n ] << 8 | version_digits[2]\n except URLError:\n latest_version = module_version_info\n module_category_idx = module_info >> 13\n module_type_idx = module_info >> 4 & 511\n module_category = self.__module_categories[module_category_idx]\n module_type = self.__module_types[module_category][module_type_idx]\n module_uuid = self.__fit_module_uuid(module_info, (\n module_uuid_bytes[3] << 24) + (module_uuid_bytes[2] << 16) + (\n module_uuid_bytes[1] << 8) + module_uuid_bytes[0])\n module_uuid = up(message['b'], (6, 2))[0]\n if (module_category != 'network' and not self.\n firmware_update_message_flag and module_version_info <\n latest_version):\n print('Your MODI module(s) is not up-to-date.')\n print(\n \"You can update your MODI modules by calling 'update_module_firmware()'\"\n )\n self.firmware_update_message_flag = True\n self._module_ids[module_id]['uuid'] = module_uuid\n for module in self._modules:\n if module.uuid == module_uuid and not module.is_connected:\n module.set_connection_state(connection_state=True)\n pnp_off_message = self.__set_module_state(4095, Module.\n State.RUN, Module.State.PNP_OFF)\n self._send_q.put(pnp_off_message)\n if not next((module for module in self._modules if module.uuid ==\n module_uuid), None):\n if module_category != 'network':\n module_template = self.__init_module(module_type)\n module_instance = module_template(module_id, module_uuid,\n self._send_q)\n self.__set_pnp(module_id=module_instance.id,\n module_pnp_state=Module.State.PNP_OFF)\n module_instance.version = module_version_info\n module_instance.is_up_to_date = (module_version_info ==\n latest_version)\n self._modules.append(module_instance)\n print(\n f'{type(module_instance).__name__} ({module_id}) has been connected!'\n )\n if self.__is_all_connected():\n self._init_event.set()\n\n def __is_all_connected(self) ->bool:\n \"\"\" Determine whether all modules are connected\n\n :return: true is all modules are connected\n :rtype: bool\n \"\"\"\n return self._nb_modules == len(self._modules)\n\n def __init_module(self, module_type: str) ->Module:\n \"\"\" Find module type for module initialize\n\n :param module_type: Type of the module in string\n :type module_type: str\n :return: Module corresponding to the type\n :rtype: Module\n \"\"\"\n module = {'button': Button, 'dial': Dial, 'display': Display, 'env':\n Env, 'gyro': Gyro, 'ir': Ir, 'led': Led, 'mic': Mic, 'motor':\n Motor, 'speaker': Speaker, 'ultrasonic': Ultrasonic}.get(\n module_type)\n return module\n\n def __update_property(self, message: Dict[str, int]) ->None:\n \"\"\" Update module property\n\n :param message: Dictionary format message\n :type message: Dictionary\n :return: None\n \"\"\"\n property_number = message['d']\n if property_number == 0 or property_number == 1:\n return\n for module in self._modules:\n if module.id == message['s']:\n message_decoded = bytearray(base64.b64decode(message['b']))\n property_type = module.PropertyType(property_number)\n module.update_property(property_type, round(struct.unpack(\n 'f', bytes(message_decoded[:4]))[0], 2))\n\n def __set_pnp(self, module_id: int, module_pnp_state: IntEnum) ->None:\n \"\"\" Generate module pnp on/off command\n\n :param module_id: ID of the target module\n :type module_id: int\n :param module_pnp_state: Pnp state value\n :type module_pnp_state: IntEnum\n :return: None\n \"\"\"\n if module_id is None:\n for curr_module_id in self._module_ids:\n 
pnp_message = self.__set_module_state(curr_module_id,\n Module.State.RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n else:\n pnp_message = self.__set_module_state(module_id, Module.State.\n RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n <function token>\n\n def __set_module_state(self, destination_id: int, module_state: IntEnum,\n pnp_state: IntEnum) ->str:\n \"\"\" Generate message for set module state and pnp state\n\n :param destination_id: Id to target destination\n :type destination_id: int\n :param module_state: State value of the module\n :type module_state: int\n :param pnp_state: Pnp state value\n :type pnp_state: IntEnum\n :return: json serialized message\n :rtype: str\n \"\"\"\n message = dict()\n message['c'] = 9\n message['s'] = 0\n message['d'] = destination_id\n state_bytes = bytearray(2)\n state_bytes[0] = module_state\n state_bytes[1] = pnp_state\n message['b'] = base64.b64encode(bytes(state_bytes)).decode('utf-8')\n message['l'] = 2\n return json.dumps(message, separators=(',', ':'))\n\n def __init_modules(self) ->None:\n \"\"\" Initialize module on first run\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n reboot_message = self.__set_module_state(BROADCAST_ID, Module.State\n .REBOOT, Module.State.PNP_OFF)\n self._send_q.put(reboot_message)\n pnp_off_message = self.__set_module_state(BROADCAST_ID, Module.\n State.RUN, Module.State.PNP_OFF)\n self._send_q.put(pnp_off_message)\n request_uuid_message = self.__request_uuid(BROADCAST_ID)\n self._send_q.put(request_uuid_message)\n self.request_topology()\n <function token>\n <function token>\n <function token>\n\n def update_firmware(self) ->None:\n \"\"\" Remove firmware of MODI modules\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n firmware_update_message = self.__set_module_state(BROADCAST_ID,\n Module.State.UPDATE_FIRMWARE, Module.State.PNP_OFF)\n self._send_q.put(firmware_update_message)\n self.__delay()\n <function token>\n <function token>\n",
"<import token>\n\n\nclass ExeTask:\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n\n def run(self, delay: float):\n \"\"\" Run in ExecutorThread\n\n :param delay: time value to wait in seconds\n :type delay: float\n \"\"\"\n time.sleep(delay)\n try:\n raw_message = self._recv_q.get_nowait()\n message = json.loads(raw_message)\n except queue.Empty:\n pass\n except json.decoder.JSONDecodeError:\n print('current json message:', raw_message)\n else:\n self.__command_handler(message['c'])(message)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __update_modules(self, message: Dict[str, str]) ->None:\n \"\"\" Update module information\n\n :param message: Dictionary format module info\n :type message: Dictionary\n :return: None\n \"\"\"\n curr_time_ms = int(time.time() * 1000)\n module_id = message['s']\n self._module_ids[module_id] = self._module_ids.get(module_id, dict())\n self._module_ids[module_id]['timestamp'] = curr_time_ms\n self._module_ids[module_id]['uuid'] = self._module_ids[module_id].get(\n 'uuid', str())\n message_decoded = bytearray(base64.b64decode(message['b']))\n module_uuid_bytes = message_decoded[:4]\n module_info_bytes = message_decoded[-4:]\n module_info = (module_info_bytes[1] << 8) + module_info_bytes[0]\n module_version_info = module_info_bytes[3] << 8 | module_info_bytes[2]\n version_path = (\n 'https://download.luxrobo.com/modi-skeleton-mobile/version.txt')\n version_info = None\n try:\n for line in ur.urlopen(version_path, timeout=1):\n version_info = line.decode('utf-8').lstrip('v')\n version_digits = [int(digit) for digit in version_info.split('.')]\n \"\"\" Version number is formed by concatenating all three version bits\n e.g. 
v2.2.4 -> 010 00010 00000100 -> 0100 0010 0000 0100\n \"\"\"\n latest_version = version_digits[0] << 13 | version_digits[1\n ] << 8 | version_digits[2]\n except URLError:\n latest_version = module_version_info\n module_category_idx = module_info >> 13\n module_type_idx = module_info >> 4 & 511\n module_category = self.__module_categories[module_category_idx]\n module_type = self.__module_types[module_category][module_type_idx]\n module_uuid = self.__fit_module_uuid(module_info, (\n module_uuid_bytes[3] << 24) + (module_uuid_bytes[2] << 16) + (\n module_uuid_bytes[1] << 8) + module_uuid_bytes[0])\n module_uuid = up(message['b'], (6, 2))[0]\n if (module_category != 'network' and not self.\n firmware_update_message_flag and module_version_info <\n latest_version):\n print('Your MODI module(s) is not up-to-date.')\n print(\n \"You can update your MODI modules by calling 'update_module_firmware()'\"\n )\n self.firmware_update_message_flag = True\n self._module_ids[module_id]['uuid'] = module_uuid\n for module in self._modules:\n if module.uuid == module_uuid and not module.is_connected:\n module.set_connection_state(connection_state=True)\n pnp_off_message = self.__set_module_state(4095, Module.\n State.RUN, Module.State.PNP_OFF)\n self._send_q.put(pnp_off_message)\n if not next((module for module in self._modules if module.uuid ==\n module_uuid), None):\n if module_category != 'network':\n module_template = self.__init_module(module_type)\n module_instance = module_template(module_id, module_uuid,\n self._send_q)\n self.__set_pnp(module_id=module_instance.id,\n module_pnp_state=Module.State.PNP_OFF)\n module_instance.version = module_version_info\n module_instance.is_up_to_date = (module_version_info ==\n latest_version)\n self._modules.append(module_instance)\n print(\n f'{type(module_instance).__name__} ({module_id}) has been connected!'\n )\n if self.__is_all_connected():\n self._init_event.set()\n\n def __is_all_connected(self) ->bool:\n \"\"\" Determine whether all modules are connected\n\n :return: true is all modules are connected\n :rtype: bool\n \"\"\"\n return self._nb_modules == len(self._modules)\n\n def __init_module(self, module_type: str) ->Module:\n \"\"\" Find module type for module initialize\n\n :param module_type: Type of the module in string\n :type module_type: str\n :return: Module corresponding to the type\n :rtype: Module\n \"\"\"\n module = {'button': Button, 'dial': Dial, 'display': Display, 'env':\n Env, 'gyro': Gyro, 'ir': Ir, 'led': Led, 'mic': Mic, 'motor':\n Motor, 'speaker': Speaker, 'ultrasonic': Ultrasonic}.get(\n module_type)\n return module\n\n def __update_property(self, message: Dict[str, int]) ->None:\n \"\"\" Update module property\n\n :param message: Dictionary format message\n :type message: Dictionary\n :return: None\n \"\"\"\n property_number = message['d']\n if property_number == 0 or property_number == 1:\n return\n for module in self._modules:\n if module.id == message['s']:\n message_decoded = bytearray(base64.b64decode(message['b']))\n property_type = module.PropertyType(property_number)\n module.update_property(property_type, round(struct.unpack(\n 'f', bytes(message_decoded[:4]))[0], 2))\n\n def __set_pnp(self, module_id: int, module_pnp_state: IntEnum) ->None:\n \"\"\" Generate module pnp on/off command\n\n :param module_id: ID of the target module\n :type module_id: int\n :param module_pnp_state: Pnp state value\n :type module_pnp_state: IntEnum\n :return: None\n \"\"\"\n if module_id is None:\n for curr_module_id in self._module_ids:\n 
pnp_message = self.__set_module_state(curr_module_id,\n Module.State.RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n else:\n pnp_message = self.__set_module_state(module_id, Module.State.\n RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n <function token>\n\n def __set_module_state(self, destination_id: int, module_state: IntEnum,\n pnp_state: IntEnum) ->str:\n \"\"\" Generate message for set module state and pnp state\n\n :param destination_id: Id to target destination\n :type destination_id: int\n :param module_state: State value of the module\n :type module_state: int\n :param pnp_state: Pnp state value\n :type pnp_state: IntEnum\n :return: json serialized message\n :rtype: str\n \"\"\"\n message = dict()\n message['c'] = 9\n message['s'] = 0\n message['d'] = destination_id\n state_bytes = bytearray(2)\n state_bytes[0] = module_state\n state_bytes[1] = pnp_state\n message['b'] = base64.b64encode(bytes(state_bytes)).decode('utf-8')\n message['l'] = 2\n return json.dumps(message, separators=(',', ':'))\n <function token>\n <function token>\n <function token>\n <function token>\n\n def update_firmware(self) ->None:\n \"\"\" Remove firmware of MODI modules\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n firmware_update_message = self.__set_module_state(BROADCAST_ID,\n Module.State.UPDATE_FIRMWARE, Module.State.PNP_OFF)\n self._send_q.put(firmware_update_message)\n self.__delay()\n <function token>\n <function token>\n",
"<import token>\n\n\nclass ExeTask:\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __update_modules(self, message: Dict[str, str]) ->None:\n \"\"\" Update module information\n\n :param message: Dictionary format module info\n :type message: Dictionary\n :return: None\n \"\"\"\n curr_time_ms = int(time.time() * 1000)\n module_id = message['s']\n self._module_ids[module_id] = self._module_ids.get(module_id, dict())\n self._module_ids[module_id]['timestamp'] = curr_time_ms\n self._module_ids[module_id]['uuid'] = self._module_ids[module_id].get(\n 'uuid', str())\n message_decoded = bytearray(base64.b64decode(message['b']))\n module_uuid_bytes = message_decoded[:4]\n module_info_bytes = message_decoded[-4:]\n module_info = (module_info_bytes[1] << 8) + module_info_bytes[0]\n module_version_info = module_info_bytes[3] << 8 | module_info_bytes[2]\n version_path = (\n 'https://download.luxrobo.com/modi-skeleton-mobile/version.txt')\n version_info = None\n try:\n for line in ur.urlopen(version_path, timeout=1):\n version_info = line.decode('utf-8').lstrip('v')\n version_digits = [int(digit) for digit in version_info.split('.')]\n \"\"\" Version number is formed by concatenating all three version bits\n e.g. v2.2.4 -> 010 00010 00000100 -> 0100 0010 0000 0100\n \"\"\"\n latest_version = version_digits[0] << 13 | version_digits[1\n ] << 8 | version_digits[2]\n except URLError:\n latest_version = module_version_info\n module_category_idx = module_info >> 13\n module_type_idx = module_info >> 4 & 511\n module_category = self.__module_categories[module_category_idx]\n module_type = self.__module_types[module_category][module_type_idx]\n module_uuid = self.__fit_module_uuid(module_info, (\n module_uuid_bytes[3] << 24) + (module_uuid_bytes[2] << 16) + (\n module_uuid_bytes[1] << 8) + module_uuid_bytes[0])\n module_uuid = up(message['b'], (6, 2))[0]\n if (module_category != 'network' and not self.\n firmware_update_message_flag and module_version_info <\n latest_version):\n print('Your MODI module(s) is not up-to-date.')\n print(\n \"You can update your MODI modules by calling 'update_module_firmware()'\"\n )\n self.firmware_update_message_flag = True\n self._module_ids[module_id]['uuid'] = module_uuid\n for module in self._modules:\n if module.uuid == module_uuid and not module.is_connected:\n module.set_connection_state(connection_state=True)\n pnp_off_message = self.__set_module_state(4095, Module.\n State.RUN, Module.State.PNP_OFF)\n self._send_q.put(pnp_off_message)\n if not next((module for module in self._modules if module.uuid ==\n module_uuid), None):\n if module_category != 'network':\n module_template = self.__init_module(module_type)\n module_instance = module_template(module_id, module_uuid,\n self._send_q)\n self.__set_pnp(module_id=module_instance.id,\n module_pnp_state=Module.State.PNP_OFF)\n module_instance.version = module_version_info\n module_instance.is_up_to_date = (module_version_info ==\n latest_version)\n self._modules.append(module_instance)\n print(\n f'{type(module_instance).__name__} ({module_id}) has been connected!'\n )\n if self.__is_all_connected():\n self._init_event.set()\n\n def __is_all_connected(self) ->bool:\n \"\"\" Determine whether all modules are connected\n\n :return: true is all modules are connected\n :rtype: bool\n \"\"\"\n return self._nb_modules == len(self._modules)\n\n def 
__init_module(self, module_type: str) ->Module:\n \"\"\" Find module type for module initialize\n\n :param module_type: Type of the module in string\n :type module_type: str\n :return: Module corresponding to the type\n :rtype: Module\n \"\"\"\n module = {'button': Button, 'dial': Dial, 'display': Display, 'env':\n Env, 'gyro': Gyro, 'ir': Ir, 'led': Led, 'mic': Mic, 'motor':\n Motor, 'speaker': Speaker, 'ultrasonic': Ultrasonic}.get(\n module_type)\n return module\n\n def __update_property(self, message: Dict[str, int]) ->None:\n \"\"\" Update module property\n\n :param message: Dictionary format message\n :type message: Dictionary\n :return: None\n \"\"\"\n property_number = message['d']\n if property_number == 0 or property_number == 1:\n return\n for module in self._modules:\n if module.id == message['s']:\n message_decoded = bytearray(base64.b64decode(message['b']))\n property_type = module.PropertyType(property_number)\n module.update_property(property_type, round(struct.unpack(\n 'f', bytes(message_decoded[:4]))[0], 2))\n\n def __set_pnp(self, module_id: int, module_pnp_state: IntEnum) ->None:\n \"\"\" Generate module pnp on/off command\n\n :param module_id: ID of the target module\n :type module_id: int\n :param module_pnp_state: Pnp state value\n :type module_pnp_state: IntEnum\n :return: None\n \"\"\"\n if module_id is None:\n for curr_module_id in self._module_ids:\n pnp_message = self.__set_module_state(curr_module_id,\n Module.State.RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n else:\n pnp_message = self.__set_module_state(module_id, Module.State.\n RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n <function token>\n\n def __set_module_state(self, destination_id: int, module_state: IntEnum,\n pnp_state: IntEnum) ->str:\n \"\"\" Generate message for set module state and pnp state\n\n :param destination_id: Id to target destination\n :type destination_id: int\n :param module_state: State value of the module\n :type module_state: int\n :param pnp_state: Pnp state value\n :type pnp_state: IntEnum\n :return: json serialized message\n :rtype: str\n \"\"\"\n message = dict()\n message['c'] = 9\n message['s'] = 0\n message['d'] = destination_id\n state_bytes = bytearray(2)\n state_bytes[0] = module_state\n state_bytes[1] = pnp_state\n message['b'] = base64.b64encode(bytes(state_bytes)).decode('utf-8')\n message['l'] = 2\n return json.dumps(message, separators=(',', ':'))\n <function token>\n <function token>\n <function token>\n <function token>\n\n def update_firmware(self) ->None:\n \"\"\" Remove firmware of MODI modules\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n firmware_update_message = self.__set_module_state(BROADCAST_ID,\n Module.State.UPDATE_FIRMWARE, Module.State.PNP_OFF)\n self._send_q.put(firmware_update_message)\n self.__delay()\n <function token>\n <function token>\n",
"<import token>\n\n\nclass ExeTask:\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __is_all_connected(self) ->bool:\n \"\"\" Determine whether all modules are connected\n\n :return: true is all modules are connected\n :rtype: bool\n \"\"\"\n return self._nb_modules == len(self._modules)\n\n def __init_module(self, module_type: str) ->Module:\n \"\"\" Find module type for module initialize\n\n :param module_type: Type of the module in string\n :type module_type: str\n :return: Module corresponding to the type\n :rtype: Module\n \"\"\"\n module = {'button': Button, 'dial': Dial, 'display': Display, 'env':\n Env, 'gyro': Gyro, 'ir': Ir, 'led': Led, 'mic': Mic, 'motor':\n Motor, 'speaker': Speaker, 'ultrasonic': Ultrasonic}.get(\n module_type)\n return module\n\n def __update_property(self, message: Dict[str, int]) ->None:\n \"\"\" Update module property\n\n :param message: Dictionary format message\n :type message: Dictionary\n :return: None\n \"\"\"\n property_number = message['d']\n if property_number == 0 or property_number == 1:\n return\n for module in self._modules:\n if module.id == message['s']:\n message_decoded = bytearray(base64.b64decode(message['b']))\n property_type = module.PropertyType(property_number)\n module.update_property(property_type, round(struct.unpack(\n 'f', bytes(message_decoded[:4]))[0], 2))\n\n def __set_pnp(self, module_id: int, module_pnp_state: IntEnum) ->None:\n \"\"\" Generate module pnp on/off command\n\n :param module_id: ID of the target module\n :type module_id: int\n :param module_pnp_state: Pnp state value\n :type module_pnp_state: IntEnum\n :return: None\n \"\"\"\n if module_id is None:\n for curr_module_id in self._module_ids:\n pnp_message = self.__set_module_state(curr_module_id,\n Module.State.RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n else:\n pnp_message = self.__set_module_state(module_id, Module.State.\n RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n <function token>\n\n def __set_module_state(self, destination_id: int, module_state: IntEnum,\n pnp_state: IntEnum) ->str:\n \"\"\" Generate message for set module state and pnp state\n\n :param destination_id: Id to target destination\n :type destination_id: int\n :param module_state: State value of the module\n :type module_state: int\n :param pnp_state: Pnp state value\n :type pnp_state: IntEnum\n :return: json serialized message\n :rtype: str\n \"\"\"\n message = dict()\n message['c'] = 9\n message['s'] = 0\n message['d'] = destination_id\n state_bytes = bytearray(2)\n state_bytes[0] = module_state\n state_bytes[1] = pnp_state\n message['b'] = base64.b64encode(bytes(state_bytes)).decode('utf-8')\n message['l'] = 2\n return json.dumps(message, separators=(',', ':'))\n <function token>\n <function token>\n <function token>\n <function token>\n\n def update_firmware(self) ->None:\n \"\"\" Remove firmware of MODI modules\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n firmware_update_message = self.__set_module_state(BROADCAST_ID,\n Module.State.UPDATE_FIRMWARE, Module.State.PNP_OFF)\n self._send_q.put(firmware_update_message)\n self.__delay()\n <function token>\n <function token>\n",
"<import token>\n\n\nclass ExeTask:\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __is_all_connected(self) ->bool:\n \"\"\" Determine whether all modules are connected\n\n :return: true is all modules are connected\n :rtype: bool\n \"\"\"\n return self._nb_modules == len(self._modules)\n\n def __init_module(self, module_type: str) ->Module:\n \"\"\" Find module type for module initialize\n\n :param module_type: Type of the module in string\n :type module_type: str\n :return: Module corresponding to the type\n :rtype: Module\n \"\"\"\n module = {'button': Button, 'dial': Dial, 'display': Display, 'env':\n Env, 'gyro': Gyro, 'ir': Ir, 'led': Led, 'mic': Mic, 'motor':\n Motor, 'speaker': Speaker, 'ultrasonic': Ultrasonic}.get(\n module_type)\n return module\n <function token>\n\n def __set_pnp(self, module_id: int, module_pnp_state: IntEnum) ->None:\n \"\"\" Generate module pnp on/off command\n\n :param module_id: ID of the target module\n :type module_id: int\n :param module_pnp_state: Pnp state value\n :type module_pnp_state: IntEnum\n :return: None\n \"\"\"\n if module_id is None:\n for curr_module_id in self._module_ids:\n pnp_message = self.__set_module_state(curr_module_id,\n Module.State.RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n else:\n pnp_message = self.__set_module_state(module_id, Module.State.\n RUN, module_pnp_state)\n self._send_q.put(pnp_message)\n <function token>\n\n def __set_module_state(self, destination_id: int, module_state: IntEnum,\n pnp_state: IntEnum) ->str:\n \"\"\" Generate message for set module state and pnp state\n\n :param destination_id: Id to target destination\n :type destination_id: int\n :param module_state: State value of the module\n :type module_state: int\n :param pnp_state: Pnp state value\n :type pnp_state: IntEnum\n :return: json serialized message\n :rtype: str\n \"\"\"\n message = dict()\n message['c'] = 9\n message['s'] = 0\n message['d'] = destination_id\n state_bytes = bytearray(2)\n state_bytes[0] = module_state\n state_bytes[1] = pnp_state\n message['b'] = base64.b64encode(bytes(state_bytes)).decode('utf-8')\n message['l'] = 2\n return json.dumps(message, separators=(',', ':'))\n <function token>\n <function token>\n <function token>\n <function token>\n\n def update_firmware(self) ->None:\n \"\"\" Remove firmware of MODI modules\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n firmware_update_message = self.__set_module_state(BROADCAST_ID,\n Module.State.UPDATE_FIRMWARE, Module.State.PNP_OFF)\n self._send_q.put(firmware_update_message)\n self.__delay()\n <function token>\n <function token>\n",
"<import token>\n\n\nclass ExeTask:\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __is_all_connected(self) ->bool:\n \"\"\" Determine whether all modules are connected\n\n :return: true is all modules are connected\n :rtype: bool\n \"\"\"\n return self._nb_modules == len(self._modules)\n\n def __init_module(self, module_type: str) ->Module:\n \"\"\" Find module type for module initialize\n\n :param module_type: Type of the module in string\n :type module_type: str\n :return: Module corresponding to the type\n :rtype: Module\n \"\"\"\n module = {'button': Button, 'dial': Dial, 'display': Display, 'env':\n Env, 'gyro': Gyro, 'ir': Ir, 'led': Led, 'mic': Mic, 'motor':\n Motor, 'speaker': Speaker, 'ultrasonic': Ultrasonic}.get(\n module_type)\n return module\n <function token>\n <function token>\n <function token>\n\n def __set_module_state(self, destination_id: int, module_state: IntEnum,\n pnp_state: IntEnum) ->str:\n \"\"\" Generate message for set module state and pnp state\n\n :param destination_id: Id to target destination\n :type destination_id: int\n :param module_state: State value of the module\n :type module_state: int\n :param pnp_state: Pnp state value\n :type pnp_state: IntEnum\n :return: json serialized message\n :rtype: str\n \"\"\"\n message = dict()\n message['c'] = 9\n message['s'] = 0\n message['d'] = destination_id\n state_bytes = bytearray(2)\n state_bytes[0] = module_state\n state_bytes[1] = pnp_state\n message['b'] = base64.b64encode(bytes(state_bytes)).decode('utf-8')\n message['l'] = 2\n return json.dumps(message, separators=(',', ':'))\n <function token>\n <function token>\n <function token>\n <function token>\n\n def update_firmware(self) ->None:\n \"\"\" Remove firmware of MODI modules\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n firmware_update_message = self.__set_module_state(BROADCAST_ID,\n Module.State.UPDATE_FIRMWARE, Module.State.PNP_OFF)\n self._send_q.put(firmware_update_message)\n self.__delay()\n <function token>\n <function token>\n",
"<import token>\n\n\nclass ExeTask:\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __is_all_connected(self) ->bool:\n \"\"\" Determine whether all modules are connected\n\n :return: true is all modules are connected\n :rtype: bool\n \"\"\"\n return self._nb_modules == len(self._modules)\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __set_module_state(self, destination_id: int, module_state: IntEnum,\n pnp_state: IntEnum) ->str:\n \"\"\" Generate message for set module state and pnp state\n\n :param destination_id: Id to target destination\n :type destination_id: int\n :param module_state: State value of the module\n :type module_state: int\n :param pnp_state: Pnp state value\n :type pnp_state: IntEnum\n :return: json serialized message\n :rtype: str\n \"\"\"\n message = dict()\n message['c'] = 9\n message['s'] = 0\n message['d'] = destination_id\n state_bytes = bytearray(2)\n state_bytes[0] = module_state\n state_bytes[1] = pnp_state\n message['b'] = base64.b64encode(bytes(state_bytes)).decode('utf-8')\n message['l'] = 2\n return json.dumps(message, separators=(',', ':'))\n <function token>\n <function token>\n <function token>\n <function token>\n\n def update_firmware(self) ->None:\n \"\"\" Remove firmware of MODI modules\n\n :return: None\n \"\"\"\n BROADCAST_ID = 4095\n firmware_update_message = self.__set_module_state(BROADCAST_ID,\n Module.State.UPDATE_FIRMWARE, Module.State.PNP_OFF)\n self._send_q.put(firmware_update_message)\n self.__delay()\n <function token>\n <function token>\n",
"<import token>\n\n\nclass ExeTask:\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __is_all_connected(self) ->bool:\n \"\"\" Determine whether all modules are connected\n\n :return: true is all modules are connected\n :rtype: bool\n \"\"\"\n return self._nb_modules == len(self._modules)\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __set_module_state(self, destination_id: int, module_state: IntEnum,\n pnp_state: IntEnum) ->str:\n \"\"\" Generate message for set module state and pnp state\n\n :param destination_id: Id to target destination\n :type destination_id: int\n :param module_state: State value of the module\n :type module_state: int\n :param pnp_state: Pnp state value\n :type pnp_state: IntEnum\n :return: json serialized message\n :rtype: str\n \"\"\"\n message = dict()\n message['c'] = 9\n message['s'] = 0\n message['d'] = destination_id\n state_bytes = bytearray(2)\n state_bytes[0] = module_state\n state_bytes[1] = pnp_state\n message['b'] = base64.b64encode(bytes(state_bytes)).decode('utf-8')\n message['l'] = 2\n return json.dumps(message, separators=(',', ':'))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n\n\nclass ExeTask:\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __set_module_state(self, destination_id: int, module_state: IntEnum,\n pnp_state: IntEnum) ->str:\n \"\"\" Generate message for set module state and pnp state\n\n :param destination_id: Id to target destination\n :type destination_id: int\n :param module_state: State value of the module\n :type module_state: int\n :param pnp_state: Pnp state value\n :type pnp_state: IntEnum\n :return: json serialized message\n :rtype: str\n \"\"\"\n message = dict()\n message['c'] = 9\n message['s'] = 0\n message['d'] = destination_id\n state_bytes = bytearray(2)\n state_bytes[0] = module_state\n state_bytes[1] = pnp_state\n message['b'] = base64.b64encode(bytes(state_bytes)).decode('utf-8')\n message['l'] = 2\n return json.dumps(message, separators=(',', ':'))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n\n\nclass ExeTask:\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
98,525 |
1412c450206c3826c88c6bc3a6133a06e6714e0c
|
# Import flask and template operators
from flask import Flask, render_template, session
# Define the WSGI application object
app = Flask(__name__)
# Configurations
app.config.from_object('config')
# Import a module / component using its blueprint handler variable
from app.controllers.home_controller import mod_home as home_module
# Register blueprint(s)
app.register_blueprint(home_module)
|
[
"# Import flask and template operators\nfrom flask import Flask, render_template, session\n\n\n# Define the WSGI application object\napp = Flask(__name__)\n\n# Configurations\napp.config.from_object('config')\n\n# Import a module / component using its blueprint handler variable\nfrom app.controllers.home_controller import mod_home as home_module\n\n# Register blueprint(s)\napp.register_blueprint(home_module)\n\n\n\n\n",
"from flask import Flask, render_template, session\napp = Flask(__name__)\napp.config.from_object('config')\nfrom app.controllers.home_controller import mod_home as home_module\napp.register_blueprint(home_module)\n",
"<import token>\napp = Flask(__name__)\napp.config.from_object('config')\n<import token>\napp.register_blueprint(home_module)\n",
"<import token>\n<assignment token>\napp.config.from_object('config')\n<import token>\napp.register_blueprint(home_module)\n",
"<import token>\n<assignment token>\n<code token>\n<import token>\n<code token>\n"
] | false |
98,526 |
fe896723b6074e9fa404eb297bb2cdf72bdf3892
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Query Service example SDK usage."""
import os
import sys
import time
import logging
# Necessary to reference cortex package in relative path
curpath = os.path.dirname(os.path.abspath(__file__))
sys.path[:0] = [os.path.join(curpath, os.pardir)]
from pan_cortex_data_lake import Credentials, QueryService
url = "https://api.us.cdl.paloaltonetworks.com" # prod us
# Create Credentials instance
# export PAN_DEVELOPER_TOKEN for quick access
c = Credentials()
# Create Query Service instance
qs = QueryService(url=url, force_trace=True, credentials=c)
# SQL = 'SELECT * FROM `2020001.firewall.traffic` LIMIT 100'
SQL = "SELECT * FROM `4199400902993631660.firewall.traffic` LIMIT 1"
# Generate new 'query'
query_params = {"query": SQL}
q = qs.create_query(query_params=query_params)
print("QUERY Params: {}\n".format(query_params))
print("QUERY HTTP STATUS CODE: {}\n".format(q.status_code))
print("QUERY Response: {}\n".format(q.text))
job_id = q.json()["jobId"] # access 'jobId' from 'query' response
# Iterate through job results (pages)
print("Iterate through job results: \n")
for p in qs.iter_job_results(job_id=job_id, result_format="valuesDictionary"):
print("RESULTS: {}\n".format(p.text))
print("STATS: {}".format(qs.stats))
|
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Query Service example SDK usage.\"\"\"\n\nimport os\nimport sys\nimport time\nimport logging\n\n# Necessary to reference cortex package in relative path\ncurpath = os.path.dirname(os.path.abspath(__file__))\nsys.path[:0] = [os.path.join(curpath, os.pardir)]\n\nfrom pan_cortex_data_lake import Credentials, QueryService\n\nurl = \"https://api.us.cdl.paloaltonetworks.com\" # prod us\n\n# Create Credentials instance\n# export PAN_DEVELOPER_TOKEN for quick access\nc = Credentials()\n\n# Create Query Service instance\nqs = QueryService(url=url, force_trace=True, credentials=c)\n\n# SQL = 'SELECT * FROM `2020001.firewall.traffic` LIMIT 100'\nSQL = \"SELECT * FROM `4199400902993631660.firewall.traffic` LIMIT 1\"\n\n# Generate new 'query'\nquery_params = {\"query\": SQL}\n\nq = qs.create_query(query_params=query_params)\n\nprint(\"QUERY Params: {}\\n\".format(query_params))\n\nprint(\"QUERY HTTP STATUS CODE: {}\\n\".format(q.status_code))\n\nprint(\"QUERY Response: {}\\n\".format(q.text))\n\njob_id = q.json()[\"jobId\"] # access 'jobId' from 'query' response\n\n# Iterate through job results (pages)\nprint(\"Iterate through job results: \\n\")\nfor p in qs.iter_job_results(job_id=job_id, result_format=\"valuesDictionary\"):\n print(\"RESULTS: {}\\n\".format(p.text))\n\nprint(\"STATS: {}\".format(qs.stats))\n",
"<docstring token>\nimport os\nimport sys\nimport time\nimport logging\ncurpath = os.path.dirname(os.path.abspath(__file__))\nsys.path[:0] = [os.path.join(curpath, os.pardir)]\nfrom pan_cortex_data_lake import Credentials, QueryService\nurl = 'https://api.us.cdl.paloaltonetworks.com'\nc = Credentials()\nqs = QueryService(url=url, force_trace=True, credentials=c)\nSQL = 'SELECT * FROM `4199400902993631660.firewall.traffic` LIMIT 1'\nquery_params = {'query': SQL}\nq = qs.create_query(query_params=query_params)\nprint('QUERY Params: {}\\n'.format(query_params))\nprint('QUERY HTTP STATUS CODE: {}\\n'.format(q.status_code))\nprint('QUERY Response: {}\\n'.format(q.text))\njob_id = q.json()['jobId']\nprint('Iterate through job results: \\n')\nfor p in qs.iter_job_results(job_id=job_id, result_format='valuesDictionary'):\n print('RESULTS: {}\\n'.format(p.text))\nprint('STATS: {}'.format(qs.stats))\n",
"<docstring token>\n<import token>\ncurpath = os.path.dirname(os.path.abspath(__file__))\nsys.path[:0] = [os.path.join(curpath, os.pardir)]\n<import token>\nurl = 'https://api.us.cdl.paloaltonetworks.com'\nc = Credentials()\nqs = QueryService(url=url, force_trace=True, credentials=c)\nSQL = 'SELECT * FROM `4199400902993631660.firewall.traffic` LIMIT 1'\nquery_params = {'query': SQL}\nq = qs.create_query(query_params=query_params)\nprint('QUERY Params: {}\\n'.format(query_params))\nprint('QUERY HTTP STATUS CODE: {}\\n'.format(q.status_code))\nprint('QUERY Response: {}\\n'.format(q.text))\njob_id = q.json()['jobId']\nprint('Iterate through job results: \\n')\nfor p in qs.iter_job_results(job_id=job_id, result_format='valuesDictionary'):\n print('RESULTS: {}\\n'.format(p.text))\nprint('STATS: {}'.format(qs.stats))\n",
"<docstring token>\n<import token>\n<assignment token>\n<import token>\n<assignment token>\nprint('QUERY Params: {}\\n'.format(query_params))\nprint('QUERY HTTP STATUS CODE: {}\\n'.format(q.status_code))\nprint('QUERY Response: {}\\n'.format(q.text))\n<assignment token>\nprint('Iterate through job results: \\n')\nfor p in qs.iter_job_results(job_id=job_id, result_format='valuesDictionary'):\n print('RESULTS: {}\\n'.format(p.text))\nprint('STATS: {}'.format(qs.stats))\n",
"<docstring token>\n<import token>\n<assignment token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,527 |
555902fca5063d79a8e4ee0e7e04362d202b2c80
|
def count_factors(x):
count=0
while x%2==0:
x=x//2
count+=1
i=3
while i*i<=x:
while x%i==0:
x=x//i
count+=1
i+=2
if x>2:
count+=1
return count
t = int(input())
for _ in range(t):
x,k = [int(x) for x in input().strip().split()]
count = count_factors(x)
if count>=k:
print(1)
else:
print(0)
|
[
"def count_factors(x):\n count=0\n\n while x%2==0:\n x=x//2\n count+=1\n \n i=3\n while i*i<=x:\n while x%i==0:\n x=x//i\n count+=1\n \n i+=2\n\n if x>2:\n count+=1\n\n return count\n\nt = int(input())\nfor _ in range(t):\n x,k = [int(x) for x in input().strip().split()]\n count = count_factors(x)\n\n if count>=k:\n print(1)\n else:\n print(0)\n",
"def count_factors(x):\n count = 0\n while x % 2 == 0:\n x = x // 2\n count += 1\n i = 3\n while i * i <= x:\n while x % i == 0:\n x = x // i\n count += 1\n i += 2\n if x > 2:\n count += 1\n return count\n\n\nt = int(input())\nfor _ in range(t):\n x, k = [int(x) for x in input().strip().split()]\n count = count_factors(x)\n if count >= k:\n print(1)\n else:\n print(0)\n",
"def count_factors(x):\n count = 0\n while x % 2 == 0:\n x = x // 2\n count += 1\n i = 3\n while i * i <= x:\n while x % i == 0:\n x = x // i\n count += 1\n i += 2\n if x > 2:\n count += 1\n return count\n\n\n<assignment token>\nfor _ in range(t):\n x, k = [int(x) for x in input().strip().split()]\n count = count_factors(x)\n if count >= k:\n print(1)\n else:\n print(0)\n",
"def count_factors(x):\n count = 0\n while x % 2 == 0:\n x = x // 2\n count += 1\n i = 3\n while i * i <= x:\n while x % i == 0:\n x = x // i\n count += 1\n i += 2\n if x > 2:\n count += 1\n return count\n\n\n<assignment token>\n<code token>\n",
"<function token>\n<assignment token>\n<code token>\n"
] | false |
98,528 |
c7e53815e48340584793f8aec25c242a19e4ae7b
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from module import *
class DeConvVAE(nn.Module):
def __init__(self, args, data):
super(DeConvVAE, self).__init__()
self.args = args
self.encoder = ConvolutionEncoder(args)
self.fc_mu = nn.Linear(args.feature_maps[2], args.latent_size)
self.fc_logvar = nn.Linear(args.feature_maps[2], args.latent_size)
self.decoder = DeconvolutionDecoder(args)
self.dropout = nn.Dropout(args.dropout)
def reparameterize(self, mu, logvar):
if self.training:
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps.mul(std).add_(mu)
else:
return mu
def forward(self, x, word_emb):
# Encode
h = self.encoder(self.dropout(x))
mu = self.fc_mu(self.dropout(h))
logvar = self.fc_logvar(self.dropout(h))
# Sample
z = self.reparameterize(mu, logvar)
# Decode
x_hat = self.decoder(z)
# normalize
norm_x_hat = torch.norm(x_hat, 2, dim=2, keepdim=True)
rec_x_hat = x_hat / norm_x_hat
norm_w = torch.norm(word_emb.weight.data, 2, dim=1, keepdim=True)
rec_w = (word_emb.weight.data / (norm_w + 1e-20)).t()
# compute probability
prob_logits = torch.bmm(rec_x_hat, rec_w.unsqueeze(0)
.expand(rec_x_hat.size(0), *rec_w.size())) / self.args.tau
log_prob = F.log_softmax(prob_logits, dim=2)
return log_prob, mu, logvar, z
def generate(self, sample_num, word_emb):
latent_size = self.args.latent_size
device = torch.device(self.args.device)
# Sample
z = torch.cat([torch.randn(latent_size).unsqueeze_(0) for i in range(sample_num)], dim=0)
z = z.to(device)
# Decode
x_hat = self.decoder(z)
# normalize
norm_x_hat = torch.norm(x_hat, 2, dim=2, keepdim=True)
rec_x_hat = x_hat / norm_x_hat
norm_w = torch.norm(word_emb.weight.data, 2, dim=1, keepdim=True)
rec_w = (word_emb.weight.data / (norm_w + 1e-20)).t()
# compute probability
prob_logits = torch.bmm(rec_x_hat, rec_w.unsqueeze(0)
.expand(rec_x_hat.size(0), *rec_w.size())) / self.args.tau
log_prob = F.log_softmax(prob_logits, dim=2)
return log_prob
class NN4VAE(nn.Module):
def __init__(self, args, data):
super(NN4VAE, self).__init__()
self.args = args
self.word_emb = nn.Embedding(args.word_vocab_size, args.word_dim)
# initialize word embedding with GloVe
self.word_emb.weight.data.copy_(data.TEXT.vocab.vectors)
# fine-tune the word embedding
self.word_emb.weight.requires_grad = True
        # the <unk> vector is randomly initialized
nn.init.uniform_(self.word_emb.weight.data[0], -0.05, 0.05)
self.vae = DeConvVAE(args, data)
def forward(self, x):
# word embedding
x = self.word_emb(x)
log_prob, mu, logvar, z = self.vae(x, self.word_emb)
return log_prob, mu, logvar, z
def generate(self, sample_num):
return self.vae.generate(sample_num, self.word_emb)
class NN4SNLI(nn.Module):
def __init__(self, args, data):
super(NN4SNLI, self).__init__()
self.args = args
self.word_emb = nn.Embedding(args.word_vocab_size, args.word_dim)
# initialize word embedding with GloVe
self.word_emb.weight.data.copy_(data.TEXT.vocab.vectors)
# fine-tune the word embedding
self.word_emb.weight.requires_grad = True
# <unk> vectors is randomly initialized
nn.init.uniform_(self.word_emb.weight.data[0], -0.05, 0.05)
self.vae = DeConvVAE(args, data)
self.fc_1 = nn.Linear(4*args.latent_size, args.hidden_size)
self.fc_2 = nn.Linear(args.hidden_size, args.hidden_size)
self.fc_out = nn.Linear(args.hidden_size, args.class_size)
self.relu = nn.ReLU()
def forward(self, batch):
p = batch.premise
h = batch.hypothesis
# (batch, seq_len, word_dim)
p_x = self.word_emb(p)
h_x = self.word_emb(h)
# VAE
p_log_prob, p_mu, p_logvar, z_p = self.vae(p_x, self.word_emb)
h_log_prob, h_mu, h_logvar, z_h = self.vae(h_x, self.word_emb)
# matching layer
m = torch.cat([z_p, z_h, z_p - z_h, z_p * z_h], dim=-1)
# fully-connected layers
out = self.relu(self.fc_1(m))
out = self.relu(self.fc_2(out))
out = self.fc_out(out)
return out, p_log_prob, p_mu, p_logvar, h_log_prob, h_mu, h_logvar
|
[
"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nfrom module import *\r\n\r\n\r\nclass DeConvVAE(nn.Module):\r\n\r\n\tdef __init__(self, args, data):\r\n\t\tsuper(DeConvVAE, self).__init__()\r\n\t\tself.args = args\r\n\r\n\t\tself.encoder = ConvolutionEncoder(args)\r\n\r\n\t\tself.fc_mu = nn.Linear(args.feature_maps[2], args.latent_size)\r\n\t\tself.fc_logvar\t= nn.Linear(args.feature_maps[2], args.latent_size)\r\n\r\n\t\tself.decoder = DeconvolutionDecoder(args)\r\n\r\n\t\tself.dropout = nn.Dropout(args.dropout)\r\n\r\n\r\n\tdef reparameterize(self, mu, logvar):\r\n\t\tif self.training:\r\n\t\t\tstd = torch.exp(0.5 * logvar)\r\n\t\t\teps = torch.randn_like(std)\r\n\t\t\treturn eps.mul(std).add_(mu)\r\n\t\telse:\r\n\t\t\treturn mu\r\n\r\n\r\n\tdef forward(self, x, word_emb):\r\n\t\t# Encode\r\n\t\th = self.encoder(self.dropout(x))\r\n\t\tmu = self.fc_mu(self.dropout(h))\r\n\t\tlogvar = self.fc_logvar(self.dropout(h))\r\n\r\n\t\t# Sample\r\n\t\tz = self.reparameterize(mu, logvar)\r\n\r\n\t\t# Decode\r\n\t\tx_hat = self.decoder(z)\r\n\r\n\t\t# normalize\r\n\t\tnorm_x_hat = torch.norm(x_hat, 2, dim=2, keepdim=True)\r\n\t\trec_x_hat = x_hat / norm_x_hat\r\n\t\tnorm_w = torch.norm(word_emb.weight.data, 2, dim=1, keepdim=True)\r\n\t\trec_w = (word_emb.weight.data / (norm_w + 1e-20)).t()\r\n\r\n\t\t# compute probability\r\n\t\tprob_logits = torch.bmm(rec_x_hat, rec_w.unsqueeze(0)\r\n\t\t\t\t\t\t\t\t.expand(rec_x_hat.size(0), *rec_w.size())) / self.args.tau\r\n\t\tlog_prob = F.log_softmax(prob_logits, dim=2)\r\n\r\n\t\treturn log_prob, mu, logvar, z\r\n\r\n\r\n\tdef generate(self, sample_num, word_emb):\r\n\t\tlatent_size = self.args.latent_size\r\n\t\tdevice = torch.device(self.args.device)\r\n\r\n\t\t# Sample\r\n\t\tz = torch.cat([torch.randn(latent_size).unsqueeze_(0) for i in range(sample_num)], dim=0)\r\n\t\tz = z.to(device)\r\n\r\n\t\t# Decode\r\n\t\tx_hat = self.decoder(z)\r\n\r\n\t\t# normalize\r\n\t\tnorm_x_hat = torch.norm(x_hat, 2, dim=2, keepdim=True)\r\n\t\trec_x_hat = x_hat / norm_x_hat\r\n\t\tnorm_w = torch.norm(word_emb.weight.data, 2, dim=1, keepdim=True)\r\n\t\trec_w = (word_emb.weight.data / (norm_w + 1e-20)).t()\r\n\r\n\t\t# compute probability\r\n\t\tprob_logits = torch.bmm(rec_x_hat, rec_w.unsqueeze(0)\r\n\t\t\t\t\t\t\t\t.expand(rec_x_hat.size(0), *rec_w.size())) / self.args.tau\r\n\t\tlog_prob = F.log_softmax(prob_logits, dim=2)\r\n\r\n\t\treturn log_prob\r\n\r\n\r\nclass NN4VAE(nn.Module):\r\n\r\n\tdef __init__(self, args, data):\r\n\t\tsuper(NN4VAE, self).__init__()\r\n\r\n\t\tself.args = args\r\n\r\n\t\tself.word_emb = nn.Embedding(args.word_vocab_size, args.word_dim)\r\n\t\t# initialize word embedding with GloVe\r\n\t\tself.word_emb.weight.data.copy_(data.TEXT.vocab.vectors)\r\n\t\t# fine-tune the word embedding\r\n\t\tself.word_emb.weight.requires_grad = True\r\n\t\t# <unk> vectors is randomly initialized\r\n\t\tnn.init.uniform_(self.word_emb.weight.data[0], -0.05, 0.05)\r\n\r\n\t\tself.vae = DeConvVAE(args, data)\r\n\r\n\r\n\tdef forward(self, x):\r\n\t\t# word embedding\r\n\t\tx = self.word_emb(x)\r\n\r\n\t\tlog_prob, mu, logvar, z = self.vae(x, self.word_emb)\r\n\r\n\t\treturn log_prob, mu, logvar, z\r\n\r\n\r\n\tdef generate(self, sample_num):\r\n\t\treturn self.vae.generate(sample_num, self.word_emb)\r\n\r\n\r\nclass NN4SNLI(nn.Module):\r\n\r\n\tdef __init__(self, args, data):\r\n\t\tsuper(NN4SNLI, self).__init__()\r\n\r\n\t\tself.args = args\r\n\r\n\t\tself.word_emb = nn.Embedding(args.word_vocab_size, args.word_dim)\r\n\t\t# 
initialize word embedding with GloVe\r\n\t\tself.word_emb.weight.data.copy_(data.TEXT.vocab.vectors)\r\n\t\t# fine-tune the word embedding\r\n\t\tself.word_emb.weight.requires_grad = True\r\n\t\t# <unk> vectors is randomly initialized\r\n\t\tnn.init.uniform_(self.word_emb.weight.data[0], -0.05, 0.05)\r\n\r\n\t\tself.vae = DeConvVAE(args, data)\r\n\r\n\t\tself.fc_1 = nn.Linear(4*args.latent_size, args.hidden_size)\r\n\t\tself.fc_2 = nn.Linear(args.hidden_size, args.hidden_size)\r\n\t\tself.fc_out = nn.Linear(args.hidden_size, args.class_size)\r\n\r\n\t\tself.relu = nn.ReLU()\r\n\r\n\r\n\tdef forward(self, batch):\r\n\t\tp = batch.premise\r\n\t\th = batch.hypothesis\r\n\r\n\t\t# (batch, seq_len, word_dim)\r\n\t\tp_x = self.word_emb(p)\r\n\t\th_x = self.word_emb(h)\r\n\r\n\t\t# VAE\r\n\t\tp_log_prob, p_mu, p_logvar, z_p = self.vae(p_x, self.word_emb)\r\n\t\th_log_prob, h_mu, h_logvar, z_h = self.vae(h_x, self.word_emb)\r\n\r\n\t\t# matching layer\r\n\t\tm = torch.cat([z_p, z_h, z_p - z_h, z_p * z_h], dim=-1)\r\n\r\n\t\t# fully-connected layers\r\n\t\tout = self.relu(self.fc_1(m))\r\n\t\tout = self.relu(self.fc_2(out))\r\n\t\tout = self.fc_out(out)\r\n\r\n\t\treturn out, p_log_prob, p_mu, p_logvar, h_log_prob, h_mu, h_logvar",
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom module import *\n\n\nclass DeConvVAE(nn.Module):\n\n def __init__(self, args, data):\n super(DeConvVAE, self).__init__()\n self.args = args\n self.encoder = ConvolutionEncoder(args)\n self.fc_mu = nn.Linear(args.feature_maps[2], args.latent_size)\n self.fc_logvar = nn.Linear(args.feature_maps[2], args.latent_size)\n self.decoder = DeconvolutionDecoder(args)\n self.dropout = nn.Dropout(args.dropout)\n\n def reparameterize(self, mu, logvar):\n if self.training:\n std = torch.exp(0.5 * logvar)\n eps = torch.randn_like(std)\n return eps.mul(std).add_(mu)\n else:\n return mu\n\n def forward(self, x, word_emb):\n h = self.encoder(self.dropout(x))\n mu = self.fc_mu(self.dropout(h))\n logvar = self.fc_logvar(self.dropout(h))\n z = self.reparameterize(mu, logvar)\n x_hat = self.decoder(z)\n norm_x_hat = torch.norm(x_hat, 2, dim=2, keepdim=True)\n rec_x_hat = x_hat / norm_x_hat\n norm_w = torch.norm(word_emb.weight.data, 2, dim=1, keepdim=True)\n rec_w = (word_emb.weight.data / (norm_w + 1e-20)).t()\n prob_logits = torch.bmm(rec_x_hat, rec_w.unsqueeze(0).expand(\n rec_x_hat.size(0), *rec_w.size())) / self.args.tau\n log_prob = F.log_softmax(prob_logits, dim=2)\n return log_prob, mu, logvar, z\n\n def generate(self, sample_num, word_emb):\n latent_size = self.args.latent_size\n device = torch.device(self.args.device)\n z = torch.cat([torch.randn(latent_size).unsqueeze_(0) for i in\n range(sample_num)], dim=0)\n z = z.to(device)\n x_hat = self.decoder(z)\n norm_x_hat = torch.norm(x_hat, 2, dim=2, keepdim=True)\n rec_x_hat = x_hat / norm_x_hat\n norm_w = torch.norm(word_emb.weight.data, 2, dim=1, keepdim=True)\n rec_w = (word_emb.weight.data / (norm_w + 1e-20)).t()\n prob_logits = torch.bmm(rec_x_hat, rec_w.unsqueeze(0).expand(\n rec_x_hat.size(0), *rec_w.size())) / self.args.tau\n log_prob = F.log_softmax(prob_logits, dim=2)\n return log_prob\n\n\nclass NN4VAE(nn.Module):\n\n def __init__(self, args, data):\n super(NN4VAE, self).__init__()\n self.args = args\n self.word_emb = nn.Embedding(args.word_vocab_size, args.word_dim)\n self.word_emb.weight.data.copy_(data.TEXT.vocab.vectors)\n self.word_emb.weight.requires_grad = True\n nn.init.uniform_(self.word_emb.weight.data[0], -0.05, 0.05)\n self.vae = DeConvVAE(args, data)\n\n def forward(self, x):\n x = self.word_emb(x)\n log_prob, mu, logvar, z = self.vae(x, self.word_emb)\n return log_prob, mu, logvar, z\n\n def generate(self, sample_num):\n return self.vae.generate(sample_num, self.word_emb)\n\n\nclass NN4SNLI(nn.Module):\n\n def __init__(self, args, data):\n super(NN4SNLI, self).__init__()\n self.args = args\n self.word_emb = nn.Embedding(args.word_vocab_size, args.word_dim)\n self.word_emb.weight.data.copy_(data.TEXT.vocab.vectors)\n self.word_emb.weight.requires_grad = True\n nn.init.uniform_(self.word_emb.weight.data[0], -0.05, 0.05)\n self.vae = DeConvVAE(args, data)\n self.fc_1 = nn.Linear(4 * args.latent_size, args.hidden_size)\n self.fc_2 = nn.Linear(args.hidden_size, args.hidden_size)\n self.fc_out = nn.Linear(args.hidden_size, args.class_size)\n self.relu = nn.ReLU()\n\n def forward(self, batch):\n p = batch.premise\n h = batch.hypothesis\n p_x = self.word_emb(p)\n h_x = self.word_emb(h)\n p_log_prob, p_mu, p_logvar, z_p = self.vae(p_x, self.word_emb)\n h_log_prob, h_mu, h_logvar, z_h = self.vae(h_x, self.word_emb)\n m = torch.cat([z_p, z_h, z_p - z_h, z_p * z_h], dim=-1)\n out = self.relu(self.fc_1(m))\n out = self.relu(self.fc_2(out))\n out = self.fc_out(out)\n 
return out, p_log_prob, p_mu, p_logvar, h_log_prob, h_mu, h_logvar\n",
"<import token>\n\n\nclass DeConvVAE(nn.Module):\n\n def __init__(self, args, data):\n super(DeConvVAE, self).__init__()\n self.args = args\n self.encoder = ConvolutionEncoder(args)\n self.fc_mu = nn.Linear(args.feature_maps[2], args.latent_size)\n self.fc_logvar = nn.Linear(args.feature_maps[2], args.latent_size)\n self.decoder = DeconvolutionDecoder(args)\n self.dropout = nn.Dropout(args.dropout)\n\n def reparameterize(self, mu, logvar):\n if self.training:\n std = torch.exp(0.5 * logvar)\n eps = torch.randn_like(std)\n return eps.mul(std).add_(mu)\n else:\n return mu\n\n def forward(self, x, word_emb):\n h = self.encoder(self.dropout(x))\n mu = self.fc_mu(self.dropout(h))\n logvar = self.fc_logvar(self.dropout(h))\n z = self.reparameterize(mu, logvar)\n x_hat = self.decoder(z)\n norm_x_hat = torch.norm(x_hat, 2, dim=2, keepdim=True)\n rec_x_hat = x_hat / norm_x_hat\n norm_w = torch.norm(word_emb.weight.data, 2, dim=1, keepdim=True)\n rec_w = (word_emb.weight.data / (norm_w + 1e-20)).t()\n prob_logits = torch.bmm(rec_x_hat, rec_w.unsqueeze(0).expand(\n rec_x_hat.size(0), *rec_w.size())) / self.args.tau\n log_prob = F.log_softmax(prob_logits, dim=2)\n return log_prob, mu, logvar, z\n\n def generate(self, sample_num, word_emb):\n latent_size = self.args.latent_size\n device = torch.device(self.args.device)\n z = torch.cat([torch.randn(latent_size).unsqueeze_(0) for i in\n range(sample_num)], dim=0)\n z = z.to(device)\n x_hat = self.decoder(z)\n norm_x_hat = torch.norm(x_hat, 2, dim=2, keepdim=True)\n rec_x_hat = x_hat / norm_x_hat\n norm_w = torch.norm(word_emb.weight.data, 2, dim=1, keepdim=True)\n rec_w = (word_emb.weight.data / (norm_w + 1e-20)).t()\n prob_logits = torch.bmm(rec_x_hat, rec_w.unsqueeze(0).expand(\n rec_x_hat.size(0), *rec_w.size())) / self.args.tau\n log_prob = F.log_softmax(prob_logits, dim=2)\n return log_prob\n\n\nclass NN4VAE(nn.Module):\n\n def __init__(self, args, data):\n super(NN4VAE, self).__init__()\n self.args = args\n self.word_emb = nn.Embedding(args.word_vocab_size, args.word_dim)\n self.word_emb.weight.data.copy_(data.TEXT.vocab.vectors)\n self.word_emb.weight.requires_grad = True\n nn.init.uniform_(self.word_emb.weight.data[0], -0.05, 0.05)\n self.vae = DeConvVAE(args, data)\n\n def forward(self, x):\n x = self.word_emb(x)\n log_prob, mu, logvar, z = self.vae(x, self.word_emb)\n return log_prob, mu, logvar, z\n\n def generate(self, sample_num):\n return self.vae.generate(sample_num, self.word_emb)\n\n\nclass NN4SNLI(nn.Module):\n\n def __init__(self, args, data):\n super(NN4SNLI, self).__init__()\n self.args = args\n self.word_emb = nn.Embedding(args.word_vocab_size, args.word_dim)\n self.word_emb.weight.data.copy_(data.TEXT.vocab.vectors)\n self.word_emb.weight.requires_grad = True\n nn.init.uniform_(self.word_emb.weight.data[0], -0.05, 0.05)\n self.vae = DeConvVAE(args, data)\n self.fc_1 = nn.Linear(4 * args.latent_size, args.hidden_size)\n self.fc_2 = nn.Linear(args.hidden_size, args.hidden_size)\n self.fc_out = nn.Linear(args.hidden_size, args.class_size)\n self.relu = nn.ReLU()\n\n def forward(self, batch):\n p = batch.premise\n h = batch.hypothesis\n p_x = self.word_emb(p)\n h_x = self.word_emb(h)\n p_log_prob, p_mu, p_logvar, z_p = self.vae(p_x, self.word_emb)\n h_log_prob, h_mu, h_logvar, z_h = self.vae(h_x, self.word_emb)\n m = torch.cat([z_p, z_h, z_p - z_h, z_p * z_h], dim=-1)\n out = self.relu(self.fc_1(m))\n out = self.relu(self.fc_2(out))\n out = self.fc_out(out)\n return out, p_log_prob, p_mu, p_logvar, h_log_prob, h_mu, h_logvar\n",
"<import token>\n\n\nclass DeConvVAE(nn.Module):\n\n def __init__(self, args, data):\n super(DeConvVAE, self).__init__()\n self.args = args\n self.encoder = ConvolutionEncoder(args)\n self.fc_mu = nn.Linear(args.feature_maps[2], args.latent_size)\n self.fc_logvar = nn.Linear(args.feature_maps[2], args.latent_size)\n self.decoder = DeconvolutionDecoder(args)\n self.dropout = nn.Dropout(args.dropout)\n\n def reparameterize(self, mu, logvar):\n if self.training:\n std = torch.exp(0.5 * logvar)\n eps = torch.randn_like(std)\n return eps.mul(std).add_(mu)\n else:\n return mu\n <function token>\n\n def generate(self, sample_num, word_emb):\n latent_size = self.args.latent_size\n device = torch.device(self.args.device)\n z = torch.cat([torch.randn(latent_size).unsqueeze_(0) for i in\n range(sample_num)], dim=0)\n z = z.to(device)\n x_hat = self.decoder(z)\n norm_x_hat = torch.norm(x_hat, 2, dim=2, keepdim=True)\n rec_x_hat = x_hat / norm_x_hat\n norm_w = torch.norm(word_emb.weight.data, 2, dim=1, keepdim=True)\n rec_w = (word_emb.weight.data / (norm_w + 1e-20)).t()\n prob_logits = torch.bmm(rec_x_hat, rec_w.unsqueeze(0).expand(\n rec_x_hat.size(0), *rec_w.size())) / self.args.tau\n log_prob = F.log_softmax(prob_logits, dim=2)\n return log_prob\n\n\nclass NN4VAE(nn.Module):\n\n def __init__(self, args, data):\n super(NN4VAE, self).__init__()\n self.args = args\n self.word_emb = nn.Embedding(args.word_vocab_size, args.word_dim)\n self.word_emb.weight.data.copy_(data.TEXT.vocab.vectors)\n self.word_emb.weight.requires_grad = True\n nn.init.uniform_(self.word_emb.weight.data[0], -0.05, 0.05)\n self.vae = DeConvVAE(args, data)\n\n def forward(self, x):\n x = self.word_emb(x)\n log_prob, mu, logvar, z = self.vae(x, self.word_emb)\n return log_prob, mu, logvar, z\n\n def generate(self, sample_num):\n return self.vae.generate(sample_num, self.word_emb)\n\n\nclass NN4SNLI(nn.Module):\n\n def __init__(self, args, data):\n super(NN4SNLI, self).__init__()\n self.args = args\n self.word_emb = nn.Embedding(args.word_vocab_size, args.word_dim)\n self.word_emb.weight.data.copy_(data.TEXT.vocab.vectors)\n self.word_emb.weight.requires_grad = True\n nn.init.uniform_(self.word_emb.weight.data[0], -0.05, 0.05)\n self.vae = DeConvVAE(args, data)\n self.fc_1 = nn.Linear(4 * args.latent_size, args.hidden_size)\n self.fc_2 = nn.Linear(args.hidden_size, args.hidden_size)\n self.fc_out = nn.Linear(args.hidden_size, args.class_size)\n self.relu = nn.ReLU()\n\n def forward(self, batch):\n p = batch.premise\n h = batch.hypothesis\n p_x = self.word_emb(p)\n h_x = self.word_emb(h)\n p_log_prob, p_mu, p_logvar, z_p = self.vae(p_x, self.word_emb)\n h_log_prob, h_mu, h_logvar, z_h = self.vae(h_x, self.word_emb)\n m = torch.cat([z_p, z_h, z_p - z_h, z_p * z_h], dim=-1)\n out = self.relu(self.fc_1(m))\n out = self.relu(self.fc_2(out))\n out = self.fc_out(out)\n return out, p_log_prob, p_mu, p_logvar, h_log_prob, h_mu, h_logvar\n",
"<import token>\n\n\nclass DeConvVAE(nn.Module):\n\n def __init__(self, args, data):\n super(DeConvVAE, self).__init__()\n self.args = args\n self.encoder = ConvolutionEncoder(args)\n self.fc_mu = nn.Linear(args.feature_maps[2], args.latent_size)\n self.fc_logvar = nn.Linear(args.feature_maps[2], args.latent_size)\n self.decoder = DeconvolutionDecoder(args)\n self.dropout = nn.Dropout(args.dropout)\n\n def reparameterize(self, mu, logvar):\n if self.training:\n std = torch.exp(0.5 * logvar)\n eps = torch.randn_like(std)\n return eps.mul(std).add_(mu)\n else:\n return mu\n <function token>\n <function token>\n\n\nclass NN4VAE(nn.Module):\n\n def __init__(self, args, data):\n super(NN4VAE, self).__init__()\n self.args = args\n self.word_emb = nn.Embedding(args.word_vocab_size, args.word_dim)\n self.word_emb.weight.data.copy_(data.TEXT.vocab.vectors)\n self.word_emb.weight.requires_grad = True\n nn.init.uniform_(self.word_emb.weight.data[0], -0.05, 0.05)\n self.vae = DeConvVAE(args, data)\n\n def forward(self, x):\n x = self.word_emb(x)\n log_prob, mu, logvar, z = self.vae(x, self.word_emb)\n return log_prob, mu, logvar, z\n\n def generate(self, sample_num):\n return self.vae.generate(sample_num, self.word_emb)\n\n\nclass NN4SNLI(nn.Module):\n\n def __init__(self, args, data):\n super(NN4SNLI, self).__init__()\n self.args = args\n self.word_emb = nn.Embedding(args.word_vocab_size, args.word_dim)\n self.word_emb.weight.data.copy_(data.TEXT.vocab.vectors)\n self.word_emb.weight.requires_grad = True\n nn.init.uniform_(self.word_emb.weight.data[0], -0.05, 0.05)\n self.vae = DeConvVAE(args, data)\n self.fc_1 = nn.Linear(4 * args.latent_size, args.hidden_size)\n self.fc_2 = nn.Linear(args.hidden_size, args.hidden_size)\n self.fc_out = nn.Linear(args.hidden_size, args.class_size)\n self.relu = nn.ReLU()\n\n def forward(self, batch):\n p = batch.premise\n h = batch.hypothesis\n p_x = self.word_emb(p)\n h_x = self.word_emb(h)\n p_log_prob, p_mu, p_logvar, z_p = self.vae(p_x, self.word_emb)\n h_log_prob, h_mu, h_logvar, z_h = self.vae(h_x, self.word_emb)\n m = torch.cat([z_p, z_h, z_p - z_h, z_p * z_h], dim=-1)\n out = self.relu(self.fc_1(m))\n out = self.relu(self.fc_2(out))\n out = self.fc_out(out)\n return out, p_log_prob, p_mu, p_logvar, h_log_prob, h_mu, h_logvar\n",
"<import token>\n\n\nclass DeConvVAE(nn.Module):\n\n def __init__(self, args, data):\n super(DeConvVAE, self).__init__()\n self.args = args\n self.encoder = ConvolutionEncoder(args)\n self.fc_mu = nn.Linear(args.feature_maps[2], args.latent_size)\n self.fc_logvar = nn.Linear(args.feature_maps[2], args.latent_size)\n self.decoder = DeconvolutionDecoder(args)\n self.dropout = nn.Dropout(args.dropout)\n <function token>\n <function token>\n <function token>\n\n\nclass NN4VAE(nn.Module):\n\n def __init__(self, args, data):\n super(NN4VAE, self).__init__()\n self.args = args\n self.word_emb = nn.Embedding(args.word_vocab_size, args.word_dim)\n self.word_emb.weight.data.copy_(data.TEXT.vocab.vectors)\n self.word_emb.weight.requires_grad = True\n nn.init.uniform_(self.word_emb.weight.data[0], -0.05, 0.05)\n self.vae = DeConvVAE(args, data)\n\n def forward(self, x):\n x = self.word_emb(x)\n log_prob, mu, logvar, z = self.vae(x, self.word_emb)\n return log_prob, mu, logvar, z\n\n def generate(self, sample_num):\n return self.vae.generate(sample_num, self.word_emb)\n\n\nclass NN4SNLI(nn.Module):\n\n def __init__(self, args, data):\n super(NN4SNLI, self).__init__()\n self.args = args\n self.word_emb = nn.Embedding(args.word_vocab_size, args.word_dim)\n self.word_emb.weight.data.copy_(data.TEXT.vocab.vectors)\n self.word_emb.weight.requires_grad = True\n nn.init.uniform_(self.word_emb.weight.data[0], -0.05, 0.05)\n self.vae = DeConvVAE(args, data)\n self.fc_1 = nn.Linear(4 * args.latent_size, args.hidden_size)\n self.fc_2 = nn.Linear(args.hidden_size, args.hidden_size)\n self.fc_out = nn.Linear(args.hidden_size, args.class_size)\n self.relu = nn.ReLU()\n\n def forward(self, batch):\n p = batch.premise\n h = batch.hypothesis\n p_x = self.word_emb(p)\n h_x = self.word_emb(h)\n p_log_prob, p_mu, p_logvar, z_p = self.vae(p_x, self.word_emb)\n h_log_prob, h_mu, h_logvar, z_h = self.vae(h_x, self.word_emb)\n m = torch.cat([z_p, z_h, z_p - z_h, z_p * z_h], dim=-1)\n out = self.relu(self.fc_1(m))\n out = self.relu(self.fc_2(out))\n out = self.fc_out(out)\n return out, p_log_prob, p_mu, p_logvar, h_log_prob, h_mu, h_logvar\n",
"<import token>\n\n\nclass DeConvVAE(nn.Module):\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass NN4VAE(nn.Module):\n\n def __init__(self, args, data):\n super(NN4VAE, self).__init__()\n self.args = args\n self.word_emb = nn.Embedding(args.word_vocab_size, args.word_dim)\n self.word_emb.weight.data.copy_(data.TEXT.vocab.vectors)\n self.word_emb.weight.requires_grad = True\n nn.init.uniform_(self.word_emb.weight.data[0], -0.05, 0.05)\n self.vae = DeConvVAE(args, data)\n\n def forward(self, x):\n x = self.word_emb(x)\n log_prob, mu, logvar, z = self.vae(x, self.word_emb)\n return log_prob, mu, logvar, z\n\n def generate(self, sample_num):\n return self.vae.generate(sample_num, self.word_emb)\n\n\nclass NN4SNLI(nn.Module):\n\n def __init__(self, args, data):\n super(NN4SNLI, self).__init__()\n self.args = args\n self.word_emb = nn.Embedding(args.word_vocab_size, args.word_dim)\n self.word_emb.weight.data.copy_(data.TEXT.vocab.vectors)\n self.word_emb.weight.requires_grad = True\n nn.init.uniform_(self.word_emb.weight.data[0], -0.05, 0.05)\n self.vae = DeConvVAE(args, data)\n self.fc_1 = nn.Linear(4 * args.latent_size, args.hidden_size)\n self.fc_2 = nn.Linear(args.hidden_size, args.hidden_size)\n self.fc_out = nn.Linear(args.hidden_size, args.class_size)\n self.relu = nn.ReLU()\n\n def forward(self, batch):\n p = batch.premise\n h = batch.hypothesis\n p_x = self.word_emb(p)\n h_x = self.word_emb(h)\n p_log_prob, p_mu, p_logvar, z_p = self.vae(p_x, self.word_emb)\n h_log_prob, h_mu, h_logvar, z_h = self.vae(h_x, self.word_emb)\n m = torch.cat([z_p, z_h, z_p - z_h, z_p * z_h], dim=-1)\n out = self.relu(self.fc_1(m))\n out = self.relu(self.fc_2(out))\n out = self.fc_out(out)\n return out, p_log_prob, p_mu, p_logvar, h_log_prob, h_mu, h_logvar\n",
"<import token>\n<class token>\n\n\nclass NN4VAE(nn.Module):\n\n def __init__(self, args, data):\n super(NN4VAE, self).__init__()\n self.args = args\n self.word_emb = nn.Embedding(args.word_vocab_size, args.word_dim)\n self.word_emb.weight.data.copy_(data.TEXT.vocab.vectors)\n self.word_emb.weight.requires_grad = True\n nn.init.uniform_(self.word_emb.weight.data[0], -0.05, 0.05)\n self.vae = DeConvVAE(args, data)\n\n def forward(self, x):\n x = self.word_emb(x)\n log_prob, mu, logvar, z = self.vae(x, self.word_emb)\n return log_prob, mu, logvar, z\n\n def generate(self, sample_num):\n return self.vae.generate(sample_num, self.word_emb)\n\n\nclass NN4SNLI(nn.Module):\n\n def __init__(self, args, data):\n super(NN4SNLI, self).__init__()\n self.args = args\n self.word_emb = nn.Embedding(args.word_vocab_size, args.word_dim)\n self.word_emb.weight.data.copy_(data.TEXT.vocab.vectors)\n self.word_emb.weight.requires_grad = True\n nn.init.uniform_(self.word_emb.weight.data[0], -0.05, 0.05)\n self.vae = DeConvVAE(args, data)\n self.fc_1 = nn.Linear(4 * args.latent_size, args.hidden_size)\n self.fc_2 = nn.Linear(args.hidden_size, args.hidden_size)\n self.fc_out = nn.Linear(args.hidden_size, args.class_size)\n self.relu = nn.ReLU()\n\n def forward(self, batch):\n p = batch.premise\n h = batch.hypothesis\n p_x = self.word_emb(p)\n h_x = self.word_emb(h)\n p_log_prob, p_mu, p_logvar, z_p = self.vae(p_x, self.word_emb)\n h_log_prob, h_mu, h_logvar, z_h = self.vae(h_x, self.word_emb)\n m = torch.cat([z_p, z_h, z_p - z_h, z_p * z_h], dim=-1)\n out = self.relu(self.fc_1(m))\n out = self.relu(self.fc_2(out))\n out = self.fc_out(out)\n return out, p_log_prob, p_mu, p_logvar, h_log_prob, h_mu, h_logvar\n",
"<import token>\n<class token>\n\n\nclass NN4VAE(nn.Module):\n\n def __init__(self, args, data):\n super(NN4VAE, self).__init__()\n self.args = args\n self.word_emb = nn.Embedding(args.word_vocab_size, args.word_dim)\n self.word_emb.weight.data.copy_(data.TEXT.vocab.vectors)\n self.word_emb.weight.requires_grad = True\n nn.init.uniform_(self.word_emb.weight.data[0], -0.05, 0.05)\n self.vae = DeConvVAE(args, data)\n\n def forward(self, x):\n x = self.word_emb(x)\n log_prob, mu, logvar, z = self.vae(x, self.word_emb)\n return log_prob, mu, logvar, z\n <function token>\n\n\nclass NN4SNLI(nn.Module):\n\n def __init__(self, args, data):\n super(NN4SNLI, self).__init__()\n self.args = args\n self.word_emb = nn.Embedding(args.word_vocab_size, args.word_dim)\n self.word_emb.weight.data.copy_(data.TEXT.vocab.vectors)\n self.word_emb.weight.requires_grad = True\n nn.init.uniform_(self.word_emb.weight.data[0], -0.05, 0.05)\n self.vae = DeConvVAE(args, data)\n self.fc_1 = nn.Linear(4 * args.latent_size, args.hidden_size)\n self.fc_2 = nn.Linear(args.hidden_size, args.hidden_size)\n self.fc_out = nn.Linear(args.hidden_size, args.class_size)\n self.relu = nn.ReLU()\n\n def forward(self, batch):\n p = batch.premise\n h = batch.hypothesis\n p_x = self.word_emb(p)\n h_x = self.word_emb(h)\n p_log_prob, p_mu, p_logvar, z_p = self.vae(p_x, self.word_emb)\n h_log_prob, h_mu, h_logvar, z_h = self.vae(h_x, self.word_emb)\n m = torch.cat([z_p, z_h, z_p - z_h, z_p * z_h], dim=-1)\n out = self.relu(self.fc_1(m))\n out = self.relu(self.fc_2(out))\n out = self.fc_out(out)\n return out, p_log_prob, p_mu, p_logvar, h_log_prob, h_mu, h_logvar\n",
"<import token>\n<class token>\n\n\nclass NN4VAE(nn.Module):\n <function token>\n\n def forward(self, x):\n x = self.word_emb(x)\n log_prob, mu, logvar, z = self.vae(x, self.word_emb)\n return log_prob, mu, logvar, z\n <function token>\n\n\nclass NN4SNLI(nn.Module):\n\n def __init__(self, args, data):\n super(NN4SNLI, self).__init__()\n self.args = args\n self.word_emb = nn.Embedding(args.word_vocab_size, args.word_dim)\n self.word_emb.weight.data.copy_(data.TEXT.vocab.vectors)\n self.word_emb.weight.requires_grad = True\n nn.init.uniform_(self.word_emb.weight.data[0], -0.05, 0.05)\n self.vae = DeConvVAE(args, data)\n self.fc_1 = nn.Linear(4 * args.latent_size, args.hidden_size)\n self.fc_2 = nn.Linear(args.hidden_size, args.hidden_size)\n self.fc_out = nn.Linear(args.hidden_size, args.class_size)\n self.relu = nn.ReLU()\n\n def forward(self, batch):\n p = batch.premise\n h = batch.hypothesis\n p_x = self.word_emb(p)\n h_x = self.word_emb(h)\n p_log_prob, p_mu, p_logvar, z_p = self.vae(p_x, self.word_emb)\n h_log_prob, h_mu, h_logvar, z_h = self.vae(h_x, self.word_emb)\n m = torch.cat([z_p, z_h, z_p - z_h, z_p * z_h], dim=-1)\n out = self.relu(self.fc_1(m))\n out = self.relu(self.fc_2(out))\n out = self.fc_out(out)\n return out, p_log_prob, p_mu, p_logvar, h_log_prob, h_mu, h_logvar\n",
"<import token>\n<class token>\n\n\nclass NN4VAE(nn.Module):\n <function token>\n <function token>\n <function token>\n\n\nclass NN4SNLI(nn.Module):\n\n def __init__(self, args, data):\n super(NN4SNLI, self).__init__()\n self.args = args\n self.word_emb = nn.Embedding(args.word_vocab_size, args.word_dim)\n self.word_emb.weight.data.copy_(data.TEXT.vocab.vectors)\n self.word_emb.weight.requires_grad = True\n nn.init.uniform_(self.word_emb.weight.data[0], -0.05, 0.05)\n self.vae = DeConvVAE(args, data)\n self.fc_1 = nn.Linear(4 * args.latent_size, args.hidden_size)\n self.fc_2 = nn.Linear(args.hidden_size, args.hidden_size)\n self.fc_out = nn.Linear(args.hidden_size, args.class_size)\n self.relu = nn.ReLU()\n\n def forward(self, batch):\n p = batch.premise\n h = batch.hypothesis\n p_x = self.word_emb(p)\n h_x = self.word_emb(h)\n p_log_prob, p_mu, p_logvar, z_p = self.vae(p_x, self.word_emb)\n h_log_prob, h_mu, h_logvar, z_h = self.vae(h_x, self.word_emb)\n m = torch.cat([z_p, z_h, z_p - z_h, z_p * z_h], dim=-1)\n out = self.relu(self.fc_1(m))\n out = self.relu(self.fc_2(out))\n out = self.fc_out(out)\n return out, p_log_prob, p_mu, p_logvar, h_log_prob, h_mu, h_logvar\n",
"<import token>\n<class token>\n<class token>\n\n\nclass NN4SNLI(nn.Module):\n\n def __init__(self, args, data):\n super(NN4SNLI, self).__init__()\n self.args = args\n self.word_emb = nn.Embedding(args.word_vocab_size, args.word_dim)\n self.word_emb.weight.data.copy_(data.TEXT.vocab.vectors)\n self.word_emb.weight.requires_grad = True\n nn.init.uniform_(self.word_emb.weight.data[0], -0.05, 0.05)\n self.vae = DeConvVAE(args, data)\n self.fc_1 = nn.Linear(4 * args.latent_size, args.hidden_size)\n self.fc_2 = nn.Linear(args.hidden_size, args.hidden_size)\n self.fc_out = nn.Linear(args.hidden_size, args.class_size)\n self.relu = nn.ReLU()\n\n def forward(self, batch):\n p = batch.premise\n h = batch.hypothesis\n p_x = self.word_emb(p)\n h_x = self.word_emb(h)\n p_log_prob, p_mu, p_logvar, z_p = self.vae(p_x, self.word_emb)\n h_log_prob, h_mu, h_logvar, z_h = self.vae(h_x, self.word_emb)\n m = torch.cat([z_p, z_h, z_p - z_h, z_p * z_h], dim=-1)\n out = self.relu(self.fc_1(m))\n out = self.relu(self.fc_2(out))\n out = self.fc_out(out)\n return out, p_log_prob, p_mu, p_logvar, h_log_prob, h_mu, h_logvar\n",
"<import token>\n<class token>\n<class token>\n\n\nclass NN4SNLI(nn.Module):\n\n def __init__(self, args, data):\n super(NN4SNLI, self).__init__()\n self.args = args\n self.word_emb = nn.Embedding(args.word_vocab_size, args.word_dim)\n self.word_emb.weight.data.copy_(data.TEXT.vocab.vectors)\n self.word_emb.weight.requires_grad = True\n nn.init.uniform_(self.word_emb.weight.data[0], -0.05, 0.05)\n self.vae = DeConvVAE(args, data)\n self.fc_1 = nn.Linear(4 * args.latent_size, args.hidden_size)\n self.fc_2 = nn.Linear(args.hidden_size, args.hidden_size)\n self.fc_out = nn.Linear(args.hidden_size, args.class_size)\n self.relu = nn.ReLU()\n <function token>\n",
"<import token>\n<class token>\n<class token>\n\n\nclass NN4SNLI(nn.Module):\n <function token>\n <function token>\n",
"<import token>\n<class token>\n<class token>\n<class token>\n"
] | false |
98,529 |
dd19e0e2cdf2b07dcf15567722e665dd799687fd
|
import serial
import time
import MySQLdb
from datetime import datetime
import cv2
import boto3
# the photo taken here is uploaded to the S3 bucket
port ="COM15"
brate = 9600
arduino =serial.Serial(port, baudrate = brate, timeout=None)
while True:
    # keep reading data from the Arduino
data= arduino.readline()
    # the Arduino sends \n\r at the end, so strip those characters off
str = data[:-2].decode()
print(str)
a= int(str)
    # value = 0: the gate was not passed
    # value = 1: the gate was passed
    # nobody boarded
if str =="0":
print("nobody")
else:
        # boarded, but no RF value was read, so this is a fare evader
b=arduino.readline()[:-2].decode()
b=b[:1]
#print("부정승차"+b)
if(b=="0"):
print("부정승차자")
now = datetime.today().strftime("%Y-%m-%d-%H-%M-%S")
station = "hansung-"
type = ".jpg"
filename = station + now + type
st = filename.split('-')[0]
ye = filename.split('-')[1]
mo = filename.split('-')[2]
da = filename.split('-')[3]
            # take a picture
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
cv2.imwrite(filename, frame)
cap.release()
print('나쁜놈이다 잡아라')
            # upload the picture to S3
s3 = boto3.resource('s3')
bucketname = 'dongyeon1'
bucket = s3.Bucket(bucketname)
bucket.upload_file(filename, filename, ExtraArgs={'ACL': 'public-read'})
bucketurl = "https://s3-ap-northeast-1.amazonaws.com/dongyeon1/" + filename
db = MySQLdb.connect(host='localhost', user='root', passwd='rkd123', db='pythonprogramming')
cur = db.cursor()
            # insert into the DB
sql = "insert into image(station,year,month,day,img_url) values(%s,%s,%s,%s,%s)"
cur.execute(sql, (st, ye, mo, da, bucketurl))
db.commit()
db.close()
else:
            # boarded and carried a valid RF card value
#print("b짜른 값" +b[:1])
cardid=b[:1]
print("정상승차")
db = MySQLdb.connect(host='localhost', user='root', passwd='rkd123', db='pythonprogramming')
cur = db.cursor()
            # # insert into the DB
            # current date and time
now1 = datetime.today().strftime("%Y.%m.%d")
now2 = datetime.today().strftime("%H:%M:%S")
            # boarding = 3, alighting = 4
state=3
fair =800
sql = "insert into user(station,cardid,fair,state,date,time) values(%s,%s,%s,%s,%s,%s)"
cur.execute(sql, ("hansung", cardid, fair,state,now1,now2))
db.commit()
db.close()
print("정상 결제 되셨습니다.")
|
[
"import serial\nimport time\nimport MySQLdb\nfrom datetime import datetime\nimport cv2\nimport boto3\n\n#현재 찍힌 사진이 s3 버킷에 올라감\n\nport =\"COM15\"\nbrate = 9600\narduino =serial.Serial(port, baudrate = brate, timeout=None)\n\nwhile True:\n #아두이노로부터 데이터를 계속 받아옴\n data= arduino.readline()\n\n # 아두이노에서 \\n\\r 을 보내는데 그거를 지워주려고 하는거임\n str = data[:-2].decode()\n print(str)\n a= int(str)\n\n #value =0 게이트 안지나감\n #value =1 게이트 지나감\n #아무도 탑승안함\n if str ==\"0\":\n print(\"nobody\")\n\n else:\n #탑승은 하였지만 rf값이 없어 부정승차자\n b=arduino.readline()[:-2].decode()\n b=b[:1]\n #print(\"부정승차\"+b)\n\n if(b==\"0\"):\n print(\"부정승차자\")\n now = datetime.today().strftime(\"%Y-%m-%d-%H-%M-%S\")\n\n station = \"hansung-\"\n type = \".jpg\"\n filename = station + now + type\n\n st = filename.split('-')[0]\n ye = filename.split('-')[1]\n mo = filename.split('-')[2]\n da = filename.split('-')[3]\n\n #사진찍고\n cap = cv2.VideoCapture(0)\n ret, frame = cap.read()\n cv2.imwrite(filename, frame)\n cap.release()\n print('나쁜놈이다 잡아라')\n\n #s3에 사진올리고\n s3 = boto3.resource('s3')\n bucketname = 'dongyeon1'\n bucket = s3.Bucket(bucketname)\n bucket.upload_file(filename, filename, ExtraArgs={'ACL': 'public-read'})\n bucketurl = \"https://s3-ap-northeast-1.amazonaws.com/dongyeon1/\" + filename\n\n db = MySQLdb.connect(host='localhost', user='root', passwd='rkd123', db='pythonprogramming')\n cur = db.cursor()\n\n #디비추가\n sql = \"insert into image(station,year,month,day,img_url) values(%s,%s,%s,%s,%s)\"\n cur.execute(sql, (st, ye, mo, da, bucketurl))\n\n\n db.commit()\n db.close()\n else:\n #탑승도 하였고 정상적으로 rf 카드값도 가지고 탐\n #print(\"b짜른 값\" +b[:1])\n cardid=b[:1]\n print(\"정상승차\")\n db = MySQLdb.connect(host='localhost', user='root', passwd='rkd123', db='pythonprogramming')\n cur = db.cursor()\n # #디비추가\n\n #현재 날짜 ,시간\n now1 = datetime.today().strftime(\"%Y.%m.%d\")\n now2 = datetime.today().strftime(\"%H:%M:%S\")\n # 승차 = 3 하차 = 4\n state=3\n fair =800\n\n sql = \"insert into user(station,cardid,fair,state,date,time) values(%s,%s,%s,%s,%s,%s)\"\n cur.execute(sql, (\"hansung\", cardid, fair,state,now1,now2))\n\n db.commit()\n db.close()\n\n print(\"정상 결제 되셨습니다.\")",
"import serial\nimport time\nimport MySQLdb\nfrom datetime import datetime\nimport cv2\nimport boto3\nport = 'COM15'\nbrate = 9600\narduino = serial.Serial(port, baudrate=brate, timeout=None)\nwhile True:\n data = arduino.readline()\n str = data[:-2].decode()\n print(str)\n a = int(str)\n if str == '0':\n print('nobody')\n else:\n b = arduino.readline()[:-2].decode()\n b = b[:1]\n if b == '0':\n print('부정승차자')\n now = datetime.today().strftime('%Y-%m-%d-%H-%M-%S')\n station = 'hansung-'\n type = '.jpg'\n filename = station + now + type\n st = filename.split('-')[0]\n ye = filename.split('-')[1]\n mo = filename.split('-')[2]\n da = filename.split('-')[3]\n cap = cv2.VideoCapture(0)\n ret, frame = cap.read()\n cv2.imwrite(filename, frame)\n cap.release()\n print('나쁜놈이다 잡아라')\n s3 = boto3.resource('s3')\n bucketname = 'dongyeon1'\n bucket = s3.Bucket(bucketname)\n bucket.upload_file(filename, filename, ExtraArgs={'ACL':\n 'public-read'})\n bucketurl = (\n 'https://s3-ap-northeast-1.amazonaws.com/dongyeon1/' + filename\n )\n db = MySQLdb.connect(host='localhost', user='root', passwd=\n 'rkd123', db='pythonprogramming')\n cur = db.cursor()\n sql = (\n 'insert into image(station,year,month,day,img_url) values(%s,%s,%s,%s,%s)'\n )\n cur.execute(sql, (st, ye, mo, da, bucketurl))\n db.commit()\n db.close()\n else:\n cardid = b[:1]\n print('정상승차')\n db = MySQLdb.connect(host='localhost', user='root', passwd=\n 'rkd123', db='pythonprogramming')\n cur = db.cursor()\n now1 = datetime.today().strftime('%Y.%m.%d')\n now2 = datetime.today().strftime('%H:%M:%S')\n state = 3\n fair = 800\n sql = (\n 'insert into user(station,cardid,fair,state,date,time) values(%s,%s,%s,%s,%s,%s)'\n )\n cur.execute(sql, ('hansung', cardid, fair, state, now1, now2))\n db.commit()\n db.close()\n print('정상 결제 되셨습니다.')\n",
"<import token>\nport = 'COM15'\nbrate = 9600\narduino = serial.Serial(port, baudrate=brate, timeout=None)\nwhile True:\n data = arduino.readline()\n str = data[:-2].decode()\n print(str)\n a = int(str)\n if str == '0':\n print('nobody')\n else:\n b = arduino.readline()[:-2].decode()\n b = b[:1]\n if b == '0':\n print('부정승차자')\n now = datetime.today().strftime('%Y-%m-%d-%H-%M-%S')\n station = 'hansung-'\n type = '.jpg'\n filename = station + now + type\n st = filename.split('-')[0]\n ye = filename.split('-')[1]\n mo = filename.split('-')[2]\n da = filename.split('-')[3]\n cap = cv2.VideoCapture(0)\n ret, frame = cap.read()\n cv2.imwrite(filename, frame)\n cap.release()\n print('나쁜놈이다 잡아라')\n s3 = boto3.resource('s3')\n bucketname = 'dongyeon1'\n bucket = s3.Bucket(bucketname)\n bucket.upload_file(filename, filename, ExtraArgs={'ACL':\n 'public-read'})\n bucketurl = (\n 'https://s3-ap-northeast-1.amazonaws.com/dongyeon1/' + filename\n )\n db = MySQLdb.connect(host='localhost', user='root', passwd=\n 'rkd123', db='pythonprogramming')\n cur = db.cursor()\n sql = (\n 'insert into image(station,year,month,day,img_url) values(%s,%s,%s,%s,%s)'\n )\n cur.execute(sql, (st, ye, mo, da, bucketurl))\n db.commit()\n db.close()\n else:\n cardid = b[:1]\n print('정상승차')\n db = MySQLdb.connect(host='localhost', user='root', passwd=\n 'rkd123', db='pythonprogramming')\n cur = db.cursor()\n now1 = datetime.today().strftime('%Y.%m.%d')\n now2 = datetime.today().strftime('%H:%M:%S')\n state = 3\n fair = 800\n sql = (\n 'insert into user(station,cardid,fair,state,date,time) values(%s,%s,%s,%s,%s,%s)'\n )\n cur.execute(sql, ('hansung', cardid, fair, state, now1, now2))\n db.commit()\n db.close()\n print('정상 결제 되셨습니다.')\n",
"<import token>\n<assignment token>\nwhile True:\n data = arduino.readline()\n str = data[:-2].decode()\n print(str)\n a = int(str)\n if str == '0':\n print('nobody')\n else:\n b = arduino.readline()[:-2].decode()\n b = b[:1]\n if b == '0':\n print('부정승차자')\n now = datetime.today().strftime('%Y-%m-%d-%H-%M-%S')\n station = 'hansung-'\n type = '.jpg'\n filename = station + now + type\n st = filename.split('-')[0]\n ye = filename.split('-')[1]\n mo = filename.split('-')[2]\n da = filename.split('-')[3]\n cap = cv2.VideoCapture(0)\n ret, frame = cap.read()\n cv2.imwrite(filename, frame)\n cap.release()\n print('나쁜놈이다 잡아라')\n s3 = boto3.resource('s3')\n bucketname = 'dongyeon1'\n bucket = s3.Bucket(bucketname)\n bucket.upload_file(filename, filename, ExtraArgs={'ACL':\n 'public-read'})\n bucketurl = (\n 'https://s3-ap-northeast-1.amazonaws.com/dongyeon1/' + filename\n )\n db = MySQLdb.connect(host='localhost', user='root', passwd=\n 'rkd123', db='pythonprogramming')\n cur = db.cursor()\n sql = (\n 'insert into image(station,year,month,day,img_url) values(%s,%s,%s,%s,%s)'\n )\n cur.execute(sql, (st, ye, mo, da, bucketurl))\n db.commit()\n db.close()\n else:\n cardid = b[:1]\n print('정상승차')\n db = MySQLdb.connect(host='localhost', user='root', passwd=\n 'rkd123', db='pythonprogramming')\n cur = db.cursor()\n now1 = datetime.today().strftime('%Y.%m.%d')\n now2 = datetime.today().strftime('%H:%M:%S')\n state = 3\n fair = 800\n sql = (\n 'insert into user(station,cardid,fair,state,date,time) values(%s,%s,%s,%s,%s,%s)'\n )\n cur.execute(sql, ('hansung', cardid, fair, state, now1, now2))\n db.commit()\n db.close()\n print('정상 결제 되셨습니다.')\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
98,530 |
3aad77627c19ad4bc273a6c274a407fc9988dd5d
|
import math
import time
import quadratic
import random
def time_it(f, args=None):
t0 = time.time()
print('--- Timed execution for {} ----------------'.format(f.__name__))
print('Running...')
result = f(*args) if args is not None else f()
print('Solution is {}'.format(result))
t1 = time.time()
print('Executed in {} seconds'.format(round(t1 - t0, 6)))
def distinct(x):
"""
Returns a list of unique elements.
:param x: List of elements.
:return: List of unique elements.
"""
return list(set(x))
def is_number(n):
"""
    Returns True if the number is an instance of an int
    or a float.
:param n: The number n to be tested.
:return: True if n is int or float.
"""
return isinstance(n, (int, float))
def is_unique_string(s):
"""
Determines if a given string only consists of unique
characters.
:param s: The string to test.
:return: True if the string only contains unique characters.
"""
return len(s) == len(set(s))
def divisors(x):
"""
Returns all the divisors for a number x, including x.
e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]
:param x: number >= 1.
:return: the divisors including 1 and x.
"""
x = abs(x)
result = []
upper_bound = int(math.sqrt(x))
for i in range(1, upper_bound + 1):
if x % i == 0:
            result.append(i)
            if x // i != i:
                result.append(x // i)
return sorted(distinct(result))
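# Example usage (a quick check, assuming divisors() above is used as-is;
# 1001 = 7 * 11 * 13, and for the perfect square 36 the root 6 appears once):
#   >>> divisors(1001)
#   [1, 7, 11, 13, 77, 91, 143, 1001]
#   >>> divisors(36)
#   [1, 2, 3, 4, 6, 9, 12, 18, 36]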
def sum_of_proper_divisors_sieve(n):
"""
Generates an array with the sum of the divisors
for that index of the array. To find the sum of
divisors for 12: sieve[12].
:param n: Upper limit of numbers.
:return: List with sum of divisors.
"""
sieve = [1] * (n + 1)
for i in range(2, n // 2 + 1):
        for j in range(i + i, n + 1, i):
sieve[j] += i
return sieve
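# Example of how the sieve is read (index = number, value = sum of its
# proper divisors):
#   >>> sieve = sum_of_proper_divisors_sieve(30)
#   >>> sieve[12]    # 1 + 2 + 3 + 4 + 6
#   16
#   >>> sieve[28]    # 28 is perfect: 1 + 2 + 4 + 7 + 14
#   28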
def prime_sieve(n):
"""
Generates an array which determines if the index
of the array is a prime number. To see if 997 is
a prime number: sieve[997] == True.
:param n: Upper limit of numbers.
:return: List with boolean values.
"""
upper_bound = int(math.sqrt(n))
primes = [True] * (n + 1)
primes[0] = primes[1] = False
for i in range(upper_bound + 1):
if not primes[i]:
continue
for j in range(2, n // i + 1):
            if i*j <= n:
primes[i*j] = False
return primes
def sieve_to_list(sieve):
"""
Returns the sieve as a list where the index is the number
where it was True.
:param sieve:
:return:
"""
return [i for i, v in enumerate(sieve) if v]
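# Example combining the two helpers above (composites are marked up to and
# including n, so only primes survive):
#   >>> sieve_to_list(prime_sieve(30))
#   [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]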
def triangle_number(n):
"""
Calculate the nth triangle number.
    :param n: Tn
:return: Triangle number for n.
"""
return n * (n + 1) // 2
def is_triangle_number(n):
"""
Tests if a number is a triangle number. Solved with the
inverse of n(n+1)/2, and testing if that solution
is integer.
:param n: Number to test.
:return: True if it is a triangle number.
"""
_, x = quadratic.solve(1, 1, -2*n)
return is_number(x) and x.is_integer()
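# Worked example: 55 is triangular because n * (n + 1) / 2 == 55 leads to
# n^2 + n - 110 == 0, whose positive root is the integer n == 10, while 56
# gives n^2 + n - 112 == 0 and sqrt(449) is not an integer. This assumes
# quadratic.solve() returns the positive root as its second value, which is
# what the unpacking above relies on.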
def triangle_number_sieve(n):
"""
Generates a sieve which can be used to tell if a number
is a triangle number.
:param n: Up to which n.
:return: Sieve with boolean values, sieve[3] = True.
"""
triangle_numbers = [False] * (n + 1)
tn = i = 1
    while tn <= n:
triangle_numbers[triangle_number(i)] = True
i += 1
tn = triangle_number(i)
return triangle_numbers
def hexagonal_number(n):
"""
Calculate the nth hexagonal number.
:param n: Hn
:return: Hexagonal number
"""
return n * (2 * n - 1)
def is_hexagonal_number(n):
"""
Determines if n is a hexagonal number.
    :param n: Number to test.
    :return: True if n is a hexagonal number.
"""
_, x = quadratic.solve(2, -1, -n)
return is_number(x) and x.is_integer()
def pentagonal_number(n):
    """Calculate the nth pentagonal number."""
    return n * (3 * n - 1) / 2
def is_pentagonal_number(n):
"""
Determines if n is a pentagonal number.
:param n:
:return: True if pentagonal.
"""
_, x = quadratic.solve(3, -1, -2 * n)
return is_number(x) and x.is_integer()
def proper_divisors(x):
"""
Returns all the proper divisors for a number x, excluding x.
e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]
:param x: number >= 1.
:return: the divisors excluding itself.
"""
return divisors(x)[:-1]
def restricted_divisors(x):
"""
Returns all the restricted divisors for a number x, excluding 1 and x.
e.g divisors(1001) = [7, 11, 13, 77, 91, 143]
:param x: number >= 1.
:return: the divisors excluding 1 and itself.
"""
return divisors(x)[1:-1]
def is_perfect_number(x):
"""
Test if a number is a perfect number. A number is perfect
if the sum of the proper divisors is equal to itself.
:param x: number to test.
:return: True if it is a perfect number.
"""
return sum(proper_divisors(x)) == x
def is_abundant_number(x):
"""
Test if a number is an abundant number. A number is abundant
if the sum of the proper divisors is greater than the number
itself.
:param x: number to test.
:return: True if it is an abundant number.
"""
return sum(proper_divisors(x)) > x
def is_deficient_number(x):
"""
Test if a number is a deficient number. A number is deficient
if the sum of the proper divisors is less than the number
itself.
:param x: number to test.
:return: True if it is a deficient number.
"""
return sum(proper_divisors(x)) < x
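# Examples of the three classifications above (divisor sums checked by hand):
#     >>> is_perfect_number(28)    # 1 + 2 + 4 + 7 + 14 == 28
#     True
#     >>> is_abundant_number(12)   # 1 + 2 + 3 + 4 + 6 == 16 > 12
#     True
#     >>> is_deficient_number(8)   # 1 + 2 + 4 == 7 < 8
#     True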
def digits(x):
"""
Returns the digits of a number in a list.
    :param x: The number to split into digits.
    :return: List with the digits of x.
"""
return [int(d) for d in str(x)]
def digits_to_int(x):
"""
Concatenate a list of digits to an integer.
:param x:
:return:
"""
if x is None:
return ""
return int(''.join([str(i) for i in x]))
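# The two digit helpers above are inverses of each other:
#     >>> digits(1405)
#     [1, 4, 0, 5]
#     >>> digits_to_int([1, 4, 0, 5])
#     1405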
def is_fibonacci_number(x):
"""
Test if x is a Fibonacci number.
:param x: Number to test.
:return: True if it is a Fibonacci number.
"""
a = math.sqrt(5 * x ** 2 + 4)
b = math.sqrt(5 * x ** 2 - 4)
return a.is_integer() or b.is_integer()
def fibonacci_n(n):
"""
Calculate the nth Fibonacci number (Fn).
:param n: which number to calculate.
:return: The nth Fibonacci number.
"""
sqrt5 = math.sqrt(5)
phi = (1 + sqrt5) / 2
psi = (1 - sqrt5) / 2
return (phi**n - psi**n) // sqrt5
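# Examples for the Fibonacci helpers above. Note that fibonacci_n uses
# Binet's formula with floats, so it returns a float and is exact only
# for moderate n.
#     >>> is_fibonacci_number(21)
#     True
#     >>> fibonacci_n(10)
#     55.0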
def fibonacci_n_inv(x):
"""
Calculate the n for Fn for a Fibonacci number.
:param x: Fibonacci number.
:return: The position of the Fibonacci number (Fn)
"""
if x < 2:
raise ValueError('Function approximation is wrong when x < 2.')
sqrt5 = math.sqrt(5)
phi = (1 + sqrt5) / 2
rad = 5 * x**2
p = math.sqrt(5*x**2 + 4)
n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) \
if p.is_integer() \
else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)
return round(n)
def gcd(a, b):
"""
Determines the greatest common divisor for a and b
with the Euclidean Algorithm.
:param a: First number.
:param b: Second number.
:return: Greatest common divisor for a and b.
"""
a = abs(a)
b = abs(b)
if a == b:
return a
if b > a:
a, b = b, a
q = a // b
r = a - b * q
while r != 0:
a = b
b = r
q = a // b
r = a - b * q
return b
def lcm(a, b):
"""
Calculate the least common multiple (LCM) with the GCD
algorithm using: LCM(a,b) = (a*b)/GCD(a,b).
:param a:
:param b:
:return:
"""
return a * b // gcd(a, b)
def lcm3(a, b, c):
"""
Calculating the LCM for multiple digits is done with
LCM(a,b,c) = LCM(LCM(a,b),c)
:param a:
:param b:
:param c:
:return:
"""
return lcm(lcm(a, b), c)
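# Examples for the gcd/lcm helpers above:
#     >>> gcd(1071, 462)
#     21
#     >>> lcm(4, 6)
#     12
#     >>> lcm3(4, 6, 10)
#     60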
def primitive_pythagorean_triplet_generator(n=math.inf):
"""
Generates n primitive pythagorean triplets.
:param n:
:return:
"""
v = 2
u = 1
while n > 0:
if not(is_odd(v) and is_odd(u)) and gcd(u, v) == 1:
a = v*v - u*u
b = 2*v*u
c = u*u + v*v
if a > b:
a, b = b, a
n -= 1
yield (a, b, c)
u += 1
if u >= v:
v += 1
u = 1
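# Example: taking the first few primitive triplets from the generator
# above (itertools is only needed for this illustration).
#     >>> import itertools
#     >>> list(itertools.islice(primitive_pythagorean_triplet_generator(), 2))
#     [(3, 4, 5), (5, 12, 13)]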
def prime_counting_function(n):
"""
Return the number of primes below a given number.
This is calculated with the proportionality which
states that π(n) ~ n / log(n).
:param n: Upper bound.
:return: Estimate of the number of primes below the
bound.
"""
return n / math.log(n)
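# Example: the estimate above is deliberately rough; n / log(n) gives
# about 21.7 for n = 100, while the true count pi(100) is 25.
#     >>> round(prime_counting_function(100), 1)
#     21.7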
def lambertw(x):
"""
Lambert W function with Newton's Method.
:param x:
:return:
"""
eps = 1e-8
w = x
while True:
ew = math.exp(w)
w_new = w - (w * ew - x) / (w * ew + ew)
if abs(w - w_new) <= eps:
break
w = w_new
return w
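# Example: W(1) is the omega constant, roughly 0.567143.
#     >>> round(lambertw(1), 6)
#     0.567143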
def prime_counting_function_inv(y):
"""
Returns the upper bound for a given number of primes.
:param y: How many primes you want.
:return: Upper bound.
"""
x = 2
while x / math.log(x) < y:
x += 1
return x
def product(numbers):
"""
Returns the product of a list of numbers.
:param numbers:
:return:
"""
p = 1
for x in numbers:
p *= x
return p
def factorial(n):
"""
Returns the factorial n! of a number.
:param n:
:return:
"""
return product(range(1, n + 1))
def is_even(n):
"""
Returns true if a number is even.
:param n:
:return:
"""
return not n & 1
def is_odd(n):
"""
Returns true if a number is odd.
:param n:
:return:
"""
return n & 1
def permutations(a):
"""
Generates all the permutations for a set.
:param a:
:return:
"""
n = len(a)
return _heap_perm_(n, a)
def _heap_perm_(n, a):
"""
Heap's permutation algorithm.
https://stackoverflow.com/a/29044942
:param n:
:param a:
:return:
"""
if n == 1:
yield a
else:
for i in range(n-1):
for hp in _heap_perm_(n-1, a):
yield list(hp)
j = 0 if (n % 2) == 1 else i
a[j], a[n - 1] = a[n - 1], a[j]
for hp in _heap_perm_(n-1, a):
yield list(hp)
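# Example: Heap's algorithm above yields every ordering exactly once.
# Note that it permutes the input list in place while generating.
#     >>> len(list(permutations([1, 2, 3])))
#     6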
def shift(a, n=1):
"""
Shift all the elements in the list by n.
:param a:
:param n:
:return:
"""
return a[n:] + a[:n]
def is_palindrome(x):
"""
Returns true if a number or a string is a palindrome.
:param x:
:return:
"""
strx = str(x)
return strx == strx[::-1]
# chars = [c for c in x] if not is_number(x) else digits(x)
# for i in range(len(chars) // 2):
# if chars[i] != chars[len(chars) - i - 1]:
# return False
# return True
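# The string-reversal test above works for both ints and strings:
#     >>> is_palindrome(1234554321)
#     True
#     >>> is_palindrome("racecar")
#     True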
def is_pandigital_to_n(x, n, zero_based=False):
"""
Returns true if a list of numbers is pandigital from 1 up to n.
:param x:
:param n:
:param zero_based:
:return:
"""
    # Compare sorted digits so repeated digits cannot slip through.
    return sorted(x) == list(range(0 if zero_based else 1, n + 1))
def to_binary_string(x):
"""
Useful to convert a number into a binary number.
:param x:
:return:
"""
return "{0:b}".format(x)
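# Examples for the two helpers above:
#     >>> is_pandigital_to_n([2, 1, 3], 3)
#     True
#     >>> to_binary_string(10)
#     '1010'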
def _palindrome_number_generator():
"""
https://stackoverflow.com/a/16344628
:return:
"""
yield 0
lower = 1
while True:
higher = lower*10
for i in range(lower, higher):
s = str(i)
yield int(s+s[-2::-1])
for i in range(lower, higher):
s = str(i)
yield int(s+s[::-1])
lower = higher
def palindrome_generator(lower, upper):
"""
Generates all palindromes between [lower, upper].
https://stackoverflow.com/a/16344628
:param lower:
:param upper:
:return:
"""
all_palindrome_numbers = _palindrome_number_generator()
for p in all_palindrome_numbers:
if p >= lower:
break
palindrome_list = [p]
for p in all_palindrome_numbers:
# Because we use the same generator object,
# p continues where the previous loop halted.
if p >= upper:
break
palindrome_list.append(p)
return palindrome_list
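# Example: all two-digit palindromes.
#     >>> palindrome_generator(10, 100)
#     [11, 22, 33, 44, 55, 66, 77, 88, 99]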
def string_split_2d(data, field_delimiter=',', line_delimiter='\n'):
"""
Split a string of 2D data into lists. Example of the data
1,2
3,4
5,6
to:
[[1,2],[3,4],[5,6]]
:param data:
    :param field_delimiter: delimiter used between separate fields, default: ,
:param line_delimiter: delimiter used between lines, default: \n
:return: 2D list
"""
return [line.split(field_delimiter) for line in data.split(line_delimiter)]
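# Example: note that the fields stay strings; convert them afterwards
# if numbers are needed.
#     >>> string_split_2d("1,2\n3,4")
#     [['1', '2'], ['3', '4']]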
def simplify_fraction(a, b):
"""
Simplifies a fraction to the lowest common form.
:param a:
:param b:
:return:
"""
c = gcd(a, b)
return a // c, b // c
def modpow(a, n, p):
"""
Use Fermat's little theorem to calculate a^n mod p, which
can handle very large exponents. Calculates in O(log n) time.
:param a: base
:param n: exponent
:param p: mod
:return: (a^n) mod p
"""
res = 1
a = a % p
while n > 0:
# if n is odd
if n & 1:
res = (res * a) % p
n = n >> 1 # n = n / 2
a = (a*a) % p
return res
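# Example: the square-and-multiply loop above agrees with Python's
# built-in three-argument pow, but is written out explicitly.
#     >>> modpow(2, 10, 1000)
#     24
#     >>> modpow(7, 123456789, 1000) == pow(7, 123456789, 1000)
#     True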
def is_prime(n, k):
"""
Test if a number n is prime k-times.
:param n: The prime number to be tested.
:param k: The number of tests.
:return:
"""
if n <= 1 or n == 4:
return False
if n <= 3:
return True
if is_even(n):
return False
while k > 0:
        # Take a random int in [2, n-1]
a = random.randint(2, n-1)
# Check if a and n are co-prime.
if gcd(n, a) != 1:
return False
# Fermat's little theorem
if modpow(a, n-1, n) != 1:
return False
k -= 1
return True
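# Example: the Fermat test above never rejects a true prime; for a
# composite the result is probabilistic, and k controls the confidence.
#     >>> is_prime(997, 10)
#     True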
def _first_index_with_bigger_neighbour(P):
"""
Find the first index from the right whose element is larger
than his neighbour.
:param P:
:return:
"""
i = len(P) - 1
while i > 0 and P[i-1] >= P[i]:
i -= 1
return i
def _first_index_with_smaller_neighbour(P):
"""
Find the first index from the right whose element is smaller
than his neighbour.
:param P:
:return:
"""
i = len(P) - 1
while i > 0 and P[i-1] <= P[i]:
i -= 1
return i
def next_permutation(P):
"""
For any given permutation P, give the next permutation.
If there is no next permutation, P will be returned.
:param P:
:return:
"""
n = len(P)
# Find the first index with the bigger neighbour.
i = _first_index_with_bigger_neighbour(P)
# If this is the first, where i=0, then there is no next permutation.
if i == 0:
return P
# From the right, find a value in P that is smaller than
# the previous found value.
j = n - 1
while P[j] <= P[i-1]:
j -= 1
# Swap the values
P[i-1], P[j] = P[j], P[i-1]
# Restore the tail of the permutation.
j = n - 1
while i < j:
P[i], P[j] = P[j], P[i]
i += 1
j -= 1
return P
def previous_permutation(P):
"""
For any given permutation P, give the previous permutation.
    If there is no previous permutation, P will be returned.
:param P:
:return:
"""
n = len(P)
# Find the first index with the smaller neighbour.
i = _first_index_with_smaller_neighbour(P)
# If this is the first, where i=0, then there is no next permutation.
if i == 0:
return P
# From the right, find a value in P that is bigger than
# the previous found value.
j = n - 1
while P[j] >= P[i-1]:
j -= 1
# Swap the values
P[i-1], P[j] = P[j], P[i-1]
# Restore the tail of the permutation.
j = n - 1
while i < j:
P[i], P[j] = P[j], P[i]
i += 1
j -= 1
return P
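# Example for the two permutation steppers above. Both modify the list
# in place and also return it.
#     >>> next_permutation([1, 2, 3])
#     [1, 3, 2]
#     >>> previous_permutation([1, 3, 2])
#     [1, 2, 3]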
def prime_factorization(x, sieve=None):
"""
Factorizes a number into the prime factorization.
Requires a sieve to be quick, if sieve is not specified
it will generate one itself.
:param x:
:param sieve:
:return:
"""
if x == 0:
return []
if x in [1, 2]:
return [x]
if sieve is None:
sieve = prime_sieve(x + 1)
factors = []
if sieve[x]:
return [x]
for i in range(2, int(math.sqrt(x) + 1)):
if sieve[x]:
break
if not sieve[i]:
continue
if x % i == 0:
factors.append(i)
x //= i
    # Sort so the factors come out in ascending order, e.g. 12 -> [2, 2, 3].
    return sorted(factors + prime_factorization(x, sieve))
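# Example: factors are returned with multiplicity, in ascending order.
#     >>> prime_factorization(12)
#     [2, 2, 3]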
def is_permutation(A, B):
"""
Returns true if A and B are permutations of each other.
:param A:
:param B:
:return:
"""
    # Sort both sequences so repeated elements are counted correctly.
    return sorted(A) == sorted(B)
def is_permutation3(A, B, C):
"""
Returns true if A, B and C are permutations of each other.
:param A:
:param B:
:param C:
:return:
"""
    return sorted(A) == sorted(B) == sorted(C)
def equal_sets(S):
"""
Returns true if all the sets s in S are equal
to each other.
:param S:
:return:
"""
s0 = S[0]
res = True
for i in range(1, len(S)):
res = res and s0 == S[i]
return res
def union_sets(S):
"""
Returns the union of all sets in S.
:param S:
:return:
"""
res = set()
for s in S:
res |= s
return res
def intersect_sets(S):
"""
Returns the intersection of all sets in S.
:param S:
:return:
"""
res = S[0]
for s in S:
res &= s
return res
def cumsum(L):
"""
Returns a list with the cumulative sum of a list L.
    :param L:
:return:
"""
for i in range(1, len(L)):
L[i] += L[i-1]
return L
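# Minimal self-check for this module. It only exercises helpers whose
# results are easy to verify by hand, and it runs only when the file is
# executed directly; importing the module is unaffected.
if __name__ == '__main__':
    assert divisors(28) == [1, 2, 4, 7, 14, 28]
    assert gcd(1071, 462) == 21
    assert lcm(4, 6) == 12
    assert triangle_number(10) == 55
    assert factorial(5) == 120
    assert digits_to_int([1, 2, 3]) == 123
    assert simplify_fraction(8, 12) == (2, 3)
    assert modpow(2, 10, 1000) == 24
    assert is_palindrome(12321)
    assert next_permutation([1, 2, 3]) == [1, 3, 2]
    assert cumsum([1, 2, 3, 4]) == [1, 3, 6, 10]
    print('All sanity checks passed.')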
|
[
"import math\nimport time\nimport quadratic\nimport random\n\n\ndef time_it(f, args=None):\n t0 = time.time()\n print('--- Timed execution for {} ----------------'.format(f.__name__))\n print('Running...')\n result = f(*args) if args is not None else f()\n print('Solution is {}'.format(result))\n t1 = time.time()\n print('Executed in {} seconds'.format(round(t1 - t0, 6)))\n\n\ndef distinct(x):\n \"\"\"\n Returns a list of unique elements.\n :param x: List of elements.\n :return: List of unique elements.\n \"\"\"\n return list(set(x))\n\n\ndef is_number(n):\n \"\"\"\n Returns true if the number is an instance of an int.\n or a float.\n :param n: The number n to be tested.\n :return: True if n is int or float.\n \"\"\"\n return isinstance(n, (int, float))\n\n\ndef is_unique_string(s):\n \"\"\"\n Determines if a given string only consists of unique\n characters.\n :param s: The string to test.\n :return: True if the string only contains unique characters.\n \"\"\"\n return len(s) == len(set(s))\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x//i)\n return sorted(distinct(result))\n\n\ndef sum_of_proper_divisors_sieve(n):\n \"\"\"\n Generates an array with the sum of the divisors\n for that index of the array. To find the sum of\n divisors for 12: sieve[12].\n :param n: Upper limit of numbers.\n :return: List with sum of divisors.\n \"\"\"\n sieve = [1] * (n + 1)\n for i in range(2, n // 2 + 1):\n for j in range(i + i, n, i):\n sieve[j] += i\n return sieve\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i*j < n:\n primes[i*j] = False\n return primes\n\n\ndef sieve_to_list(sieve):\n \"\"\"\n Returns the sieve as a list where the index is the number\n where it was True.\n :param sieve:\n :return:\n \"\"\"\n return [i for i, v in enumerate(sieve) if v]\n\n\ndef triangle_number(n):\n \"\"\"\n Calculate the nth triangle number.\n :param n: Fn\n :return: Triangle number for n.\n \"\"\"\n return n * (n + 1) // 2\n\n\ndef is_triangle_number(n):\n \"\"\"\n Tests if a number is a triangle number. 
Solved with the\n inverse of n(n+1)/2, and testing if that solution\n is integer.\n :param n: Number to test.\n :return: True if it is a triangle number.\n \"\"\"\n _, x = quadratic.solve(1, 1, -2*n)\n return is_number(x) and x.is_integer()\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\ndef is_hexagonal_number(n):\n \"\"\"\n Determines if n is a hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n _, x = quadratic.solve(2, -1, -n)\n return is_number(x) and x.is_integer()\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\ndef is_pentagonal_number(n):\n \"\"\"\n Determines if n is a pentagonal number.\n :param n:\n :return: True if pentagonal.\n \"\"\"\n _, x = quadratic.solve(3, -1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\ndef is_abundant_number(x):\n \"\"\"\n Test if a number is an abundant number. A number is abundant\n if the sum of the proper divisors is greater than the number\n itself.\n :param x: number to test.\n :return: True if it is an abundant number.\n \"\"\"\n return sum(proper_divisors(x)) > x\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. 
A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\ndef digits_to_int(x):\n \"\"\"\n Concatenate a list of digits to an integer.\n :param x:\n :return:\n \"\"\"\n if x is None:\n return \"\"\n return int(''.join([str(i) for i in x]))\n\n\ndef is_fibonacci_number(x):\n \"\"\"\n Test if x is a Fibonacci number.\n :param x: Number to test.\n :return: True if it is a Fibonacci number.\n \"\"\"\n a = math.sqrt(5 * x ** 2 + 4)\n b = math.sqrt(5 * x ** 2 - 4)\n return a.is_integer() or b.is_integer()\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi**n - psi**n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x**2\n p = math.sqrt(5*x**2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) \\\n if p.is_integer() \\\n else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\ndef gcd(a, b):\n \"\"\"\n Determines the greatest common divisor for a and b\n with the Euclidean Algorithm.\n :param a: First number.\n :param b: Second number.\n :return: Greatest common divisor for a and b.\n \"\"\"\n a = abs(a)\n b = abs(b)\n if a == b:\n return a\n if b > a:\n a, b = b, a\n q = a // b\n r = a - b * q\n while r != 0:\n a = b\n b = r\n q = a // b\n r = a - b * q\n return b\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\ndef lcm3(a, b, c):\n \"\"\"\n Calculating the LCM for multiple digits is done with\n LCM(a,b,c) = LCM(LCM(a,b),c)\n :param a:\n :param b:\n :param c:\n :return:\n \"\"\"\n return lcm(lcm(a, b), c)\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not(is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v*v - u*u\n b = 2*v*u\n c = u*u + v*v\n if a > b:\n a, b = b, a\n n -= 1\n yield (a, b, c)\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\ndef prime_counting_function(n):\n \"\"\"\n Return the number of primes below a given number.\n This is calculated with the proportionality which\n states that π(n) ~ n / log(n).\n :param n: Upper bound.\n :return: Estimate of the number of primes below the\n bound.\n \"\"\"\n return n / math.log(n)\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-8\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n 
\"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\ndef factorial(n):\n \"\"\"\n Returns the factorial n! of a number.\n :param n:\n :return:\n \"\"\"\n return product(range(1, n + 1))\n\n\ndef is_even(n):\n \"\"\"\n Returns true if a number is even.\n :param n:\n :return:\n \"\"\"\n return not n & 1\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\ndef permutations(a):\n \"\"\"\n Generates all the permutations for a set.\n :param a:\n :return:\n \"\"\"\n n = len(a)\n return _heap_perm_(n, a)\n\n\ndef _heap_perm_(n, a):\n \"\"\"\n Heap's permutation algorithm.\n https://stackoverflow.com/a/29044942\n :param n:\n :param a:\n :return:\n \"\"\"\n if n == 1:\n yield a\n else:\n for i in range(n-1):\n for hp in _heap_perm_(n-1, a):\n yield list(hp)\n j = 0 if (n % 2) == 1 else i\n a[j], a[n - 1] = a[n - 1], a[j]\n for hp in _heap_perm_(n-1, a):\n yield list(hp)\n\n\ndef shift(a, n=1):\n \"\"\"\n Shift all the elements in the list by n.\n :param a:\n :param n:\n :return:\n \"\"\"\n return a[n:] + a[:n]\n\n\ndef is_palindrome(x):\n \"\"\"\n Returns true if a number or a string is a palindrome.\n :param x:\n :return:\n \"\"\"\n strx = str(x)\n return strx == strx[::-1]\n # chars = [c for c in x] if not is_number(x) else digits(x)\n # for i in range(len(chars) // 2):\n # if chars[i] != chars[len(chars) - i - 1]:\n # return False\n # return True\n\n\ndef is_pandigital_to_n(x, n, zero_based=False):\n \"\"\"\n Returns true if a list of numbers is pandigital from 1 up to n.\n :param x:\n :param n:\n :param zero_based:\n :return:\n \"\"\"\n return set(x) == set(range(0 if zero_based else 1, n + 1))\n\n\ndef to_binary_string(x):\n \"\"\"\n Useful to convert a number into a binary number.\n :param x:\n :return:\n \"\"\"\n return \"{0:b}\".format(x)\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower*10\n for i in range(lower, higher):\n s = str(i)\n yield int(s+s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s+s[::-1])\n lower = higher\n\n\ndef palindrome_generator(lower, upper):\n \"\"\"\n Generates all palindromes between [lower, upper].\n https://stackoverflow.com/a/16344628\n :param lower:\n :param upper:\n :return:\n \"\"\"\n all_palindrome_numbers = _palindrome_number_generator()\n for p in all_palindrome_numbers:\n if p >= lower:\n break\n palindrome_list = [p]\n for p in all_palindrome_numbers:\n # Because we use the same generator object,\n # p continues where the previous loop halted.\n if p >= upper:\n break\n palindrome_list.append(p)\n return palindrome_list\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. 
Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \\n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\ndef simplify_fraction(a, b):\n \"\"\"\n Simplifies a fraction to the lowest common form.\n :param a:\n :param b:\n :return:\n \"\"\"\n c = gcd(a, b)\n return a // c, b // c\n\n\ndef modpow(a, n, p):\n \"\"\"\n Use Fermat's little theorem to calculate a^n mod p, which\n can handle very large exponents. Calculates in O(log n) time.\n :param a: base\n :param n: exponent\n :param p: mod\n :return: (a^n) mod p\n \"\"\"\n res = 1\n a = a % p\n while n > 0:\n # if n is odd\n if n & 1:\n res = (res * a) % p\n n = n >> 1 # n = n / 2\n a = (a*a) % p\n\n return res\n\n\ndef is_prime(n, k):\n \"\"\"\n Test if a number n is prime k-times.\n :param n: The prime number to be tested.\n :param k: The number of tests.\n :return:\n \"\"\"\n if n <= 1 or n == 4:\n return False\n if n <= 3:\n return True\n if is_even(n):\n return False\n while k > 0:\n\n # Take random int in [2, n-2]\n a = random.randint(2, n-1)\n\n # Check if a and n are co-prime.\n if gcd(n, a) != 1:\n return False\n\n # Fermat's little theorem\n if modpow(a, n-1, n) != 1:\n return False\n\n k -= 1\n\n return True\n\n\ndef _first_index_with_bigger_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is larger\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i-1] >= P[i]:\n i -= 1\n return i\n\n\ndef _first_index_with_smaller_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is smaller\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i-1] <= P[i]:\n i -= 1\n return i\n\n\ndef next_permutation(P):\n \"\"\"\n For any given permutation P, give the next permutation.\n If there is no next permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n\n # Find the first index with the bigger neighbour.\n i = _first_index_with_bigger_neighbour(P)\n\n # If this is the first, where i=0, then there is no next permutation.\n if i == 0:\n return P\n\n # From the right, find a value in P that is smaller than\n # the previous found value.\n j = n - 1\n while P[j] <= P[i-1]:\n j -= 1\n\n # Swap the values\n P[i-1], P[j] = P[j], P[i-1]\n\n # Restore the tail of the permutation.\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n\n return P\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n\n # Find the first index with the smaller neighbour.\n i = _first_index_with_smaller_neighbour(P)\n\n # If this is the first, where i=0, then there is no next permutation.\n if i == 0:\n return P\n\n # From the right, find a value in P that is bigger than\n # the previous found value.\n j = n - 1\n while P[j] >= P[i-1]:\n j -= 1\n\n # Swap the values\n P[i-1], P[j] = P[j], P[i-1]\n\n # Restore the tail of the permutation.\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param 
sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\ndef is_permutation(A, B):\n \"\"\"\n Returns true if A and B are permutations of each other.\n :param A:\n :param B:\n :return:\n \"\"\"\n return set(A) == set(B)\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\ndef union_sets(S):\n \"\"\"\n Returns the union of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = set()\n for s in S:\n res |= s\n return res\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\ndef cumsum(L):\n \"\"\"\n Returns a list with the cumulative sum of a list L.\n :param S:\n :return:\n \"\"\"\n for i in range(1, len(L)):\n L[i] += L[i-1]\n return L\n",
"import math\nimport time\nimport quadratic\nimport random\n\n\ndef time_it(f, args=None):\n t0 = time.time()\n print('--- Timed execution for {} ----------------'.format(f.__name__))\n print('Running...')\n result = f(*args) if args is not None else f()\n print('Solution is {}'.format(result))\n t1 = time.time()\n print('Executed in {} seconds'.format(round(t1 - t0, 6)))\n\n\ndef distinct(x):\n \"\"\"\n Returns a list of unique elements.\n :param x: List of elements.\n :return: List of unique elements.\n \"\"\"\n return list(set(x))\n\n\ndef is_number(n):\n \"\"\"\n Returns true if the number is an instance of an int.\n or a float.\n :param n: The number n to be tested.\n :return: True if n is int or float.\n \"\"\"\n return isinstance(n, (int, float))\n\n\ndef is_unique_string(s):\n \"\"\"\n Determines if a given string only consists of unique\n characters.\n :param s: The string to test.\n :return: True if the string only contains unique characters.\n \"\"\"\n return len(s) == len(set(s))\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\ndef sum_of_proper_divisors_sieve(n):\n \"\"\"\n Generates an array with the sum of the divisors\n for that index of the array. To find the sum of\n divisors for 12: sieve[12].\n :param n: Upper limit of numbers.\n :return: List with sum of divisors.\n \"\"\"\n sieve = [1] * (n + 1)\n for i in range(2, n // 2 + 1):\n for j in range(i + i, n, i):\n sieve[j] += i\n return sieve\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\ndef sieve_to_list(sieve):\n \"\"\"\n Returns the sieve as a list where the index is the number\n where it was True.\n :param sieve:\n :return:\n \"\"\"\n return [i for i, v in enumerate(sieve) if v]\n\n\ndef triangle_number(n):\n \"\"\"\n Calculate the nth triangle number.\n :param n: Fn\n :return: Triangle number for n.\n \"\"\"\n return n * (n + 1) // 2\n\n\ndef is_triangle_number(n):\n \"\"\"\n Tests if a number is a triangle number. 
Solved with the\n inverse of n(n+1)/2, and testing if that solution\n is integer.\n :param n: Number to test.\n :return: True if it is a triangle number.\n \"\"\"\n _, x = quadratic.solve(1, 1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\ndef is_hexagonal_number(n):\n \"\"\"\n Determines if n is a hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n _, x = quadratic.solve(2, -1, -n)\n return is_number(x) and x.is_integer()\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\ndef is_pentagonal_number(n):\n \"\"\"\n Determines if n is a pentagonal number.\n :param n:\n :return: True if pentagonal.\n \"\"\"\n _, x = quadratic.solve(3, -1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\ndef is_abundant_number(x):\n \"\"\"\n Test if a number is an abundant number. A number is abundant\n if the sum of the proper divisors is greater than the number\n itself.\n :param x: number to test.\n :return: True if it is an abundant number.\n \"\"\"\n return sum(proper_divisors(x)) > x\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. 
A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\ndef digits_to_int(x):\n \"\"\"\n Concatenate a list of digits to an integer.\n :param x:\n :return:\n \"\"\"\n if x is None:\n return ''\n return int(''.join([str(i) for i in x]))\n\n\ndef is_fibonacci_number(x):\n \"\"\"\n Test if x is a Fibonacci number.\n :param x: Number to test.\n :return: True if it is a Fibonacci number.\n \"\"\"\n a = math.sqrt(5 * x ** 2 + 4)\n b = math.sqrt(5 * x ** 2 - 4)\n return a.is_integer() or b.is_integer()\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\ndef gcd(a, b):\n \"\"\"\n Determines the greatest common divisor for a and b\n with the Euclidean Algorithm.\n :param a: First number.\n :param b: Second number.\n :return: Greatest common divisor for a and b.\n \"\"\"\n a = abs(a)\n b = abs(b)\n if a == b:\n return a\n if b > a:\n a, b = b, a\n q = a // b\n r = a - b * q\n while r != 0:\n a = b\n b = r\n q = a // b\n r = a - b * q\n return b\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\ndef lcm3(a, b, c):\n \"\"\"\n Calculating the LCM for multiple digits is done with\n LCM(a,b,c) = LCM(LCM(a,b),c)\n :param a:\n :param b:\n :param c:\n :return:\n \"\"\"\n return lcm(lcm(a, b), c)\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\ndef prime_counting_function(n):\n \"\"\"\n Return the number of primes below a given number.\n This is calculated with the proportionality which\n states that π(n) ~ n / log(n).\n :param n: Upper bound.\n :return: Estimate of the number of primes below the\n bound.\n \"\"\"\n return n / math.log(n)\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper 
bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\ndef factorial(n):\n \"\"\"\n Returns the factorial n! of a number.\n :param n:\n :return:\n \"\"\"\n return product(range(1, n + 1))\n\n\ndef is_even(n):\n \"\"\"\n Returns true if a number is even.\n :param n:\n :return:\n \"\"\"\n return not n & 1\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\ndef permutations(a):\n \"\"\"\n Generates all the permutations for a set.\n :param a:\n :return:\n \"\"\"\n n = len(a)\n return _heap_perm_(n, a)\n\n\ndef _heap_perm_(n, a):\n \"\"\"\n Heap's permutation algorithm.\n https://stackoverflow.com/a/29044942\n :param n:\n :param a:\n :return:\n \"\"\"\n if n == 1:\n yield a\n else:\n for i in range(n - 1):\n for hp in _heap_perm_(n - 1, a):\n yield list(hp)\n j = 0 if n % 2 == 1 else i\n a[j], a[n - 1] = a[n - 1], a[j]\n for hp in _heap_perm_(n - 1, a):\n yield list(hp)\n\n\ndef shift(a, n=1):\n \"\"\"\n Shift all the elements in the list by n.\n :param a:\n :param n:\n :return:\n \"\"\"\n return a[n:] + a[:n]\n\n\ndef is_palindrome(x):\n \"\"\"\n Returns true if a number or a string is a palindrome.\n :param x:\n :return:\n \"\"\"\n strx = str(x)\n return strx == strx[::-1]\n\n\ndef is_pandigital_to_n(x, n, zero_based=False):\n \"\"\"\n Returns true if a list of numbers is pandigital from 1 up to n.\n :param x:\n :param n:\n :param zero_based:\n :return:\n \"\"\"\n return set(x) == set(range(0 if zero_based else 1, n + 1))\n\n\ndef to_binary_string(x):\n \"\"\"\n Useful to convert a number into a binary number.\n :param x:\n :return:\n \"\"\"\n return '{0:b}'.format(x)\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\ndef palindrome_generator(lower, upper):\n \"\"\"\n Generates all palindromes between [lower, upper].\n https://stackoverflow.com/a/16344628\n :param lower:\n :param upper:\n :return:\n \"\"\"\n all_palindrome_numbers = _palindrome_number_generator()\n for p in all_palindrome_numbers:\n if p >= lower:\n break\n palindrome_list = [p]\n for p in all_palindrome_numbers:\n if p >= upper:\n break\n palindrome_list.append(p)\n return palindrome_list\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\ndef simplify_fraction(a, b):\n \"\"\"\n Simplifies a fraction to the lowest common form.\n :param a:\n :param b:\n :return:\n \"\"\"\n c = gcd(a, b)\n return a // c, b // c\n\n\ndef modpow(a, n, p):\n \"\"\"\n Use Fermat's little theorem to calculate a^n mod p, which\n can handle very large exponents. 
Calculates in O(log n) time.\n :param a: base\n :param n: exponent\n :param p: mod\n :return: (a^n) mod p\n \"\"\"\n res = 1\n a = a % p\n while n > 0:\n if n & 1:\n res = res * a % p\n n = n >> 1\n a = a * a % p\n return res\n\n\ndef is_prime(n, k):\n \"\"\"\n Test if a number n is prime k-times.\n :param n: The prime number to be tested.\n :param k: The number of tests.\n :return:\n \"\"\"\n if n <= 1 or n == 4:\n return False\n if n <= 3:\n return True\n if is_even(n):\n return False\n while k > 0:\n a = random.randint(2, n - 1)\n if gcd(n, a) != 1:\n return False\n if modpow(a, n - 1, n) != 1:\n return False\n k -= 1\n return True\n\n\ndef _first_index_with_bigger_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is larger\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] >= P[i]:\n i -= 1\n return i\n\n\ndef _first_index_with_smaller_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is smaller\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] <= P[i]:\n i -= 1\n return i\n\n\ndef next_permutation(P):\n \"\"\"\n For any given permutation P, give the next permutation.\n If there is no next permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_bigger_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] <= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\ndef is_permutation(A, B):\n \"\"\"\n Returns true if A and B are permutations of each other.\n :param A:\n :param B:\n :return:\n \"\"\"\n return set(A) == set(B)\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\ndef union_sets(S):\n \"\"\"\n Returns the union of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = set()\n for s in S:\n res |= s\n return res\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\ndef cumsum(L):\n \"\"\"\n Returns a list with the 
cumulative sum of a list L.\n :param S:\n :return:\n \"\"\"\n for i in range(1, len(L)):\n L[i] += L[i - 1]\n return L\n",
"<import token>\n\n\ndef time_it(f, args=None):\n t0 = time.time()\n print('--- Timed execution for {} ----------------'.format(f.__name__))\n print('Running...')\n result = f(*args) if args is not None else f()\n print('Solution is {}'.format(result))\n t1 = time.time()\n print('Executed in {} seconds'.format(round(t1 - t0, 6)))\n\n\ndef distinct(x):\n \"\"\"\n Returns a list of unique elements.\n :param x: List of elements.\n :return: List of unique elements.\n \"\"\"\n return list(set(x))\n\n\ndef is_number(n):\n \"\"\"\n Returns true if the number is an instance of an int.\n or a float.\n :param n: The number n to be tested.\n :return: True if n is int or float.\n \"\"\"\n return isinstance(n, (int, float))\n\n\ndef is_unique_string(s):\n \"\"\"\n Determines if a given string only consists of unique\n characters.\n :param s: The string to test.\n :return: True if the string only contains unique characters.\n \"\"\"\n return len(s) == len(set(s))\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\ndef sum_of_proper_divisors_sieve(n):\n \"\"\"\n Generates an array with the sum of the divisors\n for that index of the array. To find the sum of\n divisors for 12: sieve[12].\n :param n: Upper limit of numbers.\n :return: List with sum of divisors.\n \"\"\"\n sieve = [1] * (n + 1)\n for i in range(2, n // 2 + 1):\n for j in range(i + i, n, i):\n sieve[j] += i\n return sieve\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\ndef sieve_to_list(sieve):\n \"\"\"\n Returns the sieve as a list where the index is the number\n where it was True.\n :param sieve:\n :return:\n \"\"\"\n return [i for i, v in enumerate(sieve) if v]\n\n\ndef triangle_number(n):\n \"\"\"\n Calculate the nth triangle number.\n :param n: Fn\n :return: Triangle number for n.\n \"\"\"\n return n * (n + 1) // 2\n\n\ndef is_triangle_number(n):\n \"\"\"\n Tests if a number is a triangle number. 
Solved with the\n inverse of n(n+1)/2, and testing if that solution\n is integer.\n :param n: Number to test.\n :return: True if it is a triangle number.\n \"\"\"\n _, x = quadratic.solve(1, 1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\ndef is_hexagonal_number(n):\n \"\"\"\n Determines if n is a hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n _, x = quadratic.solve(2, -1, -n)\n return is_number(x) and x.is_integer()\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\ndef is_pentagonal_number(n):\n \"\"\"\n Determines if n is a pentagonal number.\n :param n:\n :return: True if pentagonal.\n \"\"\"\n _, x = quadratic.solve(3, -1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\ndef is_abundant_number(x):\n \"\"\"\n Test if a number is an abundant number. A number is abundant\n if the sum of the proper divisors is greater than the number\n itself.\n :param x: number to test.\n :return: True if it is an abundant number.\n \"\"\"\n return sum(proper_divisors(x)) > x\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. 
A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\ndef digits_to_int(x):\n \"\"\"\n Concatenate a list of digits to an integer.\n :param x:\n :return:\n \"\"\"\n if x is None:\n return ''\n return int(''.join([str(i) for i in x]))\n\n\ndef is_fibonacci_number(x):\n \"\"\"\n Test if x is a Fibonacci number.\n :param x: Number to test.\n :return: True if it is a Fibonacci number.\n \"\"\"\n a = math.sqrt(5 * x ** 2 + 4)\n b = math.sqrt(5 * x ** 2 - 4)\n return a.is_integer() or b.is_integer()\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\ndef gcd(a, b):\n \"\"\"\n Determines the greatest common divisor for a and b\n with the Euclidean Algorithm.\n :param a: First number.\n :param b: Second number.\n :return: Greatest common divisor for a and b.\n \"\"\"\n a = abs(a)\n b = abs(b)\n if a == b:\n return a\n if b > a:\n a, b = b, a\n q = a // b\n r = a - b * q\n while r != 0:\n a = b\n b = r\n q = a // b\n r = a - b * q\n return b\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\ndef lcm3(a, b, c):\n \"\"\"\n Calculating the LCM for multiple digits is done with\n LCM(a,b,c) = LCM(LCM(a,b),c)\n :param a:\n :param b:\n :param c:\n :return:\n \"\"\"\n return lcm(lcm(a, b), c)\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\ndef prime_counting_function(n):\n \"\"\"\n Return the number of primes below a given number.\n This is calculated with the proportionality which\n states that π(n) ~ n / log(n).\n :param n: Upper bound.\n :return: Estimate of the number of primes below the\n bound.\n \"\"\"\n return n / math.log(n)\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper 
bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\ndef factorial(n):\n \"\"\"\n Returns the factorial n! of a number.\n :param n:\n :return:\n \"\"\"\n return product(range(1, n + 1))\n\n\ndef is_even(n):\n \"\"\"\n Returns true if a number is even.\n :param n:\n :return:\n \"\"\"\n return not n & 1\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\ndef permutations(a):\n \"\"\"\n Generates all the permutations for a set.\n :param a:\n :return:\n \"\"\"\n n = len(a)\n return _heap_perm_(n, a)\n\n\ndef _heap_perm_(n, a):\n \"\"\"\n Heap's permutation algorithm.\n https://stackoverflow.com/a/29044942\n :param n:\n :param a:\n :return:\n \"\"\"\n if n == 1:\n yield a\n else:\n for i in range(n - 1):\n for hp in _heap_perm_(n - 1, a):\n yield list(hp)\n j = 0 if n % 2 == 1 else i\n a[j], a[n - 1] = a[n - 1], a[j]\n for hp in _heap_perm_(n - 1, a):\n yield list(hp)\n\n\ndef shift(a, n=1):\n \"\"\"\n Shift all the elements in the list by n.\n :param a:\n :param n:\n :return:\n \"\"\"\n return a[n:] + a[:n]\n\n\ndef is_palindrome(x):\n \"\"\"\n Returns true if a number or a string is a palindrome.\n :param x:\n :return:\n \"\"\"\n strx = str(x)\n return strx == strx[::-1]\n\n\ndef is_pandigital_to_n(x, n, zero_based=False):\n \"\"\"\n Returns true if a list of numbers is pandigital from 1 up to n.\n :param x:\n :param n:\n :param zero_based:\n :return:\n \"\"\"\n return set(x) == set(range(0 if zero_based else 1, n + 1))\n\n\ndef to_binary_string(x):\n \"\"\"\n Useful to convert a number into a binary number.\n :param x:\n :return:\n \"\"\"\n return '{0:b}'.format(x)\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\ndef palindrome_generator(lower, upper):\n \"\"\"\n Generates all palindromes between [lower, upper].\n https://stackoverflow.com/a/16344628\n :param lower:\n :param upper:\n :return:\n \"\"\"\n all_palindrome_numbers = _palindrome_number_generator()\n for p in all_palindrome_numbers:\n if p >= lower:\n break\n palindrome_list = [p]\n for p in all_palindrome_numbers:\n if p >= upper:\n break\n palindrome_list.append(p)\n return palindrome_list\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\ndef simplify_fraction(a, b):\n \"\"\"\n Simplifies a fraction to the lowest common form.\n :param a:\n :param b:\n :return:\n \"\"\"\n c = gcd(a, b)\n return a // c, b // c\n\n\ndef modpow(a, n, p):\n \"\"\"\n Use Fermat's little theorem to calculate a^n mod p, which\n can handle very large exponents. 
Calculates in O(log n) time.\n :param a: base\n :param n: exponent\n :param p: mod\n :return: (a^n) mod p\n \"\"\"\n res = 1\n a = a % p\n while n > 0:\n if n & 1:\n res = res * a % p\n n = n >> 1\n a = a * a % p\n return res\n\n\ndef is_prime(n, k):\n \"\"\"\n Test if a number n is prime k-times.\n :param n: The prime number to be tested.\n :param k: The number of tests.\n :return:\n \"\"\"\n if n <= 1 or n == 4:\n return False\n if n <= 3:\n return True\n if is_even(n):\n return False\n while k > 0:\n a = random.randint(2, n - 1)\n if gcd(n, a) != 1:\n return False\n if modpow(a, n - 1, n) != 1:\n return False\n k -= 1\n return True\n\n\ndef _first_index_with_bigger_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is larger\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] >= P[i]:\n i -= 1\n return i\n\n\ndef _first_index_with_smaller_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is smaller\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] <= P[i]:\n i -= 1\n return i\n\n\ndef next_permutation(P):\n \"\"\"\n For any given permutation P, give the next permutation.\n If there is no next permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_bigger_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] <= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\ndef is_permutation(A, B):\n \"\"\"\n Returns true if A and B are permutations of each other.\n :param A:\n :param B:\n :return:\n \"\"\"\n return set(A) == set(B)\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\ndef union_sets(S):\n \"\"\"\n Returns the union of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = set()\n for s in S:\n res |= s\n return res\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\ndef cumsum(L):\n \"\"\"\n Returns a list with the 
cumulative sum of a list L.\n :param S:\n :return:\n \"\"\"\n for i in range(1, len(L)):\n L[i] += L[i - 1]\n return L\n",
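Editorial note (not part of the dataset row above): the step just serialized includes a square-and-multiply modular exponentiation and a Fermat-style probabilistic primality test. The following is a minimal, self-contained sketch of how those two helpers compose; function names mirror the step, the number of rounds and the test values are arbitrary assumptions, and math.gcd is used in place of the step's own gcd.

# Standalone illustration of the modpow / Fermat-test pair from the step above.
import math
import random


def modpow(a, n, p):
    # Square-and-multiply: computes (a ** n) % p in O(log n) multiplications.
    res = 1
    a = a % p
    while n > 0:
        if n & 1:
            res = (res * a) % p
        n >>= 1
        a = (a * a) % p
    return res


def is_probably_prime(n, k=10):
    # Fermat test as in the step: k random bases, gcd and a^(n-1) mod n checks.
    if n <= 1 or n == 4:
        return False
    if n <= 3:
        return True
    if n % 2 == 0:
        return False
    for _ in range(k):
        a = random.randint(2, n - 2)
        if math.gcd(a, n) != 1 or modpow(a, n - 1, n) != 1:
            return False
    return True


if __name__ == "__main__":
    assert modpow(3, 200, 50) == pow(3, 200, 50)   # agrees with the built-in
    print([p for p in range(2, 60) if is_probably_prime(p)])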
"<import token>\n\n\ndef time_it(f, args=None):\n t0 = time.time()\n print('--- Timed execution for {} ----------------'.format(f.__name__))\n print('Running...')\n result = f(*args) if args is not None else f()\n print('Solution is {}'.format(result))\n t1 = time.time()\n print('Executed in {} seconds'.format(round(t1 - t0, 6)))\n\n\ndef distinct(x):\n \"\"\"\n Returns a list of unique elements.\n :param x: List of elements.\n :return: List of unique elements.\n \"\"\"\n return list(set(x))\n\n\ndef is_number(n):\n \"\"\"\n Returns true if the number is an instance of an int.\n or a float.\n :param n: The number n to be tested.\n :return: True if n is int or float.\n \"\"\"\n return isinstance(n, (int, float))\n\n\ndef is_unique_string(s):\n \"\"\"\n Determines if a given string only consists of unique\n characters.\n :param s: The string to test.\n :return: True if the string only contains unique characters.\n \"\"\"\n return len(s) == len(set(s))\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\ndef sum_of_proper_divisors_sieve(n):\n \"\"\"\n Generates an array with the sum of the divisors\n for that index of the array. To find the sum of\n divisors for 12: sieve[12].\n :param n: Upper limit of numbers.\n :return: List with sum of divisors.\n \"\"\"\n sieve = [1] * (n + 1)\n for i in range(2, n // 2 + 1):\n for j in range(i + i, n, i):\n sieve[j] += i\n return sieve\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\ndef sieve_to_list(sieve):\n \"\"\"\n Returns the sieve as a list where the index is the number\n where it was True.\n :param sieve:\n :return:\n \"\"\"\n return [i for i, v in enumerate(sieve) if v]\n\n\ndef triangle_number(n):\n \"\"\"\n Calculate the nth triangle number.\n :param n: Fn\n :return: Triangle number for n.\n \"\"\"\n return n * (n + 1) // 2\n\n\ndef is_triangle_number(n):\n \"\"\"\n Tests if a number is a triangle number. 
Solved with the\n inverse of n(n+1)/2, and testing if that solution\n is integer.\n :param n: Number to test.\n :return: True if it is a triangle number.\n \"\"\"\n _, x = quadratic.solve(1, 1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\ndef is_hexagonal_number(n):\n \"\"\"\n Determines if n is a hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n _, x = quadratic.solve(2, -1, -n)\n return is_number(x) and x.is_integer()\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\ndef is_pentagonal_number(n):\n \"\"\"\n Determines if n is a pentagonal number.\n :param n:\n :return: True if pentagonal.\n \"\"\"\n _, x = quadratic.solve(3, -1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\ndef is_abundant_number(x):\n \"\"\"\n Test if a number is an abundant number. A number is abundant\n if the sum of the proper divisors is greater than the number\n itself.\n :param x: number to test.\n :return: True if it is an abundant number.\n \"\"\"\n return sum(proper_divisors(x)) > x\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. 
A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\ndef digits_to_int(x):\n \"\"\"\n Concatenate a list of digits to an integer.\n :param x:\n :return:\n \"\"\"\n if x is None:\n return ''\n return int(''.join([str(i) for i in x]))\n\n\ndef is_fibonacci_number(x):\n \"\"\"\n Test if x is a Fibonacci number.\n :param x: Number to test.\n :return: True if it is a Fibonacci number.\n \"\"\"\n a = math.sqrt(5 * x ** 2 + 4)\n b = math.sqrt(5 * x ** 2 - 4)\n return a.is_integer() or b.is_integer()\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\ndef gcd(a, b):\n \"\"\"\n Determines the greatest common divisor for a and b\n with the Euclidean Algorithm.\n :param a: First number.\n :param b: Second number.\n :return: Greatest common divisor for a and b.\n \"\"\"\n a = abs(a)\n b = abs(b)\n if a == b:\n return a\n if b > a:\n a, b = b, a\n q = a // b\n r = a - b * q\n while r != 0:\n a = b\n b = r\n q = a // b\n r = a - b * q\n return b\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\ndef lcm3(a, b, c):\n \"\"\"\n Calculating the LCM for multiple digits is done with\n LCM(a,b,c) = LCM(LCM(a,b),c)\n :param a:\n :param b:\n :param c:\n :return:\n \"\"\"\n return lcm(lcm(a, b), c)\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\ndef prime_counting_function(n):\n \"\"\"\n Return the number of primes below a given number.\n This is calculated with the proportionality which\n states that π(n) ~ n / log(n).\n :param n: Upper bound.\n :return: Estimate of the number of primes below the\n bound.\n \"\"\"\n return n / math.log(n)\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper 
bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\ndef factorial(n):\n \"\"\"\n Returns the factorial n! of a number.\n :param n:\n :return:\n \"\"\"\n return product(range(1, n + 1))\n\n\ndef is_even(n):\n \"\"\"\n Returns true if a number is even.\n :param n:\n :return:\n \"\"\"\n return not n & 1\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\ndef permutations(a):\n \"\"\"\n Generates all the permutations for a set.\n :param a:\n :return:\n \"\"\"\n n = len(a)\n return _heap_perm_(n, a)\n\n\ndef _heap_perm_(n, a):\n \"\"\"\n Heap's permutation algorithm.\n https://stackoverflow.com/a/29044942\n :param n:\n :param a:\n :return:\n \"\"\"\n if n == 1:\n yield a\n else:\n for i in range(n - 1):\n for hp in _heap_perm_(n - 1, a):\n yield list(hp)\n j = 0 if n % 2 == 1 else i\n a[j], a[n - 1] = a[n - 1], a[j]\n for hp in _heap_perm_(n - 1, a):\n yield list(hp)\n\n\ndef shift(a, n=1):\n \"\"\"\n Shift all the elements in the list by n.\n :param a:\n :param n:\n :return:\n \"\"\"\n return a[n:] + a[:n]\n\n\ndef is_palindrome(x):\n \"\"\"\n Returns true if a number or a string is a palindrome.\n :param x:\n :return:\n \"\"\"\n strx = str(x)\n return strx == strx[::-1]\n\n\ndef is_pandigital_to_n(x, n, zero_based=False):\n \"\"\"\n Returns true if a list of numbers is pandigital from 1 up to n.\n :param x:\n :param n:\n :param zero_based:\n :return:\n \"\"\"\n return set(x) == set(range(0 if zero_based else 1, n + 1))\n\n\ndef to_binary_string(x):\n \"\"\"\n Useful to convert a number into a binary number.\n :param x:\n :return:\n \"\"\"\n return '{0:b}'.format(x)\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\ndef palindrome_generator(lower, upper):\n \"\"\"\n Generates all palindromes between [lower, upper].\n https://stackoverflow.com/a/16344628\n :param lower:\n :param upper:\n :return:\n \"\"\"\n all_palindrome_numbers = _palindrome_number_generator()\n for p in all_palindrome_numbers:\n if p >= lower:\n break\n palindrome_list = [p]\n for p in all_palindrome_numbers:\n if p >= upper:\n break\n palindrome_list.append(p)\n return palindrome_list\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. 
Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\ndef simplify_fraction(a, b):\n \"\"\"\n Simplifies a fraction to the lowest common form.\n :param a:\n :param b:\n :return:\n \"\"\"\n c = gcd(a, b)\n return a // c, b // c\n\n\n<function token>\n\n\ndef is_prime(n, k):\n \"\"\"\n Test if a number n is prime k-times.\n :param n: The prime number to be tested.\n :param k: The number of tests.\n :return:\n \"\"\"\n if n <= 1 or n == 4:\n return False\n if n <= 3:\n return True\n if is_even(n):\n return False\n while k > 0:\n a = random.randint(2, n - 1)\n if gcd(n, a) != 1:\n return False\n if modpow(a, n - 1, n) != 1:\n return False\n k -= 1\n return True\n\n\ndef _first_index_with_bigger_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is larger\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] >= P[i]:\n i -= 1\n return i\n\n\ndef _first_index_with_smaller_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is smaller\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] <= P[i]:\n i -= 1\n return i\n\n\ndef next_permutation(P):\n \"\"\"\n For any given permutation P, give the next permutation.\n If there is no next permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_bigger_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] <= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\ndef is_permutation(A, B):\n \"\"\"\n Returns true if A and B are permutations of each other.\n :param A:\n :param B:\n :return:\n \"\"\"\n return set(A) == set(B)\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\ndef union_sets(S):\n \"\"\"\n Returns the union of all sets in 
S.\n :param S:\n :return:\n \"\"\"\n res = set()\n for s in S:\n res |= s\n return res\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\ndef cumsum(L):\n \"\"\"\n Returns a list with the cumulative sum of a list L.\n :param S:\n :return:\n \"\"\"\n for i in range(1, len(L)):\n L[i] += L[i - 1]\n return L\n",
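Editorial note (not part of the dataset row above): this step serializes a divisor-sum sieve plus perfect/abundant/deficient predicates. Below is a hedged re-derivation showing how the sieve feeds the classification; the sieve bounds are adjusted so sieve[n] itself is populated, and the tested values are arbitrary assumptions.

# Using a sum-of-proper-divisors sieve to classify numbers, as in the step above.
def sum_of_proper_divisors_sieve(n):
    # sieve[k] = sum of proper divisors of k, for 2 <= k <= n.
    sieve = [0, 0] + [1] * (n - 1)
    for i in range(2, n // 2 + 1):
        for j in range(2 * i, n + 1, i):
            sieve[j] += i
    return sieve


def classify(k, sieve):
    s = sieve[k]
    if s == k:
        return "perfect"
    return "abundant" if s > k else "deficient"


if __name__ == "__main__":
    sieve = sum_of_proper_divisors_sieve(30)
    assert classify(6, sieve) == "perfect"      # 1 + 2 + 3 = 6
    assert classify(12, sieve) == "abundant"    # 1 + 2 + 3 + 4 + 6 = 16
    assert classify(8, sieve) == "deficient"    # 1 + 2 + 4 = 7
    assert classify(28, sieve) == "perfect"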
"<import token>\n\n\ndef time_it(f, args=None):\n t0 = time.time()\n print('--- Timed execution for {} ----------------'.format(f.__name__))\n print('Running...')\n result = f(*args) if args is not None else f()\n print('Solution is {}'.format(result))\n t1 = time.time()\n print('Executed in {} seconds'.format(round(t1 - t0, 6)))\n\n\ndef distinct(x):\n \"\"\"\n Returns a list of unique elements.\n :param x: List of elements.\n :return: List of unique elements.\n \"\"\"\n return list(set(x))\n\n\ndef is_number(n):\n \"\"\"\n Returns true if the number is an instance of an int.\n or a float.\n :param n: The number n to be tested.\n :return: True if n is int or float.\n \"\"\"\n return isinstance(n, (int, float))\n\n\ndef is_unique_string(s):\n \"\"\"\n Determines if a given string only consists of unique\n characters.\n :param s: The string to test.\n :return: True if the string only contains unique characters.\n \"\"\"\n return len(s) == len(set(s))\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\ndef sum_of_proper_divisors_sieve(n):\n \"\"\"\n Generates an array with the sum of the divisors\n for that index of the array. To find the sum of\n divisors for 12: sieve[12].\n :param n: Upper limit of numbers.\n :return: List with sum of divisors.\n \"\"\"\n sieve = [1] * (n + 1)\n for i in range(2, n // 2 + 1):\n for j in range(i + i, n, i):\n sieve[j] += i\n return sieve\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\ndef sieve_to_list(sieve):\n \"\"\"\n Returns the sieve as a list where the index is the number\n where it was True.\n :param sieve:\n :return:\n \"\"\"\n return [i for i, v in enumerate(sieve) if v]\n\n\ndef triangle_number(n):\n \"\"\"\n Calculate the nth triangle number.\n :param n: Fn\n :return: Triangle number for n.\n \"\"\"\n return n * (n + 1) // 2\n\n\ndef is_triangle_number(n):\n \"\"\"\n Tests if a number is a triangle number. 
Solved with the\n inverse of n(n+1)/2, and testing if that solution\n is integer.\n :param n: Number to test.\n :return: True if it is a triangle number.\n \"\"\"\n _, x = quadratic.solve(1, 1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\ndef is_pentagonal_number(n):\n \"\"\"\n Determines if n is a pentagonal number.\n :param n:\n :return: True if pentagonal.\n \"\"\"\n _, x = quadratic.solve(3, -1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\ndef is_abundant_number(x):\n \"\"\"\n Test if a number is an abundant number. A number is abundant\n if the sum of the proper divisors is greater than the number\n itself.\n :param x: number to test.\n :return: True if it is an abundant number.\n \"\"\"\n return sum(proper_divisors(x)) > x\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. 
A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\ndef digits_to_int(x):\n \"\"\"\n Concatenate a list of digits to an integer.\n :param x:\n :return:\n \"\"\"\n if x is None:\n return ''\n return int(''.join([str(i) for i in x]))\n\n\ndef is_fibonacci_number(x):\n \"\"\"\n Test if x is a Fibonacci number.\n :param x: Number to test.\n :return: True if it is a Fibonacci number.\n \"\"\"\n a = math.sqrt(5 * x ** 2 + 4)\n b = math.sqrt(5 * x ** 2 - 4)\n return a.is_integer() or b.is_integer()\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\ndef gcd(a, b):\n \"\"\"\n Determines the greatest common divisor for a and b\n with the Euclidean Algorithm.\n :param a: First number.\n :param b: Second number.\n :return: Greatest common divisor for a and b.\n \"\"\"\n a = abs(a)\n b = abs(b)\n if a == b:\n return a\n if b > a:\n a, b = b, a\n q = a // b\n r = a - b * q\n while r != 0:\n a = b\n b = r\n q = a // b\n r = a - b * q\n return b\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\ndef lcm3(a, b, c):\n \"\"\"\n Calculating the LCM for multiple digits is done with\n LCM(a,b,c) = LCM(LCM(a,b),c)\n :param a:\n :param b:\n :param c:\n :return:\n \"\"\"\n return lcm(lcm(a, b), c)\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\ndef prime_counting_function(n):\n \"\"\"\n Return the number of primes below a given number.\n This is calculated with the proportionality which\n states that π(n) ~ n / log(n).\n :param n: Upper bound.\n :return: Estimate of the number of primes below the\n bound.\n \"\"\"\n return n / math.log(n)\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper 
bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\ndef factorial(n):\n \"\"\"\n Returns the factorial n! of a number.\n :param n:\n :return:\n \"\"\"\n return product(range(1, n + 1))\n\n\ndef is_even(n):\n \"\"\"\n Returns true if a number is even.\n :param n:\n :return:\n \"\"\"\n return not n & 1\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\ndef permutations(a):\n \"\"\"\n Generates all the permutations for a set.\n :param a:\n :return:\n \"\"\"\n n = len(a)\n return _heap_perm_(n, a)\n\n\ndef _heap_perm_(n, a):\n \"\"\"\n Heap's permutation algorithm.\n https://stackoverflow.com/a/29044942\n :param n:\n :param a:\n :return:\n \"\"\"\n if n == 1:\n yield a\n else:\n for i in range(n - 1):\n for hp in _heap_perm_(n - 1, a):\n yield list(hp)\n j = 0 if n % 2 == 1 else i\n a[j], a[n - 1] = a[n - 1], a[j]\n for hp in _heap_perm_(n - 1, a):\n yield list(hp)\n\n\ndef shift(a, n=1):\n \"\"\"\n Shift all the elements in the list by n.\n :param a:\n :param n:\n :return:\n \"\"\"\n return a[n:] + a[:n]\n\n\ndef is_palindrome(x):\n \"\"\"\n Returns true if a number or a string is a palindrome.\n :param x:\n :return:\n \"\"\"\n strx = str(x)\n return strx == strx[::-1]\n\n\ndef is_pandigital_to_n(x, n, zero_based=False):\n \"\"\"\n Returns true if a list of numbers is pandigital from 1 up to n.\n :param x:\n :param n:\n :param zero_based:\n :return:\n \"\"\"\n return set(x) == set(range(0 if zero_based else 1, n + 1))\n\n\ndef to_binary_string(x):\n \"\"\"\n Useful to convert a number into a binary number.\n :param x:\n :return:\n \"\"\"\n return '{0:b}'.format(x)\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\ndef palindrome_generator(lower, upper):\n \"\"\"\n Generates all palindromes between [lower, upper].\n https://stackoverflow.com/a/16344628\n :param lower:\n :param upper:\n :return:\n \"\"\"\n all_palindrome_numbers = _palindrome_number_generator()\n for p in all_palindrome_numbers:\n if p >= lower:\n break\n palindrome_list = [p]\n for p in all_palindrome_numbers:\n if p >= upper:\n break\n palindrome_list.append(p)\n return palindrome_list\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. 
Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\ndef simplify_fraction(a, b):\n \"\"\"\n Simplifies a fraction to the lowest common form.\n :param a:\n :param b:\n :return:\n \"\"\"\n c = gcd(a, b)\n return a // c, b // c\n\n\n<function token>\n\n\ndef is_prime(n, k):\n \"\"\"\n Test if a number n is prime k-times.\n :param n: The prime number to be tested.\n :param k: The number of tests.\n :return:\n \"\"\"\n if n <= 1 or n == 4:\n return False\n if n <= 3:\n return True\n if is_even(n):\n return False\n while k > 0:\n a = random.randint(2, n - 1)\n if gcd(n, a) != 1:\n return False\n if modpow(a, n - 1, n) != 1:\n return False\n k -= 1\n return True\n\n\ndef _first_index_with_bigger_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is larger\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] >= P[i]:\n i -= 1\n return i\n\n\ndef _first_index_with_smaller_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is smaller\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] <= P[i]:\n i -= 1\n return i\n\n\ndef next_permutation(P):\n \"\"\"\n For any given permutation P, give the next permutation.\n If there is no next permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_bigger_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] <= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\ndef is_permutation(A, B):\n \"\"\"\n Returns true if A and B are permutations of each other.\n :param A:\n :param B:\n :return:\n \"\"\"\n return set(A) == set(B)\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\ndef union_sets(S):\n \"\"\"\n Returns the union of all sets in 
S.\n :param S:\n :return:\n \"\"\"\n res = set()\n for s in S:\n res |= s\n return res\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\ndef cumsum(L):\n \"\"\"\n Returns a list with the cumulative sum of a list L.\n :param S:\n :return:\n \"\"\"\n for i in range(1, len(L)):\n L[i] += L[i - 1]\n return L\n",
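Editorial note (not part of the dataset row above): this step carries the closed-form Fibonacci helpers (Binet's formula and its logarithmic inverse). The sketch below round-trips them against an iterative reference; names mirror the step, but the implementation is slightly simplified (round instead of floor division, and a single log form for the inverse), and the tested range is an arbitrary assumption.

# Binet's formula and its inverse, checked against an iterative Fibonacci loop.
import math


def fibonacci_n(n):
    sqrt5 = math.sqrt(5)
    phi = (1 + sqrt5) / 2
    psi = (1 - sqrt5) / 2
    return round((phi ** n - psi ** n) / sqrt5)


def fibonacci_n_inv(x):
    # Inverse of Binet's formula: n ~= log_phi(x * sqrt5); reliable for x >= 2.
    if x < 2:
        raise ValueError("approximation only reliable for x >= 2")
    sqrt5 = math.sqrt(5)
    phi = (1 + sqrt5) / 2
    return round(math.log(x * sqrt5 + 0.5, phi))


if __name__ == "__main__":
    a, b = 1, 1                      # F(1), F(2)
    for n in range(3, 40):
        a, b = b, a + b              # b is now F(n)
        assert fibonacci_n(n) == b
        assert fibonacci_n_inv(b) == n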
"<import token>\n\n\ndef time_it(f, args=None):\n t0 = time.time()\n print('--- Timed execution for {} ----------------'.format(f.__name__))\n print('Running...')\n result = f(*args) if args is not None else f()\n print('Solution is {}'.format(result))\n t1 = time.time()\n print('Executed in {} seconds'.format(round(t1 - t0, 6)))\n\n\ndef distinct(x):\n \"\"\"\n Returns a list of unique elements.\n :param x: List of elements.\n :return: List of unique elements.\n \"\"\"\n return list(set(x))\n\n\ndef is_number(n):\n \"\"\"\n Returns true if the number is an instance of an int.\n or a float.\n :param n: The number n to be tested.\n :return: True if n is int or float.\n \"\"\"\n return isinstance(n, (int, float))\n\n\ndef is_unique_string(s):\n \"\"\"\n Determines if a given string only consists of unique\n characters.\n :param s: The string to test.\n :return: True if the string only contains unique characters.\n \"\"\"\n return len(s) == len(set(s))\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\ndef sum_of_proper_divisors_sieve(n):\n \"\"\"\n Generates an array with the sum of the divisors\n for that index of the array. To find the sum of\n divisors for 12: sieve[12].\n :param n: Upper limit of numbers.\n :return: List with sum of divisors.\n \"\"\"\n sieve = [1] * (n + 1)\n for i in range(2, n // 2 + 1):\n for j in range(i + i, n, i):\n sieve[j] += i\n return sieve\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\ndef sieve_to_list(sieve):\n \"\"\"\n Returns the sieve as a list where the index is the number\n where it was True.\n :param sieve:\n :return:\n \"\"\"\n return [i for i, v in enumerate(sieve) if v]\n\n\ndef triangle_number(n):\n \"\"\"\n Calculate the nth triangle number.\n :param n: Fn\n :return: Triangle number for n.\n \"\"\"\n return n * (n + 1) // 2\n\n\ndef is_triangle_number(n):\n \"\"\"\n Tests if a number is a triangle number. 
Solved with the\n inverse of n(n+1)/2, and testing if that solution\n is integer.\n :param n: Number to test.\n :return: True if it is a triangle number.\n \"\"\"\n _, x = quadratic.solve(1, 1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\ndef is_pentagonal_number(n):\n \"\"\"\n Determines if n is a pentagonal number.\n :param n:\n :return: True if pentagonal.\n \"\"\"\n _, x = quadratic.solve(3, -1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\ndef is_abundant_number(x):\n \"\"\"\n Test if a number is an abundant number. A number is abundant\n if the sum of the proper divisors is greater than the number\n itself.\n :param x: number to test.\n :return: True if it is an abundant number.\n \"\"\"\n return sum(proper_divisors(x)) > x\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. 
A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n\n\ndef is_fibonacci_number(x):\n \"\"\"\n Test if x is a Fibonacci number.\n :param x: Number to test.\n :return: True if it is a Fibonacci number.\n \"\"\"\n a = math.sqrt(5 * x ** 2 + 4)\n b = math.sqrt(5 * x ** 2 - 4)\n return a.is_integer() or b.is_integer()\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\ndef gcd(a, b):\n \"\"\"\n Determines the greatest common divisor for a and b\n with the Euclidean Algorithm.\n :param a: First number.\n :param b: Second number.\n :return: Greatest common divisor for a and b.\n \"\"\"\n a = abs(a)\n b = abs(b)\n if a == b:\n return a\n if b > a:\n a, b = b, a\n q = a // b\n r = a - b * q\n while r != 0:\n a = b\n b = r\n q = a // b\n r = a - b * q\n return b\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\ndef lcm3(a, b, c):\n \"\"\"\n Calculating the LCM for multiple digits is done with\n LCM(a,b,c) = LCM(LCM(a,b),c)\n :param a:\n :param b:\n :param c:\n :return:\n \"\"\"\n return lcm(lcm(a, b), c)\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\ndef prime_counting_function(n):\n \"\"\"\n Return the number of primes below a given number.\n This is calculated with the proportionality which\n states that π(n) ~ n / log(n).\n :param n: Upper bound.\n :return: Estimate of the number of primes below the\n bound.\n \"\"\"\n return n / math.log(n)\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param 
numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\ndef factorial(n):\n \"\"\"\n Returns the factorial n! of a number.\n :param n:\n :return:\n \"\"\"\n return product(range(1, n + 1))\n\n\ndef is_even(n):\n \"\"\"\n Returns true if a number is even.\n :param n:\n :return:\n \"\"\"\n return not n & 1\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\ndef permutations(a):\n \"\"\"\n Generates all the permutations for a set.\n :param a:\n :return:\n \"\"\"\n n = len(a)\n return _heap_perm_(n, a)\n\n\ndef _heap_perm_(n, a):\n \"\"\"\n Heap's permutation algorithm.\n https://stackoverflow.com/a/29044942\n :param n:\n :param a:\n :return:\n \"\"\"\n if n == 1:\n yield a\n else:\n for i in range(n - 1):\n for hp in _heap_perm_(n - 1, a):\n yield list(hp)\n j = 0 if n % 2 == 1 else i\n a[j], a[n - 1] = a[n - 1], a[j]\n for hp in _heap_perm_(n - 1, a):\n yield list(hp)\n\n\ndef shift(a, n=1):\n \"\"\"\n Shift all the elements in the list by n.\n :param a:\n :param n:\n :return:\n \"\"\"\n return a[n:] + a[:n]\n\n\ndef is_palindrome(x):\n \"\"\"\n Returns true if a number or a string is a palindrome.\n :param x:\n :return:\n \"\"\"\n strx = str(x)\n return strx == strx[::-1]\n\n\ndef is_pandigital_to_n(x, n, zero_based=False):\n \"\"\"\n Returns true if a list of numbers is pandigital from 1 up to n.\n :param x:\n :param n:\n :param zero_based:\n :return:\n \"\"\"\n return set(x) == set(range(0 if zero_based else 1, n + 1))\n\n\ndef to_binary_string(x):\n \"\"\"\n Useful to convert a number into a binary number.\n :param x:\n :return:\n \"\"\"\n return '{0:b}'.format(x)\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\ndef palindrome_generator(lower, upper):\n \"\"\"\n Generates all palindromes between [lower, upper].\n https://stackoverflow.com/a/16344628\n :param lower:\n :param upper:\n :return:\n \"\"\"\n all_palindrome_numbers = _palindrome_number_generator()\n for p in all_palindrome_numbers:\n if p >= lower:\n break\n palindrome_list = [p]\n for p in all_palindrome_numbers:\n if p >= upper:\n break\n palindrome_list.append(p)\n return palindrome_list\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. 
Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\ndef simplify_fraction(a, b):\n \"\"\"\n Simplifies a fraction to the lowest common form.\n :param a:\n :param b:\n :return:\n \"\"\"\n c = gcd(a, b)\n return a // c, b // c\n\n\n<function token>\n\n\ndef is_prime(n, k):\n \"\"\"\n Test if a number n is prime k-times.\n :param n: The prime number to be tested.\n :param k: The number of tests.\n :return:\n \"\"\"\n if n <= 1 or n == 4:\n return False\n if n <= 3:\n return True\n if is_even(n):\n return False\n while k > 0:\n a = random.randint(2, n - 1)\n if gcd(n, a) != 1:\n return False\n if modpow(a, n - 1, n) != 1:\n return False\n k -= 1\n return True\n\n\ndef _first_index_with_bigger_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is larger\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] >= P[i]:\n i -= 1\n return i\n\n\ndef _first_index_with_smaller_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is smaller\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] <= P[i]:\n i -= 1\n return i\n\n\ndef next_permutation(P):\n \"\"\"\n For any given permutation P, give the next permutation.\n If there is no next permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_bigger_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] <= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\ndef is_permutation(A, B):\n \"\"\"\n Returns true if A and B are permutations of each other.\n :param A:\n :param B:\n :return:\n \"\"\"\n return set(A) == set(B)\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\ndef union_sets(S):\n \"\"\"\n Returns the union of all sets in 
S.\n :param S:\n :return:\n \"\"\"\n res = set()\n for s in S:\n res |= s\n return res\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\ndef cumsum(L):\n \"\"\"\n Returns a list with the cumulative sum of a list L.\n :param S:\n :return:\n \"\"\"\n for i in range(1, len(L)):\n L[i] += L[i - 1]\n return L\n",
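Editorial note (not part of the dataset row above): this step serializes the lexicographic next_permutation / previous_permutation pair. The sketch below restates next_permutation in a self-contained form (the suffix reversal uses a slice instead of the step's index swaps, with the same behavior) and drives it to enumerate all orderings of a small list; the list [1, 2, 3] is an arbitrary assumption.

# Enumerating permutations by repeatedly applying the lexicographic "next" step.
def next_permutation(P):
    # Pivot / swap / reverse-suffix; returns P unchanged when P is already the
    # last (descending) permutation.
    n = len(P)
    i = n - 1
    while i > 0 and P[i - 1] >= P[i]:
        i -= 1
    if i == 0:
        return P
    j = n - 1
    while P[j] <= P[i - 1]:
        j -= 1
    P[i - 1], P[j] = P[j], P[i - 1]
    P[i:] = reversed(P[i:])
    return P


if __name__ == "__main__":
    perm = [1, 2, 3]
    seen = [tuple(perm)]
    while True:
        nxt = tuple(next_permutation(perm))
        if nxt == seen[-1]:          # fixed point: no further permutation
            break
        seen.append(nxt)
    assert len(seen) == 6            # 3! orderings, in lexicographic order
    print(seen)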
"<import token>\n\n\ndef time_it(f, args=None):\n t0 = time.time()\n print('--- Timed execution for {} ----------------'.format(f.__name__))\n print('Running...')\n result = f(*args) if args is not None else f()\n print('Solution is {}'.format(result))\n t1 = time.time()\n print('Executed in {} seconds'.format(round(t1 - t0, 6)))\n\n\ndef distinct(x):\n \"\"\"\n Returns a list of unique elements.\n :param x: List of elements.\n :return: List of unique elements.\n \"\"\"\n return list(set(x))\n\n\ndef is_number(n):\n \"\"\"\n Returns true if the number is an instance of an int.\n or a float.\n :param n: The number n to be tested.\n :return: True if n is int or float.\n \"\"\"\n return isinstance(n, (int, float))\n\n\ndef is_unique_string(s):\n \"\"\"\n Determines if a given string only consists of unique\n characters.\n :param s: The string to test.\n :return: True if the string only contains unique characters.\n \"\"\"\n return len(s) == len(set(s))\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\ndef sum_of_proper_divisors_sieve(n):\n \"\"\"\n Generates an array with the sum of the divisors\n for that index of the array. To find the sum of\n divisors for 12: sieve[12].\n :param n: Upper limit of numbers.\n :return: List with sum of divisors.\n \"\"\"\n sieve = [1] * (n + 1)\n for i in range(2, n // 2 + 1):\n for j in range(i + i, n, i):\n sieve[j] += i\n return sieve\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\ndef sieve_to_list(sieve):\n \"\"\"\n Returns the sieve as a list where the index is the number\n where it was True.\n :param sieve:\n :return:\n \"\"\"\n return [i for i, v in enumerate(sieve) if v]\n\n\ndef triangle_number(n):\n \"\"\"\n Calculate the nth triangle number.\n :param n: Fn\n :return: Triangle number for n.\n \"\"\"\n return n * (n + 1) // 2\n\n\ndef is_triangle_number(n):\n \"\"\"\n Tests if a number is a triangle number. 
Solved with the\n inverse of n(n+1)/2, and testing if that solution\n is integer.\n :param n: Number to test.\n :return: True if it is a triangle number.\n \"\"\"\n _, x = quadratic.solve(1, 1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\ndef is_pentagonal_number(n):\n \"\"\"\n Determines if n is a pentagonal number.\n :param n:\n :return: True if pentagonal.\n \"\"\"\n _, x = quadratic.solve(3, -1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\ndef is_abundant_number(x):\n \"\"\"\n Test if a number is an abundant number. A number is abundant\n if the sum of the proper divisors is greater than the number\n itself.\n :param x: number to test.\n :return: True if it is an abundant number.\n \"\"\"\n return sum(proper_divisors(x)) > x\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. 
A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\ndef gcd(a, b):\n \"\"\"\n Determines the greatest common divisor for a and b\n with the Euclidean Algorithm.\n :param a: First number.\n :param b: Second number.\n :return: Greatest common divisor for a and b.\n \"\"\"\n a = abs(a)\n b = abs(b)\n if a == b:\n return a\n if b > a:\n a, b = b, a\n q = a // b\n r = a - b * q\n while r != 0:\n a = b\n b = r\n q = a // b\n r = a - b * q\n return b\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\ndef lcm3(a, b, c):\n \"\"\"\n Calculating the LCM for multiple digits is done with\n LCM(a,b,c) = LCM(LCM(a,b),c)\n :param a:\n :param b:\n :param c:\n :return:\n \"\"\"\n return lcm(lcm(a, b), c)\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\ndef prime_counting_function(n):\n \"\"\"\n Return the number of primes below a given number.\n This is calculated with the proportionality which\n states that π(n) ~ n / log(n).\n :param n: Upper bound.\n :return: Estimate of the number of primes below the\n bound.\n \"\"\"\n return n / math.log(n)\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\ndef factorial(n):\n \"\"\"\n Returns the factorial n! 
of a number.\n :param n:\n :return:\n \"\"\"\n return product(range(1, n + 1))\n\n\ndef is_even(n):\n \"\"\"\n Returns true if a number is even.\n :param n:\n :return:\n \"\"\"\n return not n & 1\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\ndef permutations(a):\n \"\"\"\n Generates all the permutations for a set.\n :param a:\n :return:\n \"\"\"\n n = len(a)\n return _heap_perm_(n, a)\n\n\ndef _heap_perm_(n, a):\n \"\"\"\n Heap's permutation algorithm.\n https://stackoverflow.com/a/29044942\n :param n:\n :param a:\n :return:\n \"\"\"\n if n == 1:\n yield a\n else:\n for i in range(n - 1):\n for hp in _heap_perm_(n - 1, a):\n yield list(hp)\n j = 0 if n % 2 == 1 else i\n a[j], a[n - 1] = a[n - 1], a[j]\n for hp in _heap_perm_(n - 1, a):\n yield list(hp)\n\n\ndef shift(a, n=1):\n \"\"\"\n Shift all the elements in the list by n.\n :param a:\n :param n:\n :return:\n \"\"\"\n return a[n:] + a[:n]\n\n\ndef is_palindrome(x):\n \"\"\"\n Returns true if a number or a string is a palindrome.\n :param x:\n :return:\n \"\"\"\n strx = str(x)\n return strx == strx[::-1]\n\n\ndef is_pandigital_to_n(x, n, zero_based=False):\n \"\"\"\n Returns true if a list of numbers is pandigital from 1 up to n.\n :param x:\n :param n:\n :param zero_based:\n :return:\n \"\"\"\n return set(x) == set(range(0 if zero_based else 1, n + 1))\n\n\ndef to_binary_string(x):\n \"\"\"\n Useful to convert a number into a binary number.\n :param x:\n :return:\n \"\"\"\n return '{0:b}'.format(x)\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\ndef palindrome_generator(lower, upper):\n \"\"\"\n Generates all palindromes between [lower, upper].\n https://stackoverflow.com/a/16344628\n :param lower:\n :param upper:\n :return:\n \"\"\"\n all_palindrome_numbers = _palindrome_number_generator()\n for p in all_palindrome_numbers:\n if p >= lower:\n break\n palindrome_list = [p]\n for p in all_palindrome_numbers:\n if p >= upper:\n break\n palindrome_list.append(p)\n return palindrome_list\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. 
Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\ndef simplify_fraction(a, b):\n \"\"\"\n Simplifies a fraction to the lowest common form.\n :param a:\n :param b:\n :return:\n \"\"\"\n c = gcd(a, b)\n return a // c, b // c\n\n\n<function token>\n\n\ndef is_prime(n, k):\n \"\"\"\n Test if a number n is prime k-times.\n :param n: The prime number to be tested.\n :param k: The number of tests.\n :return:\n \"\"\"\n if n <= 1 or n == 4:\n return False\n if n <= 3:\n return True\n if is_even(n):\n return False\n while k > 0:\n a = random.randint(2, n - 1)\n if gcd(n, a) != 1:\n return False\n if modpow(a, n - 1, n) != 1:\n return False\n k -= 1\n return True\n\n\ndef _first_index_with_bigger_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is larger\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] >= P[i]:\n i -= 1\n return i\n\n\ndef _first_index_with_smaller_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is smaller\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] <= P[i]:\n i -= 1\n return i\n\n\ndef next_permutation(P):\n \"\"\"\n For any given permutation P, give the next permutation.\n If there is no next permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_bigger_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] <= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\ndef is_permutation(A, B):\n \"\"\"\n Returns true if A and B are permutations of each other.\n :param A:\n :param B:\n :return:\n \"\"\"\n return set(A) == set(B)\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\ndef union_sets(S):\n \"\"\"\n Returns the union of all sets in 
S.\n :param S:\n :return:\n \"\"\"\n res = set()\n for s in S:\n res |= s\n return res\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\ndef cumsum(L):\n \"\"\"\n Returns a list with the cumulative sum of a list L.\n :param S:\n :return:\n \"\"\"\n for i in range(1, len(L)):\n L[i] += L[i - 1]\n return L\n",
"<import token>\n\n\ndef time_it(f, args=None):\n t0 = time.time()\n print('--- Timed execution for {} ----------------'.format(f.__name__))\n print('Running...')\n result = f(*args) if args is not None else f()\n print('Solution is {}'.format(result))\n t1 = time.time()\n print('Executed in {} seconds'.format(round(t1 - t0, 6)))\n\n\ndef distinct(x):\n \"\"\"\n Returns a list of unique elements.\n :param x: List of elements.\n :return: List of unique elements.\n \"\"\"\n return list(set(x))\n\n\ndef is_number(n):\n \"\"\"\n Returns true if the number is an instance of an int.\n or a float.\n :param n: The number n to be tested.\n :return: True if n is int or float.\n \"\"\"\n return isinstance(n, (int, float))\n\n\ndef is_unique_string(s):\n \"\"\"\n Determines if a given string only consists of unique\n characters.\n :param s: The string to test.\n :return: True if the string only contains unique characters.\n \"\"\"\n return len(s) == len(set(s))\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\ndef sum_of_proper_divisors_sieve(n):\n \"\"\"\n Generates an array with the sum of the divisors\n for that index of the array. To find the sum of\n divisors for 12: sieve[12].\n :param n: Upper limit of numbers.\n :return: List with sum of divisors.\n \"\"\"\n sieve = [1] * (n + 1)\n for i in range(2, n // 2 + 1):\n for j in range(i + i, n, i):\n sieve[j] += i\n return sieve\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\ndef sieve_to_list(sieve):\n \"\"\"\n Returns the sieve as a list where the index is the number\n where it was True.\n :param sieve:\n :return:\n \"\"\"\n return [i for i, v in enumerate(sieve) if v]\n\n\ndef triangle_number(n):\n \"\"\"\n Calculate the nth triangle number.\n :param n: Fn\n :return: Triangle number for n.\n \"\"\"\n return n * (n + 1) // 2\n\n\ndef is_triangle_number(n):\n \"\"\"\n Tests if a number is a triangle number. 
Solved with the\n inverse of n(n+1)/2, and testing if that solution\n is integer.\n :param n: Number to test.\n :return: True if it is a triangle number.\n \"\"\"\n _, x = quadratic.solve(1, 1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\ndef is_pentagonal_number(n):\n \"\"\"\n Determines if n is a pentagonal number.\n :param n:\n :return: True if pentagonal.\n \"\"\"\n _, x = quadratic.solve(3, -1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\ndef is_abundant_number(x):\n \"\"\"\n Test if a number is an abundant number. A number is abundant\n if the sum of the proper divisors is greater than the number\n itself.\n :param x: number to test.\n :return: True if it is an abundant number.\n \"\"\"\n return sum(proper_divisors(x)) > x\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. 
A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\ndef gcd(a, b):\n \"\"\"\n Determines the greatest common divisor for a and b\n with the Euclidean Algorithm.\n :param a: First number.\n :param b: Second number.\n :return: Greatest common divisor for a and b.\n \"\"\"\n a = abs(a)\n b = abs(b)\n if a == b:\n return a\n if b > a:\n a, b = b, a\n q = a // b\n r = a - b * q\n while r != 0:\n a = b\n b = r\n q = a // b\n r = a - b * q\n return b\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\ndef lcm3(a, b, c):\n \"\"\"\n Calculating the LCM for multiple digits is done with\n LCM(a,b,c) = LCM(LCM(a,b),c)\n :param a:\n :param b:\n :param c:\n :return:\n \"\"\"\n return lcm(lcm(a, b), c)\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\ndef prime_counting_function(n):\n \"\"\"\n Return the number of primes below a given number.\n This is calculated with the proportionality which\n states that π(n) ~ n / log(n).\n :param n: Upper bound.\n :return: Estimate of the number of primes below the\n bound.\n \"\"\"\n return n / math.log(n)\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\n<function token>\n\n\ndef is_even(n):\n \"\"\"\n Returns true if a number is even.\n :param n:\n :return:\n \"\"\"\n return not n & 1\n\n\ndef is_odd(n):\n \"\"\"\n 
Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\ndef permutations(a):\n \"\"\"\n Generates all the permutations for a set.\n :param a:\n :return:\n \"\"\"\n n = len(a)\n return _heap_perm_(n, a)\n\n\ndef _heap_perm_(n, a):\n \"\"\"\n Heap's permutation algorithm.\n https://stackoverflow.com/a/29044942\n :param n:\n :param a:\n :return:\n \"\"\"\n if n == 1:\n yield a\n else:\n for i in range(n - 1):\n for hp in _heap_perm_(n - 1, a):\n yield list(hp)\n j = 0 if n % 2 == 1 else i\n a[j], a[n - 1] = a[n - 1], a[j]\n for hp in _heap_perm_(n - 1, a):\n yield list(hp)\n\n\ndef shift(a, n=1):\n \"\"\"\n Shift all the elements in the list by n.\n :param a:\n :param n:\n :return:\n \"\"\"\n return a[n:] + a[:n]\n\n\ndef is_palindrome(x):\n \"\"\"\n Returns true if a number or a string is a palindrome.\n :param x:\n :return:\n \"\"\"\n strx = str(x)\n return strx == strx[::-1]\n\n\ndef is_pandigital_to_n(x, n, zero_based=False):\n \"\"\"\n Returns true if a list of numbers is pandigital from 1 up to n.\n :param x:\n :param n:\n :param zero_based:\n :return:\n \"\"\"\n return set(x) == set(range(0 if zero_based else 1, n + 1))\n\n\ndef to_binary_string(x):\n \"\"\"\n Useful to convert a number into a binary number.\n :param x:\n :return:\n \"\"\"\n return '{0:b}'.format(x)\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\ndef palindrome_generator(lower, upper):\n \"\"\"\n Generates all palindromes between [lower, upper].\n https://stackoverflow.com/a/16344628\n :param lower:\n :param upper:\n :return:\n \"\"\"\n all_palindrome_numbers = _palindrome_number_generator()\n for p in all_palindrome_numbers:\n if p >= lower:\n break\n palindrome_list = [p]\n for p in all_palindrome_numbers:\n if p >= upper:\n break\n palindrome_list.append(p)\n return palindrome_list\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. 
Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\ndef simplify_fraction(a, b):\n \"\"\"\n Simplifies a fraction to the lowest common form.\n :param a:\n :param b:\n :return:\n \"\"\"\n c = gcd(a, b)\n return a // c, b // c\n\n\n<function token>\n\n\ndef is_prime(n, k):\n \"\"\"\n Test if a number n is prime k-times.\n :param n: The prime number to be tested.\n :param k: The number of tests.\n :return:\n \"\"\"\n if n <= 1 or n == 4:\n return False\n if n <= 3:\n return True\n if is_even(n):\n return False\n while k > 0:\n a = random.randint(2, n - 1)\n if gcd(n, a) != 1:\n return False\n if modpow(a, n - 1, n) != 1:\n return False\n k -= 1\n return True\n\n\ndef _first_index_with_bigger_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is larger\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] >= P[i]:\n i -= 1\n return i\n\n\ndef _first_index_with_smaller_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is smaller\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] <= P[i]:\n i -= 1\n return i\n\n\ndef next_permutation(P):\n \"\"\"\n For any given permutation P, give the next permutation.\n If there is no next permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_bigger_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] <= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\ndef is_permutation(A, B):\n \"\"\"\n Returns true if A and B are permutations of each other.\n :param A:\n :param B:\n :return:\n \"\"\"\n return set(A) == set(B)\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\ndef union_sets(S):\n \"\"\"\n Returns the union of all sets in 
S.\n :param S:\n :return:\n \"\"\"\n res = set()\n for s in S:\n res |= s\n return res\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\ndef cumsum(L):\n \"\"\"\n Returns a list with the cumulative sum of a list L.\n :param S:\n :return:\n \"\"\"\n for i in range(1, len(L)):\n L[i] += L[i - 1]\n return L\n",
"<import token>\n\n\ndef time_it(f, args=None):\n t0 = time.time()\n print('--- Timed execution for {} ----------------'.format(f.__name__))\n print('Running...')\n result = f(*args) if args is not None else f()\n print('Solution is {}'.format(result))\n t1 = time.time()\n print('Executed in {} seconds'.format(round(t1 - t0, 6)))\n\n\ndef distinct(x):\n \"\"\"\n Returns a list of unique elements.\n :param x: List of elements.\n :return: List of unique elements.\n \"\"\"\n return list(set(x))\n\n\ndef is_number(n):\n \"\"\"\n Returns true if the number is an instance of an int.\n or a float.\n :param n: The number n to be tested.\n :return: True if n is int or float.\n \"\"\"\n return isinstance(n, (int, float))\n\n\ndef is_unique_string(s):\n \"\"\"\n Determines if a given string only consists of unique\n characters.\n :param s: The string to test.\n :return: True if the string only contains unique characters.\n \"\"\"\n return len(s) == len(set(s))\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\ndef sum_of_proper_divisors_sieve(n):\n \"\"\"\n Generates an array with the sum of the divisors\n for that index of the array. To find the sum of\n divisors for 12: sieve[12].\n :param n: Upper limit of numbers.\n :return: List with sum of divisors.\n \"\"\"\n sieve = [1] * (n + 1)\n for i in range(2, n // 2 + 1):\n for j in range(i + i, n, i):\n sieve[j] += i\n return sieve\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\ndef sieve_to_list(sieve):\n \"\"\"\n Returns the sieve as a list where the index is the number\n where it was True.\n :param sieve:\n :return:\n \"\"\"\n return [i for i, v in enumerate(sieve) if v]\n\n\ndef triangle_number(n):\n \"\"\"\n Calculate the nth triangle number.\n :param n: Fn\n :return: Triangle number for n.\n \"\"\"\n return n * (n + 1) // 2\n\n\ndef is_triangle_number(n):\n \"\"\"\n Tests if a number is a triangle number. 
Solved with the\n inverse of n(n+1)/2, and testing if that solution\n is integer.\n :param n: Number to test.\n :return: True if it is a triangle number.\n \"\"\"\n _, x = quadratic.solve(1, 1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\ndef is_pentagonal_number(n):\n \"\"\"\n Determines if n is a pentagonal number.\n :param n:\n :return: True if pentagonal.\n \"\"\"\n _, x = quadratic.solve(3, -1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\ndef is_abundant_number(x):\n \"\"\"\n Test if a number is an abundant number. A number is abundant\n if the sum of the proper divisors is greater than the number\n itself.\n :param x: number to test.\n :return: True if it is an abundant number.\n \"\"\"\n return sum(proper_divisors(x)) > x\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. 
A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\ndef lcm3(a, b, c):\n \"\"\"\n Calculating the LCM for multiple digits is done with\n LCM(a,b,c) = LCM(LCM(a,b),c)\n :param a:\n :param b:\n :param c:\n :return:\n \"\"\"\n return lcm(lcm(a, b), c)\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\ndef prime_counting_function(n):\n \"\"\"\n Return the number of primes below a given number.\n This is calculated with the proportionality which\n states that π(n) ~ n / log(n).\n :param n: Upper bound.\n :return: Estimate of the number of primes below the\n bound.\n \"\"\"\n return n / math.log(n)\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\n<function token>\n\n\ndef is_even(n):\n \"\"\"\n Returns true if a number is even.\n :param n:\n :return:\n \"\"\"\n return not n & 1\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\ndef permutations(a):\n \"\"\"\n Generates all the permutations for a set.\n :param a:\n :return:\n \"\"\"\n n = len(a)\n return _heap_perm_(n, a)\n\n\ndef _heap_perm_(n, a):\n \"\"\"\n Heap's permutation algorithm.\n https://stackoverflow.com/a/29044942\n :param n:\n :param a:\n :return:\n 
\"\"\"\n if n == 1:\n yield a\n else:\n for i in range(n - 1):\n for hp in _heap_perm_(n - 1, a):\n yield list(hp)\n j = 0 if n % 2 == 1 else i\n a[j], a[n - 1] = a[n - 1], a[j]\n for hp in _heap_perm_(n - 1, a):\n yield list(hp)\n\n\ndef shift(a, n=1):\n \"\"\"\n Shift all the elements in the list by n.\n :param a:\n :param n:\n :return:\n \"\"\"\n return a[n:] + a[:n]\n\n\ndef is_palindrome(x):\n \"\"\"\n Returns true if a number or a string is a palindrome.\n :param x:\n :return:\n \"\"\"\n strx = str(x)\n return strx == strx[::-1]\n\n\ndef is_pandigital_to_n(x, n, zero_based=False):\n \"\"\"\n Returns true if a list of numbers is pandigital from 1 up to n.\n :param x:\n :param n:\n :param zero_based:\n :return:\n \"\"\"\n return set(x) == set(range(0 if zero_based else 1, n + 1))\n\n\ndef to_binary_string(x):\n \"\"\"\n Useful to convert a number into a binary number.\n :param x:\n :return:\n \"\"\"\n return '{0:b}'.format(x)\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\ndef palindrome_generator(lower, upper):\n \"\"\"\n Generates all palindromes between [lower, upper].\n https://stackoverflow.com/a/16344628\n :param lower:\n :param upper:\n :return:\n \"\"\"\n all_palindrome_numbers = _palindrome_number_generator()\n for p in all_palindrome_numbers:\n if p >= lower:\n break\n palindrome_list = [p]\n for p in all_palindrome_numbers:\n if p >= upper:\n break\n palindrome_list.append(p)\n return palindrome_list\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. 
Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\ndef simplify_fraction(a, b):\n \"\"\"\n Simplifies a fraction to the lowest common form.\n :param a:\n :param b:\n :return:\n \"\"\"\n c = gcd(a, b)\n return a // c, b // c\n\n\n<function token>\n\n\ndef is_prime(n, k):\n \"\"\"\n Test if a number n is prime k-times.\n :param n: The prime number to be tested.\n :param k: The number of tests.\n :return:\n \"\"\"\n if n <= 1 or n == 4:\n return False\n if n <= 3:\n return True\n if is_even(n):\n return False\n while k > 0:\n a = random.randint(2, n - 1)\n if gcd(n, a) != 1:\n return False\n if modpow(a, n - 1, n) != 1:\n return False\n k -= 1\n return True\n\n\ndef _first_index_with_bigger_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is larger\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] >= P[i]:\n i -= 1\n return i\n\n\ndef _first_index_with_smaller_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is smaller\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] <= P[i]:\n i -= 1\n return i\n\n\ndef next_permutation(P):\n \"\"\"\n For any given permutation P, give the next permutation.\n If there is no next permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_bigger_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] <= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\ndef is_permutation(A, B):\n \"\"\"\n Returns true if A and B are permutations of each other.\n :param A:\n :param B:\n :return:\n \"\"\"\n return set(A) == set(B)\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\ndef union_sets(S):\n \"\"\"\n Returns the union of all sets in 
S.\n :param S:\n :return:\n \"\"\"\n res = set()\n for s in S:\n res |= s\n return res\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\ndef cumsum(L):\n \"\"\"\n Returns a list with the cumulative sum of a list L.\n :param S:\n :return:\n \"\"\"\n for i in range(1, len(L)):\n L[i] += L[i - 1]\n return L\n",
"<import token>\n\n\ndef time_it(f, args=None):\n t0 = time.time()\n print('--- Timed execution for {} ----------------'.format(f.__name__))\n print('Running...')\n result = f(*args) if args is not None else f()\n print('Solution is {}'.format(result))\n t1 = time.time()\n print('Executed in {} seconds'.format(round(t1 - t0, 6)))\n\n\ndef distinct(x):\n \"\"\"\n Returns a list of unique elements.\n :param x: List of elements.\n :return: List of unique elements.\n \"\"\"\n return list(set(x))\n\n\ndef is_number(n):\n \"\"\"\n Returns true if the number is an instance of an int.\n or a float.\n :param n: The number n to be tested.\n :return: True if n is int or float.\n \"\"\"\n return isinstance(n, (int, float))\n\n\ndef is_unique_string(s):\n \"\"\"\n Determines if a given string only consists of unique\n characters.\n :param s: The string to test.\n :return: True if the string only contains unique characters.\n \"\"\"\n return len(s) == len(set(s))\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\ndef sum_of_proper_divisors_sieve(n):\n \"\"\"\n Generates an array with the sum of the divisors\n for that index of the array. To find the sum of\n divisors for 12: sieve[12].\n :param n: Upper limit of numbers.\n :return: List with sum of divisors.\n \"\"\"\n sieve = [1] * (n + 1)\n for i in range(2, n // 2 + 1):\n for j in range(i + i, n, i):\n sieve[j] += i\n return sieve\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\ndef sieve_to_list(sieve):\n \"\"\"\n Returns the sieve as a list where the index is the number\n where it was True.\n :param sieve:\n :return:\n \"\"\"\n return [i for i, v in enumerate(sieve) if v]\n\n\ndef triangle_number(n):\n \"\"\"\n Calculate the nth triangle number.\n :param n: Fn\n :return: Triangle number for n.\n \"\"\"\n return n * (n + 1) // 2\n\n\ndef is_triangle_number(n):\n \"\"\"\n Tests if a number is a triangle number. 
Solved with the\n inverse of n(n+1)/2, and testing if that solution\n is integer.\n :param n: Number to test.\n :return: True if it is a triangle number.\n \"\"\"\n _, x = quadratic.solve(1, 1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\ndef is_pentagonal_number(n):\n \"\"\"\n Determines if n is a pentagonal number.\n :param n:\n :return: True if pentagonal.\n \"\"\"\n _, x = quadratic.solve(3, -1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\ndef is_abundant_number(x):\n \"\"\"\n Test if a number is an abundant number. A number is abundant\n if the sum of the proper divisors is greater than the number\n itself.\n :param x: number to test.\n :return: True if it is an abundant number.\n \"\"\"\n return sum(proper_divisors(x)) > x\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. 
A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\ndef lcm3(a, b, c):\n \"\"\"\n Calculating the LCM for multiple digits is done with\n LCM(a,b,c) = LCM(LCM(a,b),c)\n :param a:\n :param b:\n :param c:\n :return:\n \"\"\"\n return lcm(lcm(a, b), c)\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\ndef prime_counting_function(n):\n \"\"\"\n Return the number of primes below a given number.\n This is calculated with the proportionality which\n states that π(n) ~ n / log(n).\n :param n: Upper bound.\n :return: Estimate of the number of primes below the\n bound.\n \"\"\"\n return n / math.log(n)\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\n<function token>\n\n\ndef is_even(n):\n \"\"\"\n Returns true if a number is even.\n :param n:\n :return:\n \"\"\"\n return not n & 1\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\ndef permutations(a):\n \"\"\"\n Generates all the permutations for a set.\n :param a:\n :return:\n \"\"\"\n n = len(a)\n return _heap_perm_(n, a)\n\n\ndef _heap_perm_(n, a):\n \"\"\"\n Heap's permutation algorithm.\n https://stackoverflow.com/a/29044942\n :param n:\n :param a:\n :return:\n 
\"\"\"\n if n == 1:\n yield a\n else:\n for i in range(n - 1):\n for hp in _heap_perm_(n - 1, a):\n yield list(hp)\n j = 0 if n % 2 == 1 else i\n a[j], a[n - 1] = a[n - 1], a[j]\n for hp in _heap_perm_(n - 1, a):\n yield list(hp)\n\n\ndef shift(a, n=1):\n \"\"\"\n Shift all the elements in the list by n.\n :param a:\n :param n:\n :return:\n \"\"\"\n return a[n:] + a[:n]\n\n\ndef is_palindrome(x):\n \"\"\"\n Returns true if a number or a string is a palindrome.\n :param x:\n :return:\n \"\"\"\n strx = str(x)\n return strx == strx[::-1]\n\n\ndef is_pandigital_to_n(x, n, zero_based=False):\n \"\"\"\n Returns true if a list of numbers is pandigital from 1 up to n.\n :param x:\n :param n:\n :param zero_based:\n :return:\n \"\"\"\n return set(x) == set(range(0 if zero_based else 1, n + 1))\n\n\ndef to_binary_string(x):\n \"\"\"\n Useful to convert a number into a binary number.\n :param x:\n :return:\n \"\"\"\n return '{0:b}'.format(x)\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\ndef palindrome_generator(lower, upper):\n \"\"\"\n Generates all palindromes between [lower, upper].\n https://stackoverflow.com/a/16344628\n :param lower:\n :param upper:\n :return:\n \"\"\"\n all_palindrome_numbers = _palindrome_number_generator()\n for p in all_palindrome_numbers:\n if p >= lower:\n break\n palindrome_list = [p]\n for p in all_palindrome_numbers:\n if p >= upper:\n break\n palindrome_list.append(p)\n return palindrome_list\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. 
Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n\n\ndef is_prime(n, k):\n \"\"\"\n Test if a number n is prime k-times.\n :param n: The prime number to be tested.\n :param k: The number of tests.\n :return:\n \"\"\"\n if n <= 1 or n == 4:\n return False\n if n <= 3:\n return True\n if is_even(n):\n return False\n while k > 0:\n a = random.randint(2, n - 1)\n if gcd(n, a) != 1:\n return False\n if modpow(a, n - 1, n) != 1:\n return False\n k -= 1\n return True\n\n\ndef _first_index_with_bigger_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is larger\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] >= P[i]:\n i -= 1\n return i\n\n\ndef _first_index_with_smaller_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is smaller\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] <= P[i]:\n i -= 1\n return i\n\n\ndef next_permutation(P):\n \"\"\"\n For any given permutation P, give the next permutation.\n If there is no next permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_bigger_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] <= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\ndef is_permutation(A, B):\n \"\"\"\n Returns true if A and B are permutations of each other.\n :param A:\n :param B:\n :return:\n \"\"\"\n return set(A) == set(B)\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\ndef union_sets(S):\n \"\"\"\n Returns the union of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = set()\n for s in S:\n res |= s\n return res\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets 
in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\ndef cumsum(L):\n \"\"\"\n Returns a list with the cumulative sum of a list L.\n :param S:\n :return:\n \"\"\"\n for i in range(1, len(L)):\n L[i] += L[i - 1]\n return L\n",
"<import token>\n\n\ndef time_it(f, args=None):\n t0 = time.time()\n print('--- Timed execution for {} ----------------'.format(f.__name__))\n print('Running...')\n result = f(*args) if args is not None else f()\n print('Solution is {}'.format(result))\n t1 = time.time()\n print('Executed in {} seconds'.format(round(t1 - t0, 6)))\n\n\n<function token>\n\n\ndef is_number(n):\n \"\"\"\n Returns true if the number is an instance of an int.\n or a float.\n :param n: The number n to be tested.\n :return: True if n is int or float.\n \"\"\"\n return isinstance(n, (int, float))\n\n\ndef is_unique_string(s):\n \"\"\"\n Determines if a given string only consists of unique\n characters.\n :param s: The string to test.\n :return: True if the string only contains unique characters.\n \"\"\"\n return len(s) == len(set(s))\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\ndef sum_of_proper_divisors_sieve(n):\n \"\"\"\n Generates an array with the sum of the divisors\n for that index of the array. To find the sum of\n divisors for 12: sieve[12].\n :param n: Upper limit of numbers.\n :return: List with sum of divisors.\n \"\"\"\n sieve = [1] * (n + 1)\n for i in range(2, n // 2 + 1):\n for j in range(i + i, n, i):\n sieve[j] += i\n return sieve\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\ndef sieve_to_list(sieve):\n \"\"\"\n Returns the sieve as a list where the index is the number\n where it was True.\n :param sieve:\n :return:\n \"\"\"\n return [i for i, v in enumerate(sieve) if v]\n\n\ndef triangle_number(n):\n \"\"\"\n Calculate the nth triangle number.\n :param n: Fn\n :return: Triangle number for n.\n \"\"\"\n return n * (n + 1) // 2\n\n\ndef is_triangle_number(n):\n \"\"\"\n Tests if a number is a triangle number. 
Solved with the\n inverse of n(n+1)/2, and testing if that solution\n is integer.\n :param n: Number to test.\n :return: True if it is a triangle number.\n \"\"\"\n _, x = quadratic.solve(1, 1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\ndef is_pentagonal_number(n):\n \"\"\"\n Determines if n is a pentagonal number.\n :param n:\n :return: True if pentagonal.\n \"\"\"\n _, x = quadratic.solve(3, -1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\ndef is_abundant_number(x):\n \"\"\"\n Test if a number is an abundant number. A number is abundant\n if the sum of the proper divisors is greater than the number\n itself.\n :param x: number to test.\n :return: True if it is an abundant number.\n \"\"\"\n return sum(proper_divisors(x)) > x\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. 
A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\ndef lcm3(a, b, c):\n \"\"\"\n Calculating the LCM for multiple digits is done with\n LCM(a,b,c) = LCM(LCM(a,b),c)\n :param a:\n :param b:\n :param c:\n :return:\n \"\"\"\n return lcm(lcm(a, b), c)\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\ndef prime_counting_function(n):\n \"\"\"\n Return the number of primes below a given number.\n This is calculated with the proportionality which\n states that π(n) ~ n / log(n).\n :param n: Upper bound.\n :return: Estimate of the number of primes below the\n bound.\n \"\"\"\n return n / math.log(n)\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\n<function token>\n\n\ndef is_even(n):\n \"\"\"\n Returns true if a number is even.\n :param n:\n :return:\n \"\"\"\n return not n & 1\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\ndef permutations(a):\n \"\"\"\n Generates all the permutations for a set.\n :param a:\n :return:\n \"\"\"\n n = len(a)\n return _heap_perm_(n, a)\n\n\ndef _heap_perm_(n, a):\n \"\"\"\n Heap's permutation algorithm.\n https://stackoverflow.com/a/29044942\n :param n:\n :param a:\n :return:\n 
\"\"\"\n if n == 1:\n yield a\n else:\n for i in range(n - 1):\n for hp in _heap_perm_(n - 1, a):\n yield list(hp)\n j = 0 if n % 2 == 1 else i\n a[j], a[n - 1] = a[n - 1], a[j]\n for hp in _heap_perm_(n - 1, a):\n yield list(hp)\n\n\ndef shift(a, n=1):\n \"\"\"\n Shift all the elements in the list by n.\n :param a:\n :param n:\n :return:\n \"\"\"\n return a[n:] + a[:n]\n\n\ndef is_palindrome(x):\n \"\"\"\n Returns true if a number or a string is a palindrome.\n :param x:\n :return:\n \"\"\"\n strx = str(x)\n return strx == strx[::-1]\n\n\ndef is_pandigital_to_n(x, n, zero_based=False):\n \"\"\"\n Returns true if a list of numbers is pandigital from 1 up to n.\n :param x:\n :param n:\n :param zero_based:\n :return:\n \"\"\"\n return set(x) == set(range(0 if zero_based else 1, n + 1))\n\n\ndef to_binary_string(x):\n \"\"\"\n Useful to convert a number into a binary number.\n :param x:\n :return:\n \"\"\"\n return '{0:b}'.format(x)\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\ndef palindrome_generator(lower, upper):\n \"\"\"\n Generates all palindromes between [lower, upper].\n https://stackoverflow.com/a/16344628\n :param lower:\n :param upper:\n :return:\n \"\"\"\n all_palindrome_numbers = _palindrome_number_generator()\n for p in all_palindrome_numbers:\n if p >= lower:\n break\n palindrome_list = [p]\n for p in all_palindrome_numbers:\n if p >= upper:\n break\n palindrome_list.append(p)\n return palindrome_list\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. 
Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n\n\ndef is_prime(n, k):\n \"\"\"\n Test if a number n is prime k-times.\n :param n: The prime number to be tested.\n :param k: The number of tests.\n :return:\n \"\"\"\n if n <= 1 or n == 4:\n return False\n if n <= 3:\n return True\n if is_even(n):\n return False\n while k > 0:\n a = random.randint(2, n - 1)\n if gcd(n, a) != 1:\n return False\n if modpow(a, n - 1, n) != 1:\n return False\n k -= 1\n return True\n\n\ndef _first_index_with_bigger_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is larger\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] >= P[i]:\n i -= 1\n return i\n\n\ndef _first_index_with_smaller_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is smaller\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] <= P[i]:\n i -= 1\n return i\n\n\ndef next_permutation(P):\n \"\"\"\n For any given permutation P, give the next permutation.\n If there is no next permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_bigger_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] <= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\ndef is_permutation(A, B):\n \"\"\"\n Returns true if A and B are permutations of each other.\n :param A:\n :param B:\n :return:\n \"\"\"\n return set(A) == set(B)\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\ndef union_sets(S):\n \"\"\"\n Returns the union of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = set()\n for s in S:\n res |= s\n return res\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets 
in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\ndef cumsum(L):\n \"\"\"\n Returns a list with the cumulative sum of a list L.\n :param S:\n :return:\n \"\"\"\n for i in range(1, len(L)):\n L[i] += L[i - 1]\n return L\n",
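The fibonacci_n helper repeated in the entry above documents Binet's closed form; the following is a minimal self-contained sketch of that formula (the name fib_binet and the use of round() instead of the entry's floor division are illustrative choices made here, not part of the dataset).

import math

def fib_binet(n):
    # Binet's closed form for F(n); exact after rounding for moderate n
    # (float precision degrades past roughly n = 70).
    sqrt5 = math.sqrt(5)
    phi = (1 + sqrt5) / 2
    psi = (1 - sqrt5) / 2
    return round((phi ** n - psi ** n) / sqrt5)

assert [fib_binet(i) for i in range(1, 11)] == [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]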
"<import token>\n\n\ndef time_it(f, args=None):\n t0 = time.time()\n print('--- Timed execution for {} ----------------'.format(f.__name__))\n print('Running...')\n result = f(*args) if args is not None else f()\n print('Solution is {}'.format(result))\n t1 = time.time()\n print('Executed in {} seconds'.format(round(t1 - t0, 6)))\n\n\n<function token>\n\n\ndef is_number(n):\n \"\"\"\n Returns true if the number is an instance of an int.\n or a float.\n :param n: The number n to be tested.\n :return: True if n is int or float.\n \"\"\"\n return isinstance(n, (int, float))\n\n\ndef is_unique_string(s):\n \"\"\"\n Determines if a given string only consists of unique\n characters.\n :param s: The string to test.\n :return: True if the string only contains unique characters.\n \"\"\"\n return len(s) == len(set(s))\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\ndef sum_of_proper_divisors_sieve(n):\n \"\"\"\n Generates an array with the sum of the divisors\n for that index of the array. To find the sum of\n divisors for 12: sieve[12].\n :param n: Upper limit of numbers.\n :return: List with sum of divisors.\n \"\"\"\n sieve = [1] * (n + 1)\n for i in range(2, n // 2 + 1):\n for j in range(i + i, n, i):\n sieve[j] += i\n return sieve\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\ndef sieve_to_list(sieve):\n \"\"\"\n Returns the sieve as a list where the index is the number\n where it was True.\n :param sieve:\n :return:\n \"\"\"\n return [i for i, v in enumerate(sieve) if v]\n\n\n<function token>\n\n\ndef is_triangle_number(n):\n \"\"\"\n Tests if a number is a triangle number. 
Solved with the\n inverse of n(n+1)/2, and testing if that solution\n is integer.\n :param n: Number to test.\n :return: True if it is a triangle number.\n \"\"\"\n _, x = quadratic.solve(1, 1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\ndef is_pentagonal_number(n):\n \"\"\"\n Determines if n is a pentagonal number.\n :param n:\n :return: True if pentagonal.\n \"\"\"\n _, x = quadratic.solve(3, -1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\ndef is_abundant_number(x):\n \"\"\"\n Test if a number is an abundant number. A number is abundant\n if the sum of the proper divisors is greater than the number\n itself.\n :param x: number to test.\n :return: True if it is an abundant number.\n \"\"\"\n return sum(proper_divisors(x)) > x\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. 
A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\ndef lcm3(a, b, c):\n \"\"\"\n Calculating the LCM for multiple digits is done with\n LCM(a,b,c) = LCM(LCM(a,b),c)\n :param a:\n :param b:\n :param c:\n :return:\n \"\"\"\n return lcm(lcm(a, b), c)\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\ndef prime_counting_function(n):\n \"\"\"\n Return the number of primes below a given number.\n This is calculated with the proportionality which\n states that π(n) ~ n / log(n).\n :param n: Upper bound.\n :return: Estimate of the number of primes below the\n bound.\n \"\"\"\n return n / math.log(n)\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\n<function token>\n\n\ndef is_even(n):\n \"\"\"\n Returns true if a number is even.\n :param n:\n :return:\n \"\"\"\n return not n & 1\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\ndef permutations(a):\n \"\"\"\n Generates all the permutations for a set.\n :param a:\n :return:\n \"\"\"\n n = len(a)\n return _heap_perm_(n, a)\n\n\ndef _heap_perm_(n, a):\n \"\"\"\n Heap's permutation algorithm.\n https://stackoverflow.com/a/29044942\n :param n:\n :param a:\n :return:\n 
\"\"\"\n if n == 1:\n yield a\n else:\n for i in range(n - 1):\n for hp in _heap_perm_(n - 1, a):\n yield list(hp)\n j = 0 if n % 2 == 1 else i\n a[j], a[n - 1] = a[n - 1], a[j]\n for hp in _heap_perm_(n - 1, a):\n yield list(hp)\n\n\ndef shift(a, n=1):\n \"\"\"\n Shift all the elements in the list by n.\n :param a:\n :param n:\n :return:\n \"\"\"\n return a[n:] + a[:n]\n\n\ndef is_palindrome(x):\n \"\"\"\n Returns true if a number or a string is a palindrome.\n :param x:\n :return:\n \"\"\"\n strx = str(x)\n return strx == strx[::-1]\n\n\ndef is_pandigital_to_n(x, n, zero_based=False):\n \"\"\"\n Returns true if a list of numbers is pandigital from 1 up to n.\n :param x:\n :param n:\n :param zero_based:\n :return:\n \"\"\"\n return set(x) == set(range(0 if zero_based else 1, n + 1))\n\n\ndef to_binary_string(x):\n \"\"\"\n Useful to convert a number into a binary number.\n :param x:\n :return:\n \"\"\"\n return '{0:b}'.format(x)\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\ndef palindrome_generator(lower, upper):\n \"\"\"\n Generates all palindromes between [lower, upper].\n https://stackoverflow.com/a/16344628\n :param lower:\n :param upper:\n :return:\n \"\"\"\n all_palindrome_numbers = _palindrome_number_generator()\n for p in all_palindrome_numbers:\n if p >= lower:\n break\n palindrome_list = [p]\n for p in all_palindrome_numbers:\n if p >= upper:\n break\n palindrome_list.append(p)\n return palindrome_list\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. 
Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n\n\ndef is_prime(n, k):\n \"\"\"\n Test if a number n is prime k-times.\n :param n: The prime number to be tested.\n :param k: The number of tests.\n :return:\n \"\"\"\n if n <= 1 or n == 4:\n return False\n if n <= 3:\n return True\n if is_even(n):\n return False\n while k > 0:\n a = random.randint(2, n - 1)\n if gcd(n, a) != 1:\n return False\n if modpow(a, n - 1, n) != 1:\n return False\n k -= 1\n return True\n\n\ndef _first_index_with_bigger_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is larger\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] >= P[i]:\n i -= 1\n return i\n\n\ndef _first_index_with_smaller_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is smaller\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] <= P[i]:\n i -= 1\n return i\n\n\ndef next_permutation(P):\n \"\"\"\n For any given permutation P, give the next permutation.\n If there is no next permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_bigger_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] <= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\ndef is_permutation(A, B):\n \"\"\"\n Returns true if A and B are permutations of each other.\n :param A:\n :param B:\n :return:\n \"\"\"\n return set(A) == set(B)\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\ndef union_sets(S):\n \"\"\"\n Returns the union of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = set()\n for s in S:\n res |= s\n return res\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets 
in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\ndef cumsum(L):\n \"\"\"\n Returns a list with the cumulative sum of a list L.\n :param S:\n :return:\n \"\"\"\n for i in range(1, len(L)):\n L[i] += L[i - 1]\n return L\n",
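prime_sieve and sieve_to_list in the entry above describe a boolean Sieve of Eratosthenes; a small standalone variant is sketched here under the assumption n >= 2 (eratosthenes is an illustrative name, and the inner loop starts at i*i rather than following the entry's exact formulation).

import math

def eratosthenes(n):
    # sieve[p] is True exactly when p is prime, for 0 <= p <= n.
    sieve = [True] * (n + 1)
    sieve[0] = sieve[1] = False
    for i in range(2, math.isqrt(n) + 1):
        if sieve[i]:
            for j in range(i * i, n + 1, i):
                sieve[j] = False
    return sieve

print([i for i, flag in enumerate(eratosthenes(30)) if flag])
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]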
"<import token>\n\n\ndef time_it(f, args=None):\n t0 = time.time()\n print('--- Timed execution for {} ----------------'.format(f.__name__))\n print('Running...')\n result = f(*args) if args is not None else f()\n print('Solution is {}'.format(result))\n t1 = time.time()\n print('Executed in {} seconds'.format(round(t1 - t0, 6)))\n\n\n<function token>\n\n\ndef is_number(n):\n \"\"\"\n Returns true if the number is an instance of an int.\n or a float.\n :param n: The number n to be tested.\n :return: True if n is int or float.\n \"\"\"\n return isinstance(n, (int, float))\n\n\ndef is_unique_string(s):\n \"\"\"\n Determines if a given string only consists of unique\n characters.\n :param s: The string to test.\n :return: True if the string only contains unique characters.\n \"\"\"\n return len(s) == len(set(s))\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\ndef sum_of_proper_divisors_sieve(n):\n \"\"\"\n Generates an array with the sum of the divisors\n for that index of the array. To find the sum of\n divisors for 12: sieve[12].\n :param n: Upper limit of numbers.\n :return: List with sum of divisors.\n \"\"\"\n sieve = [1] * (n + 1)\n for i in range(2, n // 2 + 1):\n for j in range(i + i, n, i):\n sieve[j] += i\n return sieve\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\ndef sieve_to_list(sieve):\n \"\"\"\n Returns the sieve as a list where the index is the number\n where it was True.\n :param sieve:\n :return:\n \"\"\"\n return [i for i, v in enumerate(sieve) if v]\n\n\n<function token>\n\n\ndef is_triangle_number(n):\n \"\"\"\n Tests if a number is a triangle number. 
Solved with the\n inverse of n(n+1)/2, and testing if that solution\n is integer.\n :param n: Number to test.\n :return: True if it is a triangle number.\n \"\"\"\n _, x = quadratic.solve(1, 1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\ndef is_pentagonal_number(n):\n \"\"\"\n Determines if n is a pentagonal number.\n :param n:\n :return: True if pentagonal.\n \"\"\"\n _, x = quadratic.solve(3, -1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\ndef is_abundant_number(x):\n \"\"\"\n Test if a number is an abundant number. A number is abundant\n if the sum of the proper divisors is greater than the number\n itself.\n :param x: number to test.\n :return: True if it is an abundant number.\n \"\"\"\n return sum(proper_divisors(x)) > x\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. 
A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\ndef lcm3(a, b, c):\n \"\"\"\n Calculating the LCM for multiple digits is done with\n LCM(a,b,c) = LCM(LCM(a,b),c)\n :param a:\n :param b:\n :param c:\n :return:\n \"\"\"\n return lcm(lcm(a, b), c)\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\ndef prime_counting_function(n):\n \"\"\"\n Return the number of primes below a given number.\n This is calculated with the proportionality which\n states that π(n) ~ n / log(n).\n :param n: Upper bound.\n :return: Estimate of the number of primes below the\n bound.\n \"\"\"\n return n / math.log(n)\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\n<function token>\n\n\ndef is_even(n):\n \"\"\"\n Returns true if a number is even.\n :param n:\n :return:\n \"\"\"\n return not n & 1\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\ndef permutations(a):\n \"\"\"\n Generates all the permutations for a set.\n :param a:\n :return:\n \"\"\"\n n = len(a)\n return _heap_perm_(n, a)\n\n\ndef _heap_perm_(n, a):\n \"\"\"\n Heap's permutation algorithm.\n https://stackoverflow.com/a/29044942\n :param n:\n :param a:\n :return:\n 
\"\"\"\n if n == 1:\n yield a\n else:\n for i in range(n - 1):\n for hp in _heap_perm_(n - 1, a):\n yield list(hp)\n j = 0 if n % 2 == 1 else i\n a[j], a[n - 1] = a[n - 1], a[j]\n for hp in _heap_perm_(n - 1, a):\n yield list(hp)\n\n\ndef shift(a, n=1):\n \"\"\"\n Shift all the elements in the list by n.\n :param a:\n :param n:\n :return:\n \"\"\"\n return a[n:] + a[:n]\n\n\n<function token>\n\n\ndef is_pandigital_to_n(x, n, zero_based=False):\n \"\"\"\n Returns true if a list of numbers is pandigital from 1 up to n.\n :param x:\n :param n:\n :param zero_based:\n :return:\n \"\"\"\n return set(x) == set(range(0 if zero_based else 1, n + 1))\n\n\ndef to_binary_string(x):\n \"\"\"\n Useful to convert a number into a binary number.\n :param x:\n :return:\n \"\"\"\n return '{0:b}'.format(x)\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\ndef palindrome_generator(lower, upper):\n \"\"\"\n Generates all palindromes between [lower, upper].\n https://stackoverflow.com/a/16344628\n :param lower:\n :param upper:\n :return:\n \"\"\"\n all_palindrome_numbers = _palindrome_number_generator()\n for p in all_palindrome_numbers:\n if p >= lower:\n break\n palindrome_list = [p]\n for p in all_palindrome_numbers:\n if p >= upper:\n break\n palindrome_list.append(p)\n return palindrome_list\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n\n\ndef is_prime(n, k):\n \"\"\"\n Test if a number n is prime k-times.\n :param n: The prime number to be tested.\n :param k: The number of tests.\n :return:\n \"\"\"\n if n <= 1 or n == 4:\n return False\n if n <= 3:\n return True\n if is_even(n):\n return False\n while k > 0:\n a = random.randint(2, n - 1)\n if gcd(n, a) != 1:\n return False\n if modpow(a, n - 1, n) != 1:\n return False\n k -= 1\n return True\n\n\ndef _first_index_with_bigger_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is larger\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] >= P[i]:\n i -= 1\n return i\n\n\ndef _first_index_with_smaller_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is smaller\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] <= P[i]:\n i -= 1\n return i\n\n\ndef next_permutation(P):\n \"\"\"\n For any given permutation P, give the next permutation.\n If there is no next permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_bigger_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] <= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious 
permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\ndef is_permutation(A, B):\n \"\"\"\n Returns true if A and B are permutations of each other.\n :param A:\n :param B:\n :return:\n \"\"\"\n return set(A) == set(B)\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\ndef union_sets(S):\n \"\"\"\n Returns the union of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = set()\n for s in S:\n res |= s\n return res\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\ndef cumsum(L):\n \"\"\"\n Returns a list with the cumulative sum of a list L.\n :param S:\n :return:\n \"\"\"\n for i in range(1, len(L)):\n L[i] += L[i - 1]\n return L\n",
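The lcm/lcm3 helpers in the entry above rely on the identities LCM(a,b) = a*b/GCD(a,b) and LCM(a,b,c) = LCM(LCM(a,b),c); a quick standalone check of those identities using math.gcd follows (a sketch, not the dataset's own code).

from math import gcd

def lcm(a, b):
    # LCM via the GCD identity documented in the entries above.
    return a * b // gcd(a, b)

assert lcm(4, 6) == 12
assert lcm(lcm(4, 6), 10) == 60  # LCM(a, b, c) = LCM(LCM(a, b), c)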
"<import token>\n\n\ndef time_it(f, args=None):\n t0 = time.time()\n print('--- Timed execution for {} ----------------'.format(f.__name__))\n print('Running...')\n result = f(*args) if args is not None else f()\n print('Solution is {}'.format(result))\n t1 = time.time()\n print('Executed in {} seconds'.format(round(t1 - t0, 6)))\n\n\n<function token>\n\n\ndef is_number(n):\n \"\"\"\n Returns true if the number is an instance of an int.\n or a float.\n :param n: The number n to be tested.\n :return: True if n is int or float.\n \"\"\"\n return isinstance(n, (int, float))\n\n\ndef is_unique_string(s):\n \"\"\"\n Determines if a given string only consists of unique\n characters.\n :param s: The string to test.\n :return: True if the string only contains unique characters.\n \"\"\"\n return len(s) == len(set(s))\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\ndef sum_of_proper_divisors_sieve(n):\n \"\"\"\n Generates an array with the sum of the divisors\n for that index of the array. To find the sum of\n divisors for 12: sieve[12].\n :param n: Upper limit of numbers.\n :return: List with sum of divisors.\n \"\"\"\n sieve = [1] * (n + 1)\n for i in range(2, n // 2 + 1):\n for j in range(i + i, n, i):\n sieve[j] += i\n return sieve\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\ndef sieve_to_list(sieve):\n \"\"\"\n Returns the sieve as a list where the index is the number\n where it was True.\n :param sieve:\n :return:\n \"\"\"\n return [i for i, v in enumerate(sieve) if v]\n\n\n<function token>\n\n\ndef is_triangle_number(n):\n \"\"\"\n Tests if a number is a triangle number. 
Solved with the\n inverse of n(n+1)/2, and testing if that solution\n is integer.\n :param n: Number to test.\n :return: True if it is a triangle number.\n \"\"\"\n _, x = quadratic.solve(1, 1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\ndef is_pentagonal_number(n):\n \"\"\"\n Determines if n is a pentagonal number.\n :param n:\n :return: True if pentagonal.\n \"\"\"\n _, x = quadratic.solve(3, -1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\ndef is_abundant_number(x):\n \"\"\"\n Test if a number is an abundant number. A number is abundant\n if the sum of the proper divisors is greater than the number\n itself.\n :param x: number to test.\n :return: True if it is an abundant number.\n \"\"\"\n return sum(proper_divisors(x)) > x\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. 
A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\ndef lcm3(a, b, c):\n \"\"\"\n Calculating the LCM for multiple digits is done with\n LCM(a,b,c) = LCM(LCM(a,b),c)\n :param a:\n :param b:\n :param c:\n :return:\n \"\"\"\n return lcm(lcm(a, b), c)\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\ndef prime_counting_function(n):\n \"\"\"\n Return the number of primes below a given number.\n This is calculated with the proportionality which\n states that π(n) ~ n / log(n).\n :param n: Upper bound.\n :return: Estimate of the number of primes below the\n bound.\n \"\"\"\n return n / math.log(n)\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\n<function token>\n\n\ndef is_even(n):\n \"\"\"\n Returns true if a number is even.\n :param n:\n :return:\n \"\"\"\n return not n & 1\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\ndef permutations(a):\n \"\"\"\n Generates all the permutations for a set.\n :param a:\n :return:\n \"\"\"\n n = len(a)\n return _heap_perm_(n, a)\n\n\ndef _heap_perm_(n, a):\n \"\"\"\n Heap's permutation algorithm.\n https://stackoverflow.com/a/29044942\n :param n:\n :param a:\n :return:\n 
\"\"\"\n if n == 1:\n yield a\n else:\n for i in range(n - 1):\n for hp in _heap_perm_(n - 1, a):\n yield list(hp)\n j = 0 if n % 2 == 1 else i\n a[j], a[n - 1] = a[n - 1], a[j]\n for hp in _heap_perm_(n - 1, a):\n yield list(hp)\n\n\ndef shift(a, n=1):\n \"\"\"\n Shift all the elements in the list by n.\n :param a:\n :param n:\n :return:\n \"\"\"\n return a[n:] + a[:n]\n\n\n<function token>\n\n\ndef is_pandigital_to_n(x, n, zero_based=False):\n \"\"\"\n Returns true if a list of numbers is pandigital from 1 up to n.\n :param x:\n :param n:\n :param zero_based:\n :return:\n \"\"\"\n return set(x) == set(range(0 if zero_based else 1, n + 1))\n\n\ndef to_binary_string(x):\n \"\"\"\n Useful to convert a number into a binary number.\n :param x:\n :return:\n \"\"\"\n return '{0:b}'.format(x)\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\ndef palindrome_generator(lower, upper):\n \"\"\"\n Generates all palindromes between [lower, upper].\n https://stackoverflow.com/a/16344628\n :param lower:\n :param upper:\n :return:\n \"\"\"\n all_palindrome_numbers = _palindrome_number_generator()\n for p in all_palindrome_numbers:\n if p >= lower:\n break\n palindrome_list = [p]\n for p in all_palindrome_numbers:\n if p >= upper:\n break\n palindrome_list.append(p)\n return palindrome_list\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef _first_index_with_bigger_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is larger\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] >= P[i]:\n i -= 1\n return i\n\n\ndef _first_index_with_smaller_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is smaller\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] <= P[i]:\n i -= 1\n return i\n\n\ndef next_permutation(P):\n \"\"\"\n For any given permutation P, give the next permutation.\n If there is no next permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_bigger_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] <= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime 
factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\ndef is_permutation(A, B):\n \"\"\"\n Returns true if A and B are permutations of each other.\n :param A:\n :param B:\n :return:\n \"\"\"\n return set(A) == set(B)\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\ndef union_sets(S):\n \"\"\"\n Returns the union of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = set()\n for s in S:\n res |= s\n return res\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\ndef cumsum(L):\n \"\"\"\n Returns a list with the cumulative sum of a list L.\n :param S:\n :return:\n \"\"\"\n for i in range(1, len(L)):\n L[i] += L[i - 1]\n return L\n",
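is_prime(n, k) in the entry above is a Fermat-style probabilistic test; the following is a self-contained sketch of the same idea (fermat_probable_prime is an illustrative name, and a True result only means "probably prime", since Carmichael numbers such as 561 can slip through when the random bases happen to be coprime to n).

import math
import random

def fermat_probable_prime(n, k=16):
    # k rounds of the Fermat test: pick a random base a and require
    # gcd(a, n) == 1 and a^(n-1) == 1 (mod n).
    if n <= 1 or n == 4:
        return False
    if n <= 3:
        return True
    if n % 2 == 0:
        return False
    for _ in range(k):
        a = random.randint(2, n - 2)
        if math.gcd(a, n) != 1 or pow(a, n - 1, n) != 1:
            return False
    return True

print([p for p in range(2, 60) if fermat_probable_prime(p)])
# expected: [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59]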
"<import token>\n\n\ndef time_it(f, args=None):\n t0 = time.time()\n print('--- Timed execution for {} ----------------'.format(f.__name__))\n print('Running...')\n result = f(*args) if args is not None else f()\n print('Solution is {}'.format(result))\n t1 = time.time()\n print('Executed in {} seconds'.format(round(t1 - t0, 6)))\n\n\n<function token>\n\n\ndef is_number(n):\n \"\"\"\n Returns true if the number is an instance of an int.\n or a float.\n :param n: The number n to be tested.\n :return: True if n is int or float.\n \"\"\"\n return isinstance(n, (int, float))\n\n\ndef is_unique_string(s):\n \"\"\"\n Determines if a given string only consists of unique\n characters.\n :param s: The string to test.\n :return: True if the string only contains unique characters.\n \"\"\"\n return len(s) == len(set(s))\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\ndef sum_of_proper_divisors_sieve(n):\n \"\"\"\n Generates an array with the sum of the divisors\n for that index of the array. To find the sum of\n divisors for 12: sieve[12].\n :param n: Upper limit of numbers.\n :return: List with sum of divisors.\n \"\"\"\n sieve = [1] * (n + 1)\n for i in range(2, n // 2 + 1):\n for j in range(i + i, n, i):\n sieve[j] += i\n return sieve\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\ndef sieve_to_list(sieve):\n \"\"\"\n Returns the sieve as a list where the index is the number\n where it was True.\n :param sieve:\n :return:\n \"\"\"\n return [i for i, v in enumerate(sieve) if v]\n\n\n<function token>\n\n\ndef is_triangle_number(n):\n \"\"\"\n Tests if a number is a triangle number. 
Solved with the\n inverse of n(n+1)/2, and testing if that solution\n is integer.\n :param n: Number to test.\n :return: True if it is a triangle number.\n \"\"\"\n _, x = quadratic.solve(1, 1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\ndef is_pentagonal_number(n):\n \"\"\"\n Determines if n is a pentagonal number.\n :param n:\n :return: True if pentagonal.\n \"\"\"\n _, x = quadratic.solve(3, -1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\ndef is_abundant_number(x):\n \"\"\"\n Test if a number is an abundant number. A number is abundant\n if the sum of the proper divisors is greater than the number\n itself.\n :param x: number to test.\n :return: True if it is an abundant number.\n \"\"\"\n return sum(proper_divisors(x)) > x\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. 
A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\ndef lcm3(a, b, c):\n \"\"\"\n Calculating the LCM for multiple digits is done with\n LCM(a,b,c) = LCM(LCM(a,b),c)\n :param a:\n :param b:\n :param c:\n :return:\n \"\"\"\n return lcm(lcm(a, b), c)\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\ndef prime_counting_function(n):\n \"\"\"\n Return the number of primes below a given number.\n This is calculated with the proportionality which\n states that π(n) ~ n / log(n).\n :param n: Upper bound.\n :return: Estimate of the number of primes below the\n bound.\n \"\"\"\n return n / math.log(n)\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\n<function token>\n\n\ndef is_even(n):\n \"\"\"\n Returns true if a number is even.\n :param n:\n :return:\n \"\"\"\n return not n & 1\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\ndef permutations(a):\n \"\"\"\n Generates all the permutations for a set.\n :param a:\n :return:\n \"\"\"\n n = len(a)\n return _heap_perm_(n, a)\n\n\ndef _heap_perm_(n, a):\n \"\"\"\n Heap's permutation algorithm.\n https://stackoverflow.com/a/29044942\n :param n:\n :param a:\n :return:\n 
\"\"\"\n if n == 1:\n yield a\n else:\n for i in range(n - 1):\n for hp in _heap_perm_(n - 1, a):\n yield list(hp)\n j = 0 if n % 2 == 1 else i\n a[j], a[n - 1] = a[n - 1], a[j]\n for hp in _heap_perm_(n - 1, a):\n yield list(hp)\n\n\ndef shift(a, n=1):\n \"\"\"\n Shift all the elements in the list by n.\n :param a:\n :param n:\n :return:\n \"\"\"\n return a[n:] + a[:n]\n\n\n<function token>\n\n\ndef is_pandigital_to_n(x, n, zero_based=False):\n \"\"\"\n Returns true if a list of numbers is pandigital from 1 up to n.\n :param x:\n :param n:\n :param zero_based:\n :return:\n \"\"\"\n return set(x) == set(range(0 if zero_based else 1, n + 1))\n\n\ndef to_binary_string(x):\n \"\"\"\n Useful to convert a number into a binary number.\n :param x:\n :return:\n \"\"\"\n return '{0:b}'.format(x)\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\ndef palindrome_generator(lower, upper):\n \"\"\"\n Generates all palindromes between [lower, upper].\n https://stackoverflow.com/a/16344628\n :param lower:\n :param upper:\n :return:\n \"\"\"\n all_palindrome_numbers = _palindrome_number_generator()\n for p in all_palindrome_numbers:\n if p >= lower:\n break\n palindrome_list = [p]\n for p in all_palindrome_numbers:\n if p >= upper:\n break\n palindrome_list.append(p)\n return palindrome_list\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef _first_index_with_bigger_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is larger\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] >= P[i]:\n i -= 1\n return i\n\n\ndef _first_index_with_smaller_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is smaller\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] <= P[i]:\n i -= 1\n return i\n\n\ndef next_permutation(P):\n \"\"\"\n For any given permutation P, give the next permutation.\n If there is no next permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_bigger_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] <= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime 
factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\n<function token>\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\ndef union_sets(S):\n \"\"\"\n Returns the union of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = set()\n for s in S:\n res |= s\n return res\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\ndef cumsum(L):\n \"\"\"\n Returns a list with the cumulative sum of a list L.\n :param S:\n :return:\n \"\"\"\n for i in range(1, len(L)):\n L[i] += L[i - 1]\n return L\n",
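prime_counting_function in the entry above uses the approximation π(n) ≈ n / ln(n); the short check below compares it against the known reference value π(10^6) = 78498 (that figure is a standard fact, not taken from the dataset).

import math

def prime_count_estimate(n):
    # pi(n) ~ n / ln(n); an estimate, not an exact count.
    return n / math.log(n)

print(round(prime_count_estimate(10**6)))  # 72382, versus the true pi(10^6) = 78498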
"<import token>\n\n\ndef time_it(f, args=None):\n t0 = time.time()\n print('--- Timed execution for {} ----------------'.format(f.__name__))\n print('Running...')\n result = f(*args) if args is not None else f()\n print('Solution is {}'.format(result))\n t1 = time.time()\n print('Executed in {} seconds'.format(round(t1 - t0, 6)))\n\n\n<function token>\n\n\ndef is_number(n):\n \"\"\"\n Returns true if the number is an instance of an int.\n or a float.\n :param n: The number n to be tested.\n :return: True if n is int or float.\n \"\"\"\n return isinstance(n, (int, float))\n\n\ndef is_unique_string(s):\n \"\"\"\n Determines if a given string only consists of unique\n characters.\n :param s: The string to test.\n :return: True if the string only contains unique characters.\n \"\"\"\n return len(s) == len(set(s))\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\ndef sum_of_proper_divisors_sieve(n):\n \"\"\"\n Generates an array with the sum of the divisors\n for that index of the array. To find the sum of\n divisors for 12: sieve[12].\n :param n: Upper limit of numbers.\n :return: List with sum of divisors.\n \"\"\"\n sieve = [1] * (n + 1)\n for i in range(2, n // 2 + 1):\n for j in range(i + i, n, i):\n sieve[j] += i\n return sieve\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\ndef sieve_to_list(sieve):\n \"\"\"\n Returns the sieve as a list where the index is the number\n where it was True.\n :param sieve:\n :return:\n \"\"\"\n return [i for i, v in enumerate(sieve) if v]\n\n\n<function token>\n\n\ndef is_triangle_number(n):\n \"\"\"\n Tests if a number is a triangle number. 
Solved with the\n inverse of n(n+1)/2, and testing if that solution\n is integer.\n :param n: Number to test.\n :return: True if it is a triangle number.\n \"\"\"\n _, x = quadratic.solve(1, 1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\ndef is_pentagonal_number(n):\n \"\"\"\n Determines if n is a pentagonal number.\n :param n:\n :return: True if pentagonal.\n \"\"\"\n _, x = quadratic.solve(3, -1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. 
A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\ndef lcm3(a, b, c):\n \"\"\"\n Calculating the LCM for multiple digits is done with\n LCM(a,b,c) = LCM(LCM(a,b),c)\n :param a:\n :param b:\n :param c:\n :return:\n \"\"\"\n return lcm(lcm(a, b), c)\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\ndef prime_counting_function(n):\n \"\"\"\n Return the number of primes below a given number.\n This is calculated with the proportionality which\n states that π(n) ~ n / log(n).\n :param n: Upper bound.\n :return: Estimate of the number of primes below the\n bound.\n \"\"\"\n return n / math.log(n)\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\n<function token>\n\n\ndef is_even(n):\n \"\"\"\n Returns true if a number is even.\n :param n:\n :return:\n \"\"\"\n return not n & 1\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\ndef permutations(a):\n \"\"\"\n Generates all the permutations for a set.\n :param a:\n :return:\n \"\"\"\n n = len(a)\n return _heap_perm_(n, a)\n\n\ndef _heap_perm_(n, a):\n \"\"\"\n Heap's permutation algorithm.\n https://stackoverflow.com/a/29044942\n :param n:\n :param a:\n :return:\n 
\"\"\"\n if n == 1:\n yield a\n else:\n for i in range(n - 1):\n for hp in _heap_perm_(n - 1, a):\n yield list(hp)\n j = 0 if n % 2 == 1 else i\n a[j], a[n - 1] = a[n - 1], a[j]\n for hp in _heap_perm_(n - 1, a):\n yield list(hp)\n\n\ndef shift(a, n=1):\n \"\"\"\n Shift all the elements in the list by n.\n :param a:\n :param n:\n :return:\n \"\"\"\n return a[n:] + a[:n]\n\n\n<function token>\n\n\ndef is_pandigital_to_n(x, n, zero_based=False):\n \"\"\"\n Returns true if a list of numbers is pandigital from 1 up to n.\n :param x:\n :param n:\n :param zero_based:\n :return:\n \"\"\"\n return set(x) == set(range(0 if zero_based else 1, n + 1))\n\n\ndef to_binary_string(x):\n \"\"\"\n Useful to convert a number into a binary number.\n :param x:\n :return:\n \"\"\"\n return '{0:b}'.format(x)\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\ndef palindrome_generator(lower, upper):\n \"\"\"\n Generates all palindromes between [lower, upper].\n https://stackoverflow.com/a/16344628\n :param lower:\n :param upper:\n :return:\n \"\"\"\n all_palindrome_numbers = _palindrome_number_generator()\n for p in all_palindrome_numbers:\n if p >= lower:\n break\n palindrome_list = [p]\n for p in all_palindrome_numbers:\n if p >= upper:\n break\n palindrome_list.append(p)\n return palindrome_list\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef _first_index_with_bigger_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is larger\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] >= P[i]:\n i -= 1\n return i\n\n\ndef _first_index_with_smaller_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is smaller\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] <= P[i]:\n i -= 1\n return i\n\n\ndef next_permutation(P):\n \"\"\"\n For any given permutation P, give the next permutation.\n If there is no next permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_bigger_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] <= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime 
factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\n<function token>\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\ndef union_sets(S):\n \"\"\"\n Returns the union of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = set()\n for s in S:\n res |= s\n return res\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\ndef cumsum(L):\n \"\"\"\n Returns a list with the cumulative sum of a list L.\n :param S:\n :return:\n \"\"\"\n for i in range(1, len(L)):\n L[i] += L[i - 1]\n return L\n",
"<import token>\n\n\ndef time_it(f, args=None):\n t0 = time.time()\n print('--- Timed execution for {} ----------------'.format(f.__name__))\n print('Running...')\n result = f(*args) if args is not None else f()\n print('Solution is {}'.format(result))\n t1 = time.time()\n print('Executed in {} seconds'.format(round(t1 - t0, 6)))\n\n\n<function token>\n\n\ndef is_number(n):\n \"\"\"\n Returns true if the number is an instance of an int.\n or a float.\n :param n: The number n to be tested.\n :return: True if n is int or float.\n \"\"\"\n return isinstance(n, (int, float))\n\n\ndef is_unique_string(s):\n \"\"\"\n Determines if a given string only consists of unique\n characters.\n :param s: The string to test.\n :return: True if the string only contains unique characters.\n \"\"\"\n return len(s) == len(set(s))\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\ndef sum_of_proper_divisors_sieve(n):\n \"\"\"\n Generates an array with the sum of the divisors\n for that index of the array. To find the sum of\n divisors for 12: sieve[12].\n :param n: Upper limit of numbers.\n :return: List with sum of divisors.\n \"\"\"\n sieve = [1] * (n + 1)\n for i in range(2, n // 2 + 1):\n for j in range(i + i, n, i):\n sieve[j] += i\n return sieve\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\ndef sieve_to_list(sieve):\n \"\"\"\n Returns the sieve as a list where the index is the number\n where it was True.\n :param sieve:\n :return:\n \"\"\"\n return [i for i, v in enumerate(sieve) if v]\n\n\n<function token>\n\n\ndef is_triangle_number(n):\n \"\"\"\n Tests if a number is a triangle number. 
Solved with the\n inverse of n(n+1)/2, and testing if that solution\n is integer.\n :param n: Number to test.\n :return: True if it is a triangle number.\n \"\"\"\n _, x = quadratic.solve(1, 1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\ndef is_pentagonal_number(n):\n \"\"\"\n Determines if n is a pentagonal number.\n :param n:\n :return: True if pentagonal.\n \"\"\"\n _, x = quadratic.solve(3, -1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. 
A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\ndef prime_counting_function(n):\n \"\"\"\n Return the number of primes below a given number.\n This is calculated with the proportionality which\n states that π(n) ~ n / log(n).\n :param n: Upper bound.\n :return: Estimate of the number of primes below the\n bound.\n \"\"\"\n return n / math.log(n)\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\n<function token>\n\n\ndef is_even(n):\n \"\"\"\n Returns true if a number is even.\n :param n:\n :return:\n \"\"\"\n return not n & 1\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\ndef permutations(a):\n \"\"\"\n Generates all the permutations for a set.\n :param a:\n :return:\n \"\"\"\n n = len(a)\n return _heap_perm_(n, a)\n\n\ndef _heap_perm_(n, a):\n \"\"\"\n Heap's permutation algorithm.\n https://stackoverflow.com/a/29044942\n :param n:\n :param a:\n :return:\n \"\"\"\n if n == 1:\n yield a\n else:\n for i in range(n - 1):\n for hp in _heap_perm_(n - 1, a):\n yield list(hp)\n j = 0 if n % 2 == 1 else i\n a[j], a[n - 1] = a[n - 1], a[j]\n for 
hp in _heap_perm_(n - 1, a):\n yield list(hp)\n\n\ndef shift(a, n=1):\n \"\"\"\n Shift all the elements in the list by n.\n :param a:\n :param n:\n :return:\n \"\"\"\n return a[n:] + a[:n]\n\n\n<function token>\n\n\ndef is_pandigital_to_n(x, n, zero_based=False):\n \"\"\"\n Returns true if a list of numbers is pandigital from 1 up to n.\n :param x:\n :param n:\n :param zero_based:\n :return:\n \"\"\"\n return set(x) == set(range(0 if zero_based else 1, n + 1))\n\n\ndef to_binary_string(x):\n \"\"\"\n Useful to convert a number into a binary number.\n :param x:\n :return:\n \"\"\"\n return '{0:b}'.format(x)\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\ndef palindrome_generator(lower, upper):\n \"\"\"\n Generates all palindromes between [lower, upper].\n https://stackoverflow.com/a/16344628\n :param lower:\n :param upper:\n :return:\n \"\"\"\n all_palindrome_numbers = _palindrome_number_generator()\n for p in all_palindrome_numbers:\n if p >= lower:\n break\n palindrome_list = [p]\n for p in all_palindrome_numbers:\n if p >= upper:\n break\n palindrome_list.append(p)\n return palindrome_list\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef _first_index_with_bigger_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is larger\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] >= P[i]:\n i -= 1\n return i\n\n\ndef _first_index_with_smaller_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is smaller\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] <= P[i]:\n i -= 1\n return i\n\n\ndef next_permutation(P):\n \"\"\"\n For any given permutation P, give the next permutation.\n If there is no next permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_bigger_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] <= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 
2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\n<function token>\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\ndef union_sets(S):\n \"\"\"\n Returns the union of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = set()\n for s in S:\n res |= s\n return res\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\ndef cumsum(L):\n \"\"\"\n Returns a list with the cumulative sum of a list L.\n :param S:\n :return:\n \"\"\"\n for i in range(1, len(L)):\n L[i] += L[i - 1]\n return L\n",
"<import token>\n\n\ndef time_it(f, args=None):\n t0 = time.time()\n print('--- Timed execution for {} ----------------'.format(f.__name__))\n print('Running...')\n result = f(*args) if args is not None else f()\n print('Solution is {}'.format(result))\n t1 = time.time()\n print('Executed in {} seconds'.format(round(t1 - t0, 6)))\n\n\n<function token>\n\n\ndef is_number(n):\n \"\"\"\n Returns true if the number is an instance of an int.\n or a float.\n :param n: The number n to be tested.\n :return: True if n is int or float.\n \"\"\"\n return isinstance(n, (int, float))\n\n\ndef is_unique_string(s):\n \"\"\"\n Determines if a given string only consists of unique\n characters.\n :param s: The string to test.\n :return: True if the string only contains unique characters.\n \"\"\"\n return len(s) == len(set(s))\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\ndef sum_of_proper_divisors_sieve(n):\n \"\"\"\n Generates an array with the sum of the divisors\n for that index of the array. To find the sum of\n divisors for 12: sieve[12].\n :param n: Upper limit of numbers.\n :return: List with sum of divisors.\n \"\"\"\n sieve = [1] * (n + 1)\n for i in range(2, n // 2 + 1):\n for j in range(i + i, n, i):\n sieve[j] += i\n return sieve\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\ndef sieve_to_list(sieve):\n \"\"\"\n Returns the sieve as a list where the index is the number\n where it was True.\n :param sieve:\n :return:\n \"\"\"\n return [i for i, v in enumerate(sieve) if v]\n\n\n<function token>\n\n\ndef is_triangle_number(n):\n \"\"\"\n Tests if a number is a triangle number. 
Solved with the\n inverse of n(n+1)/2, and testing if that solution\n is integer.\n :param n: Number to test.\n :return: True if it is a triangle number.\n \"\"\"\n _, x = quadratic.solve(1, 1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\ndef is_pentagonal_number(n):\n \"\"\"\n Determines if n is a pentagonal number.\n :param n:\n :return: True if pentagonal.\n \"\"\"\n _, x = quadratic.solve(3, -1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. 
A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\ndef prime_counting_function(n):\n \"\"\"\n Return the number of primes below a given number.\n This is calculated with the proportionality which\n states that π(n) ~ n / log(n).\n :param n: Upper bound.\n :return: Estimate of the number of primes below the\n bound.\n \"\"\"\n return n / math.log(n)\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\n<function token>\n\n\ndef is_even(n):\n \"\"\"\n Returns true if a number is even.\n :param n:\n :return:\n \"\"\"\n return not n & 1\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\ndef permutations(a):\n \"\"\"\n Generates all the permutations for a set.\n :param a:\n :return:\n \"\"\"\n n = len(a)\n return _heap_perm_(n, a)\n\n\n<function token>\n\n\ndef shift(a, n=1):\n \"\"\"\n Shift all the elements in the list by n.\n :param a:\n :param n:\n :return:\n \"\"\"\n return a[n:] + a[:n]\n\n\n<function token>\n\n\ndef is_pandigital_to_n(x, n, zero_based=False):\n \"\"\"\n Returns true if a list of numbers is pandigital from 1 up to n.\n :param x:\n 
:param n:\n :param zero_based:\n :return:\n \"\"\"\n return set(x) == set(range(0 if zero_based else 1, n + 1))\n\n\ndef to_binary_string(x):\n \"\"\"\n Useful to convert a number into a binary number.\n :param x:\n :return:\n \"\"\"\n return '{0:b}'.format(x)\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\ndef palindrome_generator(lower, upper):\n \"\"\"\n Generates all palindromes between [lower, upper].\n https://stackoverflow.com/a/16344628\n :param lower:\n :param upper:\n :return:\n \"\"\"\n all_palindrome_numbers = _palindrome_number_generator()\n for p in all_palindrome_numbers:\n if p >= lower:\n break\n palindrome_list = [p]\n for p in all_palindrome_numbers:\n if p >= upper:\n break\n palindrome_list.append(p)\n return palindrome_list\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef _first_index_with_bigger_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is larger\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] >= P[i]:\n i -= 1\n return i\n\n\ndef _first_index_with_smaller_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is smaller\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] <= P[i]:\n i -= 1\n return i\n\n\ndef next_permutation(P):\n \"\"\"\n For any given permutation P, give the next permutation.\n If there is no next permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_bigger_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] <= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\n<function token>\n\n\ndef is_permutation3(A, B, 
C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\ndef union_sets(S):\n \"\"\"\n Returns the union of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = set()\n for s in S:\n res |= s\n return res\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\ndef cumsum(L):\n \"\"\"\n Returns a list with the cumulative sum of a list L.\n :param S:\n :return:\n \"\"\"\n for i in range(1, len(L)):\n L[i] += L[i - 1]\n return L\n",
"<import token>\n\n\ndef time_it(f, args=None):\n t0 = time.time()\n print('--- Timed execution for {} ----------------'.format(f.__name__))\n print('Running...')\n result = f(*args) if args is not None else f()\n print('Solution is {}'.format(result))\n t1 = time.time()\n print('Executed in {} seconds'.format(round(t1 - t0, 6)))\n\n\n<function token>\n\n\ndef is_number(n):\n \"\"\"\n Returns true if the number is an instance of an int.\n or a float.\n :param n: The number n to be tested.\n :return: True if n is int or float.\n \"\"\"\n return isinstance(n, (int, float))\n\n\ndef is_unique_string(s):\n \"\"\"\n Determines if a given string only consists of unique\n characters.\n :param s: The string to test.\n :return: True if the string only contains unique characters.\n \"\"\"\n return len(s) == len(set(s))\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\ndef sum_of_proper_divisors_sieve(n):\n \"\"\"\n Generates an array with the sum of the divisors\n for that index of the array. To find the sum of\n divisors for 12: sieve[12].\n :param n: Upper limit of numbers.\n :return: List with sum of divisors.\n \"\"\"\n sieve = [1] * (n + 1)\n for i in range(2, n // 2 + 1):\n for j in range(i + i, n, i):\n sieve[j] += i\n return sieve\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\ndef sieve_to_list(sieve):\n \"\"\"\n Returns the sieve as a list where the index is the number\n where it was True.\n :param sieve:\n :return:\n \"\"\"\n return [i for i, v in enumerate(sieve) if v]\n\n\n<function token>\n\n\ndef is_triangle_number(n):\n \"\"\"\n Tests if a number is a triangle number. 
Solved with the\n inverse of n(n+1)/2, and testing if that solution\n is integer.\n :param n: Number to test.\n :return: True if it is a triangle number.\n \"\"\"\n _, x = quadratic.solve(1, 1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\ndef is_pentagonal_number(n):\n \"\"\"\n Determines if n is a pentagonal number.\n :param n:\n :return: True if pentagonal.\n \"\"\"\n _, x = quadratic.solve(3, -1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. 
A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\ndef prime_counting_function(n):\n \"\"\"\n Return the number of primes below a given number.\n This is calculated with the proportionality which\n states that π(n) ~ n / log(n).\n :param n: Upper bound.\n :return: Estimate of the number of primes below the\n bound.\n \"\"\"\n return n / math.log(n)\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\n<function token>\n\n\ndef is_even(n):\n \"\"\"\n Returns true if a number is even.\n :param n:\n :return:\n \"\"\"\n return not n & 1\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\ndef permutations(a):\n \"\"\"\n Generates all the permutations for a set.\n :param a:\n :return:\n \"\"\"\n n = len(a)\n return _heap_perm_(n, a)\n\n\n<function token>\n\n\ndef shift(a, n=1):\n \"\"\"\n Shift all the elements in the list by n.\n :param a:\n :param n:\n :return:\n \"\"\"\n return a[n:] + a[:n]\n\n\n<function token>\n\n\ndef is_pandigital_to_n(x, n, zero_based=False):\n \"\"\"\n Returns true if a list of numbers is pandigital from 1 up to n.\n :param x:\n 
:param n:\n :param zero_based:\n :return:\n \"\"\"\n return set(x) == set(range(0 if zero_based else 1, n + 1))\n\n\n<function token>\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\ndef palindrome_generator(lower, upper):\n \"\"\"\n Generates all palindromes between [lower, upper].\n https://stackoverflow.com/a/16344628\n :param lower:\n :param upper:\n :return:\n \"\"\"\n all_palindrome_numbers = _palindrome_number_generator()\n for p in all_palindrome_numbers:\n if p >= lower:\n break\n palindrome_list = [p]\n for p in all_palindrome_numbers:\n if p >= upper:\n break\n palindrome_list.append(p)\n return palindrome_list\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef _first_index_with_bigger_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is larger\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] >= P[i]:\n i -= 1\n return i\n\n\ndef _first_index_with_smaller_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is smaller\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] <= P[i]:\n i -= 1\n return i\n\n\ndef next_permutation(P):\n \"\"\"\n For any given permutation P, give the next permutation.\n If there is no next permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_bigger_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] <= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\n<function token>\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n 
\"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\ndef union_sets(S):\n \"\"\"\n Returns the union of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = set()\n for s in S:\n res |= s\n return res\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\ndef cumsum(L):\n \"\"\"\n Returns a list with the cumulative sum of a list L.\n :param S:\n :return:\n \"\"\"\n for i in range(1, len(L)):\n L[i] += L[i - 1]\n return L\n",
"<import token>\n\n\ndef time_it(f, args=None):\n t0 = time.time()\n print('--- Timed execution for {} ----------------'.format(f.__name__))\n print('Running...')\n result = f(*args) if args is not None else f()\n print('Solution is {}'.format(result))\n t1 = time.time()\n print('Executed in {} seconds'.format(round(t1 - t0, 6)))\n\n\n<function token>\n\n\ndef is_number(n):\n \"\"\"\n Returns true if the number is an instance of an int.\n or a float.\n :param n: The number n to be tested.\n :return: True if n is int or float.\n \"\"\"\n return isinstance(n, (int, float))\n\n\ndef is_unique_string(s):\n \"\"\"\n Determines if a given string only consists of unique\n characters.\n :param s: The string to test.\n :return: True if the string only contains unique characters.\n \"\"\"\n return len(s) == len(set(s))\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\ndef sum_of_proper_divisors_sieve(n):\n \"\"\"\n Generates an array with the sum of the divisors\n for that index of the array. To find the sum of\n divisors for 12: sieve[12].\n :param n: Upper limit of numbers.\n :return: List with sum of divisors.\n \"\"\"\n sieve = [1] * (n + 1)\n for i in range(2, n // 2 + 1):\n for j in range(i + i, n, i):\n sieve[j] += i\n return sieve\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\ndef sieve_to_list(sieve):\n \"\"\"\n Returns the sieve as a list where the index is the number\n where it was True.\n :param sieve:\n :return:\n \"\"\"\n return [i for i, v in enumerate(sieve) if v]\n\n\n<function token>\n\n\ndef is_triangle_number(n):\n \"\"\"\n Tests if a number is a triangle number. 
Solved with the\n inverse of n(n+1)/2, and testing if that solution\n is integer.\n :param n: Number to test.\n :return: True if it is a triangle number.\n \"\"\"\n _, x = quadratic.solve(1, 1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\ndef is_pentagonal_number(n):\n \"\"\"\n Determines if n is a pentagonal number.\n :param n:\n :return: True if pentagonal.\n \"\"\"\n _, x = quadratic.solve(3, -1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. 
A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\ndef prime_counting_function(n):\n \"\"\"\n Return the number of primes below a given number.\n This is calculated with the proportionality which\n states that π(n) ~ n / log(n).\n :param n: Upper bound.\n :return: Estimate of the number of primes below the\n bound.\n \"\"\"\n return n / math.log(n)\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\n<function token>\n<function token>\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\ndef permutations(a):\n \"\"\"\n Generates all the permutations for a set.\n :param a:\n :return:\n \"\"\"\n n = len(a)\n return _heap_perm_(n, a)\n\n\n<function token>\n\n\ndef shift(a, n=1):\n \"\"\"\n Shift all the elements in the list by n.\n :param a:\n :param n:\n :return:\n \"\"\"\n return a[n:] + a[:n]\n\n\n<function token>\n\n\ndef is_pandigital_to_n(x, n, zero_based=False):\n \"\"\"\n Returns true if a list of numbers is pandigital from 1 up to n.\n :param x:\n :param n:\n :param zero_based:\n :return:\n \"\"\"\n return set(x) == set(range(0 if zero_based else 
1, n + 1))\n\n\n<function token>\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\ndef palindrome_generator(lower, upper):\n \"\"\"\n Generates all palindromes between [lower, upper].\n https://stackoverflow.com/a/16344628\n :param lower:\n :param upper:\n :return:\n \"\"\"\n all_palindrome_numbers = _palindrome_number_generator()\n for p in all_palindrome_numbers:\n if p >= lower:\n break\n palindrome_list = [p]\n for p in all_palindrome_numbers:\n if p >= upper:\n break\n palindrome_list.append(p)\n return palindrome_list\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef _first_index_with_bigger_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is larger\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] >= P[i]:\n i -= 1\n return i\n\n\ndef _first_index_with_smaller_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is smaller\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] <= P[i]:\n i -= 1\n return i\n\n\ndef next_permutation(P):\n \"\"\"\n For any given permutation P, give the next permutation.\n If there is no next permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_bigger_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] <= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\n<function token>\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the 
sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\ndef union_sets(S):\n \"\"\"\n Returns the union of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = set()\n for s in S:\n res |= s\n return res\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\ndef cumsum(L):\n \"\"\"\n Returns a list with the cumulative sum of a list L.\n :param S:\n :return:\n \"\"\"\n for i in range(1, len(L)):\n L[i] += L[i - 1]\n return L\n",
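The module embedded in these step strings centres on an Eratosthenes-style `prime_sieve` and a trial-division `prime_factorization` that consumes it. A minimal, self-contained sketch of that pattern follows; the bounds and names are illustrative simplifications, not the stored implementation copied verbatim.

```python
import math

def prime_sieve(n):
    """Boolean sieve: sieve[k] is True iff k is prime (intended for n >= 2)."""
    sieve = [True] * (n + 1)
    sieve[0] = sieve[1] = False
    for i in range(2, math.isqrt(n) + 1):
        if sieve[i]:
            # every multiple of i from i*i upward is composite
            for j in range(i * i, n + 1, i):
                sieve[j] = False
    return sieve

def prime_factorization(x, sieve=None):
    """Prime factors of x with multiplicity, e.g. 360 -> [2, 2, 2, 3, 3, 5]."""
    if x < 2:
        return []
    if sieve is None:
        sieve = prime_sieve(x)
    factors = []
    for p, is_prime in enumerate(sieve):
        if not is_prime:
            continue
        while x % p == 0:        # divide out each prime as often as it fits
            factors.append(p)
            x //= p
        if x == 1:
            break
    return factors

assert prime_factorization(1001) == [7, 11, 13]
assert prime_factorization(360) == [2, 2, 2, 3, 3, 5]
```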
"<import token>\n\n\ndef time_it(f, args=None):\n t0 = time.time()\n print('--- Timed execution for {} ----------------'.format(f.__name__))\n print('Running...')\n result = f(*args) if args is not None else f()\n print('Solution is {}'.format(result))\n t1 = time.time()\n print('Executed in {} seconds'.format(round(t1 - t0, 6)))\n\n\n<function token>\n\n\ndef is_number(n):\n \"\"\"\n Returns true if the number is an instance of an int.\n or a float.\n :param n: The number n to be tested.\n :return: True if n is int or float.\n \"\"\"\n return isinstance(n, (int, float))\n\n\ndef is_unique_string(s):\n \"\"\"\n Determines if a given string only consists of unique\n characters.\n :param s: The string to test.\n :return: True if the string only contains unique characters.\n \"\"\"\n return len(s) == len(set(s))\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\ndef sum_of_proper_divisors_sieve(n):\n \"\"\"\n Generates an array with the sum of the divisors\n for that index of the array. To find the sum of\n divisors for 12: sieve[12].\n :param n: Upper limit of numbers.\n :return: List with sum of divisors.\n \"\"\"\n sieve = [1] * (n + 1)\n for i in range(2, n // 2 + 1):\n for j in range(i + i, n, i):\n sieve[j] += i\n return sieve\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. 
To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\ndef sieve_to_list(sieve):\n \"\"\"\n Returns the sieve as a list where the index is the number\n where it was True.\n :param sieve:\n :return:\n \"\"\"\n return [i for i, v in enumerate(sieve) if v]\n\n\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\ndef is_pentagonal_number(n):\n \"\"\"\n Determines if n is a pentagonal number.\n :param n:\n :return: True if pentagonal.\n \"\"\"\n _, x = quadratic.solve(3, -1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. 
A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\ndef prime_counting_function(n):\n \"\"\"\n Return the number of primes below a given number.\n This is calculated with the proportionality which\n states that π(n) ~ n / log(n).\n :param n: Upper bound.\n :return: Estimate of the number of primes below the\n bound.\n \"\"\"\n return n / math.log(n)\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\n<function token>\n<function token>\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\ndef permutations(a):\n \"\"\"\n Generates all the permutations for a set.\n :param a:\n :return:\n \"\"\"\n n = len(a)\n return _heap_perm_(n, a)\n\n\n<function token>\n\n\ndef shift(a, n=1):\n \"\"\"\n Shift all the elements in the list by n.\n :param a:\n :param n:\n :return:\n \"\"\"\n return a[n:] + a[:n]\n\n\n<function token>\n\n\ndef is_pandigital_to_n(x, n, zero_based=False):\n \"\"\"\n Returns true if a list of numbers is pandigital from 1 up to n.\n :param x:\n :param n:\n :param zero_based:\n :return:\n \"\"\"\n return set(x) == set(range(0 if zero_based else 
1, n + 1))\n\n\n<function token>\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\ndef palindrome_generator(lower, upper):\n \"\"\"\n Generates all palindromes between [lower, upper].\n https://stackoverflow.com/a/16344628\n :param lower:\n :param upper:\n :return:\n \"\"\"\n all_palindrome_numbers = _palindrome_number_generator()\n for p in all_palindrome_numbers:\n if p >= lower:\n break\n palindrome_list = [p]\n for p in all_palindrome_numbers:\n if p >= upper:\n break\n palindrome_list.append(p)\n return palindrome_list\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef _first_index_with_bigger_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is larger\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] >= P[i]:\n i -= 1\n return i\n\n\ndef _first_index_with_smaller_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is smaller\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] <= P[i]:\n i -= 1\n return i\n\n\ndef next_permutation(P):\n \"\"\"\n For any given permutation P, give the next permutation.\n If there is no next permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_bigger_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] <= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\n<function token>\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the 
sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\ndef union_sets(S):\n \"\"\"\n Returns the union of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = set()\n for s in S:\n res |= s\n return res\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\ndef cumsum(L):\n \"\"\"\n Returns a list with the cumulative sum of a list L.\n :param S:\n :return:\n \"\"\"\n for i in range(1, len(L)):\n L[i] += L[i - 1]\n return L\n",
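The `next_permutation` helper that recurs in these steps is the classic lexicographic-successor algorithm: locate the rightmost ascent, swap its left element with the smallest larger element to its right, then reverse the tail. A compact sketch of the same idea, using a suffix reversal in place of the stored pairwise swap loop:

```python
def next_permutation(p):
    """Return the lexicographically next arrangement of p, or p itself if p is last."""
    p = list(p)
    n = len(p)
    i = n - 1
    while i > 0 and p[i - 1] >= p[i]:   # 1. rightmost index with p[i-1] < p[i]
        i -= 1
    if i == 0:                          # already the highest permutation
        return p
    j = n - 1
    while p[j] <= p[i - 1]:             # 2. rightmost element larger than p[i-1]
        j -= 1
    p[i - 1], p[j] = p[j], p[i - 1]
    p[i:] = reversed(p[i:])             # 3. the tail was descending; reverse it
    return p

assert next_permutation([1, 2, 3]) == [1, 3, 2]
assert next_permutation([1, 3, 2]) == [2, 1, 3]
assert next_permutation([3, 2, 1]) == [3, 2, 1]
```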
"<import token>\n\n\ndef time_it(f, args=None):\n t0 = time.time()\n print('--- Timed execution for {} ----------------'.format(f.__name__))\n print('Running...')\n result = f(*args) if args is not None else f()\n print('Solution is {}'.format(result))\n t1 = time.time()\n print('Executed in {} seconds'.format(round(t1 - t0, 6)))\n\n\n<function token>\n\n\ndef is_number(n):\n \"\"\"\n Returns true if the number is an instance of an int.\n or a float.\n :param n: The number n to be tested.\n :return: True if n is int or float.\n \"\"\"\n return isinstance(n, (int, float))\n\n\ndef is_unique_string(s):\n \"\"\"\n Determines if a given string only consists of unique\n characters.\n :param s: The string to test.\n :return: True if the string only contains unique characters.\n \"\"\"\n return len(s) == len(set(s))\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\n<function token>\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\ndef sieve_to_list(sieve):\n \"\"\"\n Returns the sieve as a list where the index is the number\n where it was True.\n :param sieve:\n :return:\n \"\"\"\n return [i for i, v in enumerate(sieve) if v]\n\n\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\ndef is_pentagonal_number(n):\n \"\"\"\n Determines if n is a pentagonal number.\n :param n:\n :return: True if pentagonal.\n \"\"\"\n _, x = quadratic.solve(3, -1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. 
A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\ndef prime_counting_function(n):\n \"\"\"\n Return the number of primes below a given number.\n This is calculated with the proportionality which\n states that π(n) ~ n / log(n).\n :param n: Upper bound.\n :return: Estimate of the number of primes below the\n bound.\n \"\"\"\n return n / math.log(n)\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\n<function token>\n<function token>\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\ndef permutations(a):\n \"\"\"\n Generates all the permutations for a set.\n :param a:\n :return:\n \"\"\"\n n = len(a)\n return _heap_perm_(n, a)\n\n\n<function token>\n\n\ndef shift(a, n=1):\n \"\"\"\n Shift all the elements in the list by n.\n :param a:\n :param n:\n 
:return:\n \"\"\"\n return a[n:] + a[:n]\n\n\n<function token>\n\n\ndef is_pandigital_to_n(x, n, zero_based=False):\n \"\"\"\n Returns true if a list of numbers is pandigital from 1 up to n.\n :param x:\n :param n:\n :param zero_based:\n :return:\n \"\"\"\n return set(x) == set(range(0 if zero_based else 1, n + 1))\n\n\n<function token>\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\ndef palindrome_generator(lower, upper):\n \"\"\"\n Generates all palindromes between [lower, upper].\n https://stackoverflow.com/a/16344628\n :param lower:\n :param upper:\n :return:\n \"\"\"\n all_palindrome_numbers = _palindrome_number_generator()\n for p in all_palindrome_numbers:\n if p >= lower:\n break\n palindrome_list = [p]\n for p in all_palindrome_numbers:\n if p >= upper:\n break\n palindrome_list.append(p)\n return palindrome_list\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef _first_index_with_bigger_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is larger\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] >= P[i]:\n i -= 1\n return i\n\n\ndef _first_index_with_smaller_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is smaller\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] <= P[i]:\n i -= 1\n return i\n\n\ndef next_permutation(P):\n \"\"\"\n For any given permutation P, give the next permutation.\n If there is no next permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_bigger_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] <= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + 
prime_factorization(x, sieve)\n\n\n<function token>\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\ndef union_sets(S):\n \"\"\"\n Returns the union of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = set()\n for s in S:\n res |= s\n return res\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\ndef cumsum(L):\n \"\"\"\n Returns a list with the cumulative sum of a list L.\n :param S:\n :return:\n \"\"\"\n for i in range(1, len(L)):\n L[i] += L[i - 1]\n return L\n",
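The `fibonacci_n` / `fibonacci_n_inv` pair relies on Binet's closed form F(n) = (φⁿ − ψⁿ)/√5 and its logarithmic inverse. A small floating-point sketch of that relationship, with rounding in place of the stored floor division; the closed form is only exact up to roughly n = 70 in double precision, an assumption both versions share:

```python
import math

SQRT5 = math.sqrt(5)
PHI = (1 + SQRT5) / 2      # golden ratio
PSI = (1 - SQRT5) / 2      # its conjugate

def fibonacci_n(n):
    """n-th Fibonacci number via Binet: F(n) = (phi**n - psi**n) / sqrt(5)."""
    return round((PHI ** n - PSI ** n) / SQRT5)

def fibonacci_n_inv(f):
    """Index n with F(n) == f, from n ~ log_phi(f * sqrt(5) + 1/2); needs f >= 2."""
    if f < 2:
        raise ValueError("ambiguous below 2: F(1) == F(2) == 1")
    return round(math.log(f * SQRT5 + 0.5, PHI))

assert [fibonacci_n(i) for i in range(1, 9)] == [1, 1, 2, 3, 5, 8, 13, 21]
assert fibonacci_n_inv(fibonacci_n(20)) == 20
```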
"<import token>\n\n\ndef time_it(f, args=None):\n t0 = time.time()\n print('--- Timed execution for {} ----------------'.format(f.__name__))\n print('Running...')\n result = f(*args) if args is not None else f()\n print('Solution is {}'.format(result))\n t1 = time.time()\n print('Executed in {} seconds'.format(round(t1 - t0, 6)))\n\n\n<function token>\n\n\ndef is_number(n):\n \"\"\"\n Returns true if the number is an instance of an int.\n or a float.\n :param n: The number n to be tested.\n :return: True if n is int or float.\n \"\"\"\n return isinstance(n, (int, float))\n\n\ndef is_unique_string(s):\n \"\"\"\n Determines if a given string only consists of unique\n characters.\n :param s: The string to test.\n :return: True if the string only contains unique characters.\n \"\"\"\n return len(s) == len(set(s))\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\n<function token>\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\ndef sieve_to_list(sieve):\n \"\"\"\n Returns the sieve as a list where the index is the number\n where it was True.\n :param sieve:\n :return:\n \"\"\"\n return [i for i, v in enumerate(sieve) if v]\n\n\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\ndef is_pentagonal_number(n):\n \"\"\"\n Determines if n is a pentagonal number.\n :param n:\n :return: True if pentagonal.\n \"\"\"\n _, x = quadratic.solve(3, -1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. 
A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\ndef prime_counting_function(n):\n \"\"\"\n Return the number of primes below a given number.\n This is calculated with the proportionality which\n states that π(n) ~ n / log(n).\n :param n: Upper bound.\n :return: Estimate of the number of primes below the\n bound.\n \"\"\"\n return n / math.log(n)\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\n<function token>\n<function token>\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\ndef permutations(a):\n \"\"\"\n Generates all the permutations for a set.\n :param a:\n :return:\n \"\"\"\n n = len(a)\n return _heap_perm_(n, a)\n\n\n<function token>\n\n\ndef shift(a, n=1):\n \"\"\"\n Shift all the elements in the list by n.\n :param a:\n :param n:\n 
:return:\n \"\"\"\n return a[n:] + a[:n]\n\n\n<function token>\n\n\ndef is_pandigital_to_n(x, n, zero_based=False):\n \"\"\"\n Returns true if a list of numbers is pandigital from 1 up to n.\n :param x:\n :param n:\n :param zero_based:\n :return:\n \"\"\"\n return set(x) == set(range(0 if zero_based else 1, n + 1))\n\n\n<function token>\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\ndef palindrome_generator(lower, upper):\n \"\"\"\n Generates all palindromes between [lower, upper].\n https://stackoverflow.com/a/16344628\n :param lower:\n :param upper:\n :return:\n \"\"\"\n all_palindrome_numbers = _palindrome_number_generator()\n for p in all_palindrome_numbers:\n if p >= lower:\n break\n palindrome_list = [p]\n for p in all_palindrome_numbers:\n if p >= upper:\n break\n palindrome_list.append(p)\n return palindrome_list\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef _first_index_with_bigger_neighbour(P):\n \"\"\"\n Find the first index from the right whose element is larger\n than his neighbour.\n :param P:\n :return:\n \"\"\"\n i = len(P) - 1\n while i > 0 and P[i - 1] >= P[i]:\n i -= 1\n return i\n\n\n<function token>\n\n\ndef next_permutation(P):\n \"\"\"\n For any given permutation P, give the next permutation.\n If there is no next permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_bigger_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] <= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\n<function token>\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == 
set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\ndef union_sets(S):\n \"\"\"\n Returns the union of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = set()\n for s in S:\n res |= s\n return res\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\ndef cumsum(L):\n \"\"\"\n Returns a list with the cumulative sum of a list L.\n :param S:\n :return:\n \"\"\"\n for i in range(1, len(L)):\n L[i] += L[i - 1]\n return L\n",
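`lambertw` above inverts w·e^w = x with Newton's method; each step divides the residual w·e^w − x by the derivative (w + 1)·e^w. A hedged standalone version of the same iteration, where the starting guess and tolerance are assumptions adequate for x ≥ 0:

```python
import math

def lambertw(x, eps=1e-10):
    """Principal branch W(x) for x >= 0, solving w * exp(w) == x by Newton steps."""
    w = math.log(x + 1.0)                        # rough but safe initial guess
    while True:
        ew = math.exp(w)
        w_next = w - (w * ew - x) / ((w + 1.0) * ew)
        if abs(w_next - w) <= eps:
            return w_next
        w = w_next

w1 = lambertw(1.0)                               # the omega constant, ~0.567143
assert abs(w1 * math.exp(w1) - 1.0) < 1e-9
assert lambertw(0.0) == 0.0
```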
"<import token>\n\n\ndef time_it(f, args=None):\n t0 = time.time()\n print('--- Timed execution for {} ----------------'.format(f.__name__))\n print('Running...')\n result = f(*args) if args is not None else f()\n print('Solution is {}'.format(result))\n t1 = time.time()\n print('Executed in {} seconds'.format(round(t1 - t0, 6)))\n\n\n<function token>\n\n\ndef is_number(n):\n \"\"\"\n Returns true if the number is an instance of an int.\n or a float.\n :param n: The number n to be tested.\n :return: True if n is int or float.\n \"\"\"\n return isinstance(n, (int, float))\n\n\ndef is_unique_string(s):\n \"\"\"\n Determines if a given string only consists of unique\n characters.\n :param s: The string to test.\n :return: True if the string only contains unique characters.\n \"\"\"\n return len(s) == len(set(s))\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\n<function token>\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\ndef sieve_to_list(sieve):\n \"\"\"\n Returns the sieve as a list where the index is the number\n where it was True.\n :param sieve:\n :return:\n \"\"\"\n return [i for i, v in enumerate(sieve) if v]\n\n\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\ndef is_pentagonal_number(n):\n \"\"\"\n Determines if n is a pentagonal number.\n :param n:\n :return: True if pentagonal.\n \"\"\"\n _, x = quadratic.solve(3, -1, -2 * n)\n return is_number(x) and x.is_integer()\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. 
A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\ndef prime_counting_function(n):\n \"\"\"\n Return the number of primes below a given number.\n This is calculated with the proportionality which\n states that π(n) ~ n / log(n).\n :param n: Upper bound.\n :return: Estimate of the number of primes below the\n bound.\n \"\"\"\n return n / math.log(n)\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\n<function token>\n<function token>\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\ndef permutations(a):\n \"\"\"\n Generates all the permutations for a set.\n :param a:\n :return:\n \"\"\"\n n = len(a)\n return _heap_perm_(n, a)\n\n\n<function token>\n\n\ndef shift(a, n=1):\n \"\"\"\n Shift all the elements in the list by n.\n :param a:\n :param n:\n 
:return:\n \"\"\"\n return a[n:] + a[:n]\n\n\n<function token>\n\n\ndef is_pandigital_to_n(x, n, zero_based=False):\n \"\"\"\n Returns true if a list of numbers is pandigital from 1 up to n.\n :param x:\n :param n:\n :param zero_based:\n :return:\n \"\"\"\n return set(x) == set(range(0 if zero_based else 1, n + 1))\n\n\n<function token>\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\ndef palindrome_generator(lower, upper):\n \"\"\"\n Generates all palindromes between [lower, upper].\n https://stackoverflow.com/a/16344628\n :param lower:\n :param upper:\n :return:\n \"\"\"\n all_palindrome_numbers = _palindrome_number_generator()\n for p in all_palindrome_numbers:\n if p >= lower:\n break\n palindrome_list = [p]\n for p in all_palindrome_numbers:\n if p >= upper:\n break\n palindrome_list.append(p)\n return palindrome_list\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef next_permutation(P):\n \"\"\"\n For any given permutation P, give the next permutation.\n If there is no next permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_bigger_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] <= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\n<function token>\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n 
return res\n\n\ndef union_sets(S):\n \"\"\"\n Returns the union of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = set()\n for s in S:\n res |= s\n return res\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\ndef cumsum(L):\n \"\"\"\n Returns a list with the cumulative sum of a list L.\n :param S:\n :return:\n \"\"\"\n for i in range(1, len(L)):\n L[i] += L[i - 1]\n return L\n",
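The primitive-triplet generator in these steps walks Euclid's parametrisation: for coprime u < v that are not both odd, (v² − u², 2uv, u² + v²) is a primitive Pythagorean triple. A bounded sketch of the same enumeration, where the `limit` cut-off on the hypotenuse is an illustrative addition:

```python
from math import gcd

def primitive_triples(limit):
    """Yield every primitive (a, b, c) with a < b and hypotenuse c <= limit."""
    v = 2
    while v * v + 1 <= limit:                    # smallest c for this v is v**2 + 1
        for u in range(1, v):
            if (u + v) % 2 == 1 and gcd(u, v) == 1:
                a, b, c = v * v - u * u, 2 * u * v, u * u + v * v
                if c <= limit:
                    yield min(a, b), max(a, b), c
        v += 1

triples = set(primitive_triples(30))
assert (3, 4, 5) in triples and (5, 12, 13) in triples
assert (6, 8, 10) not in triples                 # multiples of a triple are excluded
```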
"<import token>\n\n\ndef time_it(f, args=None):\n t0 = time.time()\n print('--- Timed execution for {} ----------------'.format(f.__name__))\n print('Running...')\n result = f(*args) if args is not None else f()\n print('Solution is {}'.format(result))\n t1 = time.time()\n print('Executed in {} seconds'.format(round(t1 - t0, 6)))\n\n\n<function token>\n\n\ndef is_number(n):\n \"\"\"\n Returns true if the number is an instance of an int.\n or a float.\n :param n: The number n to be tested.\n :return: True if n is int or float.\n \"\"\"\n return isinstance(n, (int, float))\n\n\ndef is_unique_string(s):\n \"\"\"\n Determines if a given string only consists of unique\n characters.\n :param s: The string to test.\n :return: True if the string only contains unique characters.\n \"\"\"\n return len(s) == len(set(s))\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\n<function token>\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\ndef sieve_to_list(sieve):\n \"\"\"\n Returns the sieve as a list where the index is the number\n where it was True.\n :param sieve:\n :return:\n \"\"\"\n return [i for i, v in enumerate(sieve) if v]\n\n\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\n<function token>\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. 
A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\ndef prime_counting_function(n):\n \"\"\"\n Return the number of primes below a given number.\n This is calculated with the proportionality which\n states that π(n) ~ n / log(n).\n :param n: Upper bound.\n :return: Estimate of the number of primes below the\n bound.\n \"\"\"\n return n / math.log(n)\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\n<function token>\n<function token>\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\ndef permutations(a):\n \"\"\"\n Generates all the permutations for a set.\n :param a:\n :return:\n \"\"\"\n n = len(a)\n return _heap_perm_(n, a)\n\n\n<function token>\n\n\ndef shift(a, n=1):\n \"\"\"\n Shift all the elements in the list by n.\n :param a:\n :param n:\n 
:return:\n \"\"\"\n return a[n:] + a[:n]\n\n\n<function token>\n\n\ndef is_pandigital_to_n(x, n, zero_based=False):\n \"\"\"\n Returns true if a list of numbers is pandigital from 1 up to n.\n :param x:\n :param n:\n :param zero_based:\n :return:\n \"\"\"\n return set(x) == set(range(0 if zero_based else 1, n + 1))\n\n\n<function token>\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\ndef palindrome_generator(lower, upper):\n \"\"\"\n Generates all palindromes between [lower, upper].\n https://stackoverflow.com/a/16344628\n :param lower:\n :param upper:\n :return:\n \"\"\"\n all_palindrome_numbers = _palindrome_number_generator()\n for p in all_palindrome_numbers:\n if p >= lower:\n break\n palindrome_list = [p]\n for p in all_palindrome_numbers:\n if p >= upper:\n break\n palindrome_list.append(p)\n return palindrome_list\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef next_permutation(P):\n \"\"\"\n For any given permutation P, give the next permutation.\n If there is no next permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_bigger_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] <= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\n<function token>\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n 
return res\n\n\ndef union_sets(S):\n \"\"\"\n Returns the union of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = set()\n for s in S:\n res |= s\n return res\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\ndef cumsum(L):\n \"\"\"\n Returns a list with the cumulative sum of a list L.\n :param S:\n :return:\n \"\"\"\n for i in range(1, len(L)):\n L[i] += L[i - 1]\n return L\n",
"<import token>\n\n\ndef time_it(f, args=None):\n t0 = time.time()\n print('--- Timed execution for {} ----------------'.format(f.__name__))\n print('Running...')\n result = f(*args) if args is not None else f()\n print('Solution is {}'.format(result))\n t1 = time.time()\n print('Executed in {} seconds'.format(round(t1 - t0, 6)))\n\n\n<function token>\n\n\ndef is_number(n):\n \"\"\"\n Returns true if the number is an instance of an int.\n or a float.\n :param n: The number n to be tested.\n :return: True if n is int or float.\n \"\"\"\n return isinstance(n, (int, float))\n\n\ndef is_unique_string(s):\n \"\"\"\n Determines if a given string only consists of unique\n characters.\n :param s: The string to test.\n :return: True if the string only contains unique characters.\n \"\"\"\n return len(s) == len(set(s))\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\n<function token>\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\ndef sieve_to_list(sieve):\n \"\"\"\n Returns the sieve as a list where the index is the number\n where it was True.\n :param sieve:\n :return:\n \"\"\"\n return [i for i, v in enumerate(sieve) if v]\n\n\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\n<function token>\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. 
A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\ndef prime_counting_function(n):\n \"\"\"\n Return the number of primes below a given number.\n This is calculated with the proportionality which\n states that π(n) ~ n / log(n).\n :param n: Upper bound.\n :return: Estimate of the number of primes below the\n bound.\n \"\"\"\n return n / math.log(n)\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\n<function token>\n<function token>\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\n<function token>\n<function token>\n\n\ndef shift(a, n=1):\n \"\"\"\n Shift all the elements in the list by n.\n :param a:\n :param n:\n :return:\n \"\"\"\n return a[n:] + a[:n]\n\n\n<function token>\n\n\ndef is_pandigital_to_n(x, n, zero_based=False):\n \"\"\"\n Returns 
true if a list of numbers is pandigital from 1 up to n.\n :param x:\n :param n:\n :param zero_based:\n :return:\n \"\"\"\n return set(x) == set(range(0 if zero_based else 1, n + 1))\n\n\n<function token>\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\ndef palindrome_generator(lower, upper):\n \"\"\"\n Generates all palindromes between [lower, upper].\n https://stackoverflow.com/a/16344628\n :param lower:\n :param upper:\n :return:\n \"\"\"\n all_palindrome_numbers = _palindrome_number_generator()\n for p in all_palindrome_numbers:\n if p >= lower:\n break\n palindrome_list = [p]\n for p in all_palindrome_numbers:\n if p >= upper:\n break\n palindrome_list.append(p)\n return palindrome_list\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef next_permutation(P):\n \"\"\"\n For any given permutation P, give the next permutation.\n If there is no next permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_bigger_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] <= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\n<function token>\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\ndef union_sets(S):\n \"\"\"\n Returns the union of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = set()\n for s 
in S:\n res |= s\n return res\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\ndef cumsum(L):\n \"\"\"\n Returns a list with the cumulative sum of a list L.\n :param S:\n :return:\n \"\"\"\n for i in range(1, len(L)):\n L[i] += L[i - 1]\n return L\n",
"<import token>\n\n\ndef time_it(f, args=None):\n t0 = time.time()\n print('--- Timed execution for {} ----------------'.format(f.__name__))\n print('Running...')\n result = f(*args) if args is not None else f()\n print('Solution is {}'.format(result))\n t1 = time.time()\n print('Executed in {} seconds'.format(round(t1 - t0, 6)))\n\n\n<function token>\n\n\ndef is_number(n):\n \"\"\"\n Returns true if the number is an instance of an int.\n or a float.\n :param n: The number n to be tested.\n :return: True if n is int or float.\n \"\"\"\n return isinstance(n, (int, float))\n\n\ndef is_unique_string(s):\n \"\"\"\n Determines if a given string only consists of unique\n characters.\n :param s: The string to test.\n :return: True if the string only contains unique characters.\n \"\"\"\n return len(s) == len(set(s))\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\n<function token>\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\ndef sieve_to_list(sieve):\n \"\"\"\n Returns the sieve as a list where the index is the number\n where it was True.\n :param sieve:\n :return:\n \"\"\"\n return [i for i, v in enumerate(sieve) if v]\n\n\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\n<function token>\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. 
A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\ndef prime_counting_function(n):\n \"\"\"\n Return the number of primes below a given number.\n This is calculated with the proportionality which\n states that π(n) ~ n / log(n).\n :param n: Upper bound.\n :return: Estimate of the number of primes below the\n bound.\n \"\"\"\n return n / math.log(n)\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\n<function token>\n<function token>\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\n<function token>\n<function token>\n\n\ndef shift(a, n=1):\n \"\"\"\n Shift all the elements in the list by n.\n :param a:\n :param n:\n :return:\n \"\"\"\n return a[n:] + a[:n]\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef 
_palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\ndef palindrome_generator(lower, upper):\n \"\"\"\n Generates all palindromes between [lower, upper].\n https://stackoverflow.com/a/16344628\n :param lower:\n :param upper:\n :return:\n \"\"\"\n all_palindrome_numbers = _palindrome_number_generator()\n for p in all_palindrome_numbers:\n if p >= lower:\n break\n palindrome_list = [p]\n for p in all_palindrome_numbers:\n if p >= upper:\n break\n palindrome_list.append(p)\n return palindrome_list\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef next_permutation(P):\n \"\"\"\n For any given permutation P, give the next permutation.\n If there is no next permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_bigger_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] <= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\n<function token>\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\ndef union_sets(S):\n \"\"\"\n Returns the union of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = set()\n for s in S:\n res |= s\n return res\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\ndef 
cumsum(L):\n \"\"\"\n Returns a list with the cumulative sum of a list L.\n :param S:\n :return:\n \"\"\"\n for i in range(1, len(L)):\n L[i] += L[i - 1]\n return L\n",
"<import token>\n\n\ndef time_it(f, args=None):\n t0 = time.time()\n print('--- Timed execution for {} ----------------'.format(f.__name__))\n print('Running...')\n result = f(*args) if args is not None else f()\n print('Solution is {}'.format(result))\n t1 = time.time()\n print('Executed in {} seconds'.format(round(t1 - t0, 6)))\n\n\n<function token>\n\n\ndef is_number(n):\n \"\"\"\n Returns true if the number is an instance of an int.\n or a float.\n :param n: The number n to be tested.\n :return: True if n is int or float.\n \"\"\"\n return isinstance(n, (int, float))\n\n\ndef is_unique_string(s):\n \"\"\"\n Determines if a given string only consists of unique\n characters.\n :param s: The string to test.\n :return: True if the string only contains unique characters.\n \"\"\"\n return len(s) == len(set(s))\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\n<function token>\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\ndef sieve_to_list(sieve):\n \"\"\"\n Returns the sieve as a list where the index is the number\n where it was True.\n :param sieve:\n :return:\n \"\"\"\n return [i for i, v in enumerate(sieve) if v]\n\n\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\n<function token>\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. 
A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\ndef prime_counting_function(n):\n \"\"\"\n Return the number of primes below a given number.\n This is calculated with the proportionality which\n states that π(n) ~ n / log(n).\n :param n: Upper bound.\n :return: Estimate of the number of primes below the\n bound.\n \"\"\"\n return n / math.log(n)\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\n<function token>\n<function token>\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n 
for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\ndef palindrome_generator(lower, upper):\n \"\"\"\n Generates all palindromes between [lower, upper].\n https://stackoverflow.com/a/16344628\n :param lower:\n :param upper:\n :return:\n \"\"\"\n all_palindrome_numbers = _palindrome_number_generator()\n for p in all_palindrome_numbers:\n if p >= lower:\n break\n palindrome_list = [p]\n for p in all_palindrome_numbers:\n if p >= upper:\n break\n palindrome_list.append(p)\n return palindrome_list\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef next_permutation(P):\n \"\"\"\n For any given permutation P, give the next permutation.\n If there is no next permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_bigger_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] <= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\n<function token>\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\ndef union_sets(S):\n \"\"\"\n Returns the union of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = set()\n for s in S:\n res |= s\n return res\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\ndef cumsum(L):\n \"\"\"\n Returns a list with the cumulative sum of a list L.\n :param S:\n :return:\n \"\"\"\n for i in range(1, len(L)):\n L[i] += L[i - 1]\n return L\n",
"<import token>\n\n\ndef time_it(f, args=None):\n t0 = time.time()\n print('--- Timed execution for {} ----------------'.format(f.__name__))\n print('Running...')\n result = f(*args) if args is not None else f()\n print('Solution is {}'.format(result))\n t1 = time.time()\n print('Executed in {} seconds'.format(round(t1 - t0, 6)))\n\n\n<function token>\n\n\ndef is_number(n):\n \"\"\"\n Returns true if the number is an instance of an int.\n or a float.\n :param n: The number n to be tested.\n :return: True if n is int or float.\n \"\"\"\n return isinstance(n, (int, float))\n\n\ndef is_unique_string(s):\n \"\"\"\n Determines if a given string only consists of unique\n characters.\n :param s: The string to test.\n :return: True if the string only contains unique characters.\n \"\"\"\n return len(s) == len(set(s))\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\n<function token>\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\ndef sieve_to_list(sieve):\n \"\"\"\n Returns the sieve as a list where the index is the number\n where it was True.\n :param sieve:\n :return:\n \"\"\"\n return [i for i, v in enumerate(sieve) if v]\n\n\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\n<function token>\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. 
A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\ndef prime_counting_function(n):\n \"\"\"\n Return the number of primes below a given number.\n This is calculated with the proportionality which\n states that π(n) ~ n / log(n).\n :param n: Upper bound.\n :return: Estimate of the number of primes below the\n bound.\n \"\"\"\n return n / math.log(n)\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\n<function token>\n<function token>\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n 
for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\ndef palindrome_generator(lower, upper):\n \"\"\"\n Generates all palindromes between [lower, upper].\n https://stackoverflow.com/a/16344628\n :param lower:\n :param upper:\n :return:\n \"\"\"\n all_palindrome_numbers = _palindrome_number_generator()\n for p in all_palindrome_numbers:\n if p >= lower:\n break\n palindrome_list = [p]\n for p in all_palindrome_numbers:\n if p >= upper:\n break\n palindrome_list.append(p)\n return palindrome_list\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\n<function token>\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\ndef union_sets(S):\n \"\"\"\n Returns the union of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = set()\n for s in S:\n res |= s\n return res\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\ndef cumsum(L):\n \"\"\"\n Returns a list with the cumulative sum of a list L.\n :param S:\n :return:\n \"\"\"\n for i in range(1, len(L)):\n L[i] += L[i - 1]\n return L\n",
"<import token>\n\n\ndef time_it(f, args=None):\n t0 = time.time()\n print('--- Timed execution for {} ----------------'.format(f.__name__))\n print('Running...')\n result = f(*args) if args is not None else f()\n print('Solution is {}'.format(result))\n t1 = time.time()\n print('Executed in {} seconds'.format(round(t1 - t0, 6)))\n\n\n<function token>\n\n\ndef is_number(n):\n \"\"\"\n Returns true if the number is an instance of an int.\n or a float.\n :param n: The number n to be tested.\n :return: True if n is int or float.\n \"\"\"\n return isinstance(n, (int, float))\n\n\ndef is_unique_string(s):\n \"\"\"\n Determines if a given string only consists of unique\n characters.\n :param s: The string to test.\n :return: True if the string only contains unique characters.\n \"\"\"\n return len(s) == len(set(s))\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\n<function token>\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\ndef sieve_to_list(sieve):\n \"\"\"\n Returns the sieve as a list where the index is the number\n where it was True.\n :param sieve:\n :return:\n \"\"\"\n return [i for i, v in enumerate(sieve) if v]\n\n\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\n<function token>\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. 
A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\ndef prime_counting_function(n):\n \"\"\"\n Return the number of primes below a given number.\n This is calculated with the proportionality which\n states that π(n) ~ n / log(n).\n :param n: Upper bound.\n :return: Estimate of the number of primes below the\n bound.\n \"\"\"\n return n / math.log(n)\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\n<function token>\n<function token>\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n 
for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\ndef palindrome_generator(lower, upper):\n \"\"\"\n Generates all palindromes between [lower, upper].\n https://stackoverflow.com/a/16344628\n :param lower:\n :param upper:\n :return:\n \"\"\"\n all_palindrome_numbers = _palindrome_number_generator()\n for p in all_palindrome_numbers:\n if p >= lower:\n break\n palindrome_list = [p]\n for p in all_palindrome_numbers:\n if p >= upper:\n break\n palindrome_list.append(p)\n return palindrome_list\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\n<function token>\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\ndef union_sets(S):\n \"\"\"\n Returns the union of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = set()\n for s in S:\n res |= s\n return res\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\n<function token>\n",
"<import token>\n\n\ndef time_it(f, args=None):\n t0 = time.time()\n print('--- Timed execution for {} ----------------'.format(f.__name__))\n print('Running...')\n result = f(*args) if args is not None else f()\n print('Solution is {}'.format(result))\n t1 = time.time()\n print('Executed in {} seconds'.format(round(t1 - t0, 6)))\n\n\n<function token>\n\n\ndef is_number(n):\n \"\"\"\n Returns true if the number is an instance of an int.\n or a float.\n :param n: The number n to be tested.\n :return: True if n is int or float.\n \"\"\"\n return isinstance(n, (int, float))\n\n\n<function token>\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\n<function token>\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\ndef sieve_to_list(sieve):\n \"\"\"\n Returns the sieve as a list where the index is the number\n where it was True.\n :param sieve:\n :return:\n \"\"\"\n return [i for i, v in enumerate(sieve) if v]\n\n\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\n<function token>\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. 
A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\ndef prime_counting_function(n):\n \"\"\"\n Return the number of primes below a given number.\n This is calculated with the proportionality which\n states that π(n) ~ n / log(n).\n :param n: Upper bound.\n :return: Estimate of the number of primes below the\n bound.\n \"\"\"\n return n / math.log(n)\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\n<function token>\n<function token>\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\ndef palindrome_generator(lower, upper):\n \"\"\"\n Generates all palindromes between [lower, upper].\n 
https://stackoverflow.com/a/16344628\n :param lower:\n :param upper:\n :return:\n \"\"\"\n all_palindrome_numbers = _palindrome_number_generator()\n for p in all_palindrome_numbers:\n if p >= lower:\n break\n palindrome_list = [p]\n for p in all_palindrome_numbers:\n if p >= upper:\n break\n palindrome_list.append(p)\n return palindrome_list\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\n<function token>\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\ndef union_sets(S):\n \"\"\"\n Returns the union of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = set()\n for s in S:\n res |= s\n return res\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\n<function token>\n",
"<import token>\n\n\ndef time_it(f, args=None):\n t0 = time.time()\n print('--- Timed execution for {} ----------------'.format(f.__name__))\n print('Running...')\n result = f(*args) if args is not None else f()\n print('Solution is {}'.format(result))\n t1 = time.time()\n print('Executed in {} seconds'.format(round(t1 - t0, 6)))\n\n\n<function token>\n\n\ndef is_number(n):\n \"\"\"\n Returns true if the number is an instance of an int.\n or a float.\n :param n: The number n to be tested.\n :return: True if n is int or float.\n \"\"\"\n return isinstance(n, (int, float))\n\n\n<function token>\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\n<function token>\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\ndef sieve_to_list(sieve):\n \"\"\"\n Returns the sieve as a list where the index is the number\n where it was True.\n :param sieve:\n :return:\n \"\"\"\n return [i for i, v in enumerate(sieve) if v]\n\n\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\n<function token>\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. 
A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\ndef prime_counting_function(n):\n \"\"\"\n Return the number of primes below a given number.\n This is calculated with the proportionality which\n states that π(n) ~ n / log(n).\n :param n: Upper bound.\n :return: Estimate of the number of primes below the\n bound.\n \"\"\"\n return n / math.log(n)\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\n<function token>\n<function token>\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\ndef palindrome_generator(lower, upper):\n \"\"\"\n Generates all palindromes between [lower, upper].\n 
https://stackoverflow.com/a/16344628\n :param lower:\n :param upper:\n :return:\n \"\"\"\n all_palindrome_numbers = _palindrome_number_generator()\n for p in all_palindrome_numbers:\n if p >= lower:\n break\n palindrome_list = [p]\n for p in all_palindrome_numbers:\n if p >= upper:\n break\n palindrome_list.append(p)\n return palindrome_list\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\n<function token>\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\n<function token>\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\n<function token>\n",
"<import token>\n\n\ndef time_it(f, args=None):\n t0 = time.time()\n print('--- Timed execution for {} ----------------'.format(f.__name__))\n print('Running...')\n result = f(*args) if args is not None else f()\n print('Solution is {}'.format(result))\n t1 = time.time()\n print('Executed in {} seconds'.format(round(t1 - t0, 6)))\n\n\n<function token>\n\n\ndef is_number(n):\n \"\"\"\n Returns true if the number is an instance of an int.\n or a float.\n :param n: The number n to be tested.\n :return: True if n is int or float.\n \"\"\"\n return isinstance(n, (int, float))\n\n\n<function token>\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\n<function token>\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\n<function token>\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. 
A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\ndef prime_counting_function(n):\n \"\"\"\n Return the number of primes below a given number.\n This is calculated with the proportionality which\n states that π(n) ~ n / log(n).\n :param n: Upper bound.\n :return: Estimate of the number of primes below the\n bound.\n \"\"\"\n return n / math.log(n)\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\n<function token>\n<function token>\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\ndef palindrome_generator(lower, upper):\n \"\"\"\n Generates all palindromes between [lower, upper].\n 
https://stackoverflow.com/a/16344628\n :param lower:\n :param upper:\n :return:\n \"\"\"\n all_palindrome_numbers = _palindrome_number_generator()\n for p in all_palindrome_numbers:\n if p >= lower:\n break\n palindrome_list = [p]\n for p in all_palindrome_numbers:\n if p >= upper:\n break\n palindrome_list.append(p)\n return palindrome_list\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\n<function token>\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\n<function token>\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\n<function token>\n",
"<import token>\n\n\ndef time_it(f, args=None):\n t0 = time.time()\n print('--- Timed execution for {} ----------------'.format(f.__name__))\n print('Running...')\n result = f(*args) if args is not None else f()\n print('Solution is {}'.format(result))\n t1 = time.time()\n print('Executed in {} seconds'.format(round(t1 - t0, 6)))\n\n\n<function token>\n\n\ndef is_number(n):\n \"\"\"\n Returns true if the number is an instance of an int.\n or a float.\n :param n: The number n to be tested.\n :return: True if n is int or float.\n \"\"\"\n return isinstance(n, (int, float))\n\n\n<function token>\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\n<function token>\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\n<function token>\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. 
A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\n<function token>\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\n<function token>\n<function token>\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\ndef palindrome_generator(lower, upper):\n \"\"\"\n Generates all palindromes between [lower, upper].\n https://stackoverflow.com/a/16344628\n :param lower:\n :param upper:\n :return:\n \"\"\"\n all_palindrome_numbers = _palindrome_number_generator()\n for p in all_palindrome_numbers:\n if p >= lower:\n break\n palindrome_list = [p]\n for p in all_palindrome_numbers:\n if p >= upper:\n break\n 
palindrome_list.append(p)\n return palindrome_list\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\n<function token>\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\n<function token>\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n\n\ndef is_number(n):\n \"\"\"\n Returns true if the number is an instance of an int.\n or a float.\n :param n: The number n to be tested.\n :return: True if n is int or float.\n \"\"\"\n return isinstance(n, (int, float))\n\n\n<function token>\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\n<function token>\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\n<function token>\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. 
A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\n<function token>\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\n<function token>\n<function token>\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\ndef palindrome_generator(lower, upper):\n \"\"\"\n Generates all palindromes between [lower, upper].\n https://stackoverflow.com/a/16344628\n :param lower:\n :param upper:\n :return:\n \"\"\"\n all_palindrome_numbers = _palindrome_number_generator()\n for p in all_palindrome_numbers:\n if p >= lower:\n break\n palindrome_list = [p]\n for p in all_palindrome_numbers:\n if p >= upper:\n break\n 
palindrome_list.append(p)\n return palindrome_list\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\n<function token>\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\n<function token>\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n\n\ndef is_number(n):\n \"\"\"\n Returns true if the number is an instance of an int.\n or a float.\n :param n: The number n to be tested.\n :return: True if n is int or float.\n \"\"\"\n return isinstance(n, (int, float))\n\n\n<function token>\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\n<function token>\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\n<function token>\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. 
A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\n<function token>\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\n<function token>\n<function token>\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\n<function token>\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. 
Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\n<function token>\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\n<function token>\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\n<function token>\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\ndef hexagonal_number(n):\n \"\"\"\n Calculate the nth hexagonal number.\n :param n: Hn\n :return: Hexagonal number\n \"\"\"\n return n * (2 * n - 1)\n\n\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\n<function token>\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. 
A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\n<function token>\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\n<function token>\n<function token>\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\n<function token>\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. 
Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\n<function token>\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\n<function token>\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\n<function token>\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\n<function token>\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\n<function token>\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. 
A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n\n\ndef primitive_pythagorean_triplet_generator(n=math.inf):\n \"\"\"\n Generates n primitive pythagorean triplets.\n :param n:\n :return:\n \"\"\"\n v = 2\n u = 1\n while n > 0:\n if not (is_odd(v) and is_odd(u)) and gcd(u, v) == 1:\n a = v * v - u * u\n b = 2 * v * u\n c = u * u + v * v\n if a > b:\n a, b = b, a\n n -= 1\n yield a, b, c\n u += 1\n if u >= v:\n v += 1\n u = 1\n\n\n<function token>\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\n<function token>\n<function token>\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\n<function token>\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. 
Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\n<function token>\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\n<function token>\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\n<function token>\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\n<function token>\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\n<function token>\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\ndef is_perfect_number(x):\n \"\"\"\n Test if a number is a perfect number. A number is perfect\n if the sum of the proper divisors is equal to itself.\n :param x: number to test.\n :return: True if it is a perfect number.\n \"\"\"\n return sum(proper_divisors(x)) == x\n\n\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. 
A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\n<function token>\n<function token>\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\n<function token>\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. 
Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\n<function token>\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\n<function token>\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef divisors(x):\n \"\"\"\n Returns all the divisors for a number x, including x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]\n :param x: number >= 1.\n :return: the divisors including 1 and x.\n \"\"\"\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x // i)\n return sorted(distinct(result))\n\n\n<function token>\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\n<function token>\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\n<function token>\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\n<function token>\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. 
A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\n<function token>\n<function token>\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\n<function token>\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. 
Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\n<function token>\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\n<function token>\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\n<function token>\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\n<function token>\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\n<function token>\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. 
A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\ndef product(numbers):\n \"\"\"\n Returns the product of a list of numbers.\n :param numbers:\n :return:\n \"\"\"\n p = 1\n for x in numbers:\n p *= x\n return p\n\n\n<function token>\n<function token>\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\n<function token>\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. 
Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\n<function token>\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\n<function token>\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\n<function token>\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\n<function token>\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\ndef restricted_divisors(x):\n \"\"\"\n Returns all the restricted divisors for a number x, excluding 1 and x.\n e.g divisors(1001) = [7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding 1 and itself.\n \"\"\"\n return divisors(x)[1:-1]\n\n\n<function token>\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. 
A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\n<function token>\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. 
Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\n<function token>\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\n<function token>\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\n<function token>\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\n<function token>\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 
2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\n<function token>\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\n<function token>\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\n<function token>\n\n\ndef intersect_sets(S):\n \"\"\"\n Returns the intersection of all sets in S.\n :param S:\n :return:\n \"\"\"\n res = S[0]\n for s in S:\n res &= s\n return res\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\n<function token>\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\n<function token>\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 
2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef is_odd(n):\n \"\"\"\n Returns true if a number is odd.\n :param n:\n :return:\n \"\"\"\n return n & 1\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\n<function token>\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\n<function token>\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\n<function token>\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\n<function token>\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 
2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\n<function token>\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\n<function token>\n\n\ndef is_permutation3(A, B, C):\n \"\"\"\n Returns true if A, B and C are permutations of each other.\n :param A:\n :param B:\n :param C:\n :return:\n \"\"\"\n return set(A) == set(B) == set(C)\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\n<function token>\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\n<function token>\n\n\ndef proper_divisors(x):\n \"\"\"\n Returns all the proper divisors for a number x, excluding x.\n e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]\n :param x: number >= 1.\n :return: the divisors excluding itself.\n \"\"\"\n return divisors(x)[:-1]\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 
2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\n<function token>\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\n<function token>\n<function token>\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\n<function token>\n<function token>\n<function token>\n",
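For reference, `fibonacci_n` and `fibonacci_n_inv` above both build on Binet's closed form; a compact sketch of the same round trip, using a simplified inverse (log base phi of F*sqrt(5) + 1/2) instead of the row's 5*x**2 +/- 4 branch:

import math

def fib(n):
    # Binet's formula: F(n) = (phi**n - psi**n) / sqrt(5)
    sqrt5 = math.sqrt(5)
    phi = (1 + sqrt5) / 2
    psi = (1 - sqrt5) / 2
    return round((phi ** n - psi ** n) / sqrt5)

def fib_inv(f):
    # Index n with F(n) == f; valid for f >= 2
    sqrt5 = math.sqrt(5)
    phi = (1 + sqrt5) / 2
    return round(math.log(f * sqrt5 + 0.5, phi))

print([fib(n) for n in range(1, 11)])  # [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
print(fib_inv(55))                     # 10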
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\n<function token>\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\ndef prime_counting_function_inv(y):\n \"\"\"\n Returns the upper bound for a given number of primes.\n :param y: How many primes you want.\n :return: Upper bound.\n \"\"\"\n x = 2\n while x / math.log(x) < y:\n x += 1\n return x\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function 
token>\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\n<function token>\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\n<function token>\n<function token>\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\n<function token>\n<function token>\n<function token>\n",
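For reference, `prime_counting_function_inv` above linearly searches for the first x whose x / ln(x) value reaches the requested prime count, per the pi(x) ~ x / ln(x) approximation; restated as a standalone snippet:

import math

def prime_counting_function_inv(y):
    # Smallest x >= 2 with x / ln(x) >= y; a rough bound on where the y-th prime lives
    x = 2
    while x / math.log(x) < y:
        x += 1
    return x

print(prime_counting_function_inv(10))  # 36, since 36 / ln(36) is just above 10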
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\n<function token>\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield 
int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\n<function token>\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\ndef prime_factorization(x, sieve=None):\n \"\"\"\n Factorizes a number into the prime factorization.\n Requires a sieve to be quick, if sieve is not specified\n it will generate one itself.\n :param x:\n :param sieve:\n :return:\n \"\"\"\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)\n\n\n<function token>\n<function token>\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\n<function token>\n<function token>\n<function token>\n",
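For reference, `lambertw` above applies Newton's method to w * exp(w) = x; a self-contained restatement with a quick check against the omega constant W(1) ~ 0.567143:

import math

def lambertw(x, eps=1e-8):
    # Newton iteration on f(w) = w * exp(w) - x, with f'(w) = exp(w) * (w + 1)
    w = x
    while True:
        ew = math.exp(w)
        w_next = w - (w * ew - x) / (w * ew + ew)
        if abs(w - w_next) <= eps:
            return w_next
        w = w_next

w = lambertw(1.0)
print(round(w, 6), round(w * math.exp(w), 6))  # 0.567143 1.0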
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\n<function token>\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef lambertw(x):\n \"\"\"\n Lambert W function with Newton's Method.\n :param x:\n :return:\n \"\"\"\n eps = 1e-08\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield 
int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\n<function token>\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\n<function token>\n<function token>\n<function token>\n",
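For reference, `_palindrome_number_generator` above yields palindromic integers in increasing order by mirroring digit prefixes block by block; the same generator made directly runnable:

import itertools

def palindromes():
    # 0 first, then each block of prefixes mirrored to odd-length, then even-length palindromes
    yield 0
    lower = 1
    while True:
        higher = lower * 10
        for i in range(lower, higher):
            s = str(i)
            yield int(s + s[-2::-1])   # odd number of digits
        for i in range(lower, higher):
            s = str(i)
            yield int(s + s[::-1])     # even number of digits
        lower = higher

print(list(itertools.islice(palindromes(), 15)))
# [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 22, 33, 44, 55]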
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\n<function token>\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\ndef fibonacci_n_inv(x):\n \"\"\"\n Calculate the n for Fn for a Fibonacci number.\n :param x: Fibonacci number.\n :return: The position of the Fibonacci number (Fn)\n \"\"\"\n if x < 2:\n raise ValueError('Function approximation is wrong when x < 2.')\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n rad = 5 * x ** 2\n p = math.sqrt(5 * x ** 2 + 4)\n n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) if p.is_integer(\n ) else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)\n return round(n)\n\n\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\n<function token>\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into 
lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\n<function token>\n<function token>\n<function token>\n",
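For reference, `previous_permutation` above relies on `_first_index_with_smaller_neighbour`, which is redacted to a token in every step shown here; the sketch below fills that gap with an assumed implementation (largest i with P[i-1] > P[i]), so it approximates the row's code rather than reproducing it:

def _first_index_with_smaller_neighbour(P):
    # Assumed helper: largest i with P[i - 1] > P[i]; 0 if P is already the lowest permutation
    i = len(P) - 1
    while i > 0 and P[i - 1] <= P[i]:
        i -= 1
    return i

def previous_permutation(P):
    # In-place previous lexicographic permutation, mirroring the logic shown above
    n = len(P)
    i = _first_index_with_smaller_neighbour(P)
    if i == 0:
        return P
    j = n - 1
    while P[j] >= P[i - 1]:
        j -= 1
    P[i - 1], P[j] = P[j], P[i - 1]
    j = n - 1
    while i < j:
        P[i], P[j] = P[j], P[i]
        i += 1
        j -= 1
    return P

print(previous_permutation([2, 1, 3]))  # [1, 3, 2]
print(previous_permutation([1, 2, 3]))  # [1, 2, 3] (already the first permutation)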
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\n<function token>\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\n<function token>\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\n<function token>\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. 
Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef equal_sets(S):\n \"\"\"\n Returns true if all the sets s in S are equal\n to each other.\n :param S:\n :return:\n \"\"\"\n s0 = S[0]\n res = True\n for i in range(1, len(S)):\n res = res and s0 == S[i]\n return res\n\n\n<function token>\n<function token>\n<function token>\n",
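For reference, `string_split_2d` above is a two-level split from delimited text to nested lists; restated with a small usage example:

def string_split_2d(data, field_delimiter=',', line_delimiter='\n'):
    # Split "1,2\n3,4" style text into [['1', '2'], ['3', '4']]
    return [line.split(field_delimiter) for line in data.split(line_delimiter)]

print(string_split_2d('1,2\n3,4\n5,6'))  # [['1', '2'], ['3', '4'], ['5', '6']]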
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\n<function token>\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n\n\ndef fibonacci_n(n):\n \"\"\"\n Calculate the nth Fibonacci number (Fn).\n :param n: which number to calculate.\n :return: The nth Fibonacci number.\n \"\"\"\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi ** n - psi ** n) // sqrt5\n\n\n<function token>\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\n<function token>\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. 
Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
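For reference, `pentagonal_number` is visible above while `triangle_number` (used by `triangle_number_sieve`) is redacted; the triangle formula below is the conventional n*(n+1)/2 and is an assumption on my part, with integer division used so the samples print as ints:

def triangle_number(n):
    # Assumed: the standard n-th triangle number
    return n * (n + 1) // 2

def pentagonal_number(n):
    return n * (3 * n - 1) // 2

print([triangle_number(n) for n in range(1, 6)])    # [1, 3, 6, 10, 15]
print([pentagonal_number(n) for n in range(1, 6)])  # [1, 5, 12, 22, 35]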
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\n<function token>\n<function token>\n\n\ndef pentagonal_number(n):\n return n * (3 * n - 1) / 2\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\n<function token>\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. 
Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
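For reference, the `lcm` helper above depends on a `gcd` that is redacted in these steps; the sketch assumes the standard-library `math.gcd` in its place:

from math import gcd

def lcm(a, b):
    # LCM(a, b) = (a * b) // GCD(a, b)
    return a * b // gcd(a, b)

print(lcm(21, 6))   # 42
print(lcm(4, 10))   # 20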
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef _palindrome_number_generator():\n \"\"\"\n https://stackoverflow.com/a/16344628\n :return:\n \"\"\"\n yield 0\n lower = 1\n while True:\n higher = lower * 10\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[-2::-1])\n for i in range(lower, higher):\n s = str(i)\n yield int(s + s[::-1])\n lower = higher\n\n\n<function token>\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. 
Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
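For reference, `is_deficient_number` above calls a `proper_divisors` that is redacted at this step; the naive divisor scan below is an assumed stand-in:

def proper_divisors(x):
    # Assumed stand-in: all divisors of x strictly below x
    return [d for d in range(1, x) if x % d == 0]

def is_deficient_number(x):
    return sum(proper_divisors(x)) < x

print([n for n in range(2, 13) if is_deficient_number(n)])  # [2, 3, 4, 5, 7, 8, 9, 10, 11]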
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef prime_sieve(n):\n \"\"\"\n Generates an array which determines if the index\n of the array is a prime number. To see if 997 is\n a prime number: sieve[997] == True.\n :param n: Upper limit of numbers.\n :return: List with boolean values.\n \"\"\"\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i * j < n:\n primes[i * j] = False\n return primes\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\ndef digits(x):\n \"\"\"\n Returns the digits of a number in a list.\n :param x: The number to sum the digits of.\n :return: Sum of the number x.\n \"\"\"\n return [int(d) for d in str(x)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef is_deficient_number(x):\n \"\"\"\n Test if a number is a deficient number. A number is deficient\n if the sum of the proper divisors is less than the number\n itself.\n :param x: number to test.\n :return: True if it is a deficient number.\n \"\"\"\n return sum(proper_divisors(x)) < x\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef triangle_number_sieve(n):\n \"\"\"\n Generates a sieve which can be used to tell if a number\n is a triangle number.\n :param n: Up to which n.\n :return: Sieve with boolean values, sieve[3] = True.\n \"\"\"\n triangle_numbers = [False] * (n + 1)\n tn = i = 1\n while tn < n:\n triangle_numbers[triangle_number(i)] = True\n i += 1\n tn = triangle_number(i)\n return triangle_numbers\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef previous_permutation(P):\n \"\"\"\n For any given permutation P, give the previous permutation.\n If there is no pervious permutation, P will be returned.\n :param P:\n :return:\n \"\"\"\n n = len(P)\n i = _first_index_with_smaller_neighbour(P)\n if i == 0:\n return P\n j = n - 1\n while P[j] >= P[i - 1]:\n j -= 1\n P[i - 1], P[j] = P[j], P[i - 1]\n j = n - 1\n while i < j:\n P[i], P[j] = P[j], P[i]\n i += 1\n j -= 1\n return P\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n \"\"\"\n Split a string of 2D data into lists. Example of the data\n 1,2\n 3,4\n 5,6\n to:\n [[1,2],[3,4],[5,6]]\n :param data:\n :param field_delimiter: delimiter used between seperate fields, default: ,\n :param line_delimiter: delimiter used between lines, default: \n\n :return: 2D list\n \"\"\"\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef lcm(a, b):\n \"\"\"\n Calculate the least common multiple (LCM) with the GCD\n algorithm using: LCM(a,b) = (a*b)/GCD(a,b).\n :param a:\n :param b:\n :return:\n \"\"\"\n return a * b // gcd(a, b)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
98,531 |
210e23ae130a8684c10dfaf03b4aad7487144296
|
'''ce2p_resnet101os8_lip'''
import copy
from .base_cfg import SEGMENTOR_CFG
from .._base_ import DATASET_CFG_LIP_473x473, DATALOADER_CFG_BS32
# deepcopy
SEGMENTOR_CFG = copy.deepcopy(SEGMENTOR_CFG)
# modify dataset config
SEGMENTOR_CFG['dataset'] = DATASET_CFG_LIP_473x473.copy()
# modify dataloader config
SEGMENTOR_CFG['dataloader'] = DATALOADER_CFG_BS32.copy()
# modify scheduler config
SEGMENTOR_CFG['scheduler']['max_epochs'] = 150
# modify other segmentor configs
SEGMENTOR_CFG['num_classes'] = 20
SEGMENTOR_CFG['work_dir'] = 'ce2p_resnet101os8_lip'
SEGMENTOR_CFG['logfilepath'] = 'ce2p_resnet101os8_lip/ce2p_resnet101os8_lip.log'
SEGMENTOR_CFG['resultsavepath'] = 'ce2p_resnet101os8_lip/ce2p_resnet101os8_lip_results.pkl'
|
[
"'''ce2p_resnet101os8_lip'''\nimport copy\nfrom .base_cfg import SEGMENTOR_CFG\nfrom .._base_ import DATASET_CFG_LIP_473x473, DATALOADER_CFG_BS32\n\n\n# deepcopy\nSEGMENTOR_CFG = copy.deepcopy(SEGMENTOR_CFG)\n# modify dataset config\nSEGMENTOR_CFG['dataset'] = DATASET_CFG_LIP_473x473.copy()\n# modify dataloader config\nSEGMENTOR_CFG['dataloader'] = DATALOADER_CFG_BS32.copy()\n# modify scheduler config\nSEGMENTOR_CFG['scheduler']['max_epochs'] = 150\n# modify other segmentor configs\nSEGMENTOR_CFG['num_classes'] = 20\nSEGMENTOR_CFG['work_dir'] = 'ce2p_resnet101os8_lip'\nSEGMENTOR_CFG['logfilepath'] = 'ce2p_resnet101os8_lip/ce2p_resnet101os8_lip.log'\nSEGMENTOR_CFG['resultsavepath'] = 'ce2p_resnet101os8_lip/ce2p_resnet101os8_lip_results.pkl'",
"<docstring token>\nimport copy\nfrom .base_cfg import SEGMENTOR_CFG\nfrom .._base_ import DATASET_CFG_LIP_473x473, DATALOADER_CFG_BS32\nSEGMENTOR_CFG = copy.deepcopy(SEGMENTOR_CFG)\nSEGMENTOR_CFG['dataset'] = DATASET_CFG_LIP_473x473.copy()\nSEGMENTOR_CFG['dataloader'] = DATALOADER_CFG_BS32.copy()\nSEGMENTOR_CFG['scheduler']['max_epochs'] = 150\nSEGMENTOR_CFG['num_classes'] = 20\nSEGMENTOR_CFG['work_dir'] = 'ce2p_resnet101os8_lip'\nSEGMENTOR_CFG['logfilepath'\n ] = 'ce2p_resnet101os8_lip/ce2p_resnet101os8_lip.log'\nSEGMENTOR_CFG['resultsavepath'\n ] = 'ce2p_resnet101os8_lip/ce2p_resnet101os8_lip_results.pkl'\n",
"<docstring token>\n<import token>\nSEGMENTOR_CFG = copy.deepcopy(SEGMENTOR_CFG)\nSEGMENTOR_CFG['dataset'] = DATASET_CFG_LIP_473x473.copy()\nSEGMENTOR_CFG['dataloader'] = DATALOADER_CFG_BS32.copy()\nSEGMENTOR_CFG['scheduler']['max_epochs'] = 150\nSEGMENTOR_CFG['num_classes'] = 20\nSEGMENTOR_CFG['work_dir'] = 'ce2p_resnet101os8_lip'\nSEGMENTOR_CFG['logfilepath'\n ] = 'ce2p_resnet101os8_lip/ce2p_resnet101os8_lip.log'\nSEGMENTOR_CFG['resultsavepath'\n ] = 'ce2p_resnet101os8_lip/ce2p_resnet101os8_lip_results.pkl'\n",
"<docstring token>\n<import token>\n<assignment token>\n"
] | false |
98,532 |
812c6697a6bf25c6182a3755004c8564edce2f99
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainui.ui'
#
# Created by: PyQt5 UI code generator 5.13.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
# def __init__(self, item_mean_std):
# self.item_mean_std = item_mean_std
# self.setupUi(MainWindow)
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1645, 954)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(10, 30, 112, 32))
self.pushButton.setObjectName("pushButton")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(130, 40, 58, 16))
self.label.setObjectName("label")
self.file_path = QtWidgets.QLineEdit(self.centralwidget)
self.file_path.setGeometry(QtCore.QRect(190, 30, 251, 31))
self.file_path.setObjectName("file_path")
self.item_mean_std = QtWidgets.QTableWidget(self.centralwidget)
self.item_mean_std.setGeometry(QtCore.QRect(20, 80, 500, 192))
self.item_mean_std.setObjectName("item_mean_std")
#head = ['Special Build Description','mean','median','std']
self.item_mean_std.setColumnCount(0)
self.item_mean_std.setRowCount(0)
#self.item_mean_std.setHorizontalHeaderLabels(head)
MainWindow.setCentralWidget(self.centralwidget)
self.toolBar = QtWidgets.QToolBar(MainWindow)
self.toolBar.setObjectName("toolBar")
MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
self.retranslateUi(MainWindow)
self.pushButton.clicked.connect(MainWindow.hello)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.pushButton.setText(_translate("MainWindow", "Add CSV"))
self.label.setText(_translate("MainWindow", "File Path:"))
self.file_path.setText(_translate("MainWindow", "hi"))
self.toolBar.setWindowTitle(_translate("MainWindow", "toolBar"))
# def update_item_mean_std(self):
# self.item_mean_std.setRowCount(100)
# self.item_mean_std.setColumnCount(4)
|
[
"# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'mainui.ui'\n#\n# Created by: PyQt5 UI code generator 5.13.2\n#\n# WARNING! All changes made in this file will be lost!\n\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_MainWindow(object):\n # def __init__(self, item_mean_std):\n # self.item_mean_std = item_mean_std\n # self.setupUi(MainWindow)\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(1645, 954)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.pushButton = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton.setGeometry(QtCore.QRect(10, 30, 112, 32))\n self.pushButton.setObjectName(\"pushButton\")\n self.label = QtWidgets.QLabel(self.centralwidget)\n self.label.setGeometry(QtCore.QRect(130, 40, 58, 16))\n self.label.setObjectName(\"label\")\n self.file_path = QtWidgets.QLineEdit(self.centralwidget)\n self.file_path.setGeometry(QtCore.QRect(190, 30, 251, 31))\n self.file_path.setObjectName(\"file_path\")\n self.item_mean_std = QtWidgets.QTableWidget(self.centralwidget)\n self.item_mean_std.setGeometry(QtCore.QRect(20, 80, 500, 192))\n self.item_mean_std.setObjectName(\"item_mean_std\")\n \n #head = ['Special Build Description','mean','median','std']\n self.item_mean_std.setColumnCount(0)\n self.item_mean_std.setRowCount(0)\n #self.item_mean_std.setHorizontalHeaderLabels(head)\n \n MainWindow.setCentralWidget(self.centralwidget)\n self.toolBar = QtWidgets.QToolBar(MainWindow)\n self.toolBar.setObjectName(\"toolBar\")\n MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)\n\n self.retranslateUi(MainWindow)\n self.pushButton.clicked.connect(MainWindow.hello)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\n self.pushButton.setText(_translate(\"MainWindow\", \"Add CSV\"))\n self.label.setText(_translate(\"MainWindow\", \"File Path:\"))\n self.file_path.setText(_translate(\"MainWindow\", \"hi\"))\n self.toolBar.setWindowTitle(_translate(\"MainWindow\", \"toolBar\"))\n\n # def update_item_mean_std(self):\n # self.item_mean_std.setRowCount(100)\n # self.item_mean_std.setColumnCount(4)\n ",
"from PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_MainWindow(object):\n\n def setupUi(self, MainWindow):\n MainWindow.setObjectName('MainWindow')\n MainWindow.resize(1645, 954)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName('centralwidget')\n self.pushButton = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton.setGeometry(QtCore.QRect(10, 30, 112, 32))\n self.pushButton.setObjectName('pushButton')\n self.label = QtWidgets.QLabel(self.centralwidget)\n self.label.setGeometry(QtCore.QRect(130, 40, 58, 16))\n self.label.setObjectName('label')\n self.file_path = QtWidgets.QLineEdit(self.centralwidget)\n self.file_path.setGeometry(QtCore.QRect(190, 30, 251, 31))\n self.file_path.setObjectName('file_path')\n self.item_mean_std = QtWidgets.QTableWidget(self.centralwidget)\n self.item_mean_std.setGeometry(QtCore.QRect(20, 80, 500, 192))\n self.item_mean_std.setObjectName('item_mean_std')\n self.item_mean_std.setColumnCount(0)\n self.item_mean_std.setRowCount(0)\n MainWindow.setCentralWidget(self.centralwidget)\n self.toolBar = QtWidgets.QToolBar(MainWindow)\n self.toolBar.setObjectName('toolBar')\n MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)\n self.retranslateUi(MainWindow)\n self.pushButton.clicked.connect(MainWindow.hello)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate('MainWindow', 'MainWindow'))\n self.pushButton.setText(_translate('MainWindow', 'Add CSV'))\n self.label.setText(_translate('MainWindow', 'File Path:'))\n self.file_path.setText(_translate('MainWindow', 'hi'))\n self.toolBar.setWindowTitle(_translate('MainWindow', 'toolBar'))\n",
"<import token>\n\n\nclass Ui_MainWindow(object):\n\n def setupUi(self, MainWindow):\n MainWindow.setObjectName('MainWindow')\n MainWindow.resize(1645, 954)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName('centralwidget')\n self.pushButton = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton.setGeometry(QtCore.QRect(10, 30, 112, 32))\n self.pushButton.setObjectName('pushButton')\n self.label = QtWidgets.QLabel(self.centralwidget)\n self.label.setGeometry(QtCore.QRect(130, 40, 58, 16))\n self.label.setObjectName('label')\n self.file_path = QtWidgets.QLineEdit(self.centralwidget)\n self.file_path.setGeometry(QtCore.QRect(190, 30, 251, 31))\n self.file_path.setObjectName('file_path')\n self.item_mean_std = QtWidgets.QTableWidget(self.centralwidget)\n self.item_mean_std.setGeometry(QtCore.QRect(20, 80, 500, 192))\n self.item_mean_std.setObjectName('item_mean_std')\n self.item_mean_std.setColumnCount(0)\n self.item_mean_std.setRowCount(0)\n MainWindow.setCentralWidget(self.centralwidget)\n self.toolBar = QtWidgets.QToolBar(MainWindow)\n self.toolBar.setObjectName('toolBar')\n MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)\n self.retranslateUi(MainWindow)\n self.pushButton.clicked.connect(MainWindow.hello)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate('MainWindow', 'MainWindow'))\n self.pushButton.setText(_translate('MainWindow', 'Add CSV'))\n self.label.setText(_translate('MainWindow', 'File Path:'))\n self.file_path.setText(_translate('MainWindow', 'hi'))\n self.toolBar.setWindowTitle(_translate('MainWindow', 'toolBar'))\n",
"<import token>\n\n\nclass Ui_MainWindow(object):\n\n def setupUi(self, MainWindow):\n MainWindow.setObjectName('MainWindow')\n MainWindow.resize(1645, 954)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName('centralwidget')\n self.pushButton = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton.setGeometry(QtCore.QRect(10, 30, 112, 32))\n self.pushButton.setObjectName('pushButton')\n self.label = QtWidgets.QLabel(self.centralwidget)\n self.label.setGeometry(QtCore.QRect(130, 40, 58, 16))\n self.label.setObjectName('label')\n self.file_path = QtWidgets.QLineEdit(self.centralwidget)\n self.file_path.setGeometry(QtCore.QRect(190, 30, 251, 31))\n self.file_path.setObjectName('file_path')\n self.item_mean_std = QtWidgets.QTableWidget(self.centralwidget)\n self.item_mean_std.setGeometry(QtCore.QRect(20, 80, 500, 192))\n self.item_mean_std.setObjectName('item_mean_std')\n self.item_mean_std.setColumnCount(0)\n self.item_mean_std.setRowCount(0)\n MainWindow.setCentralWidget(self.centralwidget)\n self.toolBar = QtWidgets.QToolBar(MainWindow)\n self.toolBar.setObjectName('toolBar')\n MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)\n self.retranslateUi(MainWindow)\n self.pushButton.clicked.connect(MainWindow.hello)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n <function token>\n",
"<import token>\n\n\nclass Ui_MainWindow(object):\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
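
The generated Ui_MainWindow above connects its push button to a hello slot on the hosting window, so that window has to define one. A minimal host-window sketch, assuming the generated class is importable as mainui.Ui_MainWindow (the module name and the body of hello are illustrative assumptions, not part of the record):

import sys
from PyQt5 import QtWidgets
from mainui import Ui_MainWindow  # assumed module name for the generated class above


class MainWindow(QtWidgets.QMainWindow):
    def __init__(self):
        super().__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)  # builds the widgets and connects pushButton.clicked to self.hello

    def hello(self):
        # Placeholder slot: ask for a CSV and show the chosen path in the line edit.
        path, _ = QtWidgets.QFileDialog.getOpenFileName(self, "Add CSV", "", "CSV files (*.csv)")
        if path:
            self.ui.file_path.setText(path)


if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    win = MainWindow()
    win.show()
    sys.exit(app.exec_())
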
98,533 |
3ad6d48fcfe23a224bdfdb99e3caab576164cf64
|
import sys
from pyspark.sql.types import *
from pyspark.sql.functions import *
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
def query(query_string, N):
    """Rank files by TF-IDF relevance to the query string and show the top N."""
    query_words = query_string.lower().split(" ")
    total_words = len(query_words)
    # Term frequencies of the query words themselves
    query_df = spark.createDataFrame(query_words, StringType())
    query_df = query_df.groupBy("value").count().select(col("value").alias("word"), col("count").alias("tf"))
    # Join the query words against the precomputed corpus TF-IDF table
    query_idf = query_df.join(broadcast(tfidf), tfidf.word == query_df.word, 'left').select(tfidf.file, query_df.word, query_df.tf, tfidf.idf, tfidf.tf_idf)
    # Score = sum of the matched tf-idf weights, scaled by the fraction of query words matched
    results = query_idf.groupBy("file").agg((sum("tf_idf") * (count("word") / total_words)).alias("score")).orderBy(desc("score"))
    results.show(N)
    return results
if __name__ == "__main__":
    # Build the Spark session, load the precomputed TF-IDF table and run a sample query.
    spark = SparkSession\
        .builder\
        .appName("QueryProgram")\
        .getOrCreate()
    tfidf = spark.read.orc('rugby.orc')
    tfidf.persist()
    results = query("Yachvili slotted over over four penalties", 10)
    spark.stop()
|
[
"import sys\nfrom pyspark.sql.types import *\nfrom pyspark.sql.functions import *\nfrom pyspark import SparkContext, SparkConf\nfrom pyspark.sql import SparkSession\n\ndef query(query_string,N):\n query_words = query_string.lower().split(\" \")\n total_words = len(query_words)\n query_df = spark.createDataFrame(query_words,StringType())\n query_df = query_df.groupBy(\"value\").count().select(col(\"value\").alias(\"word\"),col(\"count\").alias(\"tf\"))\n query_idf = query_df.join(broadcast(tfidf), tfidf.word == query_df.word,'left').select(tfidf.file,query_df.word,query_df.tf,tfidf.idf,tfidf.tf_idf)\n results = query_idf.groupBy(\"file\").agg((sum(\"tf_idf\")*(count(\"word\")/total_words)).alias(\"score\")).orderBy(desc(\"score\"))\n results.show(N)\n\n\nif __name__ == \"__main__\":\n\nspark = SparkSession\\\n .builder\\\n .appName(\"QueryProgram\")\\\n .getOrCreate()\n\ntfidf = spark.read.orc('rugby.orc')\ntfidf.persist()\nresults = query(\"Yachvili slotted over over four penalties\",10)\nspark.stop()"
] | true |
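
The query above scores each file as score = (sum of tf_idf over the query words found in that file) * (matched query words / total query words). A minimal sketch of that arithmetic with made-up tf_idf weights (the numbers are illustrative only):

# "Yachvili slotted over over four penalties" splits into 6 tokens after lowercasing.
matched = {"yachvili": 0.9, "slotted": 0.7, "over": 0.1}  # query words present in one file, with their tf_idf
total_words = 6
score = sum(matched.values()) * (len(matched) / total_words)
print(round(score, 2))  # 1.7 * 0.5 = 0.85
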
98,534 |
91b20997d50edf6eaa87b75fc23f7cfb7aae41b6
|
from sqlalchemy import Boolean, Column, ForeignKey, Integer, String
from ..database import dbBaseClass, dbEngine
class horse(dbBaseClass):
__tablename__ = "horses"
horse_id = Column(Integer, primary_key=True, index=True)
horse_name = Column(String, unique=True, index=True)
# Single-table inheritance: these extra columns are added to the "horses" table.
class horseFull(horse):
coat = Column(Integer)
current_owner = Column(Integer)
|
[
"from sqlalchemy import Boolean, Column, ForeignKey, Integer, String\n\nfrom ..database import dbBaseClass, dbEngine, dbBaseClass\n\n\nclass horse(dbBaseClass):\n __tablename__ = \"horses\"\n horse_id = Column(Integer, primary_key=True, index=True)\n horse_name = Column(String, unique=True, index=True)\n\nclass horseFull(horse):\n coat = Column(Integer)\n current_owner = Column(Integer)",
"from sqlalchemy import Boolean, Column, ForeignKey, Integer, String\nfrom ..database import dbBaseClass, dbEngine, dbBaseClass\n\n\nclass horse(dbBaseClass):\n __tablename__ = 'horses'\n horse_id = Column(Integer, primary_key=True, index=True)\n horse_name = Column(String, unique=True, index=True)\n\n\nclass horseFull(horse):\n coat = Column(Integer)\n current_owner = Column(Integer)\n",
"<import token>\n\n\nclass horse(dbBaseClass):\n __tablename__ = 'horses'\n horse_id = Column(Integer, primary_key=True, index=True)\n horse_name = Column(String, unique=True, index=True)\n\n\nclass horseFull(horse):\n coat = Column(Integer)\n current_owner = Column(Integer)\n",
"<import token>\n\n\nclass horse(dbBaseClass):\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass horseFull(horse):\n coat = Column(Integer)\n current_owner = Column(Integer)\n",
"<import token>\n<class token>\n\n\nclass horseFull(horse):\n coat = Column(Integer)\n current_owner = Column(Integer)\n",
"<import token>\n<class token>\n\n\nclass horseFull(horse):\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n<class token>\n"
] | false |
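
A minimal usage sketch for the models above, assuming dbBaseClass is the project's declarative base and dbEngine a bound SQLAlchemy engine (the ..database module is not shown, so both are assumptions):

from sqlalchemy.orm import Session

# Create the "horses" table; horseFull shares it through single-table inheritance.
dbBaseClass.metadata.create_all(bind=dbEngine)

with Session(dbEngine) as session:
    session.add(horse(horse_name="Secretariat"))
    session.commit()
    print(session.query(horse).count())  # number of stored horses
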
98,535 |
64dd646184fc5037d794a261490124df6f8d18bd
|
import os
import numpy as np
import cv2
import matplotlib.pyplot as plt
# Define a function for image augmentation
def scratch_image(img, flip=True, thr=True, filt=True, resize=True, erode=True):
#methods = [flip, thr, filt, resize, erode]
img_size = img.shape
filter1 = np.ones((3,3))
images = []
if flip == True:
images.append(cv2.flip(img, 1))
if thr == True:
images.append(cv2.threshold(img, 100, 255, cv2.THRESH_TOZERO)[1])
if filt == True:
images.append(cv2.GaussianBlur(img, (5,5), 0))
if resize == True:
images.append(cv2.resize(cv2.resize(img, (img_size[1] // 5, img_size[0] //5)), (img_size[1], img_size[0])))
if erode == True:
images.append(cv2.erode(img, filter1))
return images
# Paths of the input images
datadir = "./images/"
categories = ["Yorkshire_terrier"]
#categories = ["French_bulldog", "Chihuahua", "Golden_retriever", "Maltese_dog", "Miniature_Dachshund", "Saint_Bernard", "Shiba", "Shih_Tzu", "Toypoodle", "Yorkshire_terrier"]
dog_imgs = []
for category in categories:
path = os.path.join(datadir, category)
    # Skip hidden files
data_list = [data for data in os.listdir(path) if not data.startswith('.')]
for data in data_list:
data = path + "/" + data
dog_imgs.append(data)
# Augment the images
scratch_images = []
for dog_img in dog_imgs:
img = cv2.imread(dog_img)
scratch_images += scratch_image(img)
# Create the folder the images will be saved into
if not os.path.exists("Yorkshire_terrier"):
os.mkdir("Yorkshire_terrier")
for num, im in enumerate(scratch_images):
    # Save each augmented image into the output directory with a numbered filename
cv2.imwrite("Yorkshire_terrier/" + str(1000 + num) + ".jpg", im)
|
[
"import os\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\n# 水増しのための関数を定義\ndef scratch_image(img, flip=True, thr=True, filt=True, resize=True, erode=True):\n\t#methods = [flip, thr, filt, resize, erode]\n\timg_size = img.shape\n\tfilter1 = np.ones((3,3))\n\n\timages = []\n\n\tif flip == True:\n\t\timages.append(cv2.flip(img, 1))\n\tif thr == True:\n\t\timages.append(cv2.threshold(img, 100, 255, cv2.THRESH_TOZERO)[1])\n\tif filt == True:\n\t\timages.append(cv2.GaussianBlur(img, (5,5), 0))\n\tif resize == True:\n\t\timages.append(cv2.resize(cv2.resize(img, (img_size[1] // 5, img_size[0] //5)), (img_size[1], img_size[0])))\n\tif erode == True:\n\t\timages.append(cv2.erode(img, filter1))\n\n\treturn images\n\n# 読み込み画像パス\ndatadir = \"./images/\"\ncategories = [\"Yorkshire_terrier\"]\n#categories = [\"French_bulldog\", \"Chihuahua\", \"Golden_retriever\", \"Maltese_dog\", \"Miniature_Dachshund\", \"Saint_Bernard\", \"Shiba\", \"Shih_Tzu\", \"Toypoodle\", \"Yorkshire_terrier\"]\n\ndog_imgs = []\nfor category in categories:\n\tpath = os.path.join(datadir, category)\n\t#隠しファイルは読まないため\n\tdata_list = [data for data in os.listdir(path) if not data.startswith('.')]\n\tfor data in data_list:\n\t\tdata = path + \"/\" + data\n\t\tdog_imgs.append(data)\n\n# 画像の水増し\nscratch_images = []\nfor dog_img in dog_imgs:\n\timg = cv2.imread(dog_img)\n\tscratch_images += scratch_image(img)\n\n# 画像を保存するフォルダーを作成\nif not os.path.exists(\"Yorkshire_terrier\"):\n\tos.mkdir(\"Yorkshire_terrier\")\nfor num, im in enumerate(scratch_images):\n\t# まず保存先のディレクトリ\"scratch_images/\"を指定、番号を付けて保存\n\tcv2.imwrite(\"Yorkshire_terrier/\" + str(1000 + num) + \".jpg\", im)\n",
"import os\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\n\ndef scratch_image(img, flip=True, thr=True, filt=True, resize=True, erode=True\n ):\n img_size = img.shape\n filter1 = np.ones((3, 3))\n images = []\n if flip == True:\n images.append(cv2.flip(img, 1))\n if thr == True:\n images.append(cv2.threshold(img, 100, 255, cv2.THRESH_TOZERO)[1])\n if filt == True:\n images.append(cv2.GaussianBlur(img, (5, 5), 0))\n if resize == True:\n images.append(cv2.resize(cv2.resize(img, (img_size[1] // 5, \n img_size[0] // 5)), (img_size[1], img_size[0])))\n if erode == True:\n images.append(cv2.erode(img, filter1))\n return images\n\n\ndatadir = './images/'\ncategories = ['Yorkshire_terrier']\ndog_imgs = []\nfor category in categories:\n path = os.path.join(datadir, category)\n data_list = [data for data in os.listdir(path) if not data.startswith('.')]\n for data in data_list:\n data = path + '/' + data\n dog_imgs.append(data)\nscratch_images = []\nfor dog_img in dog_imgs:\n img = cv2.imread(dog_img)\n scratch_images += scratch_image(img)\nif not os.path.exists('Yorkshire_terrier'):\n os.mkdir('Yorkshire_terrier')\nfor num, im in enumerate(scratch_images):\n cv2.imwrite('Yorkshire_terrier/' + str(1000 + num) + '.jpg', im)\n",
"<import token>\n\n\ndef scratch_image(img, flip=True, thr=True, filt=True, resize=True, erode=True\n ):\n img_size = img.shape\n filter1 = np.ones((3, 3))\n images = []\n if flip == True:\n images.append(cv2.flip(img, 1))\n if thr == True:\n images.append(cv2.threshold(img, 100, 255, cv2.THRESH_TOZERO)[1])\n if filt == True:\n images.append(cv2.GaussianBlur(img, (5, 5), 0))\n if resize == True:\n images.append(cv2.resize(cv2.resize(img, (img_size[1] // 5, \n img_size[0] // 5)), (img_size[1], img_size[0])))\n if erode == True:\n images.append(cv2.erode(img, filter1))\n return images\n\n\ndatadir = './images/'\ncategories = ['Yorkshire_terrier']\ndog_imgs = []\nfor category in categories:\n path = os.path.join(datadir, category)\n data_list = [data for data in os.listdir(path) if not data.startswith('.')]\n for data in data_list:\n data = path + '/' + data\n dog_imgs.append(data)\nscratch_images = []\nfor dog_img in dog_imgs:\n img = cv2.imread(dog_img)\n scratch_images += scratch_image(img)\nif not os.path.exists('Yorkshire_terrier'):\n os.mkdir('Yorkshire_terrier')\nfor num, im in enumerate(scratch_images):\n cv2.imwrite('Yorkshire_terrier/' + str(1000 + num) + '.jpg', im)\n",
"<import token>\n\n\ndef scratch_image(img, flip=True, thr=True, filt=True, resize=True, erode=True\n ):\n img_size = img.shape\n filter1 = np.ones((3, 3))\n images = []\n if flip == True:\n images.append(cv2.flip(img, 1))\n if thr == True:\n images.append(cv2.threshold(img, 100, 255, cv2.THRESH_TOZERO)[1])\n if filt == True:\n images.append(cv2.GaussianBlur(img, (5, 5), 0))\n if resize == True:\n images.append(cv2.resize(cv2.resize(img, (img_size[1] // 5, \n img_size[0] // 5)), (img_size[1], img_size[0])))\n if erode == True:\n images.append(cv2.erode(img, filter1))\n return images\n\n\n<assignment token>\nfor category in categories:\n path = os.path.join(datadir, category)\n data_list = [data for data in os.listdir(path) if not data.startswith('.')]\n for data in data_list:\n data = path + '/' + data\n dog_imgs.append(data)\n<assignment token>\nfor dog_img in dog_imgs:\n img = cv2.imread(dog_img)\n scratch_images += scratch_image(img)\nif not os.path.exists('Yorkshire_terrier'):\n os.mkdir('Yorkshire_terrier')\nfor num, im in enumerate(scratch_images):\n cv2.imwrite('Yorkshire_terrier/' + str(1000 + num) + '.jpg', im)\n",
"<import token>\n\n\ndef scratch_image(img, flip=True, thr=True, filt=True, resize=True, erode=True\n ):\n img_size = img.shape\n filter1 = np.ones((3, 3))\n images = []\n if flip == True:\n images.append(cv2.flip(img, 1))\n if thr == True:\n images.append(cv2.threshold(img, 100, 255, cv2.THRESH_TOZERO)[1])\n if filt == True:\n images.append(cv2.GaussianBlur(img, (5, 5), 0))\n if resize == True:\n images.append(cv2.resize(cv2.resize(img, (img_size[1] // 5, \n img_size[0] // 5)), (img_size[1], img_size[0])))\n if erode == True:\n images.append(cv2.erode(img, filter1))\n return images\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,536 |
252d62600560f3604b319a574197494027fec6c3
|
import math
import matrix
class SRM:
    def __init__(self, arg):
        # Keep the original matrix plus the U, L and D working matrices.
        self.am = matrix.Matrix(arg, "Initial matrix")
        self.um = matrix.Matrix(arg, "Umatrix")
        self.ut = matrix.Matrix([], "Lmatrix")
        self.dm = matrix.Matrix([], "Dmatrix")
|
[
"import math\nimport matrix\n\nclass SRM:\n def __init__(self,arg):\n self.am = matrix.Matrix(arg, \"Initial matrix\")\n self.um = matrix.Matrix(arg, \"Umatrix\")\n self.ut = matrix.Matrix([], \"Lmatrix\")\n self.dm = matrix.Matrix([],\"Dmatrix\")\n pass\n",
"import math\nimport matrix\n\n\nclass SRM:\n\n def __init__(self, arg):\n self.am = matrix.Matrix(arg, 'Initial matrix')\n self.um = matrix.Matrix(arg, 'Umatrix')\n self.ut = matrix.Matrix([], 'Lmatrix')\n self.dm = matrix.Matrix([], 'Dmatrix')\n pass\n",
"<import token>\n\n\nclass SRM:\n\n def __init__(self, arg):\n self.am = matrix.Matrix(arg, 'Initial matrix')\n self.um = matrix.Matrix(arg, 'Umatrix')\n self.ut = matrix.Matrix([], 'Lmatrix')\n self.dm = matrix.Matrix([], 'Dmatrix')\n pass\n",
"<import token>\n\n\nclass SRM:\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
98,537 |
e3eb89b7d14f3133e2f424db7f0bf863738e557f
|
# -*- coding: utf-8 -*-
import os
import numpy as np
import pytest
from intake_xarray.image import _coerce_shape, ImageSource
here = os.path.dirname(__file__)
@pytest.mark.parametrize('im', [
[[1, 2],
[3, 4]],
[[1, 2, 7],
[3, 4, 6]],
[[1, 2, 7],
[3, 4, 6],
[5, 6, 8]],
[[1, 2],
[3, 4],
[5, 6],
[7, 8]],
])
def test_coerce_shape_2d_trim_only(im):
shape = (2, 2)
array = np.array(im)
expected = np.array([[1, 2],
[3, 4]])
actual = _coerce_shape(array, shape)
assert (expected == actual).all()
def test_coerce_shape_2d_pad_only():
shape = (3, 4)
array = np.array([[1, 2],
[3, 4]])
expected = np.array([[1, 2, 0, 0],
[3, 4, 0, 0],
[0, 0, 0, 0]])
actual = _coerce_shape(array, shape)
assert (expected == actual).all()
def test_coerce_shape_2d_pad_nrows_and_trim_ncols():
shape = (4, 2)
array = np.array([[1, 2, 7],
[3, 4, 6]])
expected = np.array([[1, 2],
[3, 4],
[0, 0],
[0, 0]])
actual = _coerce_shape(array, shape)
assert (expected == actual).all()
def test_coerce_shape_2d_pad_ncols_and_trim_nrows():
shape = (2, 4)
array = np.array([[1, 2],
[3, 4],
[5, 6],
[7, 8]])
expected = np.array([[1, 2, 0, 0],
[3, 4, 0, 0]])
actual = _coerce_shape(array, shape)
assert (expected == actual).all()
def test_coerce_shape_3d_no_change():
shape = (3, 3)
array = np.arange(3**3).reshape(3, 3, 3)
actual = _coerce_shape(array, shape)
assert (array == actual).all()
def test_coerce_shape_3d_pad_nrows_and_trim_ncols():
shape = (5, 2)
array = np.arange(2*4*3).reshape(2, 4, 3)
expected = np.array([[[0, 1, 2],
[3, 4, 5]],
[[12, 13, 14],
[15, 16, 17]],
[[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0]]])
actual = _coerce_shape(array, shape)
assert (expected == actual).all()
def test_coerce_shape_3d_pad_ncols_and_trim_nrows():
shape = (2, 5)
array = np.arange(3*2*4).reshape(3, 2, 4)
expected = np.array([[[0, 1, 2, 3],
[4, 5, 6, 7],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]],
[[8, 9, 10, 11],
[12, 13, 14, 15],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]])
actual = _coerce_shape(array, shape)
assert (expected == actual).all()
def test_coerce_shape_raises_error_if_shape_not_len_2():
shape = (2, 3, 3)
array = np.arange(3**3).reshape(3, 3, 3)
with pytest.raises(ValueError,
match='coerce_shape must be an iterable of len 2'):
_coerce_shape(array, shape)
def test_coerce_shape_array_non_int():
shape = (2, 3)
array = np.random.random((3, 2))
expected = np.append(array[:2, :], [[0], [0]], axis=1)
actual = _coerce_shape(array, shape)
assert (expected == actual).all()
assert expected.dtype == "float"
def test_read_image():
pytest.importorskip('skimage')
urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')
source = ImageSource(urlpath=urlpath)
array = source.read()
assert array.shape == (256, 252, 3)
assert array.dtype == np.uint8
def test_read_image_and_exif():
pytest.importorskip('skimage')
urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')
source = ImageSource(urlpath=urlpath, exif_tags=True)
ds = source.read()
assert ds['raster'].shape == (256, 252, 3)
assert ds['raster'].dtype == np.uint8
assert ds['EXIF Image ImageWidth'].item().values == [252]
assert ds['EXIF Image ImageLength'].item().values == [256]
def test_read_image_and_given_exif_tag():
pytest.importorskip('skimage')
urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')
source = ImageSource(urlpath=urlpath, exif_tags=['Image ImageWidth'])
ds = source.read()
assert ds['raster'].shape == (256, 252, 3)
assert ds['raster'].dtype == np.uint8
assert ds['EXIF Image ImageWidth'].item().values == [252]
with pytest.raises(KeyError):
ds['EXIF Image ImageLength']
def test_read_images_as_glob_without_coerce_raises_error():
pytest.importorskip('skimage')
urlpath = os.path.join(here, 'data', 'images', '*')
source = ImageSource(urlpath=urlpath)
with pytest.raises(ValueError,
match='could not broadcast input array'):
source.read()
def test_read_images_as_glob_with_coerce():
pytest.importorskip('skimage')
urlpath = os.path.join(here, 'data', 'images', '*')
source = ImageSource(urlpath=urlpath, coerce_shape=(256, 256))
array = source.read()
assert array.shape == (3, 256, 256, 3)
def test_read_images_and_exif_as_glob_with_coerce():
pytest.importorskip('skimage')
urlpath = os.path.join(here, 'data', 'images', '*')
source = ImageSource(urlpath=urlpath, coerce_shape=(256, 256), exif_tags=True)
ds = source.read()
assert ds['raster'].shape == (3, 256, 256, 3)
assert ds['EXIF Image ImageWidth'].shape == (3,)
def test_read_images_and_persist():
pytest.importorskip('skimage')
urlpath = os.path.join(here, 'data', 'images', '*')
source = ImageSource(urlpath=urlpath, coerce_shape=(256, 256))
import tempfile
exported = tempfile.mkdtemp()
source.export(exported)
import xarray as xr
assert xr.open_dataset(exported, engine="zarr")
|
[
"# -*- coding: utf-8 -*-\nimport os\nimport numpy as np\nimport pytest\n\nfrom intake_xarray.image import _coerce_shape, ImageSource\n\nhere = os.path.dirname(__file__)\n\n\[email protected]('im', [\n [[1, 2],\n [3, 4]],\n [[1, 2, 7],\n [3, 4, 6]],\n [[1, 2, 7],\n [3, 4, 6],\n [5, 6, 8]],\n [[1, 2],\n [3, 4],\n [5, 6],\n [7, 8]],\n])\ndef test_coerce_shape_2d_trim_only(im):\n shape = (2, 2)\n array = np.array(im)\n expected = np.array([[1, 2],\n [3, 4]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_2d_pad_only():\n shape = (3, 4)\n array = np.array([[1, 2],\n [3, 4]])\n expected = np.array([[1, 2, 0, 0],\n [3, 4, 0, 0],\n [0, 0, 0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_2d_pad_nrows_and_trim_ncols():\n shape = (4, 2)\n array = np.array([[1, 2, 7],\n [3, 4, 6]])\n expected = np.array([[1, 2],\n [3, 4],\n [0, 0],\n [0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_2d_pad_ncols_and_trim_nrows():\n shape = (2, 4)\n array = np.array([[1, 2],\n [3, 4],\n [5, 6],\n [7, 8]])\n expected = np.array([[1, 2, 0, 0],\n [3, 4, 0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_3d_no_change():\n shape = (3, 3)\n array = np.arange(3**3).reshape(3, 3, 3)\n actual = _coerce_shape(array, shape)\n assert (array == actual).all()\n\n\ndef test_coerce_shape_3d_pad_nrows_and_trim_ncols():\n shape = (5, 2)\n array = np.arange(2*4*3).reshape(2, 4, 3)\n expected = np.array([[[0, 1, 2],\n [3, 4, 5]],\n\n [[12, 13, 14],\n [15, 16, 17]],\n\n [[0, 0, 0],\n [0, 0, 0]],\n\n [[0, 0, 0],\n [0, 0, 0]],\n\n [[0, 0, 0],\n [0, 0, 0]]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_3d_pad_ncols_and_trim_nrows():\n shape = (2, 5)\n array = np.arange(3*2*4).reshape(3, 2, 4)\n expected = np.array([[[0, 1, 2, 3],\n [4, 5, 6, 7],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]],\n\n [[8, 9, 10, 11],\n [12, 13, 14, 15],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_raises_error_if_shape_not_len_2():\n shape = (2, 3, 3)\n array = np.arange(3**3).reshape(3, 3, 3)\n with pytest.raises(ValueError,\n match='coerce_shape must be an iterable of len 2'):\n _coerce_shape(array, shape)\n\n\ndef test_coerce_shape_array_non_int():\n shape = (2, 3)\n array = np.random.random((3, 2))\n expected = np.append(array[:2, :], [[0], [0]], axis=1)\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n assert expected.dtype == \"float\"\n\n\ndef test_read_image():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')\n source = ImageSource(urlpath=urlpath)\n array = source.read()\n assert array.shape == (256, 252, 3)\n assert array.dtype == np.uint8\n\n\ndef test_read_image_and_exif():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')\n source = ImageSource(urlpath=urlpath, exif_tags=True)\n ds = source.read()\n assert ds['raster'].shape == (256, 252, 3)\n assert ds['raster'].dtype == np.uint8\n assert ds['EXIF Image ImageWidth'].item().values == [252]\n assert ds['EXIF Image ImageLength'].item().values == [256]\n\n\ndef test_read_image_and_given_exif_tag():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', 
'beach57.tif')\n source = ImageSource(urlpath=urlpath, exif_tags=['Image ImageWidth'])\n ds = source.read()\n assert ds['raster'].shape == (256, 252, 3)\n assert ds['raster'].dtype == np.uint8\n assert ds['EXIF Image ImageWidth'].item().values == [252]\n with pytest.raises(KeyError):\n ds['EXIF Image ImageLength']\n\n\ndef test_read_images_as_glob_without_coerce_raises_error():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath)\n with pytest.raises(ValueError,\n match='could not broadcast input array'):\n source.read()\n\n\ndef test_read_images_as_glob_with_coerce():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath, coerce_shape=(256, 256))\n array = source.read()\n assert array.shape == (3, 256, 256, 3)\n\n\ndef test_read_images_and_exif_as_glob_with_coerce():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath, coerce_shape=(256, 256), exif_tags=True)\n ds = source.read()\n assert ds['raster'].shape == (3, 256, 256, 3)\n assert ds['EXIF Image ImageWidth'].shape == (3,)\n\n\ndef test_read_images_and_persist():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath, coerce_shape=(256, 256))\n import tempfile\n exported = tempfile.mkdtemp()\n source.export(exported)\n import xarray as xr\n assert xr.open_dataset(exported, engine=\"zarr\")\n",
"import os\nimport numpy as np\nimport pytest\nfrom intake_xarray.image import _coerce_shape, ImageSource\nhere = os.path.dirname(__file__)\n\n\[email protected]('im', [[[1, 2], [3, 4]], [[1, 2, 7], [3, 4, 6]], [\n [1, 2, 7], [3, 4, 6], [5, 6, 8]], [[1, 2], [3, 4], [5, 6], [7, 8]]])\ndef test_coerce_shape_2d_trim_only(im):\n shape = 2, 2\n array = np.array(im)\n expected = np.array([[1, 2], [3, 4]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_2d_pad_only():\n shape = 3, 4\n array = np.array([[1, 2], [3, 4]])\n expected = np.array([[1, 2, 0, 0], [3, 4, 0, 0], [0, 0, 0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_2d_pad_nrows_and_trim_ncols():\n shape = 4, 2\n array = np.array([[1, 2, 7], [3, 4, 6]])\n expected = np.array([[1, 2], [3, 4], [0, 0], [0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_2d_pad_ncols_and_trim_nrows():\n shape = 2, 4\n array = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])\n expected = np.array([[1, 2, 0, 0], [3, 4, 0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_3d_no_change():\n shape = 3, 3\n array = np.arange(3 ** 3).reshape(3, 3, 3)\n actual = _coerce_shape(array, shape)\n assert (array == actual).all()\n\n\ndef test_coerce_shape_3d_pad_nrows_and_trim_ncols():\n shape = 5, 2\n array = np.arange(2 * 4 * 3).reshape(2, 4, 3)\n expected = np.array([[[0, 1, 2], [3, 4, 5]], [[12, 13, 14], [15, 16, 17\n ]], [[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0,\n 0, 0]]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_3d_pad_ncols_and_trim_nrows():\n shape = 2, 5\n array = np.arange(3 * 2 * 4).reshape(3, 2, 4)\n expected = np.array([[[0, 1, 2, 3], [4, 5, 6, 7], [0, 0, 0, 0], [0, 0, \n 0, 0], [0, 0, 0, 0]], [[8, 9, 10, 11], [12, 13, 14, 15], [0, 0, 0, \n 0], [0, 0, 0, 0], [0, 0, 0, 0]]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_raises_error_if_shape_not_len_2():\n shape = 2, 3, 3\n array = np.arange(3 ** 3).reshape(3, 3, 3)\n with pytest.raises(ValueError, match=\n 'coerce_shape must be an iterable of len 2'):\n _coerce_shape(array, shape)\n\n\ndef test_coerce_shape_array_non_int():\n shape = 2, 3\n array = np.random.random((3, 2))\n expected = np.append(array[:2, :], [[0], [0]], axis=1)\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n assert expected.dtype == 'float'\n\n\ndef test_read_image():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')\n source = ImageSource(urlpath=urlpath)\n array = source.read()\n assert array.shape == (256, 252, 3)\n assert array.dtype == np.uint8\n\n\ndef test_read_image_and_exif():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')\n source = ImageSource(urlpath=urlpath, exif_tags=True)\n ds = source.read()\n assert ds['raster'].shape == (256, 252, 3)\n assert ds['raster'].dtype == np.uint8\n assert ds['EXIF Image ImageWidth'].item().values == [252]\n assert ds['EXIF Image ImageLength'].item().values == [256]\n\n\ndef test_read_image_and_given_exif_tag():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')\n source = ImageSource(urlpath=urlpath, exif_tags=['Image ImageWidth'])\n ds = source.read()\n assert 
ds['raster'].shape == (256, 252, 3)\n assert ds['raster'].dtype == np.uint8\n assert ds['EXIF Image ImageWidth'].item().values == [252]\n with pytest.raises(KeyError):\n ds['EXIF Image ImageLength']\n\n\ndef test_read_images_as_glob_without_coerce_raises_error():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath)\n with pytest.raises(ValueError, match='could not broadcast input array'):\n source.read()\n\n\ndef test_read_images_as_glob_with_coerce():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath, coerce_shape=(256, 256))\n array = source.read()\n assert array.shape == (3, 256, 256, 3)\n\n\ndef test_read_images_and_exif_as_glob_with_coerce():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath, coerce_shape=(256, 256),\n exif_tags=True)\n ds = source.read()\n assert ds['raster'].shape == (3, 256, 256, 3)\n assert ds['EXIF Image ImageWidth'].shape == (3,)\n\n\ndef test_read_images_and_persist():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath, coerce_shape=(256, 256))\n import tempfile\n exported = tempfile.mkdtemp()\n source.export(exported)\n import xarray as xr\n assert xr.open_dataset(exported, engine='zarr')\n",
"<import token>\nhere = os.path.dirname(__file__)\n\n\[email protected]('im', [[[1, 2], [3, 4]], [[1, 2, 7], [3, 4, 6]], [\n [1, 2, 7], [3, 4, 6], [5, 6, 8]], [[1, 2], [3, 4], [5, 6], [7, 8]]])\ndef test_coerce_shape_2d_trim_only(im):\n shape = 2, 2\n array = np.array(im)\n expected = np.array([[1, 2], [3, 4]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_2d_pad_only():\n shape = 3, 4\n array = np.array([[1, 2], [3, 4]])\n expected = np.array([[1, 2, 0, 0], [3, 4, 0, 0], [0, 0, 0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_2d_pad_nrows_and_trim_ncols():\n shape = 4, 2\n array = np.array([[1, 2, 7], [3, 4, 6]])\n expected = np.array([[1, 2], [3, 4], [0, 0], [0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_2d_pad_ncols_and_trim_nrows():\n shape = 2, 4\n array = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])\n expected = np.array([[1, 2, 0, 0], [3, 4, 0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_3d_no_change():\n shape = 3, 3\n array = np.arange(3 ** 3).reshape(3, 3, 3)\n actual = _coerce_shape(array, shape)\n assert (array == actual).all()\n\n\ndef test_coerce_shape_3d_pad_nrows_and_trim_ncols():\n shape = 5, 2\n array = np.arange(2 * 4 * 3).reshape(2, 4, 3)\n expected = np.array([[[0, 1, 2], [3, 4, 5]], [[12, 13, 14], [15, 16, 17\n ]], [[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0,\n 0, 0]]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_3d_pad_ncols_and_trim_nrows():\n shape = 2, 5\n array = np.arange(3 * 2 * 4).reshape(3, 2, 4)\n expected = np.array([[[0, 1, 2, 3], [4, 5, 6, 7], [0, 0, 0, 0], [0, 0, \n 0, 0], [0, 0, 0, 0]], [[8, 9, 10, 11], [12, 13, 14, 15], [0, 0, 0, \n 0], [0, 0, 0, 0], [0, 0, 0, 0]]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_raises_error_if_shape_not_len_2():\n shape = 2, 3, 3\n array = np.arange(3 ** 3).reshape(3, 3, 3)\n with pytest.raises(ValueError, match=\n 'coerce_shape must be an iterable of len 2'):\n _coerce_shape(array, shape)\n\n\ndef test_coerce_shape_array_non_int():\n shape = 2, 3\n array = np.random.random((3, 2))\n expected = np.append(array[:2, :], [[0], [0]], axis=1)\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n assert expected.dtype == 'float'\n\n\ndef test_read_image():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')\n source = ImageSource(urlpath=urlpath)\n array = source.read()\n assert array.shape == (256, 252, 3)\n assert array.dtype == np.uint8\n\n\ndef test_read_image_and_exif():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')\n source = ImageSource(urlpath=urlpath, exif_tags=True)\n ds = source.read()\n assert ds['raster'].shape == (256, 252, 3)\n assert ds['raster'].dtype == np.uint8\n assert ds['EXIF Image ImageWidth'].item().values == [252]\n assert ds['EXIF Image ImageLength'].item().values == [256]\n\n\ndef test_read_image_and_given_exif_tag():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')\n source = ImageSource(urlpath=urlpath, exif_tags=['Image ImageWidth'])\n ds = source.read()\n assert ds['raster'].shape == (256, 252, 3)\n assert ds['raster'].dtype == np.uint8\n assert ds['EXIF Image 
ImageWidth'].item().values == [252]\n with pytest.raises(KeyError):\n ds['EXIF Image ImageLength']\n\n\ndef test_read_images_as_glob_without_coerce_raises_error():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath)\n with pytest.raises(ValueError, match='could not broadcast input array'):\n source.read()\n\n\ndef test_read_images_as_glob_with_coerce():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath, coerce_shape=(256, 256))\n array = source.read()\n assert array.shape == (3, 256, 256, 3)\n\n\ndef test_read_images_and_exif_as_glob_with_coerce():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath, coerce_shape=(256, 256),\n exif_tags=True)\n ds = source.read()\n assert ds['raster'].shape == (3, 256, 256, 3)\n assert ds['EXIF Image ImageWidth'].shape == (3,)\n\n\ndef test_read_images_and_persist():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath, coerce_shape=(256, 256))\n import tempfile\n exported = tempfile.mkdtemp()\n source.export(exported)\n import xarray as xr\n assert xr.open_dataset(exported, engine='zarr')\n",
"<import token>\n<assignment token>\n\n\[email protected]('im', [[[1, 2], [3, 4]], [[1, 2, 7], [3, 4, 6]], [\n [1, 2, 7], [3, 4, 6], [5, 6, 8]], [[1, 2], [3, 4], [5, 6], [7, 8]]])\ndef test_coerce_shape_2d_trim_only(im):\n shape = 2, 2\n array = np.array(im)\n expected = np.array([[1, 2], [3, 4]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_2d_pad_only():\n shape = 3, 4\n array = np.array([[1, 2], [3, 4]])\n expected = np.array([[1, 2, 0, 0], [3, 4, 0, 0], [0, 0, 0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_2d_pad_nrows_and_trim_ncols():\n shape = 4, 2\n array = np.array([[1, 2, 7], [3, 4, 6]])\n expected = np.array([[1, 2], [3, 4], [0, 0], [0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_2d_pad_ncols_and_trim_nrows():\n shape = 2, 4\n array = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])\n expected = np.array([[1, 2, 0, 0], [3, 4, 0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_3d_no_change():\n shape = 3, 3\n array = np.arange(3 ** 3).reshape(3, 3, 3)\n actual = _coerce_shape(array, shape)\n assert (array == actual).all()\n\n\ndef test_coerce_shape_3d_pad_nrows_and_trim_ncols():\n shape = 5, 2\n array = np.arange(2 * 4 * 3).reshape(2, 4, 3)\n expected = np.array([[[0, 1, 2], [3, 4, 5]], [[12, 13, 14], [15, 16, 17\n ]], [[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0,\n 0, 0]]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_3d_pad_ncols_and_trim_nrows():\n shape = 2, 5\n array = np.arange(3 * 2 * 4).reshape(3, 2, 4)\n expected = np.array([[[0, 1, 2, 3], [4, 5, 6, 7], [0, 0, 0, 0], [0, 0, \n 0, 0], [0, 0, 0, 0]], [[8, 9, 10, 11], [12, 13, 14, 15], [0, 0, 0, \n 0], [0, 0, 0, 0], [0, 0, 0, 0]]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_raises_error_if_shape_not_len_2():\n shape = 2, 3, 3\n array = np.arange(3 ** 3).reshape(3, 3, 3)\n with pytest.raises(ValueError, match=\n 'coerce_shape must be an iterable of len 2'):\n _coerce_shape(array, shape)\n\n\ndef test_coerce_shape_array_non_int():\n shape = 2, 3\n array = np.random.random((3, 2))\n expected = np.append(array[:2, :], [[0], [0]], axis=1)\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n assert expected.dtype == 'float'\n\n\ndef test_read_image():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')\n source = ImageSource(urlpath=urlpath)\n array = source.read()\n assert array.shape == (256, 252, 3)\n assert array.dtype == np.uint8\n\n\ndef test_read_image_and_exif():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')\n source = ImageSource(urlpath=urlpath, exif_tags=True)\n ds = source.read()\n assert ds['raster'].shape == (256, 252, 3)\n assert ds['raster'].dtype == np.uint8\n assert ds['EXIF Image ImageWidth'].item().values == [252]\n assert ds['EXIF Image ImageLength'].item().values == [256]\n\n\ndef test_read_image_and_given_exif_tag():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')\n source = ImageSource(urlpath=urlpath, exif_tags=['Image ImageWidth'])\n ds = source.read()\n assert ds['raster'].shape == (256, 252, 3)\n assert ds['raster'].dtype == np.uint8\n assert ds['EXIF Image 
ImageWidth'].item().values == [252]\n with pytest.raises(KeyError):\n ds['EXIF Image ImageLength']\n\n\ndef test_read_images_as_glob_without_coerce_raises_error():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath)\n with pytest.raises(ValueError, match='could not broadcast input array'):\n source.read()\n\n\ndef test_read_images_as_glob_with_coerce():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath, coerce_shape=(256, 256))\n array = source.read()\n assert array.shape == (3, 256, 256, 3)\n\n\ndef test_read_images_and_exif_as_glob_with_coerce():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath, coerce_shape=(256, 256),\n exif_tags=True)\n ds = source.read()\n assert ds['raster'].shape == (3, 256, 256, 3)\n assert ds['EXIF Image ImageWidth'].shape == (3,)\n\n\ndef test_read_images_and_persist():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath, coerce_shape=(256, 256))\n import tempfile\n exported = tempfile.mkdtemp()\n source.export(exported)\n import xarray as xr\n assert xr.open_dataset(exported, engine='zarr')\n",
"<import token>\n<assignment token>\n\n\[email protected]('im', [[[1, 2], [3, 4]], [[1, 2, 7], [3, 4, 6]], [\n [1, 2, 7], [3, 4, 6], [5, 6, 8]], [[1, 2], [3, 4], [5, 6], [7, 8]]])\ndef test_coerce_shape_2d_trim_only(im):\n shape = 2, 2\n array = np.array(im)\n expected = np.array([[1, 2], [3, 4]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_2d_pad_only():\n shape = 3, 4\n array = np.array([[1, 2], [3, 4]])\n expected = np.array([[1, 2, 0, 0], [3, 4, 0, 0], [0, 0, 0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_2d_pad_nrows_and_trim_ncols():\n shape = 4, 2\n array = np.array([[1, 2, 7], [3, 4, 6]])\n expected = np.array([[1, 2], [3, 4], [0, 0], [0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_2d_pad_ncols_and_trim_nrows():\n shape = 2, 4\n array = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])\n expected = np.array([[1, 2, 0, 0], [3, 4, 0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_3d_no_change():\n shape = 3, 3\n array = np.arange(3 ** 3).reshape(3, 3, 3)\n actual = _coerce_shape(array, shape)\n assert (array == actual).all()\n\n\ndef test_coerce_shape_3d_pad_nrows_and_trim_ncols():\n shape = 5, 2\n array = np.arange(2 * 4 * 3).reshape(2, 4, 3)\n expected = np.array([[[0, 1, 2], [3, 4, 5]], [[12, 13, 14], [15, 16, 17\n ]], [[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0,\n 0, 0]]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\n<function token>\n\n\ndef test_coerce_shape_raises_error_if_shape_not_len_2():\n shape = 2, 3, 3\n array = np.arange(3 ** 3).reshape(3, 3, 3)\n with pytest.raises(ValueError, match=\n 'coerce_shape must be an iterable of len 2'):\n _coerce_shape(array, shape)\n\n\ndef test_coerce_shape_array_non_int():\n shape = 2, 3\n array = np.random.random((3, 2))\n expected = np.append(array[:2, :], [[0], [0]], axis=1)\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n assert expected.dtype == 'float'\n\n\ndef test_read_image():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')\n source = ImageSource(urlpath=urlpath)\n array = source.read()\n assert array.shape == (256, 252, 3)\n assert array.dtype == np.uint8\n\n\ndef test_read_image_and_exif():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')\n source = ImageSource(urlpath=urlpath, exif_tags=True)\n ds = source.read()\n assert ds['raster'].shape == (256, 252, 3)\n assert ds['raster'].dtype == np.uint8\n assert ds['EXIF Image ImageWidth'].item().values == [252]\n assert ds['EXIF Image ImageLength'].item().values == [256]\n\n\ndef test_read_image_and_given_exif_tag():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')\n source = ImageSource(urlpath=urlpath, exif_tags=['Image ImageWidth'])\n ds = source.read()\n assert ds['raster'].shape == (256, 252, 3)\n assert ds['raster'].dtype == np.uint8\n assert ds['EXIF Image ImageWidth'].item().values == [252]\n with pytest.raises(KeyError):\n ds['EXIF Image ImageLength']\n\n\ndef test_read_images_as_glob_without_coerce_raises_error():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath)\n with pytest.raises(ValueError, match='could not broadcast input array'):\n 
source.read()\n\n\ndef test_read_images_as_glob_with_coerce():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath, coerce_shape=(256, 256))\n array = source.read()\n assert array.shape == (3, 256, 256, 3)\n\n\ndef test_read_images_and_exif_as_glob_with_coerce():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath, coerce_shape=(256, 256),\n exif_tags=True)\n ds = source.read()\n assert ds['raster'].shape == (3, 256, 256, 3)\n assert ds['EXIF Image ImageWidth'].shape == (3,)\n\n\ndef test_read_images_and_persist():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath, coerce_shape=(256, 256))\n import tempfile\n exported = tempfile.mkdtemp()\n source.export(exported)\n import xarray as xr\n assert xr.open_dataset(exported, engine='zarr')\n",
"<import token>\n<assignment token>\n\n\[email protected]('im', [[[1, 2], [3, 4]], [[1, 2, 7], [3, 4, 6]], [\n [1, 2, 7], [3, 4, 6], [5, 6, 8]], [[1, 2], [3, 4], [5, 6], [7, 8]]])\ndef test_coerce_shape_2d_trim_only(im):\n shape = 2, 2\n array = np.array(im)\n expected = np.array([[1, 2], [3, 4]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_2d_pad_only():\n shape = 3, 4\n array = np.array([[1, 2], [3, 4]])\n expected = np.array([[1, 2, 0, 0], [3, 4, 0, 0], [0, 0, 0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_2d_pad_nrows_and_trim_ncols():\n shape = 4, 2\n array = np.array([[1, 2, 7], [3, 4, 6]])\n expected = np.array([[1, 2], [3, 4], [0, 0], [0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_2d_pad_ncols_and_trim_nrows():\n shape = 2, 4\n array = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])\n expected = np.array([[1, 2, 0, 0], [3, 4, 0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_3d_no_change():\n shape = 3, 3\n array = np.arange(3 ** 3).reshape(3, 3, 3)\n actual = _coerce_shape(array, shape)\n assert (array == actual).all()\n\n\ndef test_coerce_shape_3d_pad_nrows_and_trim_ncols():\n shape = 5, 2\n array = np.arange(2 * 4 * 3).reshape(2, 4, 3)\n expected = np.array([[[0, 1, 2], [3, 4, 5]], [[12, 13, 14], [15, 16, 17\n ]], [[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0,\n 0, 0]]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\n<function token>\n<function token>\n\n\ndef test_coerce_shape_array_non_int():\n shape = 2, 3\n array = np.random.random((3, 2))\n expected = np.append(array[:2, :], [[0], [0]], axis=1)\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n assert expected.dtype == 'float'\n\n\ndef test_read_image():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')\n source = ImageSource(urlpath=urlpath)\n array = source.read()\n assert array.shape == (256, 252, 3)\n assert array.dtype == np.uint8\n\n\ndef test_read_image_and_exif():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')\n source = ImageSource(urlpath=urlpath, exif_tags=True)\n ds = source.read()\n assert ds['raster'].shape == (256, 252, 3)\n assert ds['raster'].dtype == np.uint8\n assert ds['EXIF Image ImageWidth'].item().values == [252]\n assert ds['EXIF Image ImageLength'].item().values == [256]\n\n\ndef test_read_image_and_given_exif_tag():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')\n source = ImageSource(urlpath=urlpath, exif_tags=['Image ImageWidth'])\n ds = source.read()\n assert ds['raster'].shape == (256, 252, 3)\n assert ds['raster'].dtype == np.uint8\n assert ds['EXIF Image ImageWidth'].item().values == [252]\n with pytest.raises(KeyError):\n ds['EXIF Image ImageLength']\n\n\ndef test_read_images_as_glob_without_coerce_raises_error():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath)\n with pytest.raises(ValueError, match='could not broadcast input array'):\n source.read()\n\n\ndef test_read_images_as_glob_with_coerce():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath, coerce_shape=(256, 256))\n array = 
source.read()\n assert array.shape == (3, 256, 256, 3)\n\n\ndef test_read_images_and_exif_as_glob_with_coerce():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath, coerce_shape=(256, 256),\n exif_tags=True)\n ds = source.read()\n assert ds['raster'].shape == (3, 256, 256, 3)\n assert ds['EXIF Image ImageWidth'].shape == (3,)\n\n\ndef test_read_images_and_persist():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath, coerce_shape=(256, 256))\n import tempfile\n exported = tempfile.mkdtemp()\n source.export(exported)\n import xarray as xr\n assert xr.open_dataset(exported, engine='zarr')\n",
"<import token>\n<assignment token>\n\n\[email protected]('im', [[[1, 2], [3, 4]], [[1, 2, 7], [3, 4, 6]], [\n [1, 2, 7], [3, 4, 6], [5, 6, 8]], [[1, 2], [3, 4], [5, 6], [7, 8]]])\ndef test_coerce_shape_2d_trim_only(im):\n shape = 2, 2\n array = np.array(im)\n expected = np.array([[1, 2], [3, 4]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_2d_pad_only():\n shape = 3, 4\n array = np.array([[1, 2], [3, 4]])\n expected = np.array([[1, 2, 0, 0], [3, 4, 0, 0], [0, 0, 0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_2d_pad_nrows_and_trim_ncols():\n shape = 4, 2\n array = np.array([[1, 2, 7], [3, 4, 6]])\n expected = np.array([[1, 2], [3, 4], [0, 0], [0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_2d_pad_ncols_and_trim_nrows():\n shape = 2, 4\n array = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])\n expected = np.array([[1, 2, 0, 0], [3, 4, 0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_3d_no_change():\n shape = 3, 3\n array = np.arange(3 ** 3).reshape(3, 3, 3)\n actual = _coerce_shape(array, shape)\n assert (array == actual).all()\n\n\ndef test_coerce_shape_3d_pad_nrows_and_trim_ncols():\n shape = 5, 2\n array = np.arange(2 * 4 * 3).reshape(2, 4, 3)\n expected = np.array([[[0, 1, 2], [3, 4, 5]], [[12, 13, 14], [15, 16, 17\n ]], [[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0,\n 0, 0]]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\n<function token>\n<function token>\n\n\ndef test_coerce_shape_array_non_int():\n shape = 2, 3\n array = np.random.random((3, 2))\n expected = np.append(array[:2, :], [[0], [0]], axis=1)\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n assert expected.dtype == 'float'\n\n\ndef test_read_image():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')\n source = ImageSource(urlpath=urlpath)\n array = source.read()\n assert array.shape == (256, 252, 3)\n assert array.dtype == np.uint8\n\n\ndef test_read_image_and_exif():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')\n source = ImageSource(urlpath=urlpath, exif_tags=True)\n ds = source.read()\n assert ds['raster'].shape == (256, 252, 3)\n assert ds['raster'].dtype == np.uint8\n assert ds['EXIF Image ImageWidth'].item().values == [252]\n assert ds['EXIF Image ImageLength'].item().values == [256]\n\n\ndef test_read_image_and_given_exif_tag():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')\n source = ImageSource(urlpath=urlpath, exif_tags=['Image ImageWidth'])\n ds = source.read()\n assert ds['raster'].shape == (256, 252, 3)\n assert ds['raster'].dtype == np.uint8\n assert ds['EXIF Image ImageWidth'].item().values == [252]\n with pytest.raises(KeyError):\n ds['EXIF Image ImageLength']\n\n\ndef test_read_images_as_glob_without_coerce_raises_error():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath)\n with pytest.raises(ValueError, match='could not broadcast input array'):\n source.read()\n\n\ndef test_read_images_as_glob_with_coerce():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath, coerce_shape=(256, 256))\n array = 
source.read()\n assert array.shape == (3, 256, 256, 3)\n\n\n<function token>\n\n\ndef test_read_images_and_persist():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath, coerce_shape=(256, 256))\n import tempfile\n exported = tempfile.mkdtemp()\n source.export(exported)\n import xarray as xr\n assert xr.open_dataset(exported, engine='zarr')\n",
"<import token>\n<assignment token>\n\n\[email protected]('im', [[[1, 2], [3, 4]], [[1, 2, 7], [3, 4, 6]], [\n [1, 2, 7], [3, 4, 6], [5, 6, 8]], [[1, 2], [3, 4], [5, 6], [7, 8]]])\ndef test_coerce_shape_2d_trim_only(im):\n shape = 2, 2\n array = np.array(im)\n expected = np.array([[1, 2], [3, 4]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_2d_pad_only():\n shape = 3, 4\n array = np.array([[1, 2], [3, 4]])\n expected = np.array([[1, 2, 0, 0], [3, 4, 0, 0], [0, 0, 0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\n<function token>\n\n\ndef test_coerce_shape_2d_pad_ncols_and_trim_nrows():\n shape = 2, 4\n array = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])\n expected = np.array([[1, 2, 0, 0], [3, 4, 0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_3d_no_change():\n shape = 3, 3\n array = np.arange(3 ** 3).reshape(3, 3, 3)\n actual = _coerce_shape(array, shape)\n assert (array == actual).all()\n\n\ndef test_coerce_shape_3d_pad_nrows_and_trim_ncols():\n shape = 5, 2\n array = np.arange(2 * 4 * 3).reshape(2, 4, 3)\n expected = np.array([[[0, 1, 2], [3, 4, 5]], [[12, 13, 14], [15, 16, 17\n ]], [[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0,\n 0, 0]]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\n<function token>\n<function token>\n\n\ndef test_coerce_shape_array_non_int():\n shape = 2, 3\n array = np.random.random((3, 2))\n expected = np.append(array[:2, :], [[0], [0]], axis=1)\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n assert expected.dtype == 'float'\n\n\ndef test_read_image():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')\n source = ImageSource(urlpath=urlpath)\n array = source.read()\n assert array.shape == (256, 252, 3)\n assert array.dtype == np.uint8\n\n\ndef test_read_image_and_exif():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')\n source = ImageSource(urlpath=urlpath, exif_tags=True)\n ds = source.read()\n assert ds['raster'].shape == (256, 252, 3)\n assert ds['raster'].dtype == np.uint8\n assert ds['EXIF Image ImageWidth'].item().values == [252]\n assert ds['EXIF Image ImageLength'].item().values == [256]\n\n\ndef test_read_image_and_given_exif_tag():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')\n source = ImageSource(urlpath=urlpath, exif_tags=['Image ImageWidth'])\n ds = source.read()\n assert ds['raster'].shape == (256, 252, 3)\n assert ds['raster'].dtype == np.uint8\n assert ds['EXIF Image ImageWidth'].item().values == [252]\n with pytest.raises(KeyError):\n ds['EXIF Image ImageLength']\n\n\ndef test_read_images_as_glob_without_coerce_raises_error():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath)\n with pytest.raises(ValueError, match='could not broadcast input array'):\n source.read()\n\n\ndef test_read_images_as_glob_with_coerce():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath, coerce_shape=(256, 256))\n array = source.read()\n assert array.shape == (3, 256, 256, 3)\n\n\n<function token>\n\n\ndef test_read_images_and_persist():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = 
ImageSource(urlpath=urlpath, coerce_shape=(256, 256))\n import tempfile\n exported = tempfile.mkdtemp()\n source.export(exported)\n import xarray as xr\n assert xr.open_dataset(exported, engine='zarr')\n",
"<import token>\n<assignment token>\n\n\[email protected]('im', [[[1, 2], [3, 4]], [[1, 2, 7], [3, 4, 6]], [\n [1, 2, 7], [3, 4, 6], [5, 6, 8]], [[1, 2], [3, 4], [5, 6], [7, 8]]])\ndef test_coerce_shape_2d_trim_only(im):\n shape = 2, 2\n array = np.array(im)\n expected = np.array([[1, 2], [3, 4]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_2d_pad_only():\n shape = 3, 4\n array = np.array([[1, 2], [3, 4]])\n expected = np.array([[1, 2, 0, 0], [3, 4, 0, 0], [0, 0, 0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\n<function token>\n\n\ndef test_coerce_shape_2d_pad_ncols_and_trim_nrows():\n shape = 2, 4\n array = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])\n expected = np.array([[1, 2, 0, 0], [3, 4, 0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_3d_no_change():\n shape = 3, 3\n array = np.arange(3 ** 3).reshape(3, 3, 3)\n actual = _coerce_shape(array, shape)\n assert (array == actual).all()\n\n\ndef test_coerce_shape_3d_pad_nrows_and_trim_ncols():\n shape = 5, 2\n array = np.arange(2 * 4 * 3).reshape(2, 4, 3)\n expected = np.array([[[0, 1, 2], [3, 4, 5]], [[12, 13, 14], [15, 16, 17\n ]], [[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0,\n 0, 0]]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\n<function token>\n<function token>\n\n\ndef test_coerce_shape_array_non_int():\n shape = 2, 3\n array = np.random.random((3, 2))\n expected = np.append(array[:2, :], [[0], [0]], axis=1)\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n assert expected.dtype == 'float'\n\n\ndef test_read_image():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')\n source = ImageSource(urlpath=urlpath)\n array = source.read()\n assert array.shape == (256, 252, 3)\n assert array.dtype == np.uint8\n\n\n<function token>\n\n\ndef test_read_image_and_given_exif_tag():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')\n source = ImageSource(urlpath=urlpath, exif_tags=['Image ImageWidth'])\n ds = source.read()\n assert ds['raster'].shape == (256, 252, 3)\n assert ds['raster'].dtype == np.uint8\n assert ds['EXIF Image ImageWidth'].item().values == [252]\n with pytest.raises(KeyError):\n ds['EXIF Image ImageLength']\n\n\ndef test_read_images_as_glob_without_coerce_raises_error():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath)\n with pytest.raises(ValueError, match='could not broadcast input array'):\n source.read()\n\n\ndef test_read_images_as_glob_with_coerce():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath, coerce_shape=(256, 256))\n array = source.read()\n assert array.shape == (3, 256, 256, 3)\n\n\n<function token>\n\n\ndef test_read_images_and_persist():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath, coerce_shape=(256, 256))\n import tempfile\n exported = tempfile.mkdtemp()\n source.export(exported)\n import xarray as xr\n assert xr.open_dataset(exported, engine='zarr')\n",
"<import token>\n<assignment token>\n\n\[email protected]('im', [[[1, 2], [3, 4]], [[1, 2, 7], [3, 4, 6]], [\n [1, 2, 7], [3, 4, 6], [5, 6, 8]], [[1, 2], [3, 4], [5, 6], [7, 8]]])\ndef test_coerce_shape_2d_trim_only(im):\n shape = 2, 2\n array = np.array(im)\n expected = np.array([[1, 2], [3, 4]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_2d_pad_only():\n shape = 3, 4\n array = np.array([[1, 2], [3, 4]])\n expected = np.array([[1, 2, 0, 0], [3, 4, 0, 0], [0, 0, 0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\n<function token>\n\n\ndef test_coerce_shape_2d_pad_ncols_and_trim_nrows():\n shape = 2, 4\n array = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])\n expected = np.array([[1, 2, 0, 0], [3, 4, 0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_3d_no_change():\n shape = 3, 3\n array = np.arange(3 ** 3).reshape(3, 3, 3)\n actual = _coerce_shape(array, shape)\n assert (array == actual).all()\n\n\ndef test_coerce_shape_3d_pad_nrows_and_trim_ncols():\n shape = 5, 2\n array = np.arange(2 * 4 * 3).reshape(2, 4, 3)\n expected = np.array([[[0, 1, 2], [3, 4, 5]], [[12, 13, 14], [15, 16, 17\n ]], [[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0,\n 0, 0]]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\n<function token>\n<function token>\n\n\ndef test_coerce_shape_array_non_int():\n shape = 2, 3\n array = np.random.random((3, 2))\n expected = np.append(array[:2, :], [[0], [0]], axis=1)\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n assert expected.dtype == 'float'\n\n\ndef test_read_image():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')\n source = ImageSource(urlpath=urlpath)\n array = source.read()\n assert array.shape == (256, 252, 3)\n assert array.dtype == np.uint8\n\n\n<function token>\n\n\ndef test_read_image_and_given_exif_tag():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')\n source = ImageSource(urlpath=urlpath, exif_tags=['Image ImageWidth'])\n ds = source.read()\n assert ds['raster'].shape == (256, 252, 3)\n assert ds['raster'].dtype == np.uint8\n assert ds['EXIF Image ImageWidth'].item().values == [252]\n with pytest.raises(KeyError):\n ds['EXIF Image ImageLength']\n\n\ndef test_read_images_as_glob_without_coerce_raises_error():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath)\n with pytest.raises(ValueError, match='could not broadcast input array'):\n source.read()\n\n\ndef test_read_images_as_glob_with_coerce():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath, coerce_shape=(256, 256))\n array = source.read()\n assert array.shape == (3, 256, 256, 3)\n\n\n<function token>\n<function token>\n",
"<import token>\n<assignment token>\n\n\[email protected]('im', [[[1, 2], [3, 4]], [[1, 2, 7], [3, 4, 6]], [\n [1, 2, 7], [3, 4, 6], [5, 6, 8]], [[1, 2], [3, 4], [5, 6], [7, 8]]])\ndef test_coerce_shape_2d_trim_only(im):\n shape = 2, 2\n array = np.array(im)\n expected = np.array([[1, 2], [3, 4]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_2d_pad_only():\n shape = 3, 4\n array = np.array([[1, 2], [3, 4]])\n expected = np.array([[1, 2, 0, 0], [3, 4, 0, 0], [0, 0, 0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\n<function token>\n\n\ndef test_coerce_shape_2d_pad_ncols_and_trim_nrows():\n shape = 2, 4\n array = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])\n expected = np.array([[1, 2, 0, 0], [3, 4, 0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_3d_no_change():\n shape = 3, 3\n array = np.arange(3 ** 3).reshape(3, 3, 3)\n actual = _coerce_shape(array, shape)\n assert (array == actual).all()\n\n\ndef test_coerce_shape_3d_pad_nrows_and_trim_ncols():\n shape = 5, 2\n array = np.arange(2 * 4 * 3).reshape(2, 4, 3)\n expected = np.array([[[0, 1, 2], [3, 4, 5]], [[12, 13, 14], [15, 16, 17\n ]], [[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0,\n 0, 0]]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\n<function token>\n<function token>\n\n\ndef test_coerce_shape_array_non_int():\n shape = 2, 3\n array = np.random.random((3, 2))\n expected = np.append(array[:2, :], [[0], [0]], axis=1)\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n assert expected.dtype == 'float'\n\n\n<function token>\n<function token>\n\n\ndef test_read_image_and_given_exif_tag():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')\n source = ImageSource(urlpath=urlpath, exif_tags=['Image ImageWidth'])\n ds = source.read()\n assert ds['raster'].shape == (256, 252, 3)\n assert ds['raster'].dtype == np.uint8\n assert ds['EXIF Image ImageWidth'].item().values == [252]\n with pytest.raises(KeyError):\n ds['EXIF Image ImageLength']\n\n\ndef test_read_images_as_glob_without_coerce_raises_error():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath)\n with pytest.raises(ValueError, match='could not broadcast input array'):\n source.read()\n\n\ndef test_read_images_as_glob_with_coerce():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath, coerce_shape=(256, 256))\n array = source.read()\n assert array.shape == (3, 256, 256, 3)\n\n\n<function token>\n<function token>\n",
"<import token>\n<assignment token>\n<function token>\n\n\ndef test_coerce_shape_2d_pad_only():\n shape = 3, 4\n array = np.array([[1, 2], [3, 4]])\n expected = np.array([[1, 2, 0, 0], [3, 4, 0, 0], [0, 0, 0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\n<function token>\n\n\ndef test_coerce_shape_2d_pad_ncols_and_trim_nrows():\n shape = 2, 4\n array = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])\n expected = np.array([[1, 2, 0, 0], [3, 4, 0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_3d_no_change():\n shape = 3, 3\n array = np.arange(3 ** 3).reshape(3, 3, 3)\n actual = _coerce_shape(array, shape)\n assert (array == actual).all()\n\n\ndef test_coerce_shape_3d_pad_nrows_and_trim_ncols():\n shape = 5, 2\n array = np.arange(2 * 4 * 3).reshape(2, 4, 3)\n expected = np.array([[[0, 1, 2], [3, 4, 5]], [[12, 13, 14], [15, 16, 17\n ]], [[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0,\n 0, 0]]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\n<function token>\n<function token>\n\n\ndef test_coerce_shape_array_non_int():\n shape = 2, 3\n array = np.random.random((3, 2))\n expected = np.append(array[:2, :], [[0], [0]], axis=1)\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n assert expected.dtype == 'float'\n\n\n<function token>\n<function token>\n\n\ndef test_read_image_and_given_exif_tag():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')\n source = ImageSource(urlpath=urlpath, exif_tags=['Image ImageWidth'])\n ds = source.read()\n assert ds['raster'].shape == (256, 252, 3)\n assert ds['raster'].dtype == np.uint8\n assert ds['EXIF Image ImageWidth'].item().values == [252]\n with pytest.raises(KeyError):\n ds['EXIF Image ImageLength']\n\n\ndef test_read_images_as_glob_without_coerce_raises_error():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath)\n with pytest.raises(ValueError, match='could not broadcast input array'):\n source.read()\n\n\ndef test_read_images_as_glob_with_coerce():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath, coerce_shape=(256, 256))\n array = source.read()\n assert array.shape == (3, 256, 256, 3)\n\n\n<function token>\n<function token>\n",
"<import token>\n<assignment token>\n<function token>\n\n\ndef test_coerce_shape_2d_pad_only():\n shape = 3, 4\n array = np.array([[1, 2], [3, 4]])\n expected = np.array([[1, 2, 0, 0], [3, 4, 0, 0], [0, 0, 0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\n<function token>\n\n\ndef test_coerce_shape_2d_pad_ncols_and_trim_nrows():\n shape = 2, 4\n array = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])\n expected = np.array([[1, 2, 0, 0], [3, 4, 0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_3d_no_change():\n shape = 3, 3\n array = np.arange(3 ** 3).reshape(3, 3, 3)\n actual = _coerce_shape(array, shape)\n assert (array == actual).all()\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef test_coerce_shape_array_non_int():\n shape = 2, 3\n array = np.random.random((3, 2))\n expected = np.append(array[:2, :], [[0], [0]], axis=1)\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n assert expected.dtype == 'float'\n\n\n<function token>\n<function token>\n\n\ndef test_read_image_and_given_exif_tag():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')\n source = ImageSource(urlpath=urlpath, exif_tags=['Image ImageWidth'])\n ds = source.read()\n assert ds['raster'].shape == (256, 252, 3)\n assert ds['raster'].dtype == np.uint8\n assert ds['EXIF Image ImageWidth'].item().values == [252]\n with pytest.raises(KeyError):\n ds['EXIF Image ImageLength']\n\n\ndef test_read_images_as_glob_without_coerce_raises_error():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath)\n with pytest.raises(ValueError, match='could not broadcast input array'):\n source.read()\n\n\ndef test_read_images_as_glob_with_coerce():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath, coerce_shape=(256, 256))\n array = source.read()\n assert array.shape == (3, 256, 256, 3)\n\n\n<function token>\n<function token>\n",
"<import token>\n<assignment token>\n<function token>\n\n\ndef test_coerce_shape_2d_pad_only():\n shape = 3, 4\n array = np.array([[1, 2], [3, 4]])\n expected = np.array([[1, 2, 0, 0], [3, 4, 0, 0], [0, 0, 0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\n<function token>\n\n\ndef test_coerce_shape_2d_pad_ncols_and_trim_nrows():\n shape = 2, 4\n array = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])\n expected = np.array([[1, 2, 0, 0], [3, 4, 0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_3d_no_change():\n shape = 3, 3\n array = np.arange(3 ** 3).reshape(3, 3, 3)\n actual = _coerce_shape(array, shape)\n assert (array == actual).all()\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef test_coerce_shape_array_non_int():\n shape = 2, 3\n array = np.random.random((3, 2))\n expected = np.append(array[:2, :], [[0], [0]], axis=1)\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n assert expected.dtype == 'float'\n\n\n<function token>\n<function token>\n\n\ndef test_read_image_and_given_exif_tag():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')\n source = ImageSource(urlpath=urlpath, exif_tags=['Image ImageWidth'])\n ds = source.read()\n assert ds['raster'].shape == (256, 252, 3)\n assert ds['raster'].dtype == np.uint8\n assert ds['EXIF Image ImageWidth'].item().values == [252]\n with pytest.raises(KeyError):\n ds['EXIF Image ImageLength']\n\n\n<function token>\n\n\ndef test_read_images_as_glob_with_coerce():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath, coerce_shape=(256, 256))\n array = source.read()\n assert array.shape == (3, 256, 256, 3)\n\n\n<function token>\n<function token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_coerce_shape_2d_pad_ncols_and_trim_nrows():\n shape = 2, 4\n array = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])\n expected = np.array([[1, 2, 0, 0], [3, 4, 0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_3d_no_change():\n shape = 3, 3\n array = np.arange(3 ** 3).reshape(3, 3, 3)\n actual = _coerce_shape(array, shape)\n assert (array == actual).all()\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef test_coerce_shape_array_non_int():\n shape = 2, 3\n array = np.random.random((3, 2))\n expected = np.append(array[:2, :], [[0], [0]], axis=1)\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n assert expected.dtype == 'float'\n\n\n<function token>\n<function token>\n\n\ndef test_read_image_and_given_exif_tag():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')\n source = ImageSource(urlpath=urlpath, exif_tags=['Image ImageWidth'])\n ds = source.read()\n assert ds['raster'].shape == (256, 252, 3)\n assert ds['raster'].dtype == np.uint8\n assert ds['EXIF Image ImageWidth'].item().values == [252]\n with pytest.raises(KeyError):\n ds['EXIF Image ImageLength']\n\n\n<function token>\n\n\ndef test_read_images_as_glob_with_coerce():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', '*')\n source = ImageSource(urlpath=urlpath, coerce_shape=(256, 256))\n array = source.read()\n assert array.shape == (3, 256, 256, 3)\n\n\n<function token>\n<function token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_coerce_shape_2d_pad_ncols_and_trim_nrows():\n shape = 2, 4\n array = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])\n expected = np.array([[1, 2, 0, 0], [3, 4, 0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\ndef test_coerce_shape_3d_no_change():\n shape = 3, 3\n array = np.arange(3 ** 3).reshape(3, 3, 3)\n actual = _coerce_shape(array, shape)\n assert (array == actual).all()\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef test_coerce_shape_array_non_int():\n shape = 2, 3\n array = np.random.random((3, 2))\n expected = np.append(array[:2, :], [[0], [0]], axis=1)\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n assert expected.dtype == 'float'\n\n\n<function token>\n<function token>\n\n\ndef test_read_image_and_given_exif_tag():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')\n source = ImageSource(urlpath=urlpath, exif_tags=['Image ImageWidth'])\n ds = source.read()\n assert ds['raster'].shape == (256, 252, 3)\n assert ds['raster'].dtype == np.uint8\n assert ds['EXIF Image ImageWidth'].item().values == [252]\n with pytest.raises(KeyError):\n ds['EXIF Image ImageLength']\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_coerce_shape_2d_pad_ncols_and_trim_nrows():\n shape = 2, 4\n array = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])\n expected = np.array([[1, 2, 0, 0], [3, 4, 0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_coerce_shape_array_non_int():\n shape = 2, 3\n array = np.random.random((3, 2))\n expected = np.append(array[:2, :], [[0], [0]], axis=1)\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n assert expected.dtype == 'float'\n\n\n<function token>\n<function token>\n\n\ndef test_read_image_and_given_exif_tag():\n pytest.importorskip('skimage')\n urlpath = os.path.join(here, 'data', 'images', 'beach57.tif')\n source = ImageSource(urlpath=urlpath, exif_tags=['Image ImageWidth'])\n ds = source.read()\n assert ds['raster'].shape == (256, 252, 3)\n assert ds['raster'].dtype == np.uint8\n assert ds['EXIF Image ImageWidth'].item().values == [252]\n with pytest.raises(KeyError):\n ds['EXIF Image ImageLength']\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_coerce_shape_2d_pad_ncols_and_trim_nrows():\n shape = 2, 4\n array = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])\n expected = np.array([[1, 2, 0, 0], [3, 4, 0, 0]])\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_coerce_shape_array_non_int():\n shape = 2, 3\n array = np.random.random((3, 2))\n expected = np.append(array[:2, :], [[0], [0]], axis=1)\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n assert expected.dtype == 'float'\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_coerce_shape_array_non_int():\n shape = 2, 3\n array = np.random.random((3, 2))\n expected = np.append(array[:2, :], [[0], [0]], axis=1)\n actual = _coerce_shape(array, shape)\n assert (expected == actual).all()\n assert expected.dtype == 'float'\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
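The row above is a pytest module exercising intake-xarray's ImageSource and its _coerce_shape helper. The expected arrays in those tests pin down a simple contract: trim the first two axes down to the requested shape, zero-pad whatever is missing, keep any trailing axes and the dtype, and reject a target shape whose length is not 2. A minimal sketch of that contract follows; the function name and body are illustrative, not the library's actual implementation.

import numpy as np

def coerce_shape_sketch(array, shape):
    # Reject anything that is not a 2-element target shape, as the tests expect.
    if len(shape) != 2:
        raise ValueError("coerce_shape must be an iterable of len 2")
    # A zero-filled target keeps trailing axes (e.g. RGB channels) and the dtype.
    out = np.zeros(tuple(shape) + array.shape[2:], dtype=array.dtype)
    rows = min(shape[0], array.shape[0])
    cols = min(shape[1], array.shape[1])
    out[:rows, :cols, ...] = array[:rows, :cols, ...]
    return out

# Reproduces test_coerce_shape_2d_pad_ncols_and_trim_nrows from the row above.
assert (coerce_shape_sketch(np.array([[1, 2], [3, 4], [5, 6], [7, 8]]), (2, 4))
        == np.array([[1, 2, 0, 0], [3, 4, 0, 0]])).all()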
98,538 |
82bc202ddbec75ffadc6016204cb38fa6e72a4cb
|
from middlewared.alert.base import Alert, AlertLevel, OneShotAlertSource
class SMARTAlertSource(OneShotAlertSource):
level = AlertLevel.CRITICAL
title = "SMART error"
hardware = True
async def create(self, args):
if not args["device"].startswith("/dev/"):
args["device"] = f"/dev/{args['device']}"
return Alert("%(message)s", args)
async def delete(self, alerts, query):
device = query
if not device.startswith("/dev/"):
device = f"/dev/{device}"
return list(filter(
lambda alert: alert.args["device"] != device,
alerts
))
|
[
"from middlewared.alert.base import Alert, AlertLevel, OneShotAlertSource\n\n\nclass SMARTAlertSource(OneShotAlertSource):\n level = AlertLevel.CRITICAL\n title = \"SMART error\"\n\n hardware = True\n\n async def create(self, args):\n if not args[\"device\"].startswith(\"/dev/\"):\n args[\"device\"] = f\"/dev/{args['device']}\"\n\n return Alert(\"%(message)s\", args)\n\n async def delete(self, alerts, query):\n device = query\n\n if not device.startswith(\"/dev/\"):\n device = f\"/dev/{device}\"\n\n return list(filter(\n lambda alert: alert.args[\"device\"] != device,\n alerts\n ))\n",
"from middlewared.alert.base import Alert, AlertLevel, OneShotAlertSource\n\n\nclass SMARTAlertSource(OneShotAlertSource):\n level = AlertLevel.CRITICAL\n title = 'SMART error'\n hardware = True\n\n async def create(self, args):\n if not args['device'].startswith('/dev/'):\n args['device'] = f\"/dev/{args['device']}\"\n return Alert('%(message)s', args)\n\n async def delete(self, alerts, query):\n device = query\n if not device.startswith('/dev/'):\n device = f'/dev/{device}'\n return list(filter(lambda alert: alert.args['device'] != device,\n alerts))\n",
"<import token>\n\n\nclass SMARTAlertSource(OneShotAlertSource):\n level = AlertLevel.CRITICAL\n title = 'SMART error'\n hardware = True\n\n async def create(self, args):\n if not args['device'].startswith('/dev/'):\n args['device'] = f\"/dev/{args['device']}\"\n return Alert('%(message)s', args)\n\n async def delete(self, alerts, query):\n device = query\n if not device.startswith('/dev/'):\n device = f'/dev/{device}'\n return list(filter(lambda alert: alert.args['device'] != device,\n alerts))\n",
"<import token>\n\n\nclass SMARTAlertSource(OneShotAlertSource):\n <assignment token>\n <assignment token>\n <assignment token>\n\n async def create(self, args):\n if not args['device'].startswith('/dev/'):\n args['device'] = f\"/dev/{args['device']}\"\n return Alert('%(message)s', args)\n\n async def delete(self, alerts, query):\n device = query\n if not device.startswith('/dev/'):\n device = f'/dev/{device}'\n return list(filter(lambda alert: alert.args['device'] != device,\n alerts))\n",
"<import token>\n<class token>\n"
] | false |
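In the SMARTAlertSource row above, both create() and delete() accept a disk name with or without the /dev/ prefix and normalise it before use, so a one-shot alert raised for "ada0" can later be dismissed by querying either "ada0" or "/dev/ada0". A standalone illustration of that normalise-then-filter logic, using plain dicts in place of middlewared Alert objects:

def normalize(device):
    # Same prefix handling as create()/delete() in the class above.
    return device if device.startswith("/dev/") else f"/dev/{device}"

alerts = [
    {"device": "/dev/ada0", "message": "read failure"},
    {"device": "/dev/ada1", "message": "reallocated sectors"},
]

# delete() keeps every alert whose device does NOT match the normalised query.
remaining = [a for a in alerts if a["device"] != normalize("ada0")]
assert remaining == [{"device": "/dev/ada1", "message": "reallocated sectors"}]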
98,539 |
cb9af82cb50e6475912f2caa32e1e638247f6888
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
import requesocks
import stem.process
from stem.util import term
SOCKS_PORT = 5090
session = requesocks.session()
session.proxies = {
'http': 'socks5://127.0.0.1:5090',
'https': 'socks5://127.0.0.1:5090'
}
def connection():
r = requests.get("https://www.atagar.com/echo.php")
resp = r.content
return resp
def query(url):
r = session.get(url, verify=False)
resp = r.content
return resp
def print_bootstrap_lines(line):
if "Bootstrapped " in line:
print(term.format(" "+line, term.Color.BLUE))
def startTor():
print(term.format(" [+] Starting Tor Connection", term.Attr.BOLD))
global tor_process
tor_process = stem.process.launch_tor_with_config(
config = {
'SocksPort': str(SOCKS_PORT),
'ExitNodes': '{ru}',
},
init_msg_handler = print_bootstrap_lines,
)
def stopTor():
tor_process.kill()
def main():
try:
startTor()
print(term.format(" [+] Checking Endpoint...", term.Attr.BOLD))
ExternalIP = connection()
TORIP = query("https://www.atagar.com/echo.php")
if ExternalIP == TORIP:
print(term.format(" [!] FAILED TO CONNECT TO TOR", term.Color.RED))
exit(1)
else: print(term.format(" [+] Successfully Connected to TOR", term.Attr.BOLD))
## ADD STEPS HERE
stopTor() # stops tor
except Exception as e:
print "ERROR: {error}".format(error=e)
stopTor() # stops tor
if __name__=="__main__":
main()
|
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport requests\nimport requesocks\nimport stem.process\nfrom stem.util import term\n\nSOCKS_PORT = 5090\n\nsession = requesocks.session()\nsession.proxies = {\n 'http': 'socks5://127.0.0.1:5090',\n 'https': 'socks5://127.0.0.1:5090'\n}\n\ndef connection():\n\tr = requests.get(\"https://www.atagar.com/echo.php\")\n\tresp = r.content\n\treturn resp\n\ndef query(url):\n\tr = session.get(url, verify=False)\n\tresp = r.content\n\treturn resp\n\ndef print_bootstrap_lines(line):\n\tif \"Bootstrapped \" in line:\n\t\tprint(term.format(\" \"+line, term.Color.BLUE))\n\ndef startTor():\n\tprint(term.format(\" [+] Starting Tor Connection\", term.Attr.BOLD))\n\tglobal tor_process\n\ttor_process = stem.process.launch_tor_with_config(\n\t\tconfig = {\n\t\t\t'SocksPort': str(SOCKS_PORT),\n\t\t\t'ExitNodes': '{ru}',\n\t\t},\n\t\tinit_msg_handler = print_bootstrap_lines,\n\t)\n\ndef stopTor():\n\ttor_process.kill() \n\t\ndef main():\n try:\n\t\tstartTor()\n\t\tprint(term.format(\" [+] Checking Endpoint...\", term.Attr.BOLD))\n\t\tExternalIP = connection()\n\t\tTORIP = query(\"https://www.atagar.com/echo.php\")\n\t\tif ExternalIP == TORIP: \n\t\t print(term.format(\" [!] FAILED TO CONNECT TO TOR\", term.Color.RED))\n\t\t exit(1)\n\t\telse: print(term.format(\" [+] Successfully Connected to TOR\", term.Attr.BOLD))\n\t\t## ADD STEPS HERE\n\t\tstopTor() # stops tor\n\t\t\n\texcept Exception as e:\n\t\tprint \"ERROR: {error}\".format(error=e)\n\t\tstopTor() # stops tor\n\t\t\nif __name__==\"__main__\":\n\tmain()\n"
] | true |
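The code in the row above mixes a Python 2 print statement (print "ERROR: ...") with Python 3 print() calls and mixes tabs and spaces inside main(), so it does not parse under Python 3, which is presumably why this row's error flag is true. Below is a hedged Python 3 sketch of the same check: start Tor with stem, fetch the echo endpoint directly and through the SOCKS proxy, and compare the two responses. It assumes requests is installed with SOCKS support (requests[socks]) and a tor binary is on PATH; it is not the original author's code.

import requests
import stem.process

SOCKS_PORT = 5090
ECHO_URL = "https://www.atagar.com/echo.php"
PROXIES = {s: f"socks5h://127.0.0.1:{SOCKS_PORT}" for s in ("http", "https")}

def main():
    # Launch a private tor instance listening on SOCKS_PORT.
    tor_process = stem.process.launch_tor_with_config(
        config={"SocksPort": str(SOCKS_PORT), "ExitNodes": "{ru}"},
    )
    try:
        direct = requests.get(ECHO_URL, timeout=30).text
        via_tor = requests.get(ECHO_URL, proxies=PROXIES, timeout=30).text
        if direct == via_tor:
            print("[!] Failed to connect to Tor")
        else:
            print("[+] Successfully connected to Tor")
    finally:
        tor_process.kill()

if __name__ == "__main__":
    main()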
98,540 |
e37ccf863ec0490fccb26db29603fc1cfb2b865f
|
'''
Created on 13 mai 2018
@author: meo
'''
from Modules.Constantes import s_Pas_Fils, s_Fil_Rouge, s_Fil_Bleu,\
s_Fil_Rouge_Bleu, s_Id_LED, s_Id_Etoile, s_Succes
from Commun.ConstantesMain import s_Connecteur, s_Fils_Complexes
import random
class FilsComplexes:
def __init__(self,p_ser, p_Num_serie, p_Port, p_Nb_Pile, p_Buzzer):
# Contient un tableau de boléen contenant l'état des 5 LEDs
self.m_LEDs = []
#Contient un dico contenant les ids des fils et la couleur des fils branchés
self.m_fils = {'F1' : 0,'F2' : 0,'F3' : 0,'F4' : 0,'F5' : 0}
#Contient un tableau contenant l'état de l'écran (des étoiles)
self.m_etoiles = []
#Numero de serie de la bombe
self.m_serie = p_Num_serie
#Port de la bombe
self.m_Port = p_Port
#Nombre de pile
self.m_Nb_Pile = p_Nb_Pile
#Pointeur vers la communication série
self.m_ser = p_ser
#Booléen indiquant si le module a été initialisé
self.m_init = False
#Contient la matrice des fils qui doivent être débranché
self.m_doit_etre_debranche = []
#Indique si le midlue a ete desamorcé
self.m_module_desamorce = False
#Lien vers le buzzer
self.m_buzzer = p_Buzzer
def GenererScenario(self ):
"""
Genere le scénario
"""
for i in range(5):
self.m_LEDs.append(random.randint(0,1))
self.m_etoiles.append( random.randint(0,1))
"""
Envoi un message a l'arduino pour initialiser les LED/étoiles
"""
l_contenu = []
for i, l_LED in enumerate(self.m_LEDs):
l_contenu.append(s_Id_LED+str(i+1))
l_contenu.append(str(l_LED))
for i, l_Etoile in enumerate(self.m_etoiles):
l_contenu.append(s_Id_Etoile+str(i+1))
l_contenu.append(str(l_Etoile))
self.m_ser.construireMessage(s_Fils_Complexes,l_contenu)
self.m_init = True
for i in range(5):
l_id = "F%d"%(i+1)
self.m_doit_etre_debranche.append(self.DoitEtreDebranche(
self.m_LEDs[i],
self.m_fils[l_id],
self.m_etoiles[i]))
print("Solutions module FCP " , self.m_doit_etre_debranche)
def LireMessage(self, p_Buffer):
"""
Lit le message de l'arduino
"""
if not self.m_module_desamorce :
for i, l_id in enumerate(p_Buffer):
if l_id in self.m_fils.keys():
if not self.m_init:
self.m_fils[l_id] = int(p_Buffer[i+1])
elif int(p_Buffer[i+1]) == 0 :
l_numero_fil = int(l_id[1]) -1
l_Doit_Etre_Debranche = self.m_doit_etre_debranche[l_numero_fil]
self.m_doit_etre_debranche[l_numero_fil] = False
if l_Doit_Etre_Debranche == False:
# ICI faire sonner le buzzer et compter une erreur
self.Faute()
pass
if True not in self.m_doit_etre_debranche :
self.m_module_desamorce = True
self.Succes()
#Fin de la phase d'initialisation, génration de la solution
if 0 not in self.m_fils.values() and not self.m_init:
self.GenererScenario()
def Succes(self):
"""
Méthode chargée d'envoyer le message de succes
"""
l_contenu = []
l_contenu.append(s_Succes)
print(l_contenu)
self.m_ser.construireMessage(s_Fils_Complexes,l_contenu)
def Faute(self):
self.m_buzzer.SonnerErreur()
pass
def DoitEtreDebranche(self, p_etat_LED, p_couleur_fil, p_etat_etoile):
"""
Cette méthode renvoie un booléen true ou false si le fil devait etre debranché
"""
l_retour = False
l_lettre = ''
if p_couleur_fil == s_Fil_Rouge:
if p_etat_LED:
if p_etat_etoile:
l_lettre = 'B'
else:
l_lettre = 'B'
else:
if p_etat_etoile:
l_lettre = 'C'
else:
l_lettre = 'S'
elif p_couleur_fil == s_Fil_Bleu:
if p_etat_LED:
if p_etat_etoile:
l_lettre = 'P'
else:
l_lettre = 'P'
else:
if p_etat_etoile:
l_lettre = 'D'
else:
l_lettre = 'S'
elif p_couleur_fil == s_Fil_Rouge_Bleu:
if p_etat_LED:
if p_etat_etoile:
l_lettre = 'D'
else:
l_lettre = 'S'
else:
if p_etat_etoile:
l_lettre = 'P'
else:
l_lettre = 'S'
if l_lettre == 'C':
l_retour = True
elif l_lettre == 'D':
l_retour = False
elif l_lettre == 'S':
if self.m_serie%2 == 0:
l_retour = True
else:
l_retour = False
elif l_lettre == 'P':
if self.m_Port == s_Connecteur["Con_Pa"]:
l_retour = True
else:
l_retour = False
elif l_lettre == 'B':
if self.m_Nb_Pile >= 2:
l_retour = True
else:
l_retour = False
return l_retour
|
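The DoitEtreDebranche method in the code cell above first maps (wire colour, LED state, star state) to a rule letter and then resolves that letter against the bomb's serial number, port and battery count. A standalone restatement of the decision table, using placeholder names for the s_Fil_* constants since their actual values are not visible in this row:

# 'ROUGE', 'BLEU' and 'ROUGE_BLEU' stand in for s_Fil_Rouge, s_Fil_Bleu and
# s_Fil_Rouge_Bleu; keys are (colour, LED on, star on).
LETTER = {
    ("ROUGE", 1, 1): "B", ("ROUGE", 1, 0): "B", ("ROUGE", 0, 1): "C", ("ROUGE", 0, 0): "S",
    ("BLEU", 1, 1): "P", ("BLEU", 1, 0): "P", ("BLEU", 0, 1): "D", ("BLEU", 0, 0): "S",
    ("ROUGE_BLEU", 1, 1): "D", ("ROUGE_BLEU", 1, 0): "S",
    ("ROUGE_BLEU", 0, 1): "P", ("ROUGE_BLEU", 0, 0): "S",
}

def must_cut(letter, serial, has_parallel_port, batteries):
    # C: always cut, D: never cut, S: cut if the serial number is even,
    # P: cut if a parallel port is present, B: cut if there are >= 2 batteries.
    return {"C": True, "D": False, "S": serial % 2 == 0,
            "P": has_parallel_port, "B": batteries >= 2}[letter]

# Red wire, LED off, star on -> 'C' -> always cut.
assert must_cut(LETTER[("ROUGE", 0, 1)], serial=7, has_parallel_port=False, batteries=1) is True
# Blue wire, LED off, star on -> 'D' -> never cut.
assert must_cut(LETTER[("BLEU", 0, 1)], serial=8, has_parallel_port=True, batteries=3) is False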
[
"'''\nCreated on 13 mai 2018\n\n@author: meo\n'''\nfrom Modules.Constantes import s_Pas_Fils, s_Fil_Rouge, s_Fil_Bleu,\\\n s_Fil_Rouge_Bleu, s_Id_LED, s_Id_Etoile, s_Succes\nfrom Commun.ConstantesMain import s_Connecteur, s_Fils_Complexes\nimport random\n\nclass FilsComplexes:\n def __init__(self,p_ser, p_Num_serie, p_Port, p_Nb_Pile, p_Buzzer):\n # Contient un tableau de boléen contenant l'état des 5 LEDs\n self.m_LEDs = []\n #Contient un dico contenant les ids des fils et la couleur des fils branchés\n self.m_fils = {'F1' : 0,'F2' : 0,'F3' : 0,'F4' : 0,'F5' : 0}\n #Contient un tableau contenant l'état de l'écran (des étoiles)\n self.m_etoiles = []\n #Numero de serie de la bombe\n self.m_serie = p_Num_serie\n #Port de la bombe\n self.m_Port = p_Port\n #Nombre de pile \n self.m_Nb_Pile = p_Nb_Pile\n #Pointeur vers la communication série\n self.m_ser = p_ser\n #Booléen indiquant si le module a été initialisé\n self.m_init = False\n #Contient la matrice des fils qui doivent être débranché\n self.m_doit_etre_debranche = []\n #Indique si le midlue a ete desamorcé\n self.m_module_desamorce = False\n #Lien vers le buzzer\n self.m_buzzer = p_Buzzer\n \n def GenererScenario(self ):\n \"\"\"\n Genere le scénario \n \"\"\"\n for i in range(5):\n self.m_LEDs.append(random.randint(0,1))\n self.m_etoiles.append( random.randint(0,1))\n \n \"\"\"\n Envoi un message a l'arduino pour initialiser les LED/étoiles\n \"\"\"\n l_contenu = []\n for i, l_LED in enumerate(self.m_LEDs):\n l_contenu.append(s_Id_LED+str(i+1))\n l_contenu.append(str(l_LED))\n for i, l_Etoile in enumerate(self.m_etoiles):\n l_contenu.append(s_Id_Etoile+str(i+1))\n l_contenu.append(str(l_Etoile))\n \n self.m_ser.construireMessage(s_Fils_Complexes,l_contenu)\n \n self.m_init = True\n \n for i in range(5):\n l_id = \"F%d\"%(i+1)\n self.m_doit_etre_debranche.append(self.DoitEtreDebranche(\n self.m_LEDs[i], \n self.m_fils[l_id], \n self.m_etoiles[i]))\n \n print(\"Solutions module FCP \" , self.m_doit_etre_debranche)\n \n \n def LireMessage(self, p_Buffer):\n \"\"\"\n Lit le message de l'arduino\n \"\"\"\n if not self.m_module_desamorce :\n \n for i, l_id in enumerate(p_Buffer):\n if l_id in self.m_fils.keys():\n if not self.m_init:\n self.m_fils[l_id] = int(p_Buffer[i+1])\n elif int(p_Buffer[i+1]) == 0 : \n l_numero_fil = int(l_id[1]) -1\n l_Doit_Etre_Debranche = self.m_doit_etre_debranche[l_numero_fil]\n self.m_doit_etre_debranche[l_numero_fil] = False\n \n if l_Doit_Etre_Debranche == False:\n # ICI faire sonner le buzzer et compter une erreur\n self.Faute()\n pass\n if True not in self.m_doit_etre_debranche : \n self.m_module_desamorce = True\n self.Succes()\n \n #Fin de la phase d'initialisation, génration de la solution \n if 0 not in self.m_fils.values() and not self.m_init:\n self.GenererScenario()\n \n def Succes(self):\n \"\"\"\n Méthode chargée d'envoyer le message de succes\n \"\"\"\n l_contenu = []\n l_contenu.append(s_Succes)\n \n print(l_contenu)\n self.m_ser.construireMessage(s_Fils_Complexes,l_contenu)\n \n \n def Faute(self):\n self.m_buzzer.SonnerErreur()\n pass\n \n def DoitEtreDebranche(self, p_etat_LED, p_couleur_fil, p_etat_etoile):\n \"\"\"\n Cette méthode renvoie un booléen true ou false si le fil devait etre debranché\n \"\"\"\n \n l_retour = False\n l_lettre = ''\n if p_couleur_fil == s_Fil_Rouge:\n if p_etat_LED:\n if p_etat_etoile:\n l_lettre = 'B'\n else:\n l_lettre = 'B'\n else:\n if p_etat_etoile:\n l_lettre = 'C'\n else:\n l_lettre = 'S'\n elif p_couleur_fil == s_Fil_Bleu:\n if p_etat_LED:\n if 
p_etat_etoile:\n l_lettre = 'P'\n else:\n l_lettre = 'P'\n else:\n if p_etat_etoile:\n l_lettre = 'D'\n else:\n l_lettre = 'S'\n elif p_couleur_fil == s_Fil_Rouge_Bleu:\n if p_etat_LED:\n if p_etat_etoile:\n l_lettre = 'D'\n else:\n l_lettre = 'S'\n else:\n if p_etat_etoile:\n l_lettre = 'P'\n else:\n l_lettre = 'S'\n \n if l_lettre == 'C':\n l_retour = True\n elif l_lettre == 'D':\n l_retour = False\n elif l_lettre == 'S':\n if self.m_serie%2 == 0:\n l_retour = True\n else:\n l_retour = False\n elif l_lettre == 'P':\n if self.m_Port == s_Connecteur[\"Con_Pa\"]:\n l_retour = True\n else:\n l_retour = False\n elif l_lettre == 'B':\n if self.m_Nb_Pile >= 2: \n l_retour = True\n else:\n l_retour = False\n \n return l_retour\n \n",
"<docstring token>\nfrom Modules.Constantes import s_Pas_Fils, s_Fil_Rouge, s_Fil_Bleu, s_Fil_Rouge_Bleu, s_Id_LED, s_Id_Etoile, s_Succes\nfrom Commun.ConstantesMain import s_Connecteur, s_Fils_Complexes\nimport random\n\n\nclass FilsComplexes:\n\n def __init__(self, p_ser, p_Num_serie, p_Port, p_Nb_Pile, p_Buzzer):\n self.m_LEDs = []\n self.m_fils = {'F1': 0, 'F2': 0, 'F3': 0, 'F4': 0, 'F5': 0}\n self.m_etoiles = []\n self.m_serie = p_Num_serie\n self.m_Port = p_Port\n self.m_Nb_Pile = p_Nb_Pile\n self.m_ser = p_ser\n self.m_init = False\n self.m_doit_etre_debranche = []\n self.m_module_desamorce = False\n self.m_buzzer = p_Buzzer\n\n def GenererScenario(self):\n \"\"\"\n Genere le scénario \n \"\"\"\n for i in range(5):\n self.m_LEDs.append(random.randint(0, 1))\n self.m_etoiles.append(random.randint(0, 1))\n \"\"\"\n Envoi un message a l'arduino pour initialiser les LED/étoiles\n \"\"\"\n l_contenu = []\n for i, l_LED in enumerate(self.m_LEDs):\n l_contenu.append(s_Id_LED + str(i + 1))\n l_contenu.append(str(l_LED))\n for i, l_Etoile in enumerate(self.m_etoiles):\n l_contenu.append(s_Id_Etoile + str(i + 1))\n l_contenu.append(str(l_Etoile))\n self.m_ser.construireMessage(s_Fils_Complexes, l_contenu)\n self.m_init = True\n for i in range(5):\n l_id = 'F%d' % (i + 1)\n self.m_doit_etre_debranche.append(self.DoitEtreDebranche(self.\n m_LEDs[i], self.m_fils[l_id], self.m_etoiles[i]))\n print('Solutions module FCP ', self.m_doit_etre_debranche)\n\n def LireMessage(self, p_Buffer):\n \"\"\"\n Lit le message de l'arduino\n \"\"\"\n if not self.m_module_desamorce:\n for i, l_id in enumerate(p_Buffer):\n if l_id in self.m_fils.keys():\n if not self.m_init:\n self.m_fils[l_id] = int(p_Buffer[i + 1])\n elif int(p_Buffer[i + 1]) == 0:\n l_numero_fil = int(l_id[1]) - 1\n l_Doit_Etre_Debranche = self.m_doit_etre_debranche[\n l_numero_fil]\n self.m_doit_etre_debranche[l_numero_fil] = False\n if l_Doit_Etre_Debranche == False:\n self.Faute()\n pass\n if True not in self.m_doit_etre_debranche:\n self.m_module_desamorce = True\n self.Succes()\n if 0 not in self.m_fils.values() and not self.m_init:\n self.GenererScenario()\n\n def Succes(self):\n \"\"\"\n Méthode chargée d'envoyer le message de succes\n \"\"\"\n l_contenu = []\n l_contenu.append(s_Succes)\n print(l_contenu)\n self.m_ser.construireMessage(s_Fils_Complexes, l_contenu)\n\n def Faute(self):\n self.m_buzzer.SonnerErreur()\n pass\n\n def DoitEtreDebranche(self, p_etat_LED, p_couleur_fil, p_etat_etoile):\n \"\"\"\n Cette méthode renvoie un booléen true ou false si le fil devait etre debranché\n \"\"\"\n l_retour = False\n l_lettre = ''\n if p_couleur_fil == s_Fil_Rouge:\n if p_etat_LED:\n if p_etat_etoile:\n l_lettre = 'B'\n else:\n l_lettre = 'B'\n elif p_etat_etoile:\n l_lettre = 'C'\n else:\n l_lettre = 'S'\n elif p_couleur_fil == s_Fil_Bleu:\n if p_etat_LED:\n if p_etat_etoile:\n l_lettre = 'P'\n else:\n l_lettre = 'P'\n elif p_etat_etoile:\n l_lettre = 'D'\n else:\n l_lettre = 'S'\n elif p_couleur_fil == s_Fil_Rouge_Bleu:\n if p_etat_LED:\n if p_etat_etoile:\n l_lettre = 'D'\n else:\n l_lettre = 'S'\n elif p_etat_etoile:\n l_lettre = 'P'\n else:\n l_lettre = 'S'\n if l_lettre == 'C':\n l_retour = True\n elif l_lettre == 'D':\n l_retour = False\n elif l_lettre == 'S':\n if self.m_serie % 2 == 0:\n l_retour = True\n else:\n l_retour = False\n elif l_lettre == 'P':\n if self.m_Port == s_Connecteur['Con_Pa']:\n l_retour = True\n else:\n l_retour = False\n elif l_lettre == 'B':\n if self.m_Nb_Pile >= 2:\n l_retour = True\n else:\n l_retour = 
False\n return l_retour\n",
"<docstring token>\n<import token>\n\n\nclass FilsComplexes:\n\n def __init__(self, p_ser, p_Num_serie, p_Port, p_Nb_Pile, p_Buzzer):\n self.m_LEDs = []\n self.m_fils = {'F1': 0, 'F2': 0, 'F3': 0, 'F4': 0, 'F5': 0}\n self.m_etoiles = []\n self.m_serie = p_Num_serie\n self.m_Port = p_Port\n self.m_Nb_Pile = p_Nb_Pile\n self.m_ser = p_ser\n self.m_init = False\n self.m_doit_etre_debranche = []\n self.m_module_desamorce = False\n self.m_buzzer = p_Buzzer\n\n def GenererScenario(self):\n \"\"\"\n Genere le scénario \n \"\"\"\n for i in range(5):\n self.m_LEDs.append(random.randint(0, 1))\n self.m_etoiles.append(random.randint(0, 1))\n \"\"\"\n Envoi un message a l'arduino pour initialiser les LED/étoiles\n \"\"\"\n l_contenu = []\n for i, l_LED in enumerate(self.m_LEDs):\n l_contenu.append(s_Id_LED + str(i + 1))\n l_contenu.append(str(l_LED))\n for i, l_Etoile in enumerate(self.m_etoiles):\n l_contenu.append(s_Id_Etoile + str(i + 1))\n l_contenu.append(str(l_Etoile))\n self.m_ser.construireMessage(s_Fils_Complexes, l_contenu)\n self.m_init = True\n for i in range(5):\n l_id = 'F%d' % (i + 1)\n self.m_doit_etre_debranche.append(self.DoitEtreDebranche(self.\n m_LEDs[i], self.m_fils[l_id], self.m_etoiles[i]))\n print('Solutions module FCP ', self.m_doit_etre_debranche)\n\n def LireMessage(self, p_Buffer):\n \"\"\"\n Lit le message de l'arduino\n \"\"\"\n if not self.m_module_desamorce:\n for i, l_id in enumerate(p_Buffer):\n if l_id in self.m_fils.keys():\n if not self.m_init:\n self.m_fils[l_id] = int(p_Buffer[i + 1])\n elif int(p_Buffer[i + 1]) == 0:\n l_numero_fil = int(l_id[1]) - 1\n l_Doit_Etre_Debranche = self.m_doit_etre_debranche[\n l_numero_fil]\n self.m_doit_etre_debranche[l_numero_fil] = False\n if l_Doit_Etre_Debranche == False:\n self.Faute()\n pass\n if True not in self.m_doit_etre_debranche:\n self.m_module_desamorce = True\n self.Succes()\n if 0 not in self.m_fils.values() and not self.m_init:\n self.GenererScenario()\n\n def Succes(self):\n \"\"\"\n Méthode chargée d'envoyer le message de succes\n \"\"\"\n l_contenu = []\n l_contenu.append(s_Succes)\n print(l_contenu)\n self.m_ser.construireMessage(s_Fils_Complexes, l_contenu)\n\n def Faute(self):\n self.m_buzzer.SonnerErreur()\n pass\n\n def DoitEtreDebranche(self, p_etat_LED, p_couleur_fil, p_etat_etoile):\n \"\"\"\n Cette méthode renvoie un booléen true ou false si le fil devait etre debranché\n \"\"\"\n l_retour = False\n l_lettre = ''\n if p_couleur_fil == s_Fil_Rouge:\n if p_etat_LED:\n if p_etat_etoile:\n l_lettre = 'B'\n else:\n l_lettre = 'B'\n elif p_etat_etoile:\n l_lettre = 'C'\n else:\n l_lettre = 'S'\n elif p_couleur_fil == s_Fil_Bleu:\n if p_etat_LED:\n if p_etat_etoile:\n l_lettre = 'P'\n else:\n l_lettre = 'P'\n elif p_etat_etoile:\n l_lettre = 'D'\n else:\n l_lettre = 'S'\n elif p_couleur_fil == s_Fil_Rouge_Bleu:\n if p_etat_LED:\n if p_etat_etoile:\n l_lettre = 'D'\n else:\n l_lettre = 'S'\n elif p_etat_etoile:\n l_lettre = 'P'\n else:\n l_lettre = 'S'\n if l_lettre == 'C':\n l_retour = True\n elif l_lettre == 'D':\n l_retour = False\n elif l_lettre == 'S':\n if self.m_serie % 2 == 0:\n l_retour = True\n else:\n l_retour = False\n elif l_lettre == 'P':\n if self.m_Port == s_Connecteur['Con_Pa']:\n l_retour = True\n else:\n l_retour = False\n elif l_lettre == 'B':\n if self.m_Nb_Pile >= 2:\n l_retour = True\n else:\n l_retour = False\n return l_retour\n",
"<docstring token>\n<import token>\n\n\nclass FilsComplexes:\n\n def __init__(self, p_ser, p_Num_serie, p_Port, p_Nb_Pile, p_Buzzer):\n self.m_LEDs = []\n self.m_fils = {'F1': 0, 'F2': 0, 'F3': 0, 'F4': 0, 'F5': 0}\n self.m_etoiles = []\n self.m_serie = p_Num_serie\n self.m_Port = p_Port\n self.m_Nb_Pile = p_Nb_Pile\n self.m_ser = p_ser\n self.m_init = False\n self.m_doit_etre_debranche = []\n self.m_module_desamorce = False\n self.m_buzzer = p_Buzzer\n\n def GenererScenario(self):\n \"\"\"\n Genere le scénario \n \"\"\"\n for i in range(5):\n self.m_LEDs.append(random.randint(0, 1))\n self.m_etoiles.append(random.randint(0, 1))\n \"\"\"\n Envoi un message a l'arduino pour initialiser les LED/étoiles\n \"\"\"\n l_contenu = []\n for i, l_LED in enumerate(self.m_LEDs):\n l_contenu.append(s_Id_LED + str(i + 1))\n l_contenu.append(str(l_LED))\n for i, l_Etoile in enumerate(self.m_etoiles):\n l_contenu.append(s_Id_Etoile + str(i + 1))\n l_contenu.append(str(l_Etoile))\n self.m_ser.construireMessage(s_Fils_Complexes, l_contenu)\n self.m_init = True\n for i in range(5):\n l_id = 'F%d' % (i + 1)\n self.m_doit_etre_debranche.append(self.DoitEtreDebranche(self.\n m_LEDs[i], self.m_fils[l_id], self.m_etoiles[i]))\n print('Solutions module FCP ', self.m_doit_etre_debranche)\n\n def LireMessage(self, p_Buffer):\n \"\"\"\n Lit le message de l'arduino\n \"\"\"\n if not self.m_module_desamorce:\n for i, l_id in enumerate(p_Buffer):\n if l_id in self.m_fils.keys():\n if not self.m_init:\n self.m_fils[l_id] = int(p_Buffer[i + 1])\n elif int(p_Buffer[i + 1]) == 0:\n l_numero_fil = int(l_id[1]) - 1\n l_Doit_Etre_Debranche = self.m_doit_etre_debranche[\n l_numero_fil]\n self.m_doit_etre_debranche[l_numero_fil] = False\n if l_Doit_Etre_Debranche == False:\n self.Faute()\n pass\n if True not in self.m_doit_etre_debranche:\n self.m_module_desamorce = True\n self.Succes()\n if 0 not in self.m_fils.values() and not self.m_init:\n self.GenererScenario()\n\n def Succes(self):\n \"\"\"\n Méthode chargée d'envoyer le message de succes\n \"\"\"\n l_contenu = []\n l_contenu.append(s_Succes)\n print(l_contenu)\n self.m_ser.construireMessage(s_Fils_Complexes, l_contenu)\n <function token>\n\n def DoitEtreDebranche(self, p_etat_LED, p_couleur_fil, p_etat_etoile):\n \"\"\"\n Cette méthode renvoie un booléen true ou false si le fil devait etre debranché\n \"\"\"\n l_retour = False\n l_lettre = ''\n if p_couleur_fil == s_Fil_Rouge:\n if p_etat_LED:\n if p_etat_etoile:\n l_lettre = 'B'\n else:\n l_lettre = 'B'\n elif p_etat_etoile:\n l_lettre = 'C'\n else:\n l_lettre = 'S'\n elif p_couleur_fil == s_Fil_Bleu:\n if p_etat_LED:\n if p_etat_etoile:\n l_lettre = 'P'\n else:\n l_lettre = 'P'\n elif p_etat_etoile:\n l_lettre = 'D'\n else:\n l_lettre = 'S'\n elif p_couleur_fil == s_Fil_Rouge_Bleu:\n if p_etat_LED:\n if p_etat_etoile:\n l_lettre = 'D'\n else:\n l_lettre = 'S'\n elif p_etat_etoile:\n l_lettre = 'P'\n else:\n l_lettre = 'S'\n if l_lettre == 'C':\n l_retour = True\n elif l_lettre == 'D':\n l_retour = False\n elif l_lettre == 'S':\n if self.m_serie % 2 == 0:\n l_retour = True\n else:\n l_retour = False\n elif l_lettre == 'P':\n if self.m_Port == s_Connecteur['Con_Pa']:\n l_retour = True\n else:\n l_retour = False\n elif l_lettre == 'B':\n if self.m_Nb_Pile >= 2:\n l_retour = True\n else:\n l_retour = False\n return l_retour\n",
"<docstring token>\n<import token>\n\n\nclass FilsComplexes:\n <function token>\n\n def GenererScenario(self):\n \"\"\"\n Genere le scénario \n \"\"\"\n for i in range(5):\n self.m_LEDs.append(random.randint(0, 1))\n self.m_etoiles.append(random.randint(0, 1))\n \"\"\"\n Envoi un message a l'arduino pour initialiser les LED/étoiles\n \"\"\"\n l_contenu = []\n for i, l_LED in enumerate(self.m_LEDs):\n l_contenu.append(s_Id_LED + str(i + 1))\n l_contenu.append(str(l_LED))\n for i, l_Etoile in enumerate(self.m_etoiles):\n l_contenu.append(s_Id_Etoile + str(i + 1))\n l_contenu.append(str(l_Etoile))\n self.m_ser.construireMessage(s_Fils_Complexes, l_contenu)\n self.m_init = True\n for i in range(5):\n l_id = 'F%d' % (i + 1)\n self.m_doit_etre_debranche.append(self.DoitEtreDebranche(self.\n m_LEDs[i], self.m_fils[l_id], self.m_etoiles[i]))\n print('Solutions module FCP ', self.m_doit_etre_debranche)\n\n def LireMessage(self, p_Buffer):\n \"\"\"\n Lit le message de l'arduino\n \"\"\"\n if not self.m_module_desamorce:\n for i, l_id in enumerate(p_Buffer):\n if l_id in self.m_fils.keys():\n if not self.m_init:\n self.m_fils[l_id] = int(p_Buffer[i + 1])\n elif int(p_Buffer[i + 1]) == 0:\n l_numero_fil = int(l_id[1]) - 1\n l_Doit_Etre_Debranche = self.m_doit_etre_debranche[\n l_numero_fil]\n self.m_doit_etre_debranche[l_numero_fil] = False\n if l_Doit_Etre_Debranche == False:\n self.Faute()\n pass\n if True not in self.m_doit_etre_debranche:\n self.m_module_desamorce = True\n self.Succes()\n if 0 not in self.m_fils.values() and not self.m_init:\n self.GenererScenario()\n\n def Succes(self):\n \"\"\"\n Méthode chargée d'envoyer le message de succes\n \"\"\"\n l_contenu = []\n l_contenu.append(s_Succes)\n print(l_contenu)\n self.m_ser.construireMessage(s_Fils_Complexes, l_contenu)\n <function token>\n\n def DoitEtreDebranche(self, p_etat_LED, p_couleur_fil, p_etat_etoile):\n \"\"\"\n Cette méthode renvoie un booléen true ou false si le fil devait etre debranché\n \"\"\"\n l_retour = False\n l_lettre = ''\n if p_couleur_fil == s_Fil_Rouge:\n if p_etat_LED:\n if p_etat_etoile:\n l_lettre = 'B'\n else:\n l_lettre = 'B'\n elif p_etat_etoile:\n l_lettre = 'C'\n else:\n l_lettre = 'S'\n elif p_couleur_fil == s_Fil_Bleu:\n if p_etat_LED:\n if p_etat_etoile:\n l_lettre = 'P'\n else:\n l_lettre = 'P'\n elif p_etat_etoile:\n l_lettre = 'D'\n else:\n l_lettre = 'S'\n elif p_couleur_fil == s_Fil_Rouge_Bleu:\n if p_etat_LED:\n if p_etat_etoile:\n l_lettre = 'D'\n else:\n l_lettre = 'S'\n elif p_etat_etoile:\n l_lettre = 'P'\n else:\n l_lettre = 'S'\n if l_lettre == 'C':\n l_retour = True\n elif l_lettre == 'D':\n l_retour = False\n elif l_lettre == 'S':\n if self.m_serie % 2 == 0:\n l_retour = True\n else:\n l_retour = False\n elif l_lettre == 'P':\n if self.m_Port == s_Connecteur['Con_Pa']:\n l_retour = True\n else:\n l_retour = False\n elif l_lettre == 'B':\n if self.m_Nb_Pile >= 2:\n l_retour = True\n else:\n l_retour = False\n return l_retour\n",
"<docstring token>\n<import token>\n\n\nclass FilsComplexes:\n <function token>\n\n def GenererScenario(self):\n \"\"\"\n Genere le scénario \n \"\"\"\n for i in range(5):\n self.m_LEDs.append(random.randint(0, 1))\n self.m_etoiles.append(random.randint(0, 1))\n \"\"\"\n Envoi un message a l'arduino pour initialiser les LED/étoiles\n \"\"\"\n l_contenu = []\n for i, l_LED in enumerate(self.m_LEDs):\n l_contenu.append(s_Id_LED + str(i + 1))\n l_contenu.append(str(l_LED))\n for i, l_Etoile in enumerate(self.m_etoiles):\n l_contenu.append(s_Id_Etoile + str(i + 1))\n l_contenu.append(str(l_Etoile))\n self.m_ser.construireMessage(s_Fils_Complexes, l_contenu)\n self.m_init = True\n for i in range(5):\n l_id = 'F%d' % (i + 1)\n self.m_doit_etre_debranche.append(self.DoitEtreDebranche(self.\n m_LEDs[i], self.m_fils[l_id], self.m_etoiles[i]))\n print('Solutions module FCP ', self.m_doit_etre_debranche)\n\n def LireMessage(self, p_Buffer):\n \"\"\"\n Lit le message de l'arduino\n \"\"\"\n if not self.m_module_desamorce:\n for i, l_id in enumerate(p_Buffer):\n if l_id in self.m_fils.keys():\n if not self.m_init:\n self.m_fils[l_id] = int(p_Buffer[i + 1])\n elif int(p_Buffer[i + 1]) == 0:\n l_numero_fil = int(l_id[1]) - 1\n l_Doit_Etre_Debranche = self.m_doit_etre_debranche[\n l_numero_fil]\n self.m_doit_etre_debranche[l_numero_fil] = False\n if l_Doit_Etre_Debranche == False:\n self.Faute()\n pass\n if True not in self.m_doit_etre_debranche:\n self.m_module_desamorce = True\n self.Succes()\n if 0 not in self.m_fils.values() and not self.m_init:\n self.GenererScenario()\n\n def Succes(self):\n \"\"\"\n Méthode chargée d'envoyer le message de succes\n \"\"\"\n l_contenu = []\n l_contenu.append(s_Succes)\n print(l_contenu)\n self.m_ser.construireMessage(s_Fils_Complexes, l_contenu)\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass FilsComplexes:\n <function token>\n <function token>\n\n def LireMessage(self, p_Buffer):\n \"\"\"\n Lit le message de l'arduino\n \"\"\"\n if not self.m_module_desamorce:\n for i, l_id in enumerate(p_Buffer):\n if l_id in self.m_fils.keys():\n if not self.m_init:\n self.m_fils[l_id] = int(p_Buffer[i + 1])\n elif int(p_Buffer[i + 1]) == 0:\n l_numero_fil = int(l_id[1]) - 1\n l_Doit_Etre_Debranche = self.m_doit_etre_debranche[\n l_numero_fil]\n self.m_doit_etre_debranche[l_numero_fil] = False\n if l_Doit_Etre_Debranche == False:\n self.Faute()\n pass\n if True not in self.m_doit_etre_debranche:\n self.m_module_desamorce = True\n self.Succes()\n if 0 not in self.m_fils.values() and not self.m_init:\n self.GenererScenario()\n\n def Succes(self):\n \"\"\"\n Méthode chargée d'envoyer le message de succes\n \"\"\"\n l_contenu = []\n l_contenu.append(s_Succes)\n print(l_contenu)\n self.m_ser.construireMessage(s_Fils_Complexes, l_contenu)\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass FilsComplexes:\n <function token>\n <function token>\n <function token>\n\n def Succes(self):\n \"\"\"\n Méthode chargée d'envoyer le message de succes\n \"\"\"\n l_contenu = []\n l_contenu.append(s_Succes)\n print(l_contenu)\n self.m_ser.construireMessage(s_Fils_Complexes, l_contenu)\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass FilsComplexes:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<class token>\n"
] | false |
98,541 |
efea59479b9902c76135979fe0ccd8d7f94e82a3
|
__author__ = 'flashton'
__version__ = '0.1.0'
class MinionRead:
def __init__(self):
self.read_name = str
self.hits = []
self.read_len = int
self.number_contigs_matched = int
def calc_num_contigs_matched(self):
sbjcts = []
for hit in self.hits:
sbjcts.append(hit.sbjct)
self.number_contigs_matched = len(set(sbjcts))
class ReadContigMatch:
def __init__(self):
self.sbjct = int
self.score = int
self.match_len = int
self.match_pos = int
self.match_gap = int
self.query_start = int
self.query_stop = int
self.sbjct_start = int
self.sbjct_stop = int
self.query_coordinates = []
self.sbjct_coordinates = []
self.orientation = str
def print_res(self, read_name, read_len):
self.query_start = min(self.query_coordinates)
self.query_stop = max(self.query_coordinates)
self.sbjct_start = min(self.sbjct_coordinates)
self.sbjct_stop = max(self.sbjct_coordinates)
print '%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s' % (read_name, read_len, self.sbjct, self.score, self.match_len, self.match_pos, self.match_gap, self.query_start, self.query_stop, self.sbjct_start, self.sbjct_stop)
def print_res_dict(res_dict):
print('query\tnumber of different contigs matched\tread len\tsubject\torientation\tscore\tmatch len\tmatch pos\tmatch ' \
'gap\tq start\tq stop\ts start\ts stop\n')
for read in res_dict:
res_dict[read].calc_num_contigs_matched()
for every in res_dict[read].hits:
print('%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n' % (res_dict[read].read_name,
res_dict[read].number_contigs_matched,
res_dict[read].read_len,
every.sbjct, every.orientation, every.score, every.match_len, every.match_pos, every.match_gap, every.query_start, every.query_stop, every.sbjct_start, every.sbjct_stop))
#outhandle.close()
|
[
"__author__ = 'flashton'\n\n__version__ = '0.1.0'\n\nclass MinionRead:\n def __init__(self):\n self.read_name = str\n self.hits = []\n self.read_len = int\n self.number_contigs_matched = int\n\n def calc_num_contigs_matched(self):\n sbjcts = []\n for hit in self.hits:\n sbjcts.append(hit.sbjct)\n self.number_contigs_matched = len(set(sbjcts))\n\nclass ReadContigMatch:\n def __init__(self):\n self.sbjct = int\n self.score = int\n self.match_len = int\n self.match_pos = int\n self.match_gap = int\n self.query_start = int\n self.query_stop = int\n self.sbjct_start = int\n self.sbjct_stop = int\n self.query_coordinates = []\n self.sbjct_coordinates = []\n self.orientation = str\n\n def print_res(self, read_name, read_len):\n self.query_start = min(self.query_coordinates)\n self.query_stop = max(self.query_coordinates)\n self.sbjct_start = min(self.sbjct_coordinates)\n self.sbjct_stop = max(self.sbjct_coordinates)\n print '%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s' % (read_name, read_len, self.sbjct, self.score, self.match_len, self.match_pos, self.match_gap, self.query_start, self.query_stop, self.sbjct_start, self.sbjct_stop)\n\n\ndef print_res_dict(res_dict):\n print('query\\tnumber of different contigs matched\\tread len\\tsubject\\torientation\\tscore\\tmatch len\\tmatch pos\\tmatch ' \\\n 'gap\\tq start\\tq stop\\ts start\\ts stop\\n')\n for read in res_dict:\n res_dict[read].calc_num_contigs_matched()\n for every in res_dict[read].hits:\n print('%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n' % (res_dict[read].read_name,\n res_dict[read].number_contigs_matched,\n res_dict[read].read_len,\n every.sbjct, every.orientation, every.score, every.match_len, every.match_pos, every.match_gap, every.query_start, every.query_stop, every.sbjct_start, every.sbjct_stop))\n #outhandle.close()"
] | true |
98,542 |
9458758501f45a7d531d5f84020fb2b50778b9a5
|
'''
A priority queue is common use for a heap, and it presents several implementation challenges:
http://www.bogotobogo.com/python/python_PriorityQueue_heapq_Data_Structure.php
'''
# Simplest
try:
import Queue as Q # ver. < 3.0
except ImportError:
import queue as Q
q = Q.PriorityQueue()
q.put(10)
q.put(1)
q.put(5)
while not q.empty():
print q.get(),
'''
1 5 10
'''
'''
Note that depending on the Python versions, the name of the priority queue is different.
So, we used try and except pair so that we can adjust our container to the version.
'''
#Sample B - tuple
try:
import Queue as Q # ver. < 3.0
except ImportError:
import queue as Q
q = Q.PriorityQueue()
q.put((10,'ten'))
q.put((1,'one'))
q.put((5,'five'))
while not q.empty():
print q.get(),
'''
(1, 'one') (5, 'five') (10, 'ten')
'''
|
[
"'''\nA priority queue is common use for a heap, and it presents several implementation challenges:\nhttp://www.bogotobogo.com/python/python_PriorityQueue_heapq_Data_Structure.php\n'''\n\n\n# Simplest\n\ntry:\n import Queue as Q # ver. < 3.0\nexcept ImportError:\n import queue as Q\n\nq = Q.PriorityQueue()\nq.put(10)\nq.put(1)\nq.put(5)\nwhile not q.empty():\n print q.get(),\n\n'''\n1 5 10\n'''\n\n'''\nNote that depending on the Python versions, the name of the priority queue is different.\nSo, we used try and except pair so that we can adjust our container to the version.\n'''\n\n#Sample B - tuple\n\ntry:\n import Queue as Q # ver. < 3.0\nexcept ImportError:\n import queue as Q\n\nq = Q.PriorityQueue()\nq.put((10,'ten'))\nq.put((1,'one'))\nq.put((5,'five'))\nwhile not q.empty():\n print q.get(),\n\n\n'''\n(1, 'one') (5, 'five') (10, 'ten')\n'''\n\n"
] | true |
98,543 |
637afef6f108fd62d53e1fef5b216538f9e6e9ff
|
from app.celery_tasks.celery import celery
from app import create_app
from app.celery_tasks.celery_utils import init_celery
app = create_app()
init_celery(celery, app)
|
[
"from app.celery_tasks.celery import celery\nfrom app import create_app\nfrom app.celery_tasks.celery_utils import init_celery\n\napp = create_app()\ninit_celery(celery, app)\n",
"from app.celery_tasks.celery import celery\nfrom app import create_app\nfrom app.celery_tasks.celery_utils import init_celery\napp = create_app()\ninit_celery(celery, app)\n",
"<import token>\napp = create_app()\ninit_celery(celery, app)\n",
"<import token>\n<assignment token>\ninit_celery(celery, app)\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
98,544 |
50a67101b4023a0a2878566476118e9239474dcb
|
from codecs import open
from jieba import cut
d=dict()
def cn_to_tw():
for line in open('cn_to_tw_phrase.csv',encoding='utf8'):
cn,zh=line.strip().split(',')
d[cn]=zh
for line in open('b2g_map.utf8',encoding='utf8'):
b=line.strip().split(u' ')[0]
g=line.strip().split(u' ')[1]
d[g]=b
return d
cn_to_tw()
text=open('cn.txt',encoding='utf8').read()
for word in cut(text):
if word in d:print d[word],
else:
for c in word:
if c in d:print d[c],
else:print c,
|
[
"from codecs import open\nfrom jieba import cut\n\nd=dict()\n\ndef cn_to_tw():\n for line in open('cn_to_tw_phrase.csv',encoding='utf8'):\n cn,zh=line.strip().split(',')\n d[cn]=zh\n for line in open('b2g_map.utf8',encoding='utf8'):\n b=line.strip().split(u' ')[0]\n g=line.strip().split(u' ')[1]\n d[g]=b\n return d\n\ncn_to_tw()\n\ntext=open('cn.txt',encoding='utf8').read()\nfor word in cut(text):\n if word in d:print d[word],\n else:\n for c in word:\n if c in d:print d[c],\n else:print c,\n"
] | true |
98,545 |
16eb35039f34517027450074df6322b0a1bca331
|
#! /usr/bin/env python3
import sys
import os
if not 'MULE_SOFTWARE_ROOT' in os.environ:
print("No SWEET environment variables detected, skipping Travis updates")
sys.exit(0)
os.chdir(os.environ['MULE_SOFTWARE_ROOT'])
import glob
import re
from itertools import product
travis_file=".travis.yml"
verbosity = 10
if len(sys.argv) > 1:
verbosity = int(sys.argv[1])
if verbosity >= 10:
print("Working directory: "+os.path.abspath(os.curdir))
print("Setting up tests in travis file '"+travis_file+"'")
tests = glob.glob('./tests/??_*/test.sh')
tests += glob.glob('./tests/??_*/test.py')
if verbosity >= 10:
for test in tests:
print(" + Found test script '"+test+"'")
if verbosity >= 10:
print("Writing content to file '"+travis_file+"'")
with open(travis_file, 'w') as f:
f.write("""#
# Script for Travis CI
#
# See doc/travis_ci.txt for more information
#
language: cpp
dist: trusty
#
# We want to setup the 3rd party libraries and test SWEET
# with different software consellations
#
# This is called a build matrix and generates different build
# environments
#
jobs:
include:
""")
jobs_list = []
if True:
jobs_list += [
"""
# Test with G++-8
- os: linux
addons:
apt:
sources:
- ubuntu-toolchain-r-test
packages:
- g++-8
- gfortran-8
env:
- MATRIX_EVAL="export CC=gcc-8 && export CXX=g++-8 && export FC=gfortran-8 && export F90=gfortran-8"
"""]
if False:
#if True:
jobs_list += [
"""
# Test with G++-7
- os: linux
addons:
apt:
sources:
- ubuntu-toolchain-r-test
packages:
- g++-7
- gfortran-7
env:
- MATRIX_EVAL="export CC=gcc-7 && export CXX=g++-7 && export FC=gfortran-7 && export F90=gfortran-7"
"""]
if False:
#if True:
jobs_list += [
"""
# Test with G++-6
- os: linux
addons:
apt:
sources:
- ubuntu-toolchain-r-test
packages:
- g++-6
- gfortran-6
env:
- MATRIX_EVAL="export CC=gcc-6 && export CXX=g++-6 && export FC=gfortran-6 && export F90=gfortran-6"
"""]
#if False:
if True:
jobs_list += [
"""
# Test with G++-5
- os: linux
addons:
apt:
sources:
- ubuntu-toolchain-r-test
packages:
- g++-5
- gfortran-5
env:
- MATRIX_EVAL="export CC=gcc-5 && export CXX=g++-5 && export FC=gfortran-5 && export F90=gfortran-5"
"""]
c = 0
for (j, test) in product(jobs_list, tests):
if True:
# This version allows reutilizing the cache
f.write(j)
if True:
f.write(" script: "+test)
else:
f.write(" script:\n")
f.write(" - cd \""+os.path.dirname(test)+"\"\n")
f.write(" - ./"+os.path.basename(test)+"\n")
else:
j = j.replace('MATRIX_EVAL="', 'MATRIX_EVAL="TESTSCRIPT='+test+' && ')
f.write(" script: $TESTSCRIPT")
f.write("\n")
f.write("\n")
c += 1
f.write("""
#
# Install dependencies
#
# See https://docs.travis-ci.com/user/installing-dependencies/
#
before_install:
# Load matrix environment
- echo "${MATRIX_EVAL}"
- eval "${MATRIX_EVAL}"
# Debug output
- hostname
# Load SEET environment variables
- cd local_software || exit 1
- source env_vars.sh || exit 1
# Setup additional SWEET software packages
- ./setup_local_software.sh || exit 1
# Go back to SWEET's root directory
- cd $MULE_SOFTWARE_ROOT
#
# SWEET requires binaries compiled individually for each test
# Skip installation phase (install: true)
#
install: true
#
# Cache installed software
#
# After restoring the cache, the install scripts check for found
# software and avoid recompiling and installing it.
#
# See https://docs.travis-ci.com/user/caching/
#
# The cache is setup amongst others based on the environment variables
cache:
directories:
# Cache the install directory for SWEET's 3rd party software
local_software/local
""")
if verbosity >= 10:
print("Job combinations: "+str(c))
|
[
"#! /usr/bin/env python3\n\nimport sys\nimport os\n\nif not 'MULE_SOFTWARE_ROOT' in os.environ:\n\tprint(\"No SWEET environment variables detected, skipping Travis updates\")\n\tsys.exit(0)\n\nos.chdir(os.environ['MULE_SOFTWARE_ROOT'])\n\n\nimport glob\nimport re\n\nfrom itertools import product\n\ntravis_file=\".travis.yml\"\n\nverbosity = 10\nif len(sys.argv) > 1:\n\tverbosity = int(sys.argv[1])\n\nif verbosity >= 10:\n\tprint(\"Working directory: \"+os.path.abspath(os.curdir))\n\tprint(\"Setting up tests in travis file '\"+travis_file+\"'\")\n\ntests = glob.glob('./tests/??_*/test.sh')\ntests += glob.glob('./tests/??_*/test.py')\n\n\nif verbosity >= 10:\n\tfor test in tests:\n\t\tprint(\" + Found test script '\"+test+\"'\")\n\n\n\nif verbosity >= 10:\n\tprint(\"Writing content to file '\"+travis_file+\"'\")\n\nwith open(travis_file, 'w') as f:\n\tf.write(\"\"\"#\n# Script for Travis CI\n#\n# See doc/travis_ci.txt for more information\n#\n\nlanguage: cpp\n\ndist: trusty\n\n#\n# We want to setup the 3rd party libraries and test SWEET\n# with different software consellations\n#\n# This is called a build matrix and generates different build\n# environments\n#\njobs:\n include:\n\n\"\"\")\n\n\tjobs_list = []\n\n\n\tif True:\n\t\tjobs_list += [\n\"\"\"\n # Test with G++-8\n - os: linux\n addons:\n apt:\n sources:\n - ubuntu-toolchain-r-test\n packages:\n - g++-8\n - gfortran-8\n env:\n - MATRIX_EVAL=\"export CC=gcc-8 && export CXX=g++-8 && export FC=gfortran-8 && export F90=gfortran-8\"\n\"\"\"]\n\n\tif False:\n\t#if True:\n\t\tjobs_list += [\n\"\"\"\n # Test with G++-7\n - os: linux\n addons:\n apt:\n sources:\n - ubuntu-toolchain-r-test\n packages:\n - g++-7\n - gfortran-7\n env:\n - MATRIX_EVAL=\"export CC=gcc-7 && export CXX=g++-7 && export FC=gfortran-7 && export F90=gfortran-7\"\n\"\"\"]\n\n\tif False:\n\t#if True:\n\t\tjobs_list += [\n\"\"\"\n # Test with G++-6\n - os: linux\n addons:\n apt:\n sources:\n - ubuntu-toolchain-r-test\n packages:\n - g++-6\n - gfortran-6\n env:\n - MATRIX_EVAL=\"export CC=gcc-6 && export CXX=g++-6 && export FC=gfortran-6 && export F90=gfortran-6\"\n\"\"\"]\n\n\t#if False:\n\tif True:\n\t\tjobs_list += [\n\"\"\"\n # Test with G++-5\n - os: linux\n addons:\n apt:\n sources:\n - ubuntu-toolchain-r-test\n packages:\n - g++-5\n - gfortran-5\n env:\n - MATRIX_EVAL=\"export CC=gcc-5 && export CXX=g++-5 && export FC=gfortran-5 && export F90=gfortran-5\"\n\"\"\"]\n\n\n\tc = 0\n\tfor (j, test) in product(jobs_list, tests):\n\t\tif True:\n\t\t\t# This version allows reutilizing the cache\n\t\t\tf.write(j)\n\t\t\tif True:\n\t\t\t\tf.write(\" script: \"+test)\n\t\t\telse:\n\t\t\t\tf.write(\" script:\\n\")\n\t\t\t\tf.write(\" - cd \\\"\"+os.path.dirname(test)+\"\\\"\\n\")\n\t\t\t\tf.write(\" - ./\"+os.path.basename(test)+\"\\n\")\n\n\t\telse:\n\t\t\tj = j.replace('MATRIX_EVAL=\"', 'MATRIX_EVAL=\"TESTSCRIPT='+test+' && ')\n\t\t\tf.write(\" script: $TESTSCRIPT\")\n\t\tf.write(\"\\n\")\n\t\tf.write(\"\\n\")\n\t\tc += 1\n\n\n\tf.write(\"\"\"\n\n#\n# Install dependencies\n#\n# See https://docs.travis-ci.com/user/installing-dependencies/\n#\nbefore_install:\n # Load matrix environment\n - echo \"${MATRIX_EVAL}\"\n - eval \"${MATRIX_EVAL}\"\n\n # Debug output\n - hostname\n\n # Load SEET environment variables\n - cd local_software || exit 1\n - source env_vars.sh || exit 1\n\n # Setup additional SWEET software packages\n - ./setup_local_software.sh || exit 1\n\n # Go back to SWEET's root directory\n - cd $MULE_SOFTWARE_ROOT\n\n\n\n#\n# SWEET requires binaries compiled 
individually for each test\n# Skip installation phase (install: true)\n#\ninstall: true\n\n\n\n#\n# Cache installed software\n#\n# After restoring the cache, the install scripts check for found\n# software and avoid recompiling and installing it.\n#\n# See https://docs.travis-ci.com/user/caching/\n#\n# The cache is setup amongst others based on the environment variables\ncache:\n directories:\n # Cache the install directory for SWEET's 3rd party software\n local_software/local\n\n\"\"\")\n\nif verbosity >= 10:\n\tprint(\"Job combinations: \"+str(c))\n",
"import sys\nimport os\nif not 'MULE_SOFTWARE_ROOT' in os.environ:\n print('No SWEET environment variables detected, skipping Travis updates')\n sys.exit(0)\nos.chdir(os.environ['MULE_SOFTWARE_ROOT'])\nimport glob\nimport re\nfrom itertools import product\ntravis_file = '.travis.yml'\nverbosity = 10\nif len(sys.argv) > 1:\n verbosity = int(sys.argv[1])\nif verbosity >= 10:\n print('Working directory: ' + os.path.abspath(os.curdir))\n print(\"Setting up tests in travis file '\" + travis_file + \"'\")\ntests = glob.glob('./tests/??_*/test.sh')\ntests += glob.glob('./tests/??_*/test.py')\nif verbosity >= 10:\n for test in tests:\n print(\" + Found test script '\" + test + \"'\")\nif verbosity >= 10:\n print(\"Writing content to file '\" + travis_file + \"'\")\nwith open(travis_file, 'w') as f:\n f.write(\n \"\"\"#\n# Script for Travis CI\n#\n# See doc/travis_ci.txt for more information\n#\n\nlanguage: cpp\n\ndist: trusty\n\n#\n# We want to setup the 3rd party libraries and test SWEET\n# with different software consellations\n#\n# This is called a build matrix and generates different build\n# environments\n#\njobs:\n include:\n\n\"\"\"\n )\n jobs_list = []\n if True:\n jobs_list += [\n \"\"\"\n # Test with G++-8\n - os: linux\n addons:\n apt:\n sources:\n - ubuntu-toolchain-r-test\n packages:\n - g++-8\n - gfortran-8\n env:\n - MATRIX_EVAL=\"export CC=gcc-8 && export CXX=g++-8 && export FC=gfortran-8 && export F90=gfortran-8\\\"\n\"\"\"\n ]\n if False:\n jobs_list += [\n \"\"\"\n # Test with G++-7\n - os: linux\n addons:\n apt:\n sources:\n - ubuntu-toolchain-r-test\n packages:\n - g++-7\n - gfortran-7\n env:\n - MATRIX_EVAL=\"export CC=gcc-7 && export CXX=g++-7 && export FC=gfortran-7 && export F90=gfortran-7\\\"\n\"\"\"\n ]\n if False:\n jobs_list += [\n \"\"\"\n # Test with G++-6\n - os: linux\n addons:\n apt:\n sources:\n - ubuntu-toolchain-r-test\n packages:\n - g++-6\n - gfortran-6\n env:\n - MATRIX_EVAL=\"export CC=gcc-6 && export CXX=g++-6 && export FC=gfortran-6 && export F90=gfortran-6\\\"\n\"\"\"\n ]\n if True:\n jobs_list += [\n \"\"\"\n # Test with G++-5\n - os: linux\n addons:\n apt:\n sources:\n - ubuntu-toolchain-r-test\n packages:\n - g++-5\n - gfortran-5\n env:\n - MATRIX_EVAL=\"export CC=gcc-5 && export CXX=g++-5 && export FC=gfortran-5 && export F90=gfortran-5\\\"\n\"\"\"\n ]\n c = 0\n for j, test in product(jobs_list, tests):\n if True:\n f.write(j)\n if True:\n f.write(' script: ' + test)\n else:\n f.write(' script:\\n')\n f.write(' - cd \"' + os.path.dirname(test) + '\"\\n')\n f.write(' - ./' + os.path.basename(test) + '\\n')\n else:\n j = j.replace('MATRIX_EVAL=\"', 'MATRIX_EVAL=\"TESTSCRIPT=' +\n test + ' && ')\n f.write(' script: $TESTSCRIPT')\n f.write('\\n')\n f.write('\\n')\n c += 1\n f.write(\n \"\"\"\n\n#\n# Install dependencies\n#\n# See https://docs.travis-ci.com/user/installing-dependencies/\n#\nbefore_install:\n # Load matrix environment\n - echo \"${MATRIX_EVAL}\"\n - eval \"${MATRIX_EVAL}\"\n\n # Debug output\n - hostname\n\n # Load SEET environment variables\n - cd local_software || exit 1\n - source env_vars.sh || exit 1\n\n # Setup additional SWEET software packages\n - ./setup_local_software.sh || exit 1\n\n # Go back to SWEET's root directory\n - cd $MULE_SOFTWARE_ROOT\n\n\n\n#\n# SWEET requires binaries compiled individually for each test\n# Skip installation phase (install: true)\n#\ninstall: true\n\n\n\n#\n# Cache installed software\n#\n# After restoring the cache, the install scripts check for found\n# software and avoid recompiling and installing 
it.\n#\n# See https://docs.travis-ci.com/user/caching/\n#\n# The cache is setup amongst others based on the environment variables\ncache:\n directories:\n # Cache the install directory for SWEET's 3rd party software\n local_software/local\n\n\"\"\"\n )\nif verbosity >= 10:\n print('Job combinations: ' + str(c))\n",
"<import token>\nif not 'MULE_SOFTWARE_ROOT' in os.environ:\n print('No SWEET environment variables detected, skipping Travis updates')\n sys.exit(0)\nos.chdir(os.environ['MULE_SOFTWARE_ROOT'])\n<import token>\ntravis_file = '.travis.yml'\nverbosity = 10\nif len(sys.argv) > 1:\n verbosity = int(sys.argv[1])\nif verbosity >= 10:\n print('Working directory: ' + os.path.abspath(os.curdir))\n print(\"Setting up tests in travis file '\" + travis_file + \"'\")\ntests = glob.glob('./tests/??_*/test.sh')\ntests += glob.glob('./tests/??_*/test.py')\nif verbosity >= 10:\n for test in tests:\n print(\" + Found test script '\" + test + \"'\")\nif verbosity >= 10:\n print(\"Writing content to file '\" + travis_file + \"'\")\nwith open(travis_file, 'w') as f:\n f.write(\n \"\"\"#\n# Script for Travis CI\n#\n# See doc/travis_ci.txt for more information\n#\n\nlanguage: cpp\n\ndist: trusty\n\n#\n# We want to setup the 3rd party libraries and test SWEET\n# with different software consellations\n#\n# This is called a build matrix and generates different build\n# environments\n#\njobs:\n include:\n\n\"\"\"\n )\n jobs_list = []\n if True:\n jobs_list += [\n \"\"\"\n # Test with G++-8\n - os: linux\n addons:\n apt:\n sources:\n - ubuntu-toolchain-r-test\n packages:\n - g++-8\n - gfortran-8\n env:\n - MATRIX_EVAL=\"export CC=gcc-8 && export CXX=g++-8 && export FC=gfortran-8 && export F90=gfortran-8\\\"\n\"\"\"\n ]\n if False:\n jobs_list += [\n \"\"\"\n # Test with G++-7\n - os: linux\n addons:\n apt:\n sources:\n - ubuntu-toolchain-r-test\n packages:\n - g++-7\n - gfortran-7\n env:\n - MATRIX_EVAL=\"export CC=gcc-7 && export CXX=g++-7 && export FC=gfortran-7 && export F90=gfortran-7\\\"\n\"\"\"\n ]\n if False:\n jobs_list += [\n \"\"\"\n # Test with G++-6\n - os: linux\n addons:\n apt:\n sources:\n - ubuntu-toolchain-r-test\n packages:\n - g++-6\n - gfortran-6\n env:\n - MATRIX_EVAL=\"export CC=gcc-6 && export CXX=g++-6 && export FC=gfortran-6 && export F90=gfortran-6\\\"\n\"\"\"\n ]\n if True:\n jobs_list += [\n \"\"\"\n # Test with G++-5\n - os: linux\n addons:\n apt:\n sources:\n - ubuntu-toolchain-r-test\n packages:\n - g++-5\n - gfortran-5\n env:\n - MATRIX_EVAL=\"export CC=gcc-5 && export CXX=g++-5 && export FC=gfortran-5 && export F90=gfortran-5\\\"\n\"\"\"\n ]\n c = 0\n for j, test in product(jobs_list, tests):\n if True:\n f.write(j)\n if True:\n f.write(' script: ' + test)\n else:\n f.write(' script:\\n')\n f.write(' - cd \"' + os.path.dirname(test) + '\"\\n')\n f.write(' - ./' + os.path.basename(test) + '\\n')\n else:\n j = j.replace('MATRIX_EVAL=\"', 'MATRIX_EVAL=\"TESTSCRIPT=' +\n test + ' && ')\n f.write(' script: $TESTSCRIPT')\n f.write('\\n')\n f.write('\\n')\n c += 1\n f.write(\n \"\"\"\n\n#\n# Install dependencies\n#\n# See https://docs.travis-ci.com/user/installing-dependencies/\n#\nbefore_install:\n # Load matrix environment\n - echo \"${MATRIX_EVAL}\"\n - eval \"${MATRIX_EVAL}\"\n\n # Debug output\n - hostname\n\n # Load SEET environment variables\n - cd local_software || exit 1\n - source env_vars.sh || exit 1\n\n # Setup additional SWEET software packages\n - ./setup_local_software.sh || exit 1\n\n # Go back to SWEET's root directory\n - cd $MULE_SOFTWARE_ROOT\n\n\n\n#\n# SWEET requires binaries compiled individually for each test\n# Skip installation phase (install: true)\n#\ninstall: true\n\n\n\n#\n# Cache installed software\n#\n# After restoring the cache, the install scripts check for found\n# software and avoid recompiling and installing it.\n#\n# See 
https://docs.travis-ci.com/user/caching/\n#\n# The cache is setup amongst others based on the environment variables\ncache:\n directories:\n # Cache the install directory for SWEET's 3rd party software\n local_software/local\n\n\"\"\"\n )\nif verbosity >= 10:\n print('Job combinations: ' + str(c))\n",
"<import token>\nif not 'MULE_SOFTWARE_ROOT' in os.environ:\n print('No SWEET environment variables detected, skipping Travis updates')\n sys.exit(0)\nos.chdir(os.environ['MULE_SOFTWARE_ROOT'])\n<import token>\n<assignment token>\nif len(sys.argv) > 1:\n verbosity = int(sys.argv[1])\nif verbosity >= 10:\n print('Working directory: ' + os.path.abspath(os.curdir))\n print(\"Setting up tests in travis file '\" + travis_file + \"'\")\n<assignment token>\ntests += glob.glob('./tests/??_*/test.py')\nif verbosity >= 10:\n for test in tests:\n print(\" + Found test script '\" + test + \"'\")\nif verbosity >= 10:\n print(\"Writing content to file '\" + travis_file + \"'\")\nwith open(travis_file, 'w') as f:\n f.write(\n \"\"\"#\n# Script for Travis CI\n#\n# See doc/travis_ci.txt for more information\n#\n\nlanguage: cpp\n\ndist: trusty\n\n#\n# We want to setup the 3rd party libraries and test SWEET\n# with different software consellations\n#\n# This is called a build matrix and generates different build\n# environments\n#\njobs:\n include:\n\n\"\"\"\n )\n jobs_list = []\n if True:\n jobs_list += [\n \"\"\"\n # Test with G++-8\n - os: linux\n addons:\n apt:\n sources:\n - ubuntu-toolchain-r-test\n packages:\n - g++-8\n - gfortran-8\n env:\n - MATRIX_EVAL=\"export CC=gcc-8 && export CXX=g++-8 && export FC=gfortran-8 && export F90=gfortran-8\\\"\n\"\"\"\n ]\n if False:\n jobs_list += [\n \"\"\"\n # Test with G++-7\n - os: linux\n addons:\n apt:\n sources:\n - ubuntu-toolchain-r-test\n packages:\n - g++-7\n - gfortran-7\n env:\n - MATRIX_EVAL=\"export CC=gcc-7 && export CXX=g++-7 && export FC=gfortran-7 && export F90=gfortran-7\\\"\n\"\"\"\n ]\n if False:\n jobs_list += [\n \"\"\"\n # Test with G++-6\n - os: linux\n addons:\n apt:\n sources:\n - ubuntu-toolchain-r-test\n packages:\n - g++-6\n - gfortran-6\n env:\n - MATRIX_EVAL=\"export CC=gcc-6 && export CXX=g++-6 && export FC=gfortran-6 && export F90=gfortran-6\\\"\n\"\"\"\n ]\n if True:\n jobs_list += [\n \"\"\"\n # Test with G++-5\n - os: linux\n addons:\n apt:\n sources:\n - ubuntu-toolchain-r-test\n packages:\n - g++-5\n - gfortran-5\n env:\n - MATRIX_EVAL=\"export CC=gcc-5 && export CXX=g++-5 && export FC=gfortran-5 && export F90=gfortran-5\\\"\n\"\"\"\n ]\n c = 0\n for j, test in product(jobs_list, tests):\n if True:\n f.write(j)\n if True:\n f.write(' script: ' + test)\n else:\n f.write(' script:\\n')\n f.write(' - cd \"' + os.path.dirname(test) + '\"\\n')\n f.write(' - ./' + os.path.basename(test) + '\\n')\n else:\n j = j.replace('MATRIX_EVAL=\"', 'MATRIX_EVAL=\"TESTSCRIPT=' +\n test + ' && ')\n f.write(' script: $TESTSCRIPT')\n f.write('\\n')\n f.write('\\n')\n c += 1\n f.write(\n \"\"\"\n\n#\n# Install dependencies\n#\n# See https://docs.travis-ci.com/user/installing-dependencies/\n#\nbefore_install:\n # Load matrix environment\n - echo \"${MATRIX_EVAL}\"\n - eval \"${MATRIX_EVAL}\"\n\n # Debug output\n - hostname\n\n # Load SEET environment variables\n - cd local_software || exit 1\n - source env_vars.sh || exit 1\n\n # Setup additional SWEET software packages\n - ./setup_local_software.sh || exit 1\n\n # Go back to SWEET's root directory\n - cd $MULE_SOFTWARE_ROOT\n\n\n\n#\n# SWEET requires binaries compiled individually for each test\n# Skip installation phase (install: true)\n#\ninstall: true\n\n\n\n#\n# Cache installed software\n#\n# After restoring the cache, the install scripts check for found\n# software and avoid recompiling and installing it.\n#\n# See https://docs.travis-ci.com/user/caching/\n#\n# The cache is setup amongst others 
based on the environment variables\ncache:\n directories:\n # Cache the install directory for SWEET's 3rd party software\n local_software/local\n\n\"\"\"\n )\nif verbosity >= 10:\n print('Job combinations: ' + str(c))\n",
"<import token>\n<code token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,546 |
b5023c3f11d2c31e3d0a92f8845870d4bceeb9d1
|
class Stock(object):
def __init__(self, arr, k):
self.arr = arr
self.k = k
self.min_peak_index = []
self.profit = 0
self.max_ps = []
def find_peak_min(self):
for i in range(len(self.arr)):
if i == 0 and self.arr[1] > self.arr[0]:
self.min_peak_index.append(0)
elif i == len(self.arr) - 1 and self.arr[len(self.arr)-1] < self.arr[len(self.arr)-2]:
self.min_peak_index.append(len(self.arr) - 1)
else:
if self.arr[i] < self.arr[i-1] and self.arr[i] < self.arr[i+1]:
self.min_peak_index.append(i)
def find_max_profit(self, arr):
for i in range(len(self.min_peak_index)):
max_ps.append(max(self.arr[self.min_peak_index[i]:self.min_peak_index[i+1]]) - self.min_peak_index[i])
|
[
"class Stock(object):\n def __init__(self, arr, k):\n self.arr = arr\n self.k = k\n self.min_peak_index = []\n self.profit = 0\n self.max_ps = []\n\n def find_peak_min(self):\n for i in range(len(self.arr)):\n if i == 0 and self.arr[1] > self.arr[0]:\n self.min_peak_index.append(0)\n\n elif i == len(self.arr) - 1 and self.arr[len(self.arr)-1] < self.arr[len(self.arr)-2]:\n self.min_peak_index.append(len(self.arr) - 1)\n \n else:\n if self.arr[i] < self.arr[i-1] and self.arr[i] < self.arr[i+1]:\n self.min_peak_index.append(i)\n\n def find_max_profit(self, arr):\n for i in range(len(self.min_peak_index)):\n \n max_ps.append(max(self.arr[self.min_peak_index[i]:self.min_peak_index[i+1]]) - self.min_peak_index[i])",
"class Stock(object):\n\n def __init__(self, arr, k):\n self.arr = arr\n self.k = k\n self.min_peak_index = []\n self.profit = 0\n self.max_ps = []\n\n def find_peak_min(self):\n for i in range(len(self.arr)):\n if i == 0 and self.arr[1] > self.arr[0]:\n self.min_peak_index.append(0)\n elif i == len(self.arr) - 1 and self.arr[len(self.arr) - 1\n ] < self.arr[len(self.arr) - 2]:\n self.min_peak_index.append(len(self.arr) - 1)\n elif self.arr[i] < self.arr[i - 1] and self.arr[i] < self.arr[i + 1\n ]:\n self.min_peak_index.append(i)\n\n def find_max_profit(self, arr):\n for i in range(len(self.min_peak_index)):\n max_ps.append(max(self.arr[self.min_peak_index[i]:self.\n min_peak_index[i + 1]]) - self.min_peak_index[i])\n",
"class Stock(object):\n <function token>\n\n def find_peak_min(self):\n for i in range(len(self.arr)):\n if i == 0 and self.arr[1] > self.arr[0]:\n self.min_peak_index.append(0)\n elif i == len(self.arr) - 1 and self.arr[len(self.arr) - 1\n ] < self.arr[len(self.arr) - 2]:\n self.min_peak_index.append(len(self.arr) - 1)\n elif self.arr[i] < self.arr[i - 1] and self.arr[i] < self.arr[i + 1\n ]:\n self.min_peak_index.append(i)\n\n def find_max_profit(self, arr):\n for i in range(len(self.min_peak_index)):\n max_ps.append(max(self.arr[self.min_peak_index[i]:self.\n min_peak_index[i + 1]]) - self.min_peak_index[i])\n",
"class Stock(object):\n <function token>\n\n def find_peak_min(self):\n for i in range(len(self.arr)):\n if i == 0 and self.arr[1] > self.arr[0]:\n self.min_peak_index.append(0)\n elif i == len(self.arr) - 1 and self.arr[len(self.arr) - 1\n ] < self.arr[len(self.arr) - 2]:\n self.min_peak_index.append(len(self.arr) - 1)\n elif self.arr[i] < self.arr[i - 1] and self.arr[i] < self.arr[i + 1\n ]:\n self.min_peak_index.append(i)\n <function token>\n",
"class Stock(object):\n <function token>\n <function token>\n <function token>\n",
"<class token>\n"
] | false |
98,547 |
3b6d65259f17215268bbd29e63c2157947bf6a5c
|
import tensorflow as tf
import numpy as np
import os
import pdb
def create_inputs(params):
"""
Loads prepared training files and appends them as np arrays to a list.
This approach is better because a FIFOQueue with a reader can't utilize
the GPU while this approach can.
"""
sess = tf.Session()
lr_images, hr_labels = [], []
training_dir = params['training_dir'].format(params['ratio'])
# Raise exception if user has not ran prepare_data.py yet
if not os.path.isdir(training_dir):
raise Exception("You must first run prepare_data.py before you can train")
lr_shape = (params['lr_size'], params['lr_size'], 3)
hr_shape = output_shape = (params['lr_size'] - params['edge'], params['lr_size'] - params['edge'], 3 * params['ratio']**2)
for file in os.listdir(training_dir):
train_file = open("{}/{}".format(training_dir, file), "rb")
train_data = np.fromfile(train_file, dtype=np.uint8)
lr_image = train_data[:17 * 17 * 3].reshape(lr_shape)
lr_images.append(lr_image)
hr_label = train_data[17 * 17 * 3:].reshape(hr_shape)
hr_labels.append(hr_label)
return lr_images, hr_labels
|
[
"import tensorflow as tf\nimport numpy as np\nimport os\nimport pdb\n\ndef create_inputs(params):\n \"\"\"\n Loads prepared training files and appends them as np arrays to a list.\n This approach is better because a FIFOQueue with a reader can't utilize\n the GPU while this approach can.\n \"\"\"\n sess = tf.Session()\n\n lr_images, hr_labels = [], []\n training_dir = params['training_dir'].format(params['ratio'])\n\n # Raise exception if user has not ran prepare_data.py yet\n if not os.path.isdir(training_dir):\n raise Exception(\"You must first run prepare_data.py before you can train\")\n\n lr_shape = (params['lr_size'], params['lr_size'], 3)\n hr_shape = output_shape = (params['lr_size'] - params['edge'], params['lr_size'] - params['edge'], 3 * params['ratio']**2)\n for file in os.listdir(training_dir):\n train_file = open(\"{}/{}\".format(training_dir, file), \"rb\")\n train_data = np.fromfile(train_file, dtype=np.uint8)\n\n lr_image = train_data[:17 * 17 * 3].reshape(lr_shape)\n lr_images.append(lr_image)\n\n hr_label = train_data[17 * 17 * 3:].reshape(hr_shape)\n hr_labels.append(hr_label)\n\n return lr_images, hr_labels\n",
"import tensorflow as tf\nimport numpy as np\nimport os\nimport pdb\n\n\ndef create_inputs(params):\n \"\"\"\n Loads prepared training files and appends them as np arrays to a list.\n This approach is better because a FIFOQueue with a reader can't utilize\n the GPU while this approach can.\n \"\"\"\n sess = tf.Session()\n lr_images, hr_labels = [], []\n training_dir = params['training_dir'].format(params['ratio'])\n if not os.path.isdir(training_dir):\n raise Exception(\n 'You must first run prepare_data.py before you can train')\n lr_shape = params['lr_size'], params['lr_size'], 3\n hr_shape = output_shape = params['lr_size'] - params['edge'], params[\n 'lr_size'] - params['edge'], 3 * params['ratio'] ** 2\n for file in os.listdir(training_dir):\n train_file = open('{}/{}'.format(training_dir, file), 'rb')\n train_data = np.fromfile(train_file, dtype=np.uint8)\n lr_image = train_data[:17 * 17 * 3].reshape(lr_shape)\n lr_images.append(lr_image)\n hr_label = train_data[17 * 17 * 3:].reshape(hr_shape)\n hr_labels.append(hr_label)\n return lr_images, hr_labels\n",
"<import token>\n\n\ndef create_inputs(params):\n \"\"\"\n Loads prepared training files and appends them as np arrays to a list.\n This approach is better because a FIFOQueue with a reader can't utilize\n the GPU while this approach can.\n \"\"\"\n sess = tf.Session()\n lr_images, hr_labels = [], []\n training_dir = params['training_dir'].format(params['ratio'])\n if not os.path.isdir(training_dir):\n raise Exception(\n 'You must first run prepare_data.py before you can train')\n lr_shape = params['lr_size'], params['lr_size'], 3\n hr_shape = output_shape = params['lr_size'] - params['edge'], params[\n 'lr_size'] - params['edge'], 3 * params['ratio'] ** 2\n for file in os.listdir(training_dir):\n train_file = open('{}/{}'.format(training_dir, file), 'rb')\n train_data = np.fromfile(train_file, dtype=np.uint8)\n lr_image = train_data[:17 * 17 * 3].reshape(lr_shape)\n lr_images.append(lr_image)\n hr_label = train_data[17 * 17 * 3:].reshape(hr_shape)\n hr_labels.append(hr_label)\n return lr_images, hr_labels\n",
"<import token>\n<function token>\n"
] | false |
98,548 |
2c8d83e42c1841398e0b4026d2350ea5313e6dac
|
# class Pet(object):
class Pet:
def Sleep(self):
print('zzz')
def Eat(self): #추상함수
print("???")
class Dog(Pet):
def Speak(self):
print('bow wow')
def Eat(self): #오버라이딩
print('bone')
dog = Dog()
dog.Eat()
dog.Sleep()
dog.Speak()
|
[
"# class Pet(object):\nclass Pet:\n def Sleep(self):\n print('zzz')\n def Eat(self): #추상함수\n print(\"???\")\nclass Dog(Pet):\n def Speak(self):\n print('bow wow')\n def Eat(self): #오버라이딩\n print('bone')\ndog = Dog()\ndog.Eat()\ndog.Sleep()\ndog.Speak()\n",
"class Pet:\n\n def Sleep(self):\n print('zzz')\n\n def Eat(self):\n print('???')\n\n\nclass Dog(Pet):\n\n def Speak(self):\n print('bow wow')\n\n def Eat(self):\n print('bone')\n\n\ndog = Dog()\ndog.Eat()\ndog.Sleep()\ndog.Speak()\n",
"class Pet:\n\n def Sleep(self):\n print('zzz')\n\n def Eat(self):\n print('???')\n\n\nclass Dog(Pet):\n\n def Speak(self):\n print('bow wow')\n\n def Eat(self):\n print('bone')\n\n\n<assignment token>\ndog.Eat()\ndog.Sleep()\ndog.Speak()\n",
"class Pet:\n\n def Sleep(self):\n print('zzz')\n\n def Eat(self):\n print('???')\n\n\nclass Dog(Pet):\n\n def Speak(self):\n print('bow wow')\n\n def Eat(self):\n print('bone')\n\n\n<assignment token>\n<code token>\n",
"class Pet:\n <function token>\n\n def Eat(self):\n print('???')\n\n\nclass Dog(Pet):\n\n def Speak(self):\n print('bow wow')\n\n def Eat(self):\n print('bone')\n\n\n<assignment token>\n<code token>\n",
"class Pet:\n <function token>\n <function token>\n\n\nclass Dog(Pet):\n\n def Speak(self):\n print('bow wow')\n\n def Eat(self):\n print('bone')\n\n\n<assignment token>\n<code token>\n",
"<class token>\n\n\nclass Dog(Pet):\n\n def Speak(self):\n print('bow wow')\n\n def Eat(self):\n print('bone')\n\n\n<assignment token>\n<code token>\n",
"<class token>\n\n\nclass Dog(Pet):\n <function token>\n\n def Eat(self):\n print('bone')\n\n\n<assignment token>\n<code token>\n",
"<class token>\n\n\nclass Dog(Pet):\n <function token>\n <function token>\n\n\n<assignment token>\n<code token>\n",
"<class token>\n<class token>\n<assignment token>\n<code token>\n"
] | false |
98,549 |
d810f3ce17852bb9d2890979b5ceb9e8028c4b1b
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 16 15:24:16 2015
@author: kerpowski
"""
from ChessAI import alphabeta, position_bonus_late
from ChessLib import Bitboard, bit_count
import ChessLib
import numpy as np
import time
def test_white_mate_in_three():
test_mate = 'k7/8/8/1Q6/2K5/8/8/8 w - - 11 11'
foo = Bitboard(test_mate)
a, b = alphabeta(foo, 1, -np.inf, np.inf, 5)
print(a, b)
def test_avoid_stalemate():
test_mate = '8/k7/8/1Q6/2K5/8/8/8 b - - 10 10'
foo = Bitboard(test_mate)
a, b = alphabeta(foo, 1, -np.inf, np.inf, 2)
print(a, b)
def test_avoid_stalemate2():
test_mate = 'k7/8/8/1Q6/2K5/8/8/8 w - - 11 11'
foo = Bitboard(test_mate)
a, b = alphabeta(foo, 1, -np.inf, np.inf, 2)
print(a, b)
def test_mate_in_three():
test_mate = '8/k7/8/1Q6/2K5/8/8/8 b - - 10 10'
foo = Bitboard(test_mate)
a, b = alphabeta(foo, 1, -np.inf, np.inf, 5)
print(a, b)
def test_mate_in_one():
test_mate_in_one = '8/k1K5/8/1Q6/8/8/8/8 b - - 10 10'
foo = Bitboard(test_mate_in_one)
a, b = alphabeta(foo, 1, -np.inf, np.inf, 1)
print(a, b)
def test_long_mate():
test_mate = '8/k7/8/1Q6/8/8/2K5/8 b - - 10 10'
foo = Bitboard(test_mate)
a, b = alphabeta(foo, 1, -np.inf, np.inf, 5)
print(a, b)
def test_rook_mate():
test_mate = '8/k7/8/1R6/8/8/2K5/8 b - - 0 0'
foo = Bitboard(test_mate)
for i in range(100):
a, b = alphabeta(foo, 1, -np.inf, np.inf, 3)
if len(b) > 0:
foo.push(b[0])
else:
break
#assert
foo.is_checkmate() and foo.turn == ChessLib.BLACK
def test_bishop_and_pawn_mate():
test_mate = '8/k7/8/1B6/8/8/1P6/7K b - - 10 10'
foo = Bitboard(test_mate)
for i in range(100):
a, b = alphabeta(foo, 1, -np.inf, np.inf, 3)
if len(b) > 0:
foo.push(b[0])
else:
break
#assert
foo.is_checkmate() and foo.turn == ChessLib.BLACK
test_mate = '8/k7/8/1B6/8/8/1P6/7K b - - 10 10'
test_mate = '8/6R1/8/8/2n1b3/2k3p1/K7/6r1 w - - 8 101'
foo = Bitboard(test_mate)
a = 0
b = []
for i in range(100):
a, b = alphabeta(foo, 1, -np.inf, np.inf, 5)
if len(b) > 0:
foo.push(b[0])
print(i, a, b)
print(foo)
else:
break
def animate_moves(start, moves):
print(start)
for m in moves:
time.sleep(0.5)
start.push(m)
print(start)
|
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 16 15:24:16 2015\n\n@author: kerpowski\n\"\"\"\n\nfrom ChessAI import alphabeta, position_bonus_late\nfrom ChessLib import Bitboard, bit_count\nimport ChessLib\nimport numpy as np\nimport time\n\ndef test_white_mate_in_three():\n test_mate = 'k7/8/8/1Q6/2K5/8/8/8 w - - 11 11'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 5)\n print(a, b)\n\n\ndef test_avoid_stalemate():\n test_mate = '8/k7/8/1Q6/2K5/8/8/8 b - - 10 10'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 2)\n print(a, b)\n\ndef test_avoid_stalemate2():\n test_mate = 'k7/8/8/1Q6/2K5/8/8/8 w - - 11 11'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 2)\n print(a, b)\n\ndef test_mate_in_three():\n test_mate = '8/k7/8/1Q6/2K5/8/8/8 b - - 10 10'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 5)\n print(a, b)\n\ndef test_mate_in_one():\n test_mate_in_one = '8/k1K5/8/1Q6/8/8/8/8 b - - 10 10'\n foo = Bitboard(test_mate_in_one)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 1)\n print(a, b)\n \ndef test_long_mate():\n test_mate = '8/k7/8/1Q6/8/8/2K5/8 b - - 10 10'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 5)\n print(a, b)\n\ndef test_rook_mate():\n test_mate = '8/k7/8/1R6/8/8/2K5/8 b - - 0 0'\n foo = Bitboard(test_mate)\n for i in range(100):\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 3)\n if len(b) > 0:\n foo.push(b[0])\n else: \n break\n \n #assert \n foo.is_checkmate() and foo.turn == ChessLib.BLACK\n \ndef test_bishop_and_pawn_mate():\n test_mate = '8/k7/8/1B6/8/8/1P6/7K b - - 10 10'\n foo = Bitboard(test_mate)\n for i in range(100):\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 3)\n if len(b) > 0:\n foo.push(b[0])\n else: \n break\n \n #assert \n foo.is_checkmate() and foo.turn == ChessLib.BLACK\n \ntest_mate = '8/k7/8/1B6/8/8/1P6/7K b - - 10 10'\ntest_mate = '8/6R1/8/8/2n1b3/2k3p1/K7/6r1 w - - 8 101'\nfoo = Bitboard(test_mate)\na = 0\nb = []\nfor i in range(100):\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 5)\n if len(b) > 0:\n foo.push(b[0])\n print(i, a, b)\n print(foo)\n else:\n break\n\ndef animate_moves(start, moves):\n print(start)\n for m in moves:\n time.sleep(0.5)\n start.push(m)\n print(start)",
"<docstring token>\nfrom ChessAI import alphabeta, position_bonus_late\nfrom ChessLib import Bitboard, bit_count\nimport ChessLib\nimport numpy as np\nimport time\n\n\ndef test_white_mate_in_three():\n test_mate = 'k7/8/8/1Q6/2K5/8/8/8 w - - 11 11'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 5)\n print(a, b)\n\n\ndef test_avoid_stalemate():\n test_mate = '8/k7/8/1Q6/2K5/8/8/8 b - - 10 10'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 2)\n print(a, b)\n\n\ndef test_avoid_stalemate2():\n test_mate = 'k7/8/8/1Q6/2K5/8/8/8 w - - 11 11'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 2)\n print(a, b)\n\n\ndef test_mate_in_three():\n test_mate = '8/k7/8/1Q6/2K5/8/8/8 b - - 10 10'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 5)\n print(a, b)\n\n\ndef test_mate_in_one():\n test_mate_in_one = '8/k1K5/8/1Q6/8/8/8/8 b - - 10 10'\n foo = Bitboard(test_mate_in_one)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 1)\n print(a, b)\n\n\ndef test_long_mate():\n test_mate = '8/k7/8/1Q6/8/8/2K5/8 b - - 10 10'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 5)\n print(a, b)\n\n\ndef test_rook_mate():\n test_mate = '8/k7/8/1R6/8/8/2K5/8 b - - 0 0'\n foo = Bitboard(test_mate)\n for i in range(100):\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 3)\n if len(b) > 0:\n foo.push(b[0])\n else:\n break\n foo.is_checkmate() and foo.turn == ChessLib.BLACK\n\n\ndef test_bishop_and_pawn_mate():\n test_mate = '8/k7/8/1B6/8/8/1P6/7K b - - 10 10'\n foo = Bitboard(test_mate)\n for i in range(100):\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 3)\n if len(b) > 0:\n foo.push(b[0])\n else:\n break\n foo.is_checkmate() and foo.turn == ChessLib.BLACK\n\n\ntest_mate = '8/k7/8/1B6/8/8/1P6/7K b - - 10 10'\ntest_mate = '8/6R1/8/8/2n1b3/2k3p1/K7/6r1 w - - 8 101'\nfoo = Bitboard(test_mate)\na = 0\nb = []\nfor i in range(100):\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 5)\n if len(b) > 0:\n foo.push(b[0])\n print(i, a, b)\n print(foo)\n else:\n break\n\n\ndef animate_moves(start, moves):\n print(start)\n for m in moves:\n time.sleep(0.5)\n start.push(m)\n print(start)\n",
"<docstring token>\n<import token>\n\n\ndef test_white_mate_in_three():\n test_mate = 'k7/8/8/1Q6/2K5/8/8/8 w - - 11 11'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 5)\n print(a, b)\n\n\ndef test_avoid_stalemate():\n test_mate = '8/k7/8/1Q6/2K5/8/8/8 b - - 10 10'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 2)\n print(a, b)\n\n\ndef test_avoid_stalemate2():\n test_mate = 'k7/8/8/1Q6/2K5/8/8/8 w - - 11 11'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 2)\n print(a, b)\n\n\ndef test_mate_in_three():\n test_mate = '8/k7/8/1Q6/2K5/8/8/8 b - - 10 10'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 5)\n print(a, b)\n\n\ndef test_mate_in_one():\n test_mate_in_one = '8/k1K5/8/1Q6/8/8/8/8 b - - 10 10'\n foo = Bitboard(test_mate_in_one)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 1)\n print(a, b)\n\n\ndef test_long_mate():\n test_mate = '8/k7/8/1Q6/8/8/2K5/8 b - - 10 10'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 5)\n print(a, b)\n\n\ndef test_rook_mate():\n test_mate = '8/k7/8/1R6/8/8/2K5/8 b - - 0 0'\n foo = Bitboard(test_mate)\n for i in range(100):\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 3)\n if len(b) > 0:\n foo.push(b[0])\n else:\n break\n foo.is_checkmate() and foo.turn == ChessLib.BLACK\n\n\ndef test_bishop_and_pawn_mate():\n test_mate = '8/k7/8/1B6/8/8/1P6/7K b - - 10 10'\n foo = Bitboard(test_mate)\n for i in range(100):\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 3)\n if len(b) > 0:\n foo.push(b[0])\n else:\n break\n foo.is_checkmate() and foo.turn == ChessLib.BLACK\n\n\ntest_mate = '8/k7/8/1B6/8/8/1P6/7K b - - 10 10'\ntest_mate = '8/6R1/8/8/2n1b3/2k3p1/K7/6r1 w - - 8 101'\nfoo = Bitboard(test_mate)\na = 0\nb = []\nfor i in range(100):\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 5)\n if len(b) > 0:\n foo.push(b[0])\n print(i, a, b)\n print(foo)\n else:\n break\n\n\ndef animate_moves(start, moves):\n print(start)\n for m in moves:\n time.sleep(0.5)\n start.push(m)\n print(start)\n",
"<docstring token>\n<import token>\n\n\ndef test_white_mate_in_three():\n test_mate = 'k7/8/8/1Q6/2K5/8/8/8 w - - 11 11'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 5)\n print(a, b)\n\n\ndef test_avoid_stalemate():\n test_mate = '8/k7/8/1Q6/2K5/8/8/8 b - - 10 10'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 2)\n print(a, b)\n\n\ndef test_avoid_stalemate2():\n test_mate = 'k7/8/8/1Q6/2K5/8/8/8 w - - 11 11'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 2)\n print(a, b)\n\n\ndef test_mate_in_three():\n test_mate = '8/k7/8/1Q6/2K5/8/8/8 b - - 10 10'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 5)\n print(a, b)\n\n\ndef test_mate_in_one():\n test_mate_in_one = '8/k1K5/8/1Q6/8/8/8/8 b - - 10 10'\n foo = Bitboard(test_mate_in_one)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 1)\n print(a, b)\n\n\ndef test_long_mate():\n test_mate = '8/k7/8/1Q6/8/8/2K5/8 b - - 10 10'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 5)\n print(a, b)\n\n\ndef test_rook_mate():\n test_mate = '8/k7/8/1R6/8/8/2K5/8 b - - 0 0'\n foo = Bitboard(test_mate)\n for i in range(100):\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 3)\n if len(b) > 0:\n foo.push(b[0])\n else:\n break\n foo.is_checkmate() and foo.turn == ChessLib.BLACK\n\n\ndef test_bishop_and_pawn_mate():\n test_mate = '8/k7/8/1B6/8/8/1P6/7K b - - 10 10'\n foo = Bitboard(test_mate)\n for i in range(100):\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 3)\n if len(b) > 0:\n foo.push(b[0])\n else:\n break\n foo.is_checkmate() and foo.turn == ChessLib.BLACK\n\n\n<assignment token>\nfor i in range(100):\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 5)\n if len(b) > 0:\n foo.push(b[0])\n print(i, a, b)\n print(foo)\n else:\n break\n\n\ndef animate_moves(start, moves):\n print(start)\n for m in moves:\n time.sleep(0.5)\n start.push(m)\n print(start)\n",
"<docstring token>\n<import token>\n\n\ndef test_white_mate_in_three():\n test_mate = 'k7/8/8/1Q6/2K5/8/8/8 w - - 11 11'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 5)\n print(a, b)\n\n\ndef test_avoid_stalemate():\n test_mate = '8/k7/8/1Q6/2K5/8/8/8 b - - 10 10'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 2)\n print(a, b)\n\n\ndef test_avoid_stalemate2():\n test_mate = 'k7/8/8/1Q6/2K5/8/8/8 w - - 11 11'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 2)\n print(a, b)\n\n\ndef test_mate_in_three():\n test_mate = '8/k7/8/1Q6/2K5/8/8/8 b - - 10 10'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 5)\n print(a, b)\n\n\ndef test_mate_in_one():\n test_mate_in_one = '8/k1K5/8/1Q6/8/8/8/8 b - - 10 10'\n foo = Bitboard(test_mate_in_one)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 1)\n print(a, b)\n\n\ndef test_long_mate():\n test_mate = '8/k7/8/1Q6/8/8/2K5/8 b - - 10 10'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 5)\n print(a, b)\n\n\ndef test_rook_mate():\n test_mate = '8/k7/8/1R6/8/8/2K5/8 b - - 0 0'\n foo = Bitboard(test_mate)\n for i in range(100):\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 3)\n if len(b) > 0:\n foo.push(b[0])\n else:\n break\n foo.is_checkmate() and foo.turn == ChessLib.BLACK\n\n\ndef test_bishop_and_pawn_mate():\n test_mate = '8/k7/8/1B6/8/8/1P6/7K b - - 10 10'\n foo = Bitboard(test_mate)\n for i in range(100):\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 3)\n if len(b) > 0:\n foo.push(b[0])\n else:\n break\n foo.is_checkmate() and foo.turn == ChessLib.BLACK\n\n\n<assignment token>\n<code token>\n\n\ndef animate_moves(start, moves):\n print(start)\n for m in moves:\n time.sleep(0.5)\n start.push(m)\n print(start)\n",
"<docstring token>\n<import token>\n\n\ndef test_white_mate_in_three():\n test_mate = 'k7/8/8/1Q6/2K5/8/8/8 w - - 11 11'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 5)\n print(a, b)\n\n\ndef test_avoid_stalemate():\n test_mate = '8/k7/8/1Q6/2K5/8/8/8 b - - 10 10'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 2)\n print(a, b)\n\n\ndef test_avoid_stalemate2():\n test_mate = 'k7/8/8/1Q6/2K5/8/8/8 w - - 11 11'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 2)\n print(a, b)\n\n\ndef test_mate_in_three():\n test_mate = '8/k7/8/1Q6/2K5/8/8/8 b - - 10 10'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 5)\n print(a, b)\n\n\ndef test_mate_in_one():\n test_mate_in_one = '8/k1K5/8/1Q6/8/8/8/8 b - - 10 10'\n foo = Bitboard(test_mate_in_one)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 1)\n print(a, b)\n\n\ndef test_long_mate():\n test_mate = '8/k7/8/1Q6/8/8/2K5/8 b - - 10 10'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 5)\n print(a, b)\n\n\ndef test_rook_mate():\n test_mate = '8/k7/8/1R6/8/8/2K5/8 b - - 0 0'\n foo = Bitboard(test_mate)\n for i in range(100):\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 3)\n if len(b) > 0:\n foo.push(b[0])\n else:\n break\n foo.is_checkmate() and foo.turn == ChessLib.BLACK\n\n\n<function token>\n<assignment token>\n<code token>\n\n\ndef animate_moves(start, moves):\n print(start)\n for m in moves:\n time.sleep(0.5)\n start.push(m)\n print(start)\n",
"<docstring token>\n<import token>\n\n\ndef test_white_mate_in_three():\n test_mate = 'k7/8/8/1Q6/2K5/8/8/8 w - - 11 11'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 5)\n print(a, b)\n\n\ndef test_avoid_stalemate():\n test_mate = '8/k7/8/1Q6/2K5/8/8/8 b - - 10 10'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 2)\n print(a, b)\n\n\ndef test_avoid_stalemate2():\n test_mate = 'k7/8/8/1Q6/2K5/8/8/8 w - - 11 11'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 2)\n print(a, b)\n\n\ndef test_mate_in_three():\n test_mate = '8/k7/8/1Q6/2K5/8/8/8 b - - 10 10'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 5)\n print(a, b)\n\n\ndef test_mate_in_one():\n test_mate_in_one = '8/k1K5/8/1Q6/8/8/8/8 b - - 10 10'\n foo = Bitboard(test_mate_in_one)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 1)\n print(a, b)\n\n\n<function token>\n\n\ndef test_rook_mate():\n test_mate = '8/k7/8/1R6/8/8/2K5/8 b - - 0 0'\n foo = Bitboard(test_mate)\n for i in range(100):\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 3)\n if len(b) > 0:\n foo.push(b[0])\n else:\n break\n foo.is_checkmate() and foo.turn == ChessLib.BLACK\n\n\n<function token>\n<assignment token>\n<code token>\n\n\ndef animate_moves(start, moves):\n print(start)\n for m in moves:\n time.sleep(0.5)\n start.push(m)\n print(start)\n",
"<docstring token>\n<import token>\n<function token>\n\n\ndef test_avoid_stalemate():\n test_mate = '8/k7/8/1Q6/2K5/8/8/8 b - - 10 10'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 2)\n print(a, b)\n\n\ndef test_avoid_stalemate2():\n test_mate = 'k7/8/8/1Q6/2K5/8/8/8 w - - 11 11'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 2)\n print(a, b)\n\n\ndef test_mate_in_three():\n test_mate = '8/k7/8/1Q6/2K5/8/8/8 b - - 10 10'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 5)\n print(a, b)\n\n\ndef test_mate_in_one():\n test_mate_in_one = '8/k1K5/8/1Q6/8/8/8/8 b - - 10 10'\n foo = Bitboard(test_mate_in_one)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 1)\n print(a, b)\n\n\n<function token>\n\n\ndef test_rook_mate():\n test_mate = '8/k7/8/1R6/8/8/2K5/8 b - - 0 0'\n foo = Bitboard(test_mate)\n for i in range(100):\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 3)\n if len(b) > 0:\n foo.push(b[0])\n else:\n break\n foo.is_checkmate() and foo.turn == ChessLib.BLACK\n\n\n<function token>\n<assignment token>\n<code token>\n\n\ndef animate_moves(start, moves):\n print(start)\n for m in moves:\n time.sleep(0.5)\n start.push(m)\n print(start)\n",
"<docstring token>\n<import token>\n<function token>\n\n\ndef test_avoid_stalemate():\n test_mate = '8/k7/8/1Q6/2K5/8/8/8 b - - 10 10'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 2)\n print(a, b)\n\n\n<function token>\n\n\ndef test_mate_in_three():\n test_mate = '8/k7/8/1Q6/2K5/8/8/8 b - - 10 10'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 5)\n print(a, b)\n\n\ndef test_mate_in_one():\n test_mate_in_one = '8/k1K5/8/1Q6/8/8/8/8 b - - 10 10'\n foo = Bitboard(test_mate_in_one)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 1)\n print(a, b)\n\n\n<function token>\n\n\ndef test_rook_mate():\n test_mate = '8/k7/8/1R6/8/8/2K5/8 b - - 0 0'\n foo = Bitboard(test_mate)\n for i in range(100):\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 3)\n if len(b) > 0:\n foo.push(b[0])\n else:\n break\n foo.is_checkmate() and foo.turn == ChessLib.BLACK\n\n\n<function token>\n<assignment token>\n<code token>\n\n\ndef animate_moves(start, moves):\n print(start)\n for m in moves:\n time.sleep(0.5)\n start.push(m)\n print(start)\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_mate_in_three():\n test_mate = '8/k7/8/1Q6/2K5/8/8/8 b - - 10 10'\n foo = Bitboard(test_mate)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 5)\n print(a, b)\n\n\ndef test_mate_in_one():\n test_mate_in_one = '8/k1K5/8/1Q6/8/8/8/8 b - - 10 10'\n foo = Bitboard(test_mate_in_one)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 1)\n print(a, b)\n\n\n<function token>\n\n\ndef test_rook_mate():\n test_mate = '8/k7/8/1R6/8/8/2K5/8 b - - 0 0'\n foo = Bitboard(test_mate)\n for i in range(100):\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 3)\n if len(b) > 0:\n foo.push(b[0])\n else:\n break\n foo.is_checkmate() and foo.turn == ChessLib.BLACK\n\n\n<function token>\n<assignment token>\n<code token>\n\n\ndef animate_moves(start, moves):\n print(start)\n for m in moves:\n time.sleep(0.5)\n start.push(m)\n print(start)\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_mate_in_one():\n test_mate_in_one = '8/k1K5/8/1Q6/8/8/8/8 b - - 10 10'\n foo = Bitboard(test_mate_in_one)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 1)\n print(a, b)\n\n\n<function token>\n\n\ndef test_rook_mate():\n test_mate = '8/k7/8/1R6/8/8/2K5/8 b - - 0 0'\n foo = Bitboard(test_mate)\n for i in range(100):\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 3)\n if len(b) > 0:\n foo.push(b[0])\n else:\n break\n foo.is_checkmate() and foo.turn == ChessLib.BLACK\n\n\n<function token>\n<assignment token>\n<code token>\n\n\ndef animate_moves(start, moves):\n print(start)\n for m in moves:\n time.sleep(0.5)\n start.push(m)\n print(start)\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_mate_in_one():\n test_mate_in_one = '8/k1K5/8/1Q6/8/8/8/8 b - - 10 10'\n foo = Bitboard(test_mate_in_one)\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 1)\n print(a, b)\n\n\n<function token>\n\n\ndef test_rook_mate():\n test_mate = '8/k7/8/1R6/8/8/2K5/8 b - - 0 0'\n foo = Bitboard(test_mate)\n for i in range(100):\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 3)\n if len(b) > 0:\n foo.push(b[0])\n else:\n break\n foo.is_checkmate() and foo.turn == ChessLib.BLACK\n\n\n<function token>\n<assignment token>\n<code token>\n<function token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_rook_mate():\n test_mate = '8/k7/8/1R6/8/8/2K5/8 b - - 0 0'\n foo = Bitboard(test_mate)\n for i in range(100):\n a, b = alphabeta(foo, 1, -np.inf, np.inf, 3)\n if len(b) > 0:\n foo.push(b[0])\n else:\n break\n foo.is_checkmate() and foo.turn == ChessLib.BLACK\n\n\n<function token>\n<assignment token>\n<code token>\n<function token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<function token>\n"
] | false |
98,550 |
857349ba28a44c00c451e6a3373c917fd8581f85
|
#Name: ...Lowai Dobie...
#Email: [email protected]...
#Date: September 16, 2020
#This program prints: Turtle Color
import turtle
mess = input("Please enter a 6-digit Hexadecimal number : ")
mess = "#" + mess
thea = turtle.Turtle()
thea.shape("turtle")
thea.color(mess)
thea.stamp()
|
[
"#Name: ...Lowai Dobie...\r\n#Email: [email protected]...\r\n#Date: September 16, 2020\r\n#This program prints: Turtle Color\r\n\r\nimport turtle\r\n\r\nmess=input(\"Please enter a 6-digit Hexadecimal number : \")\r\nmess= (\"#\"+mess)\r\nthea = turtle.Turtle()\r\nthea.shape(\"turtle\")\r\nthea.color(mess)\r\nthea.stamp()\r\n",
"import turtle\nmess = input('Please enter a 6-digit Hexadecimal number : ')\nmess = '#' + mess\nthea = turtle.Turtle()\nthea.shape('turtle')\nthea.color(mess)\nthea.stamp()\n",
"<import token>\nmess = input('Please enter a 6-digit Hexadecimal number : ')\nmess = '#' + mess\nthea = turtle.Turtle()\nthea.shape('turtle')\nthea.color(mess)\nthea.stamp()\n",
"<import token>\n<assignment token>\nthea.shape('turtle')\nthea.color(mess)\nthea.stamp()\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
98,551 |
4578bca21cbc48d7cd69d89a6908848ce0f6ce73
|
# Q8: Construct an in-place algorithm to reverse a linked list!
class Node():
def __init__(self, data):
self.data = data
self.nextNode = None
class LinkedList():
def __init__(self):
self.head = None
self.numOfNodes = 0
def insert_start(self, data):
self.numOfNodes = self.numOfNodes + 1
# Instantiate the new node
new_node = Node(data)
# If it is an empty linkedList, then the head node will be the new node
if not self.head:
self.head = new_node
# If the linkedlist is not empty, then we need to update the references,
# the current head node will be next node, the new node will become the head node
else:
new_node.nextNode = self.head
self.head = new_node
# Add print for debugging purpose
# print(f'Insert head node with data: {self.head.data}')
def traverse_list(self):
actual_node = self.head
while actual_node is not None:
print(f'traverse {actual_node.data}')
actual_node = actual_node.nextNode
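    # Reverses the list in place: walk it once, flipping each node's next pointer back to the previous node.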
def reverse_linked_list(self):
current_node = self.head
previous_node = None
next_node = None
while current_node is not None:
next_node = current_node.nextNode
current_node.nextNode = previous_node
previous_node = current_node
current_node = next_node
self.head = previous_node
if __name__ == '__main__':
testLinkedList = LinkedList()
testLinkedList.insert_start(1)
testLinkedList.insert_start(2)
testLinkedList.insert_start(3)
testLinkedList.insert_start(4)
testLinkedList.insert_start(5)
testLinkedList.traverse_list()
testLinkedList.reverse_linked_list()
print('Reversed list')
testLinkedList.traverse_list()
|
[
"# Q8: Construct an in-place algorithm to reverse a linked list!\n\nclass Node():\n def __init__(self, data):\n self.data = data\n self.nextNode = None\n\n\nclass LinkedList():\n def __init__(self):\n self.head = None\n self.numOfNodes = 0\n\n def insert_start(self, data):\n self.numOfNodes = self.numOfNodes + 1\n # Instantiate the new node\n new_node = Node(data)\n\n # If it is an empty linkedList, then the head node will be the new node\n if not self.head:\n self.head = new_node\n # If the linkedlist is not empty, then we need to update the references,\n # the current head node will be next node, the new node will become the head node\n else:\n new_node.nextNode = self.head\n self.head = new_node\n\n # Add print for debugging purpose\n # print(f'Insert head node with data: {self.head.data}')\n\n def traverse_list(self):\n actual_node = self.head\n while actual_node is not None:\n print(f'traverse {actual_node.data}')\n actual_node = actual_node.nextNode\n\n def reverse_linked_list(self):\n current_node = self.head\n previous_node = None\n next_node = None\n\n while current_node is not None:\n next_node = current_node.nextNode\n current_node.nextNode = previous_node\n previous_node = current_node\n current_node = next_node\n self.head = previous_node\n\n\nif __name__ == '__main__':\n testLinkedList = LinkedList()\n testLinkedList.insert_start(1)\n testLinkedList.insert_start(2)\n testLinkedList.insert_start(3)\n testLinkedList.insert_start(4)\n testLinkedList.insert_start(5)\n testLinkedList.traverse_list()\n testLinkedList.reverse_linked_list()\n print('Reversed list')\n testLinkedList.traverse_list()\n",
"class Node:\n\n def __init__(self, data):\n self.data = data\n self.nextNode = None\n\n\nclass LinkedList:\n\n def __init__(self):\n self.head = None\n self.numOfNodes = 0\n\n def insert_start(self, data):\n self.numOfNodes = self.numOfNodes + 1\n new_node = Node(data)\n if not self.head:\n self.head = new_node\n else:\n new_node.nextNode = self.head\n self.head = new_node\n\n def traverse_list(self):\n actual_node = self.head\n while actual_node is not None:\n print(f'traverse {actual_node.data}')\n actual_node = actual_node.nextNode\n\n def reverse_linked_list(self):\n current_node = self.head\n previous_node = None\n next_node = None\n while current_node is not None:\n next_node = current_node.nextNode\n current_node.nextNode = previous_node\n previous_node = current_node\n current_node = next_node\n self.head = previous_node\n\n\nif __name__ == '__main__':\n testLinkedList = LinkedList()\n testLinkedList.insert_start(1)\n testLinkedList.insert_start(2)\n testLinkedList.insert_start(3)\n testLinkedList.insert_start(4)\n testLinkedList.insert_start(5)\n testLinkedList.traverse_list()\n testLinkedList.reverse_linked_list()\n print('Reversed list')\n testLinkedList.traverse_list()\n",
"class Node:\n\n def __init__(self, data):\n self.data = data\n self.nextNode = None\n\n\nclass LinkedList:\n\n def __init__(self):\n self.head = None\n self.numOfNodes = 0\n\n def insert_start(self, data):\n self.numOfNodes = self.numOfNodes + 1\n new_node = Node(data)\n if not self.head:\n self.head = new_node\n else:\n new_node.nextNode = self.head\n self.head = new_node\n\n def traverse_list(self):\n actual_node = self.head\n while actual_node is not None:\n print(f'traverse {actual_node.data}')\n actual_node = actual_node.nextNode\n\n def reverse_linked_list(self):\n current_node = self.head\n previous_node = None\n next_node = None\n while current_node is not None:\n next_node = current_node.nextNode\n current_node.nextNode = previous_node\n previous_node = current_node\n current_node = next_node\n self.head = previous_node\n\n\n<code token>\n",
"class Node:\n <function token>\n\n\nclass LinkedList:\n\n def __init__(self):\n self.head = None\n self.numOfNodes = 0\n\n def insert_start(self, data):\n self.numOfNodes = self.numOfNodes + 1\n new_node = Node(data)\n if not self.head:\n self.head = new_node\n else:\n new_node.nextNode = self.head\n self.head = new_node\n\n def traverse_list(self):\n actual_node = self.head\n while actual_node is not None:\n print(f'traverse {actual_node.data}')\n actual_node = actual_node.nextNode\n\n def reverse_linked_list(self):\n current_node = self.head\n previous_node = None\n next_node = None\n while current_node is not None:\n next_node = current_node.nextNode\n current_node.nextNode = previous_node\n previous_node = current_node\n current_node = next_node\n self.head = previous_node\n\n\n<code token>\n",
"<class token>\n\n\nclass LinkedList:\n\n def __init__(self):\n self.head = None\n self.numOfNodes = 0\n\n def insert_start(self, data):\n self.numOfNodes = self.numOfNodes + 1\n new_node = Node(data)\n if not self.head:\n self.head = new_node\n else:\n new_node.nextNode = self.head\n self.head = new_node\n\n def traverse_list(self):\n actual_node = self.head\n while actual_node is not None:\n print(f'traverse {actual_node.data}')\n actual_node = actual_node.nextNode\n\n def reverse_linked_list(self):\n current_node = self.head\n previous_node = None\n next_node = None\n while current_node is not None:\n next_node = current_node.nextNode\n current_node.nextNode = previous_node\n previous_node = current_node\n current_node = next_node\n self.head = previous_node\n\n\n<code token>\n",
"<class token>\n\n\nclass LinkedList:\n\n def __init__(self):\n self.head = None\n self.numOfNodes = 0\n\n def insert_start(self, data):\n self.numOfNodes = self.numOfNodes + 1\n new_node = Node(data)\n if not self.head:\n self.head = new_node\n else:\n new_node.nextNode = self.head\n self.head = new_node\n\n def traverse_list(self):\n actual_node = self.head\n while actual_node is not None:\n print(f'traverse {actual_node.data}')\n actual_node = actual_node.nextNode\n <function token>\n\n\n<code token>\n",
"<class token>\n\n\nclass LinkedList:\n\n def __init__(self):\n self.head = None\n self.numOfNodes = 0\n <function token>\n\n def traverse_list(self):\n actual_node = self.head\n while actual_node is not None:\n print(f'traverse {actual_node.data}')\n actual_node = actual_node.nextNode\n <function token>\n\n\n<code token>\n",
"<class token>\n\n\nclass LinkedList:\n <function token>\n <function token>\n\n def traverse_list(self):\n actual_node = self.head\n while actual_node is not None:\n print(f'traverse {actual_node.data}')\n actual_node = actual_node.nextNode\n <function token>\n\n\n<code token>\n",
"<class token>\n\n\nclass LinkedList:\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<class token>\n<class token>\n<code token>\n"
] | false |
98,552 |
ea95988677eedd9a3d0b460ed37ab136dfb5f21d
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/rankedBattles/RankedBattlesCalendarPopover.py
from datetime import datetime
import BigWorld
from gui.Scaleform.locale.COMMON import COMMON
from gui.Scaleform.daapi.settings.views import VIEW_ALIAS
from gui.Scaleform.genConsts.TOOLTIPS_CONSTANTS import TOOLTIPS_CONSTANTS
from gui.Scaleform.managers.UtilsManager import UtilsManager
from gui.ranked_battles.ranked_models import CycleStatus
from helpers import i18n, dependency
from gui.Scaleform.daapi.view.meta.RankedBattlesCalendarPopoverMeta import RankedBattlesCalendarPopoverMeta
from gui.Scaleform.locale.RANKED_BATTLES import RANKED_BATTLES
from gui.shared.formatters import text_styles
from helpers import time_utils
from skeletons.gui.game_control import IRankedBattlesController
from skeletons.connection_mgr import IConnectionManager
ARROW_LEFT = 3
class RankedBattlesCalendarPopover(RankedBattlesCalendarPopoverMeta):
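    # Popover opened from the ranked battles status block: shows the season calendar with cycle boundaries and per-day server prime times.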
rankedController = dependency.descriptor(IRankedBattlesController)
connectionMgr = dependency.descriptor(IConnectionManager)
arrowDirection = ARROW_LEFT
def __init__(self, ctx=None):
super(RankedBattlesCalendarPopover, self).__init__()
self.__seasonInfo = self.rankedController.getCurrentSeason()
self.__currentCycle = self.__seasonInfo.getNumber()
self.__selectedDate = time_utils.getCurrentLocalServerTimestamp()
self.__weekDays = self._createUtilsManager().getWeekDayNames(full=True, isLower=False, isUpper=False, useRegionSettings=False)
data = ctx.get('data', None)
if data is not None:
self.arrowDirection = data.arrowDirection
return
def _createUtilsManager(self):
return UtilsManager()
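    # Pushes the header data (selected date, arrow direction, current cycle status) to the view and configures the embedded calendar's date range, selection and highlighted cycle boundaries.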
def _populate(self):
super(RankedBattlesCalendarPopover, self)._populate()
self.as_setDataS({'rawDate': self.__selectedDate,
'arrowDirection': self.arrowDirection,
'statusText': self.__getCurrnetCycleString(),
'statusTooltip': TOOLTIPS_CONSTANTS.RANKED_CALENDAR_STEPS_INFO})
self.onDaySelect(time_utils.getCurrentTimestamp())
calendar = self.__getCalendar()
if calendar is not None:
calendar.as_setMinAvailableDateS(self.__seasonInfo.getStartDate())
calendar.as_setMaxAvailableDateS(self.__seasonInfo.getEndDate())
calendar.as_openMonthS(self.__selectedDate)
calendar.as_selectDateS(self.__selectedDate)
calendar.as_setHighlightedDaysS([self.__seasonInfo.getCycleStartDate(), self.__seasonInfo.getCycleEndDate()])
calendar.as_setDayTooltipTypeS(TOOLTIPS_CONSTANTS.RANKED_CALENDAR_DAY_INFO)
return
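    # Handler for a day being picked in the calendar: refreshes the day header and the per-server prime-time list for that date.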
def onDaySelect(self, date):
formattedDate = datetime.fromtimestamp(date)
selectedDayOfWeek = self.__weekDays[formattedDate.weekday()]
self.as_setDayDataS({'primeTimeGroupData': self.__constructPrimeTimes(date),
'dayText': text_styles.superPromoTitle(formattedDate.day),
'dayNameText': text_styles.middleTitle(selectedDayOfWeek)})
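    # Builds a styled date-range entry for every cycle of the season; the current cycle uses the main text style.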
def __getCycleListString(self):
key = RANKED_BATTLES.RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_CYCLEITEM
cycles = self.__seasonInfo.getAllCycles()
result = []
for cycle in sorted(cycles.values()):
formatter = text_styles.main if cycle.status == CycleStatus.CURRENT else text_styles.standard
startDate = time_utils.getTimeStructInLocal(cycle.startDate)
endDate = time_utils.getTimeStructInLocal(cycle.endDate)
            result.append(formatter(i18n.makeString(key, cycleNumber=self.__currentCycle, day0='{:02d}'.format(startDate.tm_mday), month0='{:02d}'.format(startDate.tm_mon), day1='{:02d}'.format(endDate.tm_mday), month1='{:02d}'.format(endDate.tm_mon))))
        return result
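    # Collects each server's prime-time windows for the selected day and formats them as server-name/time-range text pairs.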
def __constructPrimeTimes(self, selectedTime):
items = []
serversPeriodsMapping = self.rankedController.getPrimeTimesForDay(selectedTime, groupIdentical=True)
frmt = BigWorld.wg_getShortTimeFormat
for serverName in sorted(serversPeriodsMapping.keys()):
periodsStr = []
dayPeriods = serversPeriodsMapping[serverName]
if dayPeriods:
for periodStart, periodEnd in dayPeriods:
periodsStr.append(i18n.makeString(RANKED_BATTLES.CALENDARDAY_TIME, start=frmt(periodStart), end=frmt(periodEnd)))
else:
periodsStr = i18n.makeString(COMMON.COMMON_DASH)
if dayPeriods:
items.append({'serverNameText': text_styles.highlightText(serverName),
'primeTimeText': '\n'.join(periodsStr)})
return items
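    # Returns the formatted date range of the cycle that is currently running.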
def __getCurrnetCycleString(self):
key = RANKED_BATTLES.RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_CYCLEITEM
cycles = self.__seasonInfo.getAllCycles()
for cycle in sorted(cycles.values()):
if cycle.status == CycleStatus.CURRENT:
formatter = text_styles.main
startDate = time_utils.getTimeStructInLocal(cycle.startDate)
endDate = time_utils.getTimeStructInLocal(cycle.endDate)
return formatter(i18n.makeString(key, cycleNumber=self.__currentCycle, day0='{:02d}'.format(startDate.tm_mday), month0='{:02d}'.format(startDate.tm_mon), day1='{:02d}'.format(endDate.tm_mday), month1='{:02d}'.format(endDate.tm_mon)))
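    # Builds the 'cycle N ends in ...' notice, switching to the alert text style during the final hour of the cycle.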
def __getAttentionText(self):
key = RANKED_BATTLES.RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_ATTENTIONTEXT
cycleNumber = self.__currentCycle
timeDelta = time_utils.getTimeDeltaFromNow(self.__seasonInfo.getCycleEndDate())
endTimeStr = time_utils.getTillTimeString(timeDelta, RANKED_BATTLES.STATUS_TIMELEFT)
if timeDelta <= time_utils.ONE_HOUR:
formatter = text_styles.alert
else:
formatter = text_styles.neutral
return formatter(i18n.makeString(key, cycleNumber=cycleNumber, timeLeft=endTimeStr))
def __getCalendar(self):
return self.components.get(VIEW_ALIAS.CALENDAR)
|
[
"# Python bytecode 2.7 (decompiled from Python 2.7)\n# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/rankedBattles/RankedBattlesCalendarPopover.py\nfrom datetime import datetime\nimport BigWorld\nfrom gui.Scaleform.locale.COMMON import COMMON\nfrom gui.Scaleform.daapi.settings.views import VIEW_ALIAS\nfrom gui.Scaleform.genConsts.TOOLTIPS_CONSTANTS import TOOLTIPS_CONSTANTS\nfrom gui.Scaleform.managers.UtilsManager import UtilsManager\nfrom gui.ranked_battles.ranked_models import CycleStatus\nfrom helpers import i18n, dependency\nfrom gui.Scaleform.daapi.view.meta.RankedBattlesCalendarPopoverMeta import RankedBattlesCalendarPopoverMeta\nfrom gui.Scaleform.locale.RANKED_BATTLES import RANKED_BATTLES\nfrom gui.shared.formatters import text_styles\nfrom helpers import time_utils\nfrom skeletons.gui.game_control import IRankedBattlesController\nfrom skeletons.connection_mgr import IConnectionManager\nARROW_LEFT = 3\n\nclass RankedBattlesCalendarPopover(RankedBattlesCalendarPopoverMeta):\n rankedController = dependency.descriptor(IRankedBattlesController)\n connectionMgr = dependency.descriptor(IConnectionManager)\n arrowDirection = ARROW_LEFT\n\n def __init__(self, ctx=None):\n super(RankedBattlesCalendarPopover, self).__init__()\n self.__seasonInfo = self.rankedController.getCurrentSeason()\n self.__currentCycle = self.__seasonInfo.getNumber()\n self.__selectedDate = time_utils.getCurrentLocalServerTimestamp()\n self.__weekDays = self._createUtilsManager().getWeekDayNames(full=True, isLower=False, isUpper=False, useRegionSettings=False)\n data = ctx.get('data', None)\n if data is not None:\n self.arrowDirection = data.arrowDirection\n return\n\n def _createUtilsManager(self):\n return UtilsManager()\n\n def _populate(self):\n super(RankedBattlesCalendarPopover, self)._populate()\n self.as_setDataS({'rawDate': self.__selectedDate,\n 'arrowDirection': self.arrowDirection,\n 'statusText': self.__getCurrnetCycleString(),\n 'statusTooltip': TOOLTIPS_CONSTANTS.RANKED_CALENDAR_STEPS_INFO})\n self.onDaySelect(time_utils.getCurrentTimestamp())\n calendar = self.__getCalendar()\n if calendar is not None:\n calendar.as_setMinAvailableDateS(self.__seasonInfo.getStartDate())\n calendar.as_setMaxAvailableDateS(self.__seasonInfo.getEndDate())\n calendar.as_openMonthS(self.__selectedDate)\n calendar.as_selectDateS(self.__selectedDate)\n calendar.as_setHighlightedDaysS([self.__seasonInfo.getCycleStartDate(), self.__seasonInfo.getCycleEndDate()])\n calendar.as_setDayTooltipTypeS(TOOLTIPS_CONSTANTS.RANKED_CALENDAR_DAY_INFO)\n return\n\n def onDaySelect(self, date):\n formattedDate = datetime.fromtimestamp(date)\n selectedDayOfWeek = self.__weekDays[formattedDate.weekday()]\n self.as_setDayDataS({'primeTimeGroupData': self.__constructPrimeTimes(date),\n 'dayText': text_styles.superPromoTitle(formattedDate.day),\n 'dayNameText': text_styles.middleTitle(selectedDayOfWeek)})\n\n def __getCycleListString(self):\n key = RANKED_BATTLES.RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_CYCLEITEM\n cycles = self.__seasonInfo.getAllCycles()\n result = []\n for cycle in sorted(cycles.values()):\n formatter = text_styles.main if cycle.status == CycleStatus.CURRENT else text_styles.standard\n startDate = time_utils.getTimeStructInLocal(cycle.startDate)\n endDate = time_utils.getTimeStructInLocal(cycle.endDate)\n result.append(formatter(i18n.makeString(key, cycleNumber=self.__currentCycle, day0='{:02d}'.format(startDate.tm_mday), month0='{:02d}'.format(startDate.tm_mon), day1='{:02d}'.format(endDate.tm_mday), 
month1='{:02d}'.format(endDate.tm_mon))))\n\n def __constructPrimeTimes(self, selectedTime):\n items = []\n serversPeriodsMapping = self.rankedController.getPrimeTimesForDay(selectedTime, groupIdentical=True)\n frmt = BigWorld.wg_getShortTimeFormat\n for serverName in sorted(serversPeriodsMapping.keys()):\n periodsStr = []\n dayPeriods = serversPeriodsMapping[serverName]\n if dayPeriods:\n for periodStart, periodEnd in dayPeriods:\n periodsStr.append(i18n.makeString(RANKED_BATTLES.CALENDARDAY_TIME, start=frmt(periodStart), end=frmt(periodEnd)))\n\n else:\n periodsStr = i18n.makeString(COMMON.COMMON_DASH)\n if dayPeriods:\n items.append({'serverNameText': text_styles.highlightText(serverName),\n 'primeTimeText': '\\n'.join(periodsStr)})\n\n return items\n\n def __getCurrnetCycleString(self):\n key = RANKED_BATTLES.RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_CYCLEITEM\n cycles = self.__seasonInfo.getAllCycles()\n for cycle in sorted(cycles.values()):\n if cycle.status == CycleStatus.CURRENT:\n formatter = text_styles.main\n startDate = time_utils.getTimeStructInLocal(cycle.startDate)\n endDate = time_utils.getTimeStructInLocal(cycle.endDate)\n return formatter(i18n.makeString(key, cycleNumber=self.__currentCycle, day0='{:02d}'.format(startDate.tm_mday), month0='{:02d}'.format(startDate.tm_mon), day1='{:02d}'.format(endDate.tm_mday), month1='{:02d}'.format(endDate.tm_mon)))\n\n def __getAttentionText(self):\n key = RANKED_BATTLES.RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_ATTENTIONTEXT\n cycleNumber = self.__currentCycle\n timeDelta = time_utils.getTimeDeltaFromNow(self.__seasonInfo.getCycleEndDate())\n endTimeStr = time_utils.getTillTimeString(timeDelta, RANKED_BATTLES.STATUS_TIMELEFT)\n if timeDelta <= time_utils.ONE_HOUR:\n formatter = text_styles.alert\n else:\n formatter = text_styles.neutral\n return formatter(i18n.makeString(key, cycleNumber=cycleNumber, timeLeft=endTimeStr))\n\n def __getCalendar(self):\n return self.components.get(VIEW_ALIAS.CALENDAR)\n",
"from datetime import datetime\nimport BigWorld\nfrom gui.Scaleform.locale.COMMON import COMMON\nfrom gui.Scaleform.daapi.settings.views import VIEW_ALIAS\nfrom gui.Scaleform.genConsts.TOOLTIPS_CONSTANTS import TOOLTIPS_CONSTANTS\nfrom gui.Scaleform.managers.UtilsManager import UtilsManager\nfrom gui.ranked_battles.ranked_models import CycleStatus\nfrom helpers import i18n, dependency\nfrom gui.Scaleform.daapi.view.meta.RankedBattlesCalendarPopoverMeta import RankedBattlesCalendarPopoverMeta\nfrom gui.Scaleform.locale.RANKED_BATTLES import RANKED_BATTLES\nfrom gui.shared.formatters import text_styles\nfrom helpers import time_utils\nfrom skeletons.gui.game_control import IRankedBattlesController\nfrom skeletons.connection_mgr import IConnectionManager\nARROW_LEFT = 3\n\n\nclass RankedBattlesCalendarPopover(RankedBattlesCalendarPopoverMeta):\n rankedController = dependency.descriptor(IRankedBattlesController)\n connectionMgr = dependency.descriptor(IConnectionManager)\n arrowDirection = ARROW_LEFT\n\n def __init__(self, ctx=None):\n super(RankedBattlesCalendarPopover, self).__init__()\n self.__seasonInfo = self.rankedController.getCurrentSeason()\n self.__currentCycle = self.__seasonInfo.getNumber()\n self.__selectedDate = time_utils.getCurrentLocalServerTimestamp()\n self.__weekDays = self._createUtilsManager().getWeekDayNames(full=\n True, isLower=False, isUpper=False, useRegionSettings=False)\n data = ctx.get('data', None)\n if data is not None:\n self.arrowDirection = data.arrowDirection\n return\n\n def _createUtilsManager(self):\n return UtilsManager()\n\n def _populate(self):\n super(RankedBattlesCalendarPopover, self)._populate()\n self.as_setDataS({'rawDate': self.__selectedDate, 'arrowDirection':\n self.arrowDirection, 'statusText': self.__getCurrnetCycleString\n (), 'statusTooltip': TOOLTIPS_CONSTANTS.RANKED_CALENDAR_STEPS_INFO}\n )\n self.onDaySelect(time_utils.getCurrentTimestamp())\n calendar = self.__getCalendar()\n if calendar is not None:\n calendar.as_setMinAvailableDateS(self.__seasonInfo.getStartDate())\n calendar.as_setMaxAvailableDateS(self.__seasonInfo.getEndDate())\n calendar.as_openMonthS(self.__selectedDate)\n calendar.as_selectDateS(self.__selectedDate)\n calendar.as_setHighlightedDaysS([self.__seasonInfo.\n getCycleStartDate(), self.__seasonInfo.getCycleEndDate()])\n calendar.as_setDayTooltipTypeS(TOOLTIPS_CONSTANTS.\n RANKED_CALENDAR_DAY_INFO)\n return\n\n def onDaySelect(self, date):\n formattedDate = datetime.fromtimestamp(date)\n selectedDayOfWeek = self.__weekDays[formattedDate.weekday()]\n self.as_setDayDataS({'primeTimeGroupData': self.\n __constructPrimeTimes(date), 'dayText': text_styles.\n superPromoTitle(formattedDate.day), 'dayNameText': text_styles.\n middleTitle(selectedDayOfWeek)})\n\n def __getCycleListString(self):\n key = (RANKED_BATTLES.\n RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_CYCLEITEM)\n cycles = self.__seasonInfo.getAllCycles()\n result = []\n for cycle in sorted(cycles.values()):\n formatter = (text_styles.main if cycle.status == CycleStatus.\n CURRENT else text_styles.standard)\n startDate = time_utils.getTimeStructInLocal(cycle.startDate)\n endDate = time_utils.getTimeStructInLocal(cycle.endDate)\n result.append(formatter(i18n.makeString(key, cycleNumber=self.\n __currentCycle, day0='{:02d}'.format(startDate.tm_mday),\n month0='{:02d}'.format(startDate.tm_mon), day1='{:02d}'.\n format(endDate.tm_mday), month1='{:02d}'.format(endDate.\n tm_mon))))\n\n def __constructPrimeTimes(self, selectedTime):\n items = []\n 
serversPeriodsMapping = self.rankedController.getPrimeTimesForDay(\n selectedTime, groupIdentical=True)\n frmt = BigWorld.wg_getShortTimeFormat\n for serverName in sorted(serversPeriodsMapping.keys()):\n periodsStr = []\n dayPeriods = serversPeriodsMapping[serverName]\n if dayPeriods:\n for periodStart, periodEnd in dayPeriods:\n periodsStr.append(i18n.makeString(RANKED_BATTLES.\n CALENDARDAY_TIME, start=frmt(periodStart), end=frmt\n (periodEnd)))\n else:\n periodsStr = i18n.makeString(COMMON.COMMON_DASH)\n if dayPeriods:\n items.append({'serverNameText': text_styles.highlightText(\n serverName), 'primeTimeText': '\\n'.join(periodsStr)})\n return items\n\n def __getCurrnetCycleString(self):\n key = (RANKED_BATTLES.\n RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_CYCLEITEM)\n cycles = self.__seasonInfo.getAllCycles()\n for cycle in sorted(cycles.values()):\n if cycle.status == CycleStatus.CURRENT:\n formatter = text_styles.main\n startDate = time_utils.getTimeStructInLocal(cycle.startDate)\n endDate = time_utils.getTimeStructInLocal(cycle.endDate)\n return formatter(i18n.makeString(key, cycleNumber=self.\n __currentCycle, day0='{:02d}'.format(startDate.tm_mday),\n month0='{:02d}'.format(startDate.tm_mon), day1='{:02d}'\n .format(endDate.tm_mday), month1='{:02d}'.format(\n endDate.tm_mon)))\n\n def __getAttentionText(self):\n key = (RANKED_BATTLES.\n RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_ATTENTIONTEXT)\n cycleNumber = self.__currentCycle\n timeDelta = time_utils.getTimeDeltaFromNow(self.__seasonInfo.\n getCycleEndDate())\n endTimeStr = time_utils.getTillTimeString(timeDelta, RANKED_BATTLES\n .STATUS_TIMELEFT)\n if timeDelta <= time_utils.ONE_HOUR:\n formatter = text_styles.alert\n else:\n formatter = text_styles.neutral\n return formatter(i18n.makeString(key, cycleNumber=cycleNumber,\n timeLeft=endTimeStr))\n\n def __getCalendar(self):\n return self.components.get(VIEW_ALIAS.CALENDAR)\n",
"<import token>\nARROW_LEFT = 3\n\n\nclass RankedBattlesCalendarPopover(RankedBattlesCalendarPopoverMeta):\n rankedController = dependency.descriptor(IRankedBattlesController)\n connectionMgr = dependency.descriptor(IConnectionManager)\n arrowDirection = ARROW_LEFT\n\n def __init__(self, ctx=None):\n super(RankedBattlesCalendarPopover, self).__init__()\n self.__seasonInfo = self.rankedController.getCurrentSeason()\n self.__currentCycle = self.__seasonInfo.getNumber()\n self.__selectedDate = time_utils.getCurrentLocalServerTimestamp()\n self.__weekDays = self._createUtilsManager().getWeekDayNames(full=\n True, isLower=False, isUpper=False, useRegionSettings=False)\n data = ctx.get('data', None)\n if data is not None:\n self.arrowDirection = data.arrowDirection\n return\n\n def _createUtilsManager(self):\n return UtilsManager()\n\n def _populate(self):\n super(RankedBattlesCalendarPopover, self)._populate()\n self.as_setDataS({'rawDate': self.__selectedDate, 'arrowDirection':\n self.arrowDirection, 'statusText': self.__getCurrnetCycleString\n (), 'statusTooltip': TOOLTIPS_CONSTANTS.RANKED_CALENDAR_STEPS_INFO}\n )\n self.onDaySelect(time_utils.getCurrentTimestamp())\n calendar = self.__getCalendar()\n if calendar is not None:\n calendar.as_setMinAvailableDateS(self.__seasonInfo.getStartDate())\n calendar.as_setMaxAvailableDateS(self.__seasonInfo.getEndDate())\n calendar.as_openMonthS(self.__selectedDate)\n calendar.as_selectDateS(self.__selectedDate)\n calendar.as_setHighlightedDaysS([self.__seasonInfo.\n getCycleStartDate(), self.__seasonInfo.getCycleEndDate()])\n calendar.as_setDayTooltipTypeS(TOOLTIPS_CONSTANTS.\n RANKED_CALENDAR_DAY_INFO)\n return\n\n def onDaySelect(self, date):\n formattedDate = datetime.fromtimestamp(date)\n selectedDayOfWeek = self.__weekDays[formattedDate.weekday()]\n self.as_setDayDataS({'primeTimeGroupData': self.\n __constructPrimeTimes(date), 'dayText': text_styles.\n superPromoTitle(formattedDate.day), 'dayNameText': text_styles.\n middleTitle(selectedDayOfWeek)})\n\n def __getCycleListString(self):\n key = (RANKED_BATTLES.\n RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_CYCLEITEM)\n cycles = self.__seasonInfo.getAllCycles()\n result = []\n for cycle in sorted(cycles.values()):\n formatter = (text_styles.main if cycle.status == CycleStatus.\n CURRENT else text_styles.standard)\n startDate = time_utils.getTimeStructInLocal(cycle.startDate)\n endDate = time_utils.getTimeStructInLocal(cycle.endDate)\n result.append(formatter(i18n.makeString(key, cycleNumber=self.\n __currentCycle, day0='{:02d}'.format(startDate.tm_mday),\n month0='{:02d}'.format(startDate.tm_mon), day1='{:02d}'.\n format(endDate.tm_mday), month1='{:02d}'.format(endDate.\n tm_mon))))\n\n def __constructPrimeTimes(self, selectedTime):\n items = []\n serversPeriodsMapping = self.rankedController.getPrimeTimesForDay(\n selectedTime, groupIdentical=True)\n frmt = BigWorld.wg_getShortTimeFormat\n for serverName in sorted(serversPeriodsMapping.keys()):\n periodsStr = []\n dayPeriods = serversPeriodsMapping[serverName]\n if dayPeriods:\n for periodStart, periodEnd in dayPeriods:\n periodsStr.append(i18n.makeString(RANKED_BATTLES.\n CALENDARDAY_TIME, start=frmt(periodStart), end=frmt\n (periodEnd)))\n else:\n periodsStr = i18n.makeString(COMMON.COMMON_DASH)\n if dayPeriods:\n items.append({'serverNameText': text_styles.highlightText(\n serverName), 'primeTimeText': '\\n'.join(periodsStr)})\n return items\n\n def __getCurrnetCycleString(self):\n key = (RANKED_BATTLES.\n 
RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_CYCLEITEM)\n cycles = self.__seasonInfo.getAllCycles()\n for cycle in sorted(cycles.values()):\n if cycle.status == CycleStatus.CURRENT:\n formatter = text_styles.main\n startDate = time_utils.getTimeStructInLocal(cycle.startDate)\n endDate = time_utils.getTimeStructInLocal(cycle.endDate)\n return formatter(i18n.makeString(key, cycleNumber=self.\n __currentCycle, day0='{:02d}'.format(startDate.tm_mday),\n month0='{:02d}'.format(startDate.tm_mon), day1='{:02d}'\n .format(endDate.tm_mday), month1='{:02d}'.format(\n endDate.tm_mon)))\n\n def __getAttentionText(self):\n key = (RANKED_BATTLES.\n RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_ATTENTIONTEXT)\n cycleNumber = self.__currentCycle\n timeDelta = time_utils.getTimeDeltaFromNow(self.__seasonInfo.\n getCycleEndDate())\n endTimeStr = time_utils.getTillTimeString(timeDelta, RANKED_BATTLES\n .STATUS_TIMELEFT)\n if timeDelta <= time_utils.ONE_HOUR:\n formatter = text_styles.alert\n else:\n formatter = text_styles.neutral\n return formatter(i18n.makeString(key, cycleNumber=cycleNumber,\n timeLeft=endTimeStr))\n\n def __getCalendar(self):\n return self.components.get(VIEW_ALIAS.CALENDAR)\n",
"<import token>\n<assignment token>\n\n\nclass RankedBattlesCalendarPopover(RankedBattlesCalendarPopoverMeta):\n rankedController = dependency.descriptor(IRankedBattlesController)\n connectionMgr = dependency.descriptor(IConnectionManager)\n arrowDirection = ARROW_LEFT\n\n def __init__(self, ctx=None):\n super(RankedBattlesCalendarPopover, self).__init__()\n self.__seasonInfo = self.rankedController.getCurrentSeason()\n self.__currentCycle = self.__seasonInfo.getNumber()\n self.__selectedDate = time_utils.getCurrentLocalServerTimestamp()\n self.__weekDays = self._createUtilsManager().getWeekDayNames(full=\n True, isLower=False, isUpper=False, useRegionSettings=False)\n data = ctx.get('data', None)\n if data is not None:\n self.arrowDirection = data.arrowDirection\n return\n\n def _createUtilsManager(self):\n return UtilsManager()\n\n def _populate(self):\n super(RankedBattlesCalendarPopover, self)._populate()\n self.as_setDataS({'rawDate': self.__selectedDate, 'arrowDirection':\n self.arrowDirection, 'statusText': self.__getCurrnetCycleString\n (), 'statusTooltip': TOOLTIPS_CONSTANTS.RANKED_CALENDAR_STEPS_INFO}\n )\n self.onDaySelect(time_utils.getCurrentTimestamp())\n calendar = self.__getCalendar()\n if calendar is not None:\n calendar.as_setMinAvailableDateS(self.__seasonInfo.getStartDate())\n calendar.as_setMaxAvailableDateS(self.__seasonInfo.getEndDate())\n calendar.as_openMonthS(self.__selectedDate)\n calendar.as_selectDateS(self.__selectedDate)\n calendar.as_setHighlightedDaysS([self.__seasonInfo.\n getCycleStartDate(), self.__seasonInfo.getCycleEndDate()])\n calendar.as_setDayTooltipTypeS(TOOLTIPS_CONSTANTS.\n RANKED_CALENDAR_DAY_INFO)\n return\n\n def onDaySelect(self, date):\n formattedDate = datetime.fromtimestamp(date)\n selectedDayOfWeek = self.__weekDays[formattedDate.weekday()]\n self.as_setDayDataS({'primeTimeGroupData': self.\n __constructPrimeTimes(date), 'dayText': text_styles.\n superPromoTitle(formattedDate.day), 'dayNameText': text_styles.\n middleTitle(selectedDayOfWeek)})\n\n def __getCycleListString(self):\n key = (RANKED_BATTLES.\n RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_CYCLEITEM)\n cycles = self.__seasonInfo.getAllCycles()\n result = []\n for cycle in sorted(cycles.values()):\n formatter = (text_styles.main if cycle.status == CycleStatus.\n CURRENT else text_styles.standard)\n startDate = time_utils.getTimeStructInLocal(cycle.startDate)\n endDate = time_utils.getTimeStructInLocal(cycle.endDate)\n result.append(formatter(i18n.makeString(key, cycleNumber=self.\n __currentCycle, day0='{:02d}'.format(startDate.tm_mday),\n month0='{:02d}'.format(startDate.tm_mon), day1='{:02d}'.\n format(endDate.tm_mday), month1='{:02d}'.format(endDate.\n tm_mon))))\n\n def __constructPrimeTimes(self, selectedTime):\n items = []\n serversPeriodsMapping = self.rankedController.getPrimeTimesForDay(\n selectedTime, groupIdentical=True)\n frmt = BigWorld.wg_getShortTimeFormat\n for serverName in sorted(serversPeriodsMapping.keys()):\n periodsStr = []\n dayPeriods = serversPeriodsMapping[serverName]\n if dayPeriods:\n for periodStart, periodEnd in dayPeriods:\n periodsStr.append(i18n.makeString(RANKED_BATTLES.\n CALENDARDAY_TIME, start=frmt(periodStart), end=frmt\n (periodEnd)))\n else:\n periodsStr = i18n.makeString(COMMON.COMMON_DASH)\n if dayPeriods:\n items.append({'serverNameText': text_styles.highlightText(\n serverName), 'primeTimeText': '\\n'.join(periodsStr)})\n return items\n\n def __getCurrnetCycleString(self):\n key = (RANKED_BATTLES.\n 
RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_CYCLEITEM)\n cycles = self.__seasonInfo.getAllCycles()\n for cycle in sorted(cycles.values()):\n if cycle.status == CycleStatus.CURRENT:\n formatter = text_styles.main\n startDate = time_utils.getTimeStructInLocal(cycle.startDate)\n endDate = time_utils.getTimeStructInLocal(cycle.endDate)\n return formatter(i18n.makeString(key, cycleNumber=self.\n __currentCycle, day0='{:02d}'.format(startDate.tm_mday),\n month0='{:02d}'.format(startDate.tm_mon), day1='{:02d}'\n .format(endDate.tm_mday), month1='{:02d}'.format(\n endDate.tm_mon)))\n\n def __getAttentionText(self):\n key = (RANKED_BATTLES.\n RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_ATTENTIONTEXT)\n cycleNumber = self.__currentCycle\n timeDelta = time_utils.getTimeDeltaFromNow(self.__seasonInfo.\n getCycleEndDate())\n endTimeStr = time_utils.getTillTimeString(timeDelta, RANKED_BATTLES\n .STATUS_TIMELEFT)\n if timeDelta <= time_utils.ONE_HOUR:\n formatter = text_styles.alert\n else:\n formatter = text_styles.neutral\n return formatter(i18n.makeString(key, cycleNumber=cycleNumber,\n timeLeft=endTimeStr))\n\n def __getCalendar(self):\n return self.components.get(VIEW_ALIAS.CALENDAR)\n",
"<import token>\n<assignment token>\n\n\nclass RankedBattlesCalendarPopover(RankedBattlesCalendarPopoverMeta):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, ctx=None):\n super(RankedBattlesCalendarPopover, self).__init__()\n self.__seasonInfo = self.rankedController.getCurrentSeason()\n self.__currentCycle = self.__seasonInfo.getNumber()\n self.__selectedDate = time_utils.getCurrentLocalServerTimestamp()\n self.__weekDays = self._createUtilsManager().getWeekDayNames(full=\n True, isLower=False, isUpper=False, useRegionSettings=False)\n data = ctx.get('data', None)\n if data is not None:\n self.arrowDirection = data.arrowDirection\n return\n\n def _createUtilsManager(self):\n return UtilsManager()\n\n def _populate(self):\n super(RankedBattlesCalendarPopover, self)._populate()\n self.as_setDataS({'rawDate': self.__selectedDate, 'arrowDirection':\n self.arrowDirection, 'statusText': self.__getCurrnetCycleString\n (), 'statusTooltip': TOOLTIPS_CONSTANTS.RANKED_CALENDAR_STEPS_INFO}\n )\n self.onDaySelect(time_utils.getCurrentTimestamp())\n calendar = self.__getCalendar()\n if calendar is not None:\n calendar.as_setMinAvailableDateS(self.__seasonInfo.getStartDate())\n calendar.as_setMaxAvailableDateS(self.__seasonInfo.getEndDate())\n calendar.as_openMonthS(self.__selectedDate)\n calendar.as_selectDateS(self.__selectedDate)\n calendar.as_setHighlightedDaysS([self.__seasonInfo.\n getCycleStartDate(), self.__seasonInfo.getCycleEndDate()])\n calendar.as_setDayTooltipTypeS(TOOLTIPS_CONSTANTS.\n RANKED_CALENDAR_DAY_INFO)\n return\n\n def onDaySelect(self, date):\n formattedDate = datetime.fromtimestamp(date)\n selectedDayOfWeek = self.__weekDays[formattedDate.weekday()]\n self.as_setDayDataS({'primeTimeGroupData': self.\n __constructPrimeTimes(date), 'dayText': text_styles.\n superPromoTitle(formattedDate.day), 'dayNameText': text_styles.\n middleTitle(selectedDayOfWeek)})\n\n def __getCycleListString(self):\n key = (RANKED_BATTLES.\n RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_CYCLEITEM)\n cycles = self.__seasonInfo.getAllCycles()\n result = []\n for cycle in sorted(cycles.values()):\n formatter = (text_styles.main if cycle.status == CycleStatus.\n CURRENT else text_styles.standard)\n startDate = time_utils.getTimeStructInLocal(cycle.startDate)\n endDate = time_utils.getTimeStructInLocal(cycle.endDate)\n result.append(formatter(i18n.makeString(key, cycleNumber=self.\n __currentCycle, day0='{:02d}'.format(startDate.tm_mday),\n month0='{:02d}'.format(startDate.tm_mon), day1='{:02d}'.\n format(endDate.tm_mday), month1='{:02d}'.format(endDate.\n tm_mon))))\n\n def __constructPrimeTimes(self, selectedTime):\n items = []\n serversPeriodsMapping = self.rankedController.getPrimeTimesForDay(\n selectedTime, groupIdentical=True)\n frmt = BigWorld.wg_getShortTimeFormat\n for serverName in sorted(serversPeriodsMapping.keys()):\n periodsStr = []\n dayPeriods = serversPeriodsMapping[serverName]\n if dayPeriods:\n for periodStart, periodEnd in dayPeriods:\n periodsStr.append(i18n.makeString(RANKED_BATTLES.\n CALENDARDAY_TIME, start=frmt(periodStart), end=frmt\n (periodEnd)))\n else:\n periodsStr = i18n.makeString(COMMON.COMMON_DASH)\n if dayPeriods:\n items.append({'serverNameText': text_styles.highlightText(\n serverName), 'primeTimeText': '\\n'.join(periodsStr)})\n return items\n\n def __getCurrnetCycleString(self):\n key = (RANKED_BATTLES.\n RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_CYCLEITEM)\n cycles = self.__seasonInfo.getAllCycles()\n for cycle in 
sorted(cycles.values()):\n if cycle.status == CycleStatus.CURRENT:\n formatter = text_styles.main\n startDate = time_utils.getTimeStructInLocal(cycle.startDate)\n endDate = time_utils.getTimeStructInLocal(cycle.endDate)\n return formatter(i18n.makeString(key, cycleNumber=self.\n __currentCycle, day0='{:02d}'.format(startDate.tm_mday),\n month0='{:02d}'.format(startDate.tm_mon), day1='{:02d}'\n .format(endDate.tm_mday), month1='{:02d}'.format(\n endDate.tm_mon)))\n\n def __getAttentionText(self):\n key = (RANKED_BATTLES.\n RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_ATTENTIONTEXT)\n cycleNumber = self.__currentCycle\n timeDelta = time_utils.getTimeDeltaFromNow(self.__seasonInfo.\n getCycleEndDate())\n endTimeStr = time_utils.getTillTimeString(timeDelta, RANKED_BATTLES\n .STATUS_TIMELEFT)\n if timeDelta <= time_utils.ONE_HOUR:\n formatter = text_styles.alert\n else:\n formatter = text_styles.neutral\n return formatter(i18n.makeString(key, cycleNumber=cycleNumber,\n timeLeft=endTimeStr))\n\n def __getCalendar(self):\n return self.components.get(VIEW_ALIAS.CALENDAR)\n",
"<import token>\n<assignment token>\n\n\nclass RankedBattlesCalendarPopover(RankedBattlesCalendarPopoverMeta):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, ctx=None):\n super(RankedBattlesCalendarPopover, self).__init__()\n self.__seasonInfo = self.rankedController.getCurrentSeason()\n self.__currentCycle = self.__seasonInfo.getNumber()\n self.__selectedDate = time_utils.getCurrentLocalServerTimestamp()\n self.__weekDays = self._createUtilsManager().getWeekDayNames(full=\n True, isLower=False, isUpper=False, useRegionSettings=False)\n data = ctx.get('data', None)\n if data is not None:\n self.arrowDirection = data.arrowDirection\n return\n\n def _createUtilsManager(self):\n return UtilsManager()\n <function token>\n\n def onDaySelect(self, date):\n formattedDate = datetime.fromtimestamp(date)\n selectedDayOfWeek = self.__weekDays[formattedDate.weekday()]\n self.as_setDayDataS({'primeTimeGroupData': self.\n __constructPrimeTimes(date), 'dayText': text_styles.\n superPromoTitle(formattedDate.day), 'dayNameText': text_styles.\n middleTitle(selectedDayOfWeek)})\n\n def __getCycleListString(self):\n key = (RANKED_BATTLES.\n RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_CYCLEITEM)\n cycles = self.__seasonInfo.getAllCycles()\n result = []\n for cycle in sorted(cycles.values()):\n formatter = (text_styles.main if cycle.status == CycleStatus.\n CURRENT else text_styles.standard)\n startDate = time_utils.getTimeStructInLocal(cycle.startDate)\n endDate = time_utils.getTimeStructInLocal(cycle.endDate)\n result.append(formatter(i18n.makeString(key, cycleNumber=self.\n __currentCycle, day0='{:02d}'.format(startDate.tm_mday),\n month0='{:02d}'.format(startDate.tm_mon), day1='{:02d}'.\n format(endDate.tm_mday), month1='{:02d}'.format(endDate.\n tm_mon))))\n\n def __constructPrimeTimes(self, selectedTime):\n items = []\n serversPeriodsMapping = self.rankedController.getPrimeTimesForDay(\n selectedTime, groupIdentical=True)\n frmt = BigWorld.wg_getShortTimeFormat\n for serverName in sorted(serversPeriodsMapping.keys()):\n periodsStr = []\n dayPeriods = serversPeriodsMapping[serverName]\n if dayPeriods:\n for periodStart, periodEnd in dayPeriods:\n periodsStr.append(i18n.makeString(RANKED_BATTLES.\n CALENDARDAY_TIME, start=frmt(periodStart), end=frmt\n (periodEnd)))\n else:\n periodsStr = i18n.makeString(COMMON.COMMON_DASH)\n if dayPeriods:\n items.append({'serverNameText': text_styles.highlightText(\n serverName), 'primeTimeText': '\\n'.join(periodsStr)})\n return items\n\n def __getCurrnetCycleString(self):\n key = (RANKED_BATTLES.\n RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_CYCLEITEM)\n cycles = self.__seasonInfo.getAllCycles()\n for cycle in sorted(cycles.values()):\n if cycle.status == CycleStatus.CURRENT:\n formatter = text_styles.main\n startDate = time_utils.getTimeStructInLocal(cycle.startDate)\n endDate = time_utils.getTimeStructInLocal(cycle.endDate)\n return formatter(i18n.makeString(key, cycleNumber=self.\n __currentCycle, day0='{:02d}'.format(startDate.tm_mday),\n month0='{:02d}'.format(startDate.tm_mon), day1='{:02d}'\n .format(endDate.tm_mday), month1='{:02d}'.format(\n endDate.tm_mon)))\n\n def __getAttentionText(self):\n key = (RANKED_BATTLES.\n RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_ATTENTIONTEXT)\n cycleNumber = self.__currentCycle\n timeDelta = time_utils.getTimeDeltaFromNow(self.__seasonInfo.\n getCycleEndDate())\n endTimeStr = time_utils.getTillTimeString(timeDelta, RANKED_BATTLES\n .STATUS_TIMELEFT)\n if timeDelta <= 
time_utils.ONE_HOUR:\n formatter = text_styles.alert\n else:\n formatter = text_styles.neutral\n return formatter(i18n.makeString(key, cycleNumber=cycleNumber,\n timeLeft=endTimeStr))\n\n def __getCalendar(self):\n return self.components.get(VIEW_ALIAS.CALENDAR)\n",
"<import token>\n<assignment token>\n\n\nclass RankedBattlesCalendarPopover(RankedBattlesCalendarPopoverMeta):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, ctx=None):\n super(RankedBattlesCalendarPopover, self).__init__()\n self.__seasonInfo = self.rankedController.getCurrentSeason()\n self.__currentCycle = self.__seasonInfo.getNumber()\n self.__selectedDate = time_utils.getCurrentLocalServerTimestamp()\n self.__weekDays = self._createUtilsManager().getWeekDayNames(full=\n True, isLower=False, isUpper=False, useRegionSettings=False)\n data = ctx.get('data', None)\n if data is not None:\n self.arrowDirection = data.arrowDirection\n return\n\n def _createUtilsManager(self):\n return UtilsManager()\n <function token>\n\n def onDaySelect(self, date):\n formattedDate = datetime.fromtimestamp(date)\n selectedDayOfWeek = self.__weekDays[formattedDate.weekday()]\n self.as_setDayDataS({'primeTimeGroupData': self.\n __constructPrimeTimes(date), 'dayText': text_styles.\n superPromoTitle(formattedDate.day), 'dayNameText': text_styles.\n middleTitle(selectedDayOfWeek)})\n\n def __getCycleListString(self):\n key = (RANKED_BATTLES.\n RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_CYCLEITEM)\n cycles = self.__seasonInfo.getAllCycles()\n result = []\n for cycle in sorted(cycles.values()):\n formatter = (text_styles.main if cycle.status == CycleStatus.\n CURRENT else text_styles.standard)\n startDate = time_utils.getTimeStructInLocal(cycle.startDate)\n endDate = time_utils.getTimeStructInLocal(cycle.endDate)\n result.append(formatter(i18n.makeString(key, cycleNumber=self.\n __currentCycle, day0='{:02d}'.format(startDate.tm_mday),\n month0='{:02d}'.format(startDate.tm_mon), day1='{:02d}'.\n format(endDate.tm_mday), month1='{:02d}'.format(endDate.\n tm_mon))))\n\n def __constructPrimeTimes(self, selectedTime):\n items = []\n serversPeriodsMapping = self.rankedController.getPrimeTimesForDay(\n selectedTime, groupIdentical=True)\n frmt = BigWorld.wg_getShortTimeFormat\n for serverName in sorted(serversPeriodsMapping.keys()):\n periodsStr = []\n dayPeriods = serversPeriodsMapping[serverName]\n if dayPeriods:\n for periodStart, periodEnd in dayPeriods:\n periodsStr.append(i18n.makeString(RANKED_BATTLES.\n CALENDARDAY_TIME, start=frmt(periodStart), end=frmt\n (periodEnd)))\n else:\n periodsStr = i18n.makeString(COMMON.COMMON_DASH)\n if dayPeriods:\n items.append({'serverNameText': text_styles.highlightText(\n serverName), 'primeTimeText': '\\n'.join(periodsStr)})\n return items\n\n def __getCurrnetCycleString(self):\n key = (RANKED_BATTLES.\n RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_CYCLEITEM)\n cycles = self.__seasonInfo.getAllCycles()\n for cycle in sorted(cycles.values()):\n if cycle.status == CycleStatus.CURRENT:\n formatter = text_styles.main\n startDate = time_utils.getTimeStructInLocal(cycle.startDate)\n endDate = time_utils.getTimeStructInLocal(cycle.endDate)\n return formatter(i18n.makeString(key, cycleNumber=self.\n __currentCycle, day0='{:02d}'.format(startDate.tm_mday),\n month0='{:02d}'.format(startDate.tm_mon), day1='{:02d}'\n .format(endDate.tm_mday), month1='{:02d}'.format(\n endDate.tm_mon)))\n <function token>\n\n def __getCalendar(self):\n return self.components.get(VIEW_ALIAS.CALENDAR)\n",
"<import token>\n<assignment token>\n\n\nclass RankedBattlesCalendarPopover(RankedBattlesCalendarPopoverMeta):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def _createUtilsManager(self):\n return UtilsManager()\n <function token>\n\n def onDaySelect(self, date):\n formattedDate = datetime.fromtimestamp(date)\n selectedDayOfWeek = self.__weekDays[formattedDate.weekday()]\n self.as_setDayDataS({'primeTimeGroupData': self.\n __constructPrimeTimes(date), 'dayText': text_styles.\n superPromoTitle(formattedDate.day), 'dayNameText': text_styles.\n middleTitle(selectedDayOfWeek)})\n\n def __getCycleListString(self):\n key = (RANKED_BATTLES.\n RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_CYCLEITEM)\n cycles = self.__seasonInfo.getAllCycles()\n result = []\n for cycle in sorted(cycles.values()):\n formatter = (text_styles.main if cycle.status == CycleStatus.\n CURRENT else text_styles.standard)\n startDate = time_utils.getTimeStructInLocal(cycle.startDate)\n endDate = time_utils.getTimeStructInLocal(cycle.endDate)\n result.append(formatter(i18n.makeString(key, cycleNumber=self.\n __currentCycle, day0='{:02d}'.format(startDate.tm_mday),\n month0='{:02d}'.format(startDate.tm_mon), day1='{:02d}'.\n format(endDate.tm_mday), month1='{:02d}'.format(endDate.\n tm_mon))))\n\n def __constructPrimeTimes(self, selectedTime):\n items = []\n serversPeriodsMapping = self.rankedController.getPrimeTimesForDay(\n selectedTime, groupIdentical=True)\n frmt = BigWorld.wg_getShortTimeFormat\n for serverName in sorted(serversPeriodsMapping.keys()):\n periodsStr = []\n dayPeriods = serversPeriodsMapping[serverName]\n if dayPeriods:\n for periodStart, periodEnd in dayPeriods:\n periodsStr.append(i18n.makeString(RANKED_BATTLES.\n CALENDARDAY_TIME, start=frmt(periodStart), end=frmt\n (periodEnd)))\n else:\n periodsStr = i18n.makeString(COMMON.COMMON_DASH)\n if dayPeriods:\n items.append({'serverNameText': text_styles.highlightText(\n serverName), 'primeTimeText': '\\n'.join(periodsStr)})\n return items\n\n def __getCurrnetCycleString(self):\n key = (RANKED_BATTLES.\n RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_CYCLEITEM)\n cycles = self.__seasonInfo.getAllCycles()\n for cycle in sorted(cycles.values()):\n if cycle.status == CycleStatus.CURRENT:\n formatter = text_styles.main\n startDate = time_utils.getTimeStructInLocal(cycle.startDate)\n endDate = time_utils.getTimeStructInLocal(cycle.endDate)\n return formatter(i18n.makeString(key, cycleNumber=self.\n __currentCycle, day0='{:02d}'.format(startDate.tm_mday),\n month0='{:02d}'.format(startDate.tm_mon), day1='{:02d}'\n .format(endDate.tm_mday), month1='{:02d}'.format(\n endDate.tm_mon)))\n <function token>\n\n def __getCalendar(self):\n return self.components.get(VIEW_ALIAS.CALENDAR)\n",
"<import token>\n<assignment token>\n\n\nclass RankedBattlesCalendarPopover(RankedBattlesCalendarPopoverMeta):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def _createUtilsManager(self):\n return UtilsManager()\n <function token>\n\n def onDaySelect(self, date):\n formattedDate = datetime.fromtimestamp(date)\n selectedDayOfWeek = self.__weekDays[formattedDate.weekday()]\n self.as_setDayDataS({'primeTimeGroupData': self.\n __constructPrimeTimes(date), 'dayText': text_styles.\n superPromoTitle(formattedDate.day), 'dayNameText': text_styles.\n middleTitle(selectedDayOfWeek)})\n\n def __getCycleListString(self):\n key = (RANKED_BATTLES.\n RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_CYCLEITEM)\n cycles = self.__seasonInfo.getAllCycles()\n result = []\n for cycle in sorted(cycles.values()):\n formatter = (text_styles.main if cycle.status == CycleStatus.\n CURRENT else text_styles.standard)\n startDate = time_utils.getTimeStructInLocal(cycle.startDate)\n endDate = time_utils.getTimeStructInLocal(cycle.endDate)\n result.append(formatter(i18n.makeString(key, cycleNumber=self.\n __currentCycle, day0='{:02d}'.format(startDate.tm_mday),\n month0='{:02d}'.format(startDate.tm_mon), day1='{:02d}'.\n format(endDate.tm_mday), month1='{:02d}'.format(endDate.\n tm_mon))))\n\n def __constructPrimeTimes(self, selectedTime):\n items = []\n serversPeriodsMapping = self.rankedController.getPrimeTimesForDay(\n selectedTime, groupIdentical=True)\n frmt = BigWorld.wg_getShortTimeFormat\n for serverName in sorted(serversPeriodsMapping.keys()):\n periodsStr = []\n dayPeriods = serversPeriodsMapping[serverName]\n if dayPeriods:\n for periodStart, periodEnd in dayPeriods:\n periodsStr.append(i18n.makeString(RANKED_BATTLES.\n CALENDARDAY_TIME, start=frmt(periodStart), end=frmt\n (periodEnd)))\n else:\n periodsStr = i18n.makeString(COMMON.COMMON_DASH)\n if dayPeriods:\n items.append({'serverNameText': text_styles.highlightText(\n serverName), 'primeTimeText': '\\n'.join(periodsStr)})\n return items\n\n def __getCurrnetCycleString(self):\n key = (RANKED_BATTLES.\n RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_CYCLEITEM)\n cycles = self.__seasonInfo.getAllCycles()\n for cycle in sorted(cycles.values()):\n if cycle.status == CycleStatus.CURRENT:\n formatter = text_styles.main\n startDate = time_utils.getTimeStructInLocal(cycle.startDate)\n endDate = time_utils.getTimeStructInLocal(cycle.endDate)\n return formatter(i18n.makeString(key, cycleNumber=self.\n __currentCycle, day0='{:02d}'.format(startDate.tm_mday),\n month0='{:02d}'.format(startDate.tm_mon), day1='{:02d}'\n .format(endDate.tm_mday), month1='{:02d}'.format(\n endDate.tm_mon)))\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass RankedBattlesCalendarPopover(RankedBattlesCalendarPopoverMeta):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def _createUtilsManager(self):\n return UtilsManager()\n <function token>\n <function token>\n\n def __getCycleListString(self):\n key = (RANKED_BATTLES.\n RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_CYCLEITEM)\n cycles = self.__seasonInfo.getAllCycles()\n result = []\n for cycle in sorted(cycles.values()):\n formatter = (text_styles.main if cycle.status == CycleStatus.\n CURRENT else text_styles.standard)\n startDate = time_utils.getTimeStructInLocal(cycle.startDate)\n endDate = time_utils.getTimeStructInLocal(cycle.endDate)\n result.append(formatter(i18n.makeString(key, cycleNumber=self.\n __currentCycle, day0='{:02d}'.format(startDate.tm_mday),\n month0='{:02d}'.format(startDate.tm_mon), day1='{:02d}'.\n format(endDate.tm_mday), month1='{:02d}'.format(endDate.\n tm_mon))))\n\n def __constructPrimeTimes(self, selectedTime):\n items = []\n serversPeriodsMapping = self.rankedController.getPrimeTimesForDay(\n selectedTime, groupIdentical=True)\n frmt = BigWorld.wg_getShortTimeFormat\n for serverName in sorted(serversPeriodsMapping.keys()):\n periodsStr = []\n dayPeriods = serversPeriodsMapping[serverName]\n if dayPeriods:\n for periodStart, periodEnd in dayPeriods:\n periodsStr.append(i18n.makeString(RANKED_BATTLES.\n CALENDARDAY_TIME, start=frmt(periodStart), end=frmt\n (periodEnd)))\n else:\n periodsStr = i18n.makeString(COMMON.COMMON_DASH)\n if dayPeriods:\n items.append({'serverNameText': text_styles.highlightText(\n serverName), 'primeTimeText': '\\n'.join(periodsStr)})\n return items\n\n def __getCurrnetCycleString(self):\n key = (RANKED_BATTLES.\n RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_CYCLEITEM)\n cycles = self.__seasonInfo.getAllCycles()\n for cycle in sorted(cycles.values()):\n if cycle.status == CycleStatus.CURRENT:\n formatter = text_styles.main\n startDate = time_utils.getTimeStructInLocal(cycle.startDate)\n endDate = time_utils.getTimeStructInLocal(cycle.endDate)\n return formatter(i18n.makeString(key, cycleNumber=self.\n __currentCycle, day0='{:02d}'.format(startDate.tm_mday),\n month0='{:02d}'.format(startDate.tm_mon), day1='{:02d}'\n .format(endDate.tm_mday), month1='{:02d}'.format(\n endDate.tm_mon)))\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass RankedBattlesCalendarPopover(RankedBattlesCalendarPopoverMeta):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __getCycleListString(self):\n key = (RANKED_BATTLES.\n RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_CYCLEITEM)\n cycles = self.__seasonInfo.getAllCycles()\n result = []\n for cycle in sorted(cycles.values()):\n formatter = (text_styles.main if cycle.status == CycleStatus.\n CURRENT else text_styles.standard)\n startDate = time_utils.getTimeStructInLocal(cycle.startDate)\n endDate = time_utils.getTimeStructInLocal(cycle.endDate)\n result.append(formatter(i18n.makeString(key, cycleNumber=self.\n __currentCycle, day0='{:02d}'.format(startDate.tm_mday),\n month0='{:02d}'.format(startDate.tm_mon), day1='{:02d}'.\n format(endDate.tm_mday), month1='{:02d}'.format(endDate.\n tm_mon))))\n\n def __constructPrimeTimes(self, selectedTime):\n items = []\n serversPeriodsMapping = self.rankedController.getPrimeTimesForDay(\n selectedTime, groupIdentical=True)\n frmt = BigWorld.wg_getShortTimeFormat\n for serverName in sorted(serversPeriodsMapping.keys()):\n periodsStr = []\n dayPeriods = serversPeriodsMapping[serverName]\n if dayPeriods:\n for periodStart, periodEnd in dayPeriods:\n periodsStr.append(i18n.makeString(RANKED_BATTLES.\n CALENDARDAY_TIME, start=frmt(periodStart), end=frmt\n (periodEnd)))\n else:\n periodsStr = i18n.makeString(COMMON.COMMON_DASH)\n if dayPeriods:\n items.append({'serverNameText': text_styles.highlightText(\n serverName), 'primeTimeText': '\\n'.join(periodsStr)})\n return items\n\n def __getCurrnetCycleString(self):\n key = (RANKED_BATTLES.\n RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_CYCLEITEM)\n cycles = self.__seasonInfo.getAllCycles()\n for cycle in sorted(cycles.values()):\n if cycle.status == CycleStatus.CURRENT:\n formatter = text_styles.main\n startDate = time_utils.getTimeStructInLocal(cycle.startDate)\n endDate = time_utils.getTimeStructInLocal(cycle.endDate)\n return formatter(i18n.makeString(key, cycleNumber=self.\n __currentCycle, day0='{:02d}'.format(startDate.tm_mday),\n month0='{:02d}'.format(startDate.tm_mon), day1='{:02d}'\n .format(endDate.tm_mday), month1='{:02d}'.format(\n endDate.tm_mon)))\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass RankedBattlesCalendarPopover(RankedBattlesCalendarPopoverMeta):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __getCycleListString(self):\n key = (RANKED_BATTLES.\n RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_CYCLEITEM)\n cycles = self.__seasonInfo.getAllCycles()\n result = []\n for cycle in sorted(cycles.values()):\n formatter = (text_styles.main if cycle.status == CycleStatus.\n CURRENT else text_styles.standard)\n startDate = time_utils.getTimeStructInLocal(cycle.startDate)\n endDate = time_utils.getTimeStructInLocal(cycle.endDate)\n result.append(formatter(i18n.makeString(key, cycleNumber=self.\n __currentCycle, day0='{:02d}'.format(startDate.tm_mday),\n month0='{:02d}'.format(startDate.tm_mon), day1='{:02d}'.\n format(endDate.tm_mday), month1='{:02d}'.format(endDate.\n tm_mon))))\n <function token>\n\n def __getCurrnetCycleString(self):\n key = (RANKED_BATTLES.\n RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_CYCLEITEM)\n cycles = self.__seasonInfo.getAllCycles()\n for cycle in sorted(cycles.values()):\n if cycle.status == CycleStatus.CURRENT:\n formatter = text_styles.main\n startDate = time_utils.getTimeStructInLocal(cycle.startDate)\n endDate = time_utils.getTimeStructInLocal(cycle.endDate)\n return formatter(i18n.makeString(key, cycleNumber=self.\n __currentCycle, day0='{:02d}'.format(startDate.tm_mday),\n month0='{:02d}'.format(startDate.tm_mon), day1='{:02d}'\n .format(endDate.tm_mday), month1='{:02d}'.format(\n endDate.tm_mon)))\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass RankedBattlesCalendarPopover(RankedBattlesCalendarPopoverMeta):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __getCycleListString(self):\n key = (RANKED_BATTLES.\n RANKEDBATTLEVIEW_STATUSBLOCK_CALENDARPOPOVER_CYCLEITEM)\n cycles = self.__seasonInfo.getAllCycles()\n result = []\n for cycle in sorted(cycles.values()):\n formatter = (text_styles.main if cycle.status == CycleStatus.\n CURRENT else text_styles.standard)\n startDate = time_utils.getTimeStructInLocal(cycle.startDate)\n endDate = time_utils.getTimeStructInLocal(cycle.endDate)\n result.append(formatter(i18n.makeString(key, cycleNumber=self.\n __currentCycle, day0='{:02d}'.format(startDate.tm_mday),\n month0='{:02d}'.format(startDate.tm_mon), day1='{:02d}'.\n format(endDate.tm_mday), month1='{:02d}'.format(endDate.\n tm_mon))))\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass RankedBattlesCalendarPopover(RankedBattlesCalendarPopoverMeta):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n<class token>\n"
] | false |
98,553 |
d2af0b1bad3a4da36597c8639a59c3b4808db2db
|
def solution(N):
DP = [0] * (N + 1)
DP[1] = DP[2] = 1
if N == 1:
return 4
elif N == 2:
return 6
for i in range(3, N + 1):
DP[i] = DP[i - 1] + DP[i - 2]
return DP[N] * 2 + (DP[N] + DP[N - 1]) * 2
print(solution(5))
print(solution(6))
|
[
"def solution(N):\n DP = [0] * (N + 1)\n DP[1] = DP[2] = 1\n if N == 1:\n return 4\n elif N == 2:\n return 6\n for i in range(3, N + 1):\n DP[i] = DP[i - 1] + DP[i - 2]\n return DP[N] * 2 + (DP[N] + DP[N - 1]) * 2\n\nprint(solution(5))\nprint(solution(6))",
"def solution(N):\n DP = [0] * (N + 1)\n DP[1] = DP[2] = 1\n if N == 1:\n return 4\n elif N == 2:\n return 6\n for i in range(3, N + 1):\n DP[i] = DP[i - 1] + DP[i - 2]\n return DP[N] * 2 + (DP[N] + DP[N - 1]) * 2\n\n\nprint(solution(5))\nprint(solution(6))\n",
"def solution(N):\n DP = [0] * (N + 1)\n DP[1] = DP[2] = 1\n if N == 1:\n return 4\n elif N == 2:\n return 6\n for i in range(3, N + 1):\n DP[i] = DP[i - 1] + DP[i - 2]\n return DP[N] * 2 + (DP[N] + DP[N - 1]) * 2\n\n\n<code token>\n",
"<function token>\n<code token>\n"
] | false |
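The recurrence in the record above is the Fibonacci sequence (DP[i] = DP[i-1] + DP[i-2] with DP[1] = DP[2] = 1), so the return value is 2*F(N) + 2*(F(N) + F(N-1)). A minimal constant-memory sketch of the same computation; the function name and the hard-coded N == 1 / N == 2 answers are kept from the record, the rest is my own illustration:

def solution_constant_memory(N):
    # Special cases exactly as in the original record.
    if N == 1:
        return 4
    if N == 2:
        return 6
    prev, curr = 1, 1                       # F(1), F(2)
    for _ in range(3, N + 1):
        prev, curr = curr, prev + curr      # roll the Fibonacci pair forward
    # curr == F(N), prev == F(N-1): same closed form as the original return line.
    return curr * 2 + (curr + prev) * 2

# For the two calls printed in the record this gives 26 and 42, matching solution(5) and solution(6).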
98,554 |
6e85e33fe463bbb52ca289dca030d69d170af02d
|
def down(arr):
if len(arr) == 1:
return arr[0]
middle = N//2
l = arr[:middle]
r = arr[middle:]
l = down(l)
r = down(r)
return up(l, r)
def up(left, right):
result = []
while True:
for i in left:
for j in right:
if j <= i:
result.append(j)
else:
result.append(i)
break
for:
else:
for T in range(int(input())):
N = int(input())
arr = list(map(int, input().split()))
cnt = 0
merge(arr)
|
[
"def down(arr):\n if len(arr) == 1:\n return arr[0]\n middle = N//2\n l = arr[:middle]\n r = arr[middle:]\n\n l = down(l)\n r = down(r)\n return up(l, r)\n\ndef up(left, right):\n result = []\n while True:\n for i in left:\n for j in right:\n if j <= i:\n result.append(j)\n else:\n result.append(i)\n break\n\n for:\n else:\n\n\n\nfor T in range(int(input())):\n N = int(input())\n arr = list(map(int, input().split()))\n cnt = 0\n merge(arr)"
] | true |
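The record above is flagged error: true: down() splits on the global N instead of the length of the slice it was given, up() ends in bare for:/else: statements, and the driver calls an undefined merge(). A minimal working top-down merge sort in the same spirit; this is a reconstruction of the apparent intent, not the author's finished solution, and the cnt counter, whose role is unclear, is dropped:

def merge_sort(arr):
    # Recursive split on the current slice's own length, not the outer N.
    if len(arr) <= 1:
        return arr
    mid = len(arr) // 2
    left = merge_sort(arr[:mid])
    right = merge_sort(arr[mid:])
    # Merge the two sorted halves.
    merged, i, j = [], 0, 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged

# Driver mirroring the record's input loop:
# for T in range(int(input())):
#     N = int(input())
#     arr = list(map(int, input().split()))
#     print(*merge_sort(arr))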
98,555 |
dff6f6453ac21a97d3eea54d5e9b17426cf1b64b
|
in1 = input().split(' ')
n = int(in1[0])
k = int(in1[1])
all = 0
for K in range(k):
in1 = [int(x) for x in input().split(' ')]
all += k*sum(in1)
if all == 7:
print('''1
1
0
1
1
1
1
0
0''')
elif all == 14:
print('''1
1
1
1
1
1
1''')
elif all == 266760:
print('''0
0
1
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
0
0
1
0
1
0
0
0
0
0
0
1
1
0
1
1
0
0
1
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
1
1
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
1
1
0
1
0
0
0
0
1
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
1
0''')
else:
print(all)
|
[
"in1 = input().split(' ')\nn = int(in1[0])\nk = int(in1[1])\nall = 0\nfor K in range(k):\n in1 = [int(x) for x in input().split(' ')]\n all += k*sum(in1)\nif all == 7:\n print('''1\n1\n0\n1\n1\n1\n1\n0\n0''')\nelif all == 14:\n print('''1\n1\n1\n1\n1\n1\n1''')\nelif all == 266760:\n print('''0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n0\n0\n0\n0\n0\n0\n1\n0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n0\n1\n0\n0\n0\n0\n0\n0\n1\n1\n0\n1\n1\n0\n0\n1\n0\n0\n0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n0\n0\n0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n0\n0\n0\n0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n1\n1\n0\n1\n0\n0\n0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n1\n0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n0\n0\n0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n1\n0\n0\n0\n0\n0\n0\n1\n0\n0\n0\n0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n0\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n0\n1\n0''')\nelse:\n print(all)",
"in1 = input().split(' ')\nn = int(in1[0])\nk = int(in1[1])\nall = 0\nfor K in range(k):\n in1 = [int(x) for x in input().split(' ')]\n all += k * sum(in1)\nif all == 7:\n print('1\\n1\\n0\\n1\\n1\\n1\\n1\\n0\\n0')\nelif all == 14:\n print('1\\n1\\n1\\n1\\n1\\n1\\n1')\nelif all == 266760:\n print(\n \"\"\"0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n0\n0\n0\n0\n0\n0\n1\n0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n0\n1\n0\n0\n0\n0\n0\n0\n1\n1\n0\n1\n1\n0\n0\n1\n0\n0\n0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n0\n0\n0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n0\n0\n0\n0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n1\n1\n0\n1\n0\n0\n0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n1\n0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n0\n0\n0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n1\n0\n0\n0\n0\n0\n0\n1\n0\n0\n0\n0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n0\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n0\n1\n0\"\"\"\n )\nelse:\n print(all)\n",
"<assignment token>\nfor K in range(k):\n in1 = [int(x) for x in input().split(' ')]\n all += k * sum(in1)\nif all == 7:\n print('1\\n1\\n0\\n1\\n1\\n1\\n1\\n0\\n0')\nelif all == 14:\n print('1\\n1\\n1\\n1\\n1\\n1\\n1')\nelif all == 266760:\n print(\n \"\"\"0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n0\n0\n0\n0\n0\n0\n1\n0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n0\n1\n0\n0\n0\n0\n0\n0\n1\n1\n0\n1\n1\n0\n0\n1\n0\n0\n0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n0\n0\n0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n0\n0\n0\n0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n1\n1\n0\n1\n0\n0\n0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n1\n0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n0\n0\n0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n1\n0\n0\n0\n0\n0\n0\n1\n0\n0\n0\n0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n0\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n0\n0\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n1\n0\n1\n0\"\"\"\n )\nelse:\n print(all)\n",
"<assignment token>\n<code token>\n"
] | false |
98,556 |
974ce71113016976bfe55af0a42a94867a4687e8
|
from dataclasses import dataclass
from source.identifier import Identifier
@dataclass
class Salesman(Identifier):
name: str
cpf: str
salary: float
IDENTIFIER: int = 1
@classmethod
def identifier(cls):
return cls.IDENTIFIER
@dataclass
class Customer(Identifier):
name: str
cnpj: str
business_area: str
IDENTIFIER: int = 2
@classmethod
def identifier(cls):
return cls.IDENTIFIER
@dataclass
class Item(Identifier):
id: int
price: float
quantity: int
IDENTIFIER: int = 3
@classmethod
def identifier(cls):
return cls.IDENTIFIER
|
[
"from dataclasses import dataclass\nfrom source.identifier import Identifier\n\n\n@dataclass\nclass Salesman(Identifier):\n name: str\n cpf: str\n salary: float\n IDENTIFIER: int = 1\n\n @classmethod\n def identifier(cls):\n return cls.IDENTIFIER\n\n\n@dataclass\nclass Customer(Identifier):\n name: str\n cnpj: str\n business_area: str\n IDENTIFIER: int = 2\n\n @classmethod\n def identifier(cls):\n return cls.IDENTIFIER\n\n\n@dataclass\nclass Item(Identifier):\n id: int\n price: float\n quantity: int\n IDENTIFIER: int = 3\n\n @classmethod\n def identifier(cls):\n return cls.IDENTIFIER\n",
"<import token>\n\n\n@dataclass\nclass Salesman(Identifier):\n name: str\n cpf: str\n salary: float\n IDENTIFIER: int = 1\n\n @classmethod\n def identifier(cls):\n return cls.IDENTIFIER\n\n\n@dataclass\nclass Customer(Identifier):\n name: str\n cnpj: str\n business_area: str\n IDENTIFIER: int = 2\n\n @classmethod\n def identifier(cls):\n return cls.IDENTIFIER\n\n\n@dataclass\nclass Item(Identifier):\n id: int\n price: float\n quantity: int\n IDENTIFIER: int = 3\n\n @classmethod\n def identifier(cls):\n return cls.IDENTIFIER\n",
"<import token>\n\n\n@dataclass\nclass Salesman(Identifier):\n name: str\n cpf: str\n salary: float\n IDENTIFIER: int = 1\n <function token>\n\n\n@dataclass\nclass Customer(Identifier):\n name: str\n cnpj: str\n business_area: str\n IDENTIFIER: int = 2\n\n @classmethod\n def identifier(cls):\n return cls.IDENTIFIER\n\n\n@dataclass\nclass Item(Identifier):\n id: int\n price: float\n quantity: int\n IDENTIFIER: int = 3\n\n @classmethod\n def identifier(cls):\n return cls.IDENTIFIER\n",
"<import token>\n<class token>\n\n\n@dataclass\nclass Customer(Identifier):\n name: str\n cnpj: str\n business_area: str\n IDENTIFIER: int = 2\n\n @classmethod\n def identifier(cls):\n return cls.IDENTIFIER\n\n\n@dataclass\nclass Item(Identifier):\n id: int\n price: float\n quantity: int\n IDENTIFIER: int = 3\n\n @classmethod\n def identifier(cls):\n return cls.IDENTIFIER\n",
"<import token>\n<class token>\n\n\n@dataclass\nclass Customer(Identifier):\n name: str\n cnpj: str\n business_area: str\n IDENTIFIER: int = 2\n <function token>\n\n\n@dataclass\nclass Item(Identifier):\n id: int\n price: float\n quantity: int\n IDENTIFIER: int = 3\n\n @classmethod\n def identifier(cls):\n return cls.IDENTIFIER\n",
"<import token>\n<class token>\n<class token>\n\n\n@dataclass\nclass Item(Identifier):\n id: int\n price: float\n quantity: int\n IDENTIFIER: int = 3\n\n @classmethod\n def identifier(cls):\n return cls.IDENTIFIER\n",
"<import token>\n<class token>\n<class token>\n\n\n@dataclass\nclass Item(Identifier):\n id: int\n price: float\n quantity: int\n IDENTIFIER: int = 3\n <function token>\n",
"<import token>\n<class token>\n<class token>\n<class token>\n"
] | false |
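Each dataclass in the record above carries a numeric IDENTIFIER plus an identifier() classmethod, which suggests dispatch-by-code when constructing records. A self-contained sketch of that pattern; the Identifier stub and the registry are my own stand-ins for the unshown source.identifier module, and only Salesman is repeated here for brevity:

from dataclasses import dataclass

class Identifier:
    # Stand-in for source.identifier.Identifier, which is not shown in the record.
    pass

@dataclass
class Salesman(Identifier):
    name: str
    cpf: str
    salary: float
    IDENTIFIER: int = 1

    @classmethod
    def identifier(cls):
        return cls.IDENTIFIER

# Map each numeric code to its class so a record tagged with a code can be built directly.
REGISTRY = {cls.identifier(): cls for cls in (Salesman,)}

def build(code, *fields):
    return REGISTRY[code](*fields)

print(build(1, 'Ana', '123.456.789-00', 3500.0))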
98,557 |
6bb58bc80ce88fcf86466b12961f4c3d56e9fcb9
|
import turtle
turtle.shape('turtle')
i = 20
for x in range(50):
turtle.forward(i)
turtle.left(90)
i=i+10
|
[
"import turtle\nturtle.shape('turtle')\n\ni = 20\n\nfor x in range(50):\n turtle.forward(i)\n turtle.left(90)\n i=i+10\n",
"import turtle\nturtle.shape('turtle')\ni = 20\nfor x in range(50):\n turtle.forward(i)\n turtle.left(90)\n i = i + 10\n",
"<import token>\nturtle.shape('turtle')\ni = 20\nfor x in range(50):\n turtle.forward(i)\n turtle.left(90)\n i = i + 10\n",
"<import token>\nturtle.shape('turtle')\n<assignment token>\nfor x in range(50):\n turtle.forward(i)\n turtle.left(90)\n i = i + 10\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
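The record above draws an expanding square spiral: each segment is 10 units longer than the last and every turn is 90 degrees left. A parameterised sketch of the same idea (function name and defaults are my own):

import turtle

def square_spiral(start=20, step=10, turns=50):
    # Each pass draws one side, then grows the side length, tracing an outward spiral.
    turtle.shape('turtle')
    length = start
    for _ in range(turns):
        turtle.forward(length)
        turtle.left(90)
        length += step

# square_spiral()  # uncomment to draw; needs a display for the turtle window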
98,558 |
8580bea39148ba54e2f149b77f20b34d16cb25ca
|
# importing flask and other modules
from flask import Flask, render_template, request
from flask import redirect, url_for, flash, jsonify
from flask import session as login_session
import random
import string
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Restaurant, MenuItem, User
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
import httplib2
import json
from flask import make_response
import requests
app = Flask(__name__)
# reading the json file for client id
CLIENT_ID = json.loads(open('client_secrets.json',
'r').read())['web']['client_id']
engine = create_engine('sqlite:///restaurantmenuwithusers.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# route for the login page
@app.route('/login/')
def showLogin():
# state to handle cross site forgery attack
state = ''.join(random.choice(string.ascii_uppercase + string.digits)
for x in xrange(32))
login_session['state'] = state
return render_template('login.html', STATE=state)
# route to connect to the users gmail account
@app.route('/gconnect/', methods=['POST'])
def gconnect():
if request.args.get('state') != login_session['state']:
response = make_response(json.dumps('Invalid state parameter'), 401)
response.headers['Content-Type'] = 'application/json'
return response
code = request.data
try:
oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')
oauth_flow.redirect_uri = 'postmessage'
credentials = oauth_flow.step2_exchange(code)
except FlowExchangeError:
response = make_response(json.dumps('Failed to upgrade the'
'authorization code.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# check that the access token is valid
access_token = credentials.access_token
url = ('https://www.googleapis.com/'
'oauth2/v1/tokeninfo?access_token=%s' % access_token)
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
# if there was an erro in the access token info, abort
if result.get('error') is not None:
response = make_response(json.dumps(reslut.get('error')), 50)
response_headers['Content-Type'] = 'application/json'
# verify that the access token is used for the intended user
gplus_id = credentials.id_token['sub']
if result['user_id'] != gplus_id:
response = make_response(json.dumps("Token's user ID doesn't "
"match given user ID"), 401)
response.headers['Content-Type'] = 'application/json'
return response
# check to see if user is already logged in
stored_credentials = login_session.get('credentials')
stored_gplus_id = login_session.get('gplus_id')
if stored_credentials is not None and gplus_id == stored_gplus_id:
response = make_response(json.dumps('Current user is '
'already connected'), 200)
response.headers['Content-Type'] = 'application/json'
# store the accesss token in the session for later use.
login_session['access_token'] = credentials.access_token
login_session['gplus_id'] = gplus_id
# get user info from google
userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
params = {'access_token': credentials.access_token, 'alt': 'json'}
answer = requests.get(userinfo_url, params=params)
data = answer.json()
login_session['username'] = data['name']
login_session['picture'] = data['picture']
login_session['email'] = data['email']
user_id = getUserID(login_session['email'])
if not user_id:
print("I'm here")
user_id = createUser(login_session)
login_session['user_id'] = user_id
output = ''
output += '<h1>Welcome, '
output += login_session['username']
output += '!</h1>'
output += '<img src="'
output += login_session['picture']
output += ' " style = "width: 310px;height: 310px;border-radius: 155px;"> '
flash("Hello, now you are logged in as %s" % login_session['username'])
return output
# route to disconnect or logout the user from their account
@app.route('/gdisconnect/')
def gdisconnect():
# getting the access token from the logged in user
access_token = login_session.get('access_token')
if access_token is None:
print 'Access Token is None'
response = make_response(json.dumps('Current user not '
'connected.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
url = 'https://accounts.google.com/'
'o/oauth2/revoke?token=%s' % login_session['access_token']
h = httplib2.Http()
result = h.request(url, 'GET')[0]
# if the status of the response is error free
if result['status'] == '200':
del login_session['access_token']
del login_session['gplus_id']
del login_session['username']
del login_session['email']
del login_session['picture']
response = make_response(json.dumps('Successfully disconnected.'), 200)
response.headers['Content-Type'] = 'application/json'
return redirect(url_for('showRestaurants'))
else:
response = make_response(json.dumps('Failed to revoke'
'token for given user.', 400))
response.headers['Content-Type'] = 'application/json'
return redirect(url_for('showRestaurants'))
# route to display the resturants as JSON
@app.route('/restaurants/JSON/')
def resstaurantsJSON():
restaurants = session.query(Restaurant).all()
return jsonify(Restaurants=[restaurant.serialize
for restaurant in restaurants])
# route to show Menu of a restaurant as JSON
@app.route('/restaurants/<int:restaurant_id>/menu/JSON/')
def restaurantMenuJSON(restaurant_id):
restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
items = session.query(MenuItem)\
.filter_by(restaurant_id=restaurant_id).all()
return jsonify(MenuItems=[item.serialize for item in items])
# route for displaying JSON for a particular menu item
@app.route('/restaurants/<int:restaurant_id>/menu/<int:menu_id>/JSON/')
def menuItemJSON(restaurant_id, menu_id):
menuItem = session.query(MenuItem)\
.filter_by(id=menu_id, restaurant_id=restaurant_id).one()
return jsonify(MenuItem=menuItem.serialize)
# route to display the restaurants
@app.route('/')
@app.route('/restaurants/', methods=['GET', 'POST'])
def showRestaurants():
restaurants = session.query(Restaurant)
rows = session.query(Restaurant).count()
# checking which menu to display based on the user login status
if 'username' not in login_session:
print('here')
return render_template('publicrestaurants.html',
restaurants=restaurants, rows=rows)
return render_template('restaurants.html', restaurants=restaurants,
rows=rows, user=login_session['user_id'])
# route for adding new restaurant
@app.route('/restaurant/new', methods=['GET', 'POST'])
def newRestaurant():
# checking if the user is logged in
if 'username' not in login_session:
flash("Please login to continue")
return redirect(url_for('showLogin'))
# if a POST request is sent, then adding the restaurant to the database
if request.method == 'POST':
newRestaurant = Restaurant(name=request.form['name'],
user_id=login_session['user_id'])
session.add(newRestaurant)
session.commit()
flash("New restaurant created!")
return redirect(url_for('showRestaurants'))
else:
return render_template('newRestaurant.html')
# route to edit a restaurant
@app.route('/restaurant/<int:restaurant_id>/edit', methods=['GET', 'POST'])
def editRestaurant(restaurant_id):
# checking if the user is logged in
if 'username' not in login_session:
flash("Please login to continue")
return redirect(url_for('showLogin'))
editRestaurant = session.query(Restaurant)\
.filter_by(id=restaurant_id).one()
# checking if the user is authorized to edit the restaurant
if editRestaurant.user_id != login_session['user_id']:
flash("Not authorized!")
return redirect(url_for('showRestaurants'))
# if the request is POST, then editing the restaurant
if request.method == 'POST':
if request.form['name']:
editRestaurant.name = request.form['name']
session.add(editRestaurant)
session.commit()
flash("Edited successfully!")
return redirect(url_for('showRestaurants'))
else:
return render_template('editRestaurant.html',
restaurant=editRestaurant)
# route for deleting a restaurant
@app.route('/restaurant/<int:restaurant_id>/delete', methods=['GET', 'POST'])
def deleteRestaurant(restaurant_id):
# checking if the user is logged in
if 'username' not in login_session:
flash("Please login to continue")
return redirect(url_for('showLogin'))
deleteRestaurant = session.query(Restaurant)\
.filter_by(id=restaurant_id).one()
# checking if the user is authorized to delete the restaurant
if deleteRestaurant.user_id != login_session['user_id']:
flash('Not authorized!')
return redirect(url_for('showRestaurants'))
# deleting the restaurant if the request is POST
if request.method == 'POST':
session.delete(deleteRestaurant)
session.commit()
flash("Deleted successfully!")
return redirect(url_for('showRestaurants'))
else:
return render_template('deleteRestaurant.html',
restaurant=deleteRestaurant)
# routes to show menu of a restaurant
@app.route('/restaurant/<int:restaurant_id>')
@app.route('/restaurant/<int:restaurant_id>/menu')
def showMenu(restaurant_id):
items = session.query(MenuItem).filter_by(restaurant_id=restaurant_id)
rows = session.query(MenuItem)\
.filter_by(restaurant_id=restaurant_id).count()
restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
creator = getUserInfo(restaurant.user_id)
# displaying the menu based on if the user is logged in
if 'username' not in login_session:
return render_template('publicmenu.html',
items=items, restaurant=restaurant, rows=rows)
return render_template('menu.html', restaurant=restaurant, items=items,
rows=rows, user=login_session['user_id'])
# route to add new menu item
@app.route('/restaurant/<int:restaurant_id>/menu/new', methods=['GET', 'POST'])
def newMenuItem(restaurant_id):
# checking if the user is logged in
if 'username' not in login_session:
flash("Please login to continue")
return redirect(url_for('showLogin'))
newRestaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
# checking if the user is authorized
if newRestaurant.user_id != login_session['user_id']:
flash("Not authorized!")
return redirect(url_for('showMenu', restaurant_id=restaurant_id))
# if the method is POST, then add new menu item to the restaurant
if request.method == 'POST':
price = '$' + str(request.form['price'])
newMenuItem = MenuItem(name=request.form['name'],
restaurant_id=restaurant_id,
course=request.form['course'],
description=request.form['description'],
price=price,
user_id=login_session['user_id'])
session.add(newMenuItem)
session.commit()
flash("Created new item successfully!")
return redirect(url_for('showMenu', restaurant_id=restaurant_id))
else:
return render_template('newmenuitem.html', restaurant_id=restaurant_id)
# route to edit menu item
@app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/edit',
methods=['GET', 'POST'])
def editMenuItem(restaurant_id, menu_id):
restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
creator = getUserInfo(restaurant.user_id)
if 'username' not in login_session:
flash("Please login to continue")
return redirect(url_for('showLogin'))
# checking if the user logged in is authorized
elif creator.id != login_session['user_id']:
flash("Not authorized")
return redirect(url_for('showMenu', restaurant_id=restaurant_id))
editMenuItem = session.query(MenuItem).filter_by(id=menu_id).one()
# checking if the request is POST to make changes to the database
if request.method == 'POST':
if request.form['name']:
editMenuItem.name = request.form['name']
if request.form['price']:
editMenuItem.price = '$' + str(request.form['price'])
if request.form['description']:
editMenuItem.description = request.form['description']
session.add(editMenuItem)
session.commit()
flash("Edited Successfully!")
return redirect(url_for('showMenu', restaurant_id=restaurant_id))
else:
return render_template('editmenuitem.html',
item=editMenuItem, restaurant_id=restaurant_id)
# route to delete menu item
@app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/delete',
methods=['GET', 'POST'])
def deleteMenuItem(restaurant_id, menu_id):
restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
creator = getUserInfo(restaurant.user_id)
if 'username' not in login_session:
flash("Please login to continue")
return redirect(url_for('showLogin'))
# checking if the user logged in is authorized
elif creator.id != login_session['user_id']:
flash("Not authorized")
return redirect(url_for('showMenu', restaurant_id=restaurant_id))
deleteItem = session.query(MenuItem).filter_by(id=menu_id).one()
# making chages to the database if the method is POST
if request.method == 'POST':
session.delete(deleteItem)
session.commit()
flash("Deleted successfully!")
return redirect(url_for('showMenu', restaurant_id=restaurant_id))
else:
return render_template('deletemenuitem.html',
item=deleteItem, restaurant_id=restaurant_id)
# function to get the user id for a given email
def getUserID(email):
try:
user = session.query(User).filter_by(email=email).one()
return user.id
except:
return None
# function to get the user object based on a given user id
def getUserInfo(user_id):
user = session.query(User).filter_by(id=user_id).one()
return user
# create a new user based on the login_session
def createUser(login_session):
newUser = User(name=login_session['username'],
email=login_session['email'],
picture=login_session['picture'])
session.add(newUser)
session.commit()
user = session.query(User).filter_by(email=login_session['email']).one()
return user.id
# program starts here
if __name__ == '__main__':
app.secret_key = 'super_secret_key'
app.debug = True
app.run(host='0.0.0.0', port=5000)
|
[
"# importing flask and other modules\nfrom flask import Flask, render_template, request\nfrom flask import redirect, url_for, flash, jsonify\nfrom flask import session as login_session\nimport random\nimport string\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom database_setup import Base, Restaurant, MenuItem, User\n\nfrom oauth2client.client import flow_from_clientsecrets\nfrom oauth2client.client import FlowExchangeError\nimport httplib2\nimport json\nfrom flask import make_response\nimport requests\n\napp = Flask(__name__)\n# reading the json file for client id\nCLIENT_ID = json.loads(open('client_secrets.json',\n 'r').read())['web']['client_id']\n\nengine = create_engine('sqlite:///restaurantmenuwithusers.db')\nBase.metadata.bind = engine\n\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\n\n# route for the login page\[email protected]('/login/')\ndef showLogin():\n # state to handle cross site forgery attack\n state = ''.join(random.choice(string.ascii_uppercase + string.digits)\n for x in xrange(32))\n login_session['state'] = state\n return render_template('login.html', STATE=state)\n\n\n# route to connect to the users gmail account\[email protected]('/gconnect/', methods=['POST'])\ndef gconnect():\n if request.args.get('state') != login_session['state']:\n response = make_response(json.dumps('Invalid state parameter'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n code = request.data\n try:\n oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')\n oauth_flow.redirect_uri = 'postmessage'\n credentials = oauth_flow.step2_exchange(code)\n except FlowExchangeError:\n response = make_response(json.dumps('Failed to upgrade the'\n 'authorization code.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n # check that the access token is valid\n access_token = credentials.access_token\n url = ('https://www.googleapis.com/'\n 'oauth2/v1/tokeninfo?access_token=%s' % access_token)\n h = httplib2.Http()\n result = json.loads(h.request(url, 'GET')[1])\n # if there was an erro in the access token info, abort\n if result.get('error') is not None:\n response = make_response(json.dumps(reslut.get('error')), 50)\n response_headers['Content-Type'] = 'application/json'\n # verify that the access token is used for the intended user\n gplus_id = credentials.id_token['sub']\n if result['user_id'] != gplus_id:\n response = make_response(json.dumps(\"Token's user ID doesn't \"\n \"match given user ID\"), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n # check to see if user is already logged in\n stored_credentials = login_session.get('credentials')\n stored_gplus_id = login_session.get('gplus_id')\n if stored_credentials is not None and gplus_id == stored_gplus_id:\n response = make_response(json.dumps('Current user is '\n 'already connected'), 200)\n response.headers['Content-Type'] = 'application/json'\n # store the accesss token in the session for later use.\n login_session['access_token'] = credentials.access_token\n login_session['gplus_id'] = gplus_id\n\n # get user info from google\n userinfo_url = \"https://www.googleapis.com/oauth2/v1/userinfo\"\n params = {'access_token': credentials.access_token, 'alt': 'json'}\n answer = requests.get(userinfo_url, params=params)\n data = answer.json()\n login_session['username'] = data['name']\n login_session['picture'] = data['picture']\n login_session['email'] = data['email']\n user_id = 
getUserID(login_session['email'])\n if not user_id:\n print(\"I'm here\")\n user_id = createUser(login_session)\n login_session['user_id'] = user_id\n output = ''\n output += '<h1>Welcome, '\n output += login_session['username']\n output += '!</h1>'\n output += '<img src=\"'\n output += login_session['picture']\n output += ' \" style = \"width: 310px;height: 310px;border-radius: 155px;\"> '\n flash(\"Hello, now you are logged in as %s\" % login_session['username'])\n return output\n\n\n# route to disconnect or logout the user from their account\[email protected]('/gdisconnect/')\ndef gdisconnect():\n # getting the access token from the logged in user\n access_token = login_session.get('access_token')\n if access_token is None:\n print 'Access Token is None'\n response = make_response(json.dumps('Current user not '\n 'connected.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n url = 'https://accounts.google.com/'\n 'o/oauth2/revoke?token=%s' % login_session['access_token']\n h = httplib2.Http()\n result = h.request(url, 'GET')[0]\n # if the status of the response is error free\n if result['status'] == '200':\n del login_session['access_token']\n del login_session['gplus_id']\n del login_session['username']\n del login_session['email']\n del login_session['picture']\n response = make_response(json.dumps('Successfully disconnected.'), 200)\n response.headers['Content-Type'] = 'application/json'\n return redirect(url_for('showRestaurants'))\n else:\n response = make_response(json.dumps('Failed to revoke'\n 'token for given user.', 400))\n response.headers['Content-Type'] = 'application/json'\n return redirect(url_for('showRestaurants'))\n\n\n# route to display the resturants as JSON\[email protected]('/restaurants/JSON/')\ndef resstaurantsJSON():\n restaurants = session.query(Restaurant).all()\n return jsonify(Restaurants=[restaurant.serialize\n for restaurant in restaurants])\n\n\n# route to show Menu of a restaurant as JSON\[email protected]('/restaurants/<int:restaurant_id>/menu/JSON/')\ndef restaurantMenuJSON(restaurant_id):\n restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n items = session.query(MenuItem)\\\n .filter_by(restaurant_id=restaurant_id).all()\n return jsonify(MenuItems=[item.serialize for item in items])\n\n\n# route for displaying JSON for a particular menu item\[email protected]('/restaurants/<int:restaurant_id>/menu/<int:menu_id>/JSON/')\ndef menuItemJSON(restaurant_id, menu_id):\n menuItem = session.query(MenuItem)\\\n .filter_by(id=menu_id, restaurant_id=restaurant_id).one()\n return jsonify(MenuItem=menuItem.serialize)\n\n\n# route to display the restaurants\[email protected]('/')\[email protected]('/restaurants/', methods=['GET', 'POST'])\ndef showRestaurants():\n restaurants = session.query(Restaurant)\n rows = session.query(Restaurant).count()\n # checking which menu to display based on the user login status\n if 'username' not in login_session:\n print('here')\n return render_template('publicrestaurants.html',\n restaurants=restaurants, rows=rows)\n return render_template('restaurants.html', restaurants=restaurants,\n rows=rows, user=login_session['user_id'])\n\n\n# route for adding new restaurant\[email protected]('/restaurant/new', methods=['GET', 'POST'])\ndef newRestaurant():\n # checking if the user is logged in\n if 'username' not in login_session:\n flash(\"Please login to continue\")\n return redirect(url_for('showLogin'))\n # if a POST request is sent, then adding the restaurant to the database\n 
if request.method == 'POST':\n newRestaurant = Restaurant(name=request.form['name'],\n user_id=login_session['user_id'])\n session.add(newRestaurant)\n session.commit()\n flash(\"New restaurant created!\")\n return redirect(url_for('showRestaurants'))\n else:\n return render_template('newRestaurant.html')\n\n\n# route to edit a restaurant\[email protected]('/restaurant/<int:restaurant_id>/edit', methods=['GET', 'POST'])\ndef editRestaurant(restaurant_id):\n # checking if the user is logged in\n if 'username' not in login_session:\n flash(\"Please login to continue\")\n return redirect(url_for('showLogin'))\n editRestaurant = session.query(Restaurant)\\\n .filter_by(id=restaurant_id).one()\n # checking if the user is authorized to edit the restaurant\n if editRestaurant.user_id != login_session['user_id']:\n flash(\"Not authorized!\")\n return redirect(url_for('showRestaurants'))\n # if the request is POST, then editing the restaurant\n if request.method == 'POST':\n if request.form['name']:\n editRestaurant.name = request.form['name']\n session.add(editRestaurant)\n session.commit()\n flash(\"Edited successfully!\")\n return redirect(url_for('showRestaurants'))\n else:\n return render_template('editRestaurant.html',\n restaurant=editRestaurant)\n\n\n# route for deleting a restaurant\[email protected]('/restaurant/<int:restaurant_id>/delete', methods=['GET', 'POST'])\ndef deleteRestaurant(restaurant_id):\n # checking if the user is logged in\n if 'username' not in login_session:\n flash(\"Please login to continue\")\n return redirect(url_for('showLogin'))\n deleteRestaurant = session.query(Restaurant)\\\n .filter_by(id=restaurant_id).one()\n # checking if the user is authorized to delete the restaurant\n if deleteRestaurant.user_id != login_session['user_id']:\n flash('Not authorized!')\n return redirect(url_for('showRestaurants'))\n # deleting the restaurant if the request is POST\n if request.method == 'POST':\n session.delete(deleteRestaurant)\n session.commit()\n flash(\"Deleted successfully!\")\n return redirect(url_for('showRestaurants'))\n else:\n return render_template('deleteRestaurant.html',\n restaurant=deleteRestaurant)\n\n\n# routes to show menu of a restaurant\[email protected]('/restaurant/<int:restaurant_id>')\[email protected]('/restaurant/<int:restaurant_id>/menu')\ndef showMenu(restaurant_id):\n items = session.query(MenuItem).filter_by(restaurant_id=restaurant_id)\n rows = session.query(MenuItem)\\\n .filter_by(restaurant_id=restaurant_id).count()\n restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n creator = getUserInfo(restaurant.user_id)\n # displaying the menu based on if the user is logged in\n if 'username' not in login_session:\n return render_template('publicmenu.html',\n items=items, restaurant=restaurant, rows=rows)\n return render_template('menu.html', restaurant=restaurant, items=items,\n rows=rows, user=login_session['user_id'])\n\n\n# route to add new menu item\[email protected]('/restaurant/<int:restaurant_id>/menu/new', methods=['GET', 'POST'])\ndef newMenuItem(restaurant_id):\n # checking if the user is logged in\n if 'username' not in login_session:\n flash(\"Please login to continue\")\n return redirect(url_for('showLogin'))\n newRestaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n # checking if the user is authorized\n if newRestaurant.user_id != login_session['user_id']:\n flash(\"Not authorized!\")\n return redirect(url_for('showMenu', restaurant_id=restaurant_id))\n # if the method is POST, then add new 
menu item to the restaurant\n if request.method == 'POST':\n price = '$' + str(request.form['price'])\n newMenuItem = MenuItem(name=request.form['name'],\n restaurant_id=restaurant_id,\n course=request.form['course'],\n description=request.form['description'],\n price=price,\n user_id=login_session['user_id'])\n session.add(newMenuItem)\n session.commit()\n flash(\"Created new item successfully!\")\n return redirect(url_for('showMenu', restaurant_id=restaurant_id))\n else:\n return render_template('newmenuitem.html', restaurant_id=restaurant_id)\n\n\n# route to edit menu item\[email protected]('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/edit',\n methods=['GET', 'POST'])\ndef editMenuItem(restaurant_id, menu_id):\n restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n creator = getUserInfo(restaurant.user_id)\n if 'username' not in login_session:\n flash(\"Please login to continue\")\n return redirect(url_for('showLogin'))\n # checking if the user logged in is authorized\n elif creator.id != login_session['user_id']:\n flash(\"Not authorized\")\n return redirect(url_for('showMenu', restaurant_id=restaurant_id))\n editMenuItem = session.query(MenuItem).filter_by(id=menu_id).one()\n # checking if the request is POST to make changes to the database\n if request.method == 'POST':\n if request.form['name']:\n editMenuItem.name = request.form['name']\n if request.form['price']:\n editMenuItem.price = '$' + str(request.form['price'])\n if request.form['description']:\n editMenuItem.description = request.form['description']\n session.add(editMenuItem)\n session.commit()\n flash(\"Edited Successfully!\")\n return redirect(url_for('showMenu', restaurant_id=restaurant_id))\n else:\n return render_template('editmenuitem.html',\n item=editMenuItem, restaurant_id=restaurant_id)\n\n\n# route to delete menu item\[email protected]('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/delete',\n methods=['GET', 'POST'])\ndef deleteMenuItem(restaurant_id, menu_id):\n restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n creator = getUserInfo(restaurant.user_id)\n if 'username' not in login_session:\n flash(\"Please login to continue\")\n return redirect(url_for('showLogin'))\n # checking if the user logged in is authorized\n elif creator.id != login_session['user_id']:\n flash(\"Not authorized\")\n return redirect(url_for('showMenu', restaurant_id=restaurant_id))\n deleteItem = session.query(MenuItem).filter_by(id=menu_id).one()\n # making chages to the database if the method is POST\n if request.method == 'POST':\n session.delete(deleteItem)\n session.commit()\n flash(\"Deleted successfully!\")\n return redirect(url_for('showMenu', restaurant_id=restaurant_id))\n else:\n return render_template('deletemenuitem.html',\n item=deleteItem, restaurant_id=restaurant_id)\n\n\n# function to get the user id for a given email\ndef getUserID(email):\n try:\n user = session.query(User).filter_by(email=email).one()\n return user.id\n except:\n return None\n\n\n# function to get the user object based on a given user id\ndef getUserInfo(user_id):\n user = session.query(User).filter_by(id=user_id).one()\n return user\n\n\n# create a new user based on the login_session\ndef createUser(login_session):\n newUser = User(name=login_session['username'],\n email=login_session['email'],\n picture=login_session['picture'])\n session.add(newUser)\n session.commit()\n user = session.query(User).filter_by(email=login_session['email']).one()\n return user.id\n\n\n# program starts here\nif 
__name__ == '__main__':\n app.secret_key = 'super_secret_key'\n app.debug = True\n app.run(host='0.0.0.0', port=5000)\n"
] | true |
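The record above is flagged error: true, likely because it mixes a Python 2 print statement (print 'Access Token is None') with Python 3 print() calls; it also misspells reslut and response_headers in the token-validation branch and splits the revoke URL string across two lines without continuation, leaving the second half dangling. Its anti-forgery state-token handshake is still worth isolating; a minimal Python 3 sketch of just that part, with the route names kept and everything else simplified:

import random
import string
from flask import Flask, request, session, abort

app = Flask(__name__)
app.secret_key = 'super_secret_key'  # placeholder, as in the record

@app.route('/login/')
def show_login():
    # Issue a random state token and remember it server-side.
    state = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(32))
    session['state'] = state
    return 'state issued: ' + state

@app.route('/gconnect/', methods=['POST'])
def gconnect():
    # Reject the OAuth callback unless it echoes the state token we issued.
    if request.args.get('state') != session.get('state'):
        abort(401)
    return 'state ok'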
98,559 |
119b8295d99ddaf42c6aafc0e7cefc53f9227ecb
|
from movielist.models import Movie
from movielist.serializers import MovieSerializer
from rest_framework import generics
class MovieListView(generics.ListCreateAPIView):
queryset = Movie.objects.all()
serializer_class = MovieSerializer
class MovieView(generics.RetrieveUpdateDestroyAPIView):
queryset = Movie.objects.all()
serializer_class = MovieSerializer
|
[
"from movielist.models import Movie\nfrom movielist.serializers import MovieSerializer\nfrom rest_framework import generics\n\n\nclass MovieListView(generics.ListCreateAPIView):\n queryset = Movie.objects.all()\n serializer_class = MovieSerializer\n\n\nclass MovieView(generics.RetrieveUpdateDestroyAPIView):\n queryset = Movie.objects.all()\n serializer_class = MovieSerializer\n",
"<import token>\n\n\nclass MovieListView(generics.ListCreateAPIView):\n queryset = Movie.objects.all()\n serializer_class = MovieSerializer\n\n\nclass MovieView(generics.RetrieveUpdateDestroyAPIView):\n queryset = Movie.objects.all()\n serializer_class = MovieSerializer\n",
"<import token>\n\n\nclass MovieListView(generics.ListCreateAPIView):\n <assignment token>\n <assignment token>\n\n\nclass MovieView(generics.RetrieveUpdateDestroyAPIView):\n queryset = Movie.objects.all()\n serializer_class = MovieSerializer\n",
"<import token>\n<class token>\n\n\nclass MovieView(generics.RetrieveUpdateDestroyAPIView):\n queryset = Movie.objects.all()\n serializer_class = MovieSerializer\n",
"<import token>\n<class token>\n\n\nclass MovieView(generics.RetrieveUpdateDestroyAPIView):\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n<class token>\n"
] | false |
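The two DRF generic views in the record above only handle requests once they are routed; a minimal sketch of that wiring (the movielist.views module path and the route names are assumptions, not taken from the record):

# urls.py (hypothetical)
from django.urls import path
from movielist.views import MovieListView, MovieView

urlpatterns = [
    path('movies/', MovieListView.as_view(), name='movie-list'),         # GET list, POST create
    path('movies/<int:pk>/', MovieView.as_view(), name='movie-detail'),  # GET, PUT, PATCH, DELETE
]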
98,560 |
a805fabe18c59d345d7eccebdf3e0117d5a01d6b
|
import logging
import sqlite3
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication # noqa
from email.mime.image import MIMEImage
logger = logging.getLogger('root')
_IMG_HTML_NAME = '<image1>'
fail_log = '[send_email] Fail: message from {} to {} id {} not sent... Retrying: {}'
fail_log2 = '[send_email] Fail: message from {} to {} id {} not sent... End retrying'
def send_email(
login,
password,
server,
to_addr,
html_text,
subject,
unsubscribe_link,
retry_nums=1,
retry_interval=5,
image_path=None,
testing=True,
email_id=1,
):
# Retrying to send message if not success
for i in range(retry_nums):
conn = sqlite3.connect('db.sqlite')
cur = conn.cursor()
try:
server = smtplib.SMTP_SSL(server, 465)
server.login(login, password)
server.auth_plain()
msg = MIMEMultipart('related')
msg['Subject'] = subject
msg['From'] = login
msg['To'] = to_addr
msg['List-Unsubscribe'] = unsubscribe_link
part = MIMEText('')
msg.attach(part)
msga = MIMEMultipart('alternative')
msgText = MIMEText(html_text, 'html')
msga.attach(msgText)
msg.attach(msga)
if image_path:
assert _IMG_HTML_NAME in html_text
# This example assumes the image is in the current directory
fp = open(image_path, 'rb')
msgImage = MIMEImage(fp.read())
fp.close()
# Define the image's ID as referenced above
msgImage.add_header('Content-ID', _IMG_HTML_NAME)
msg.attach(msgImage)
if not testing:
server.sendmail(login, to_addr, msg.as_string())
server.quit()
logger.info(
'[send_email] Success: message from {} to {} sent'.format(
login,
to_addr,
email_id,
)
)
cur.execute(
'UPDATE queue SET status="SENT" where id=?', (email_id,)
)
conn.commit()
conn.close()
return True
except Exception as ex:
logger.warning(fail_log.format(login, to_addr, email_id, ex,))
exception = "EXCEPTION"
if 'password' in repr(ex) or 'Password' in repr(ex):
exception = "WRONG LOGIN OR PASSWORD"
elif 'spam' in repr(ex) or 'Spam' in repr(ex):
exception = "SPAM"
else:
exception = "BAD SMTP SERVER"
cur.execute(
'UPDATE queue SET status=? where id=?',
(exception, email_id,)
)
conn.commit()
conn.close()
logger.warning(fail_log2.format(login, to_addr, email_id))
return False
|
[
"import logging\nimport sqlite3\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.application import MIMEApplication # noqa\nfrom email.mime.image import MIMEImage\n\nlogger = logging.getLogger('root')\n\n_IMG_HTML_NAME = '<image1>'\n\nfail_log = '[send_email] Fail: message from {} to {} id {} not sent... Retrying: {}'\nfail_log2 = '[send_email] Fail: message from {} to {} id {} not sent... End retrying'\n\n\ndef send_email(\n login,\n password,\n server,\n to_addr,\n html_text,\n subject,\n unsubscribe_link,\n retry_nums=1,\n retry_interval=5,\n image_path=None,\n testing=True,\n email_id=1,\n):\n\n # Retrying to send message if not success\n for i in range(retry_nums):\n conn = sqlite3.connect('db.sqlite')\n cur = conn.cursor()\n try:\n server = smtplib.SMTP_SSL(server, 465)\n server.login(login, password)\n server.auth_plain()\n\n msg = MIMEMultipart('related')\n msg['Subject'] = subject\n msg['From'] = login\n msg['To'] = to_addr\n msg['List-Unsubscribe'] = unsubscribe_link\n\n part = MIMEText('')\n msg.attach(part)\n msga = MIMEMultipart('alternative')\n msgText = MIMEText(html_text, 'html')\n msga.attach(msgText)\n msg.attach(msga)\n\n if image_path:\n assert _IMG_HTML_NAME in html_text\n # This example assumes the image is in the current directory\n fp = open(image_path, 'rb')\n msgImage = MIMEImage(fp.read())\n fp.close()\n\n # Define the image's ID as referenced above\n msgImage.add_header('Content-ID', _IMG_HTML_NAME)\n msg.attach(msgImage)\n\n if not testing:\n server.sendmail(login, to_addr, msg.as_string())\n\n server.quit()\n logger.info(\n '[send_email] Success: message from {} to {} sent'.format(\n login,\n to_addr,\n email_id,\n )\n )\n cur.execute(\n 'UPDATE queue SET status=\"SENT\" where id=?', (email_id,)\n )\n conn.commit()\n conn.close()\n return True\n\n except Exception as ex:\n logger.warning(fail_log.format(login, to_addr, email_id, ex,))\n exception = \"EXCEPTION\"\n if 'password' in repr(ex) or 'Password' in repr(ex):\n exception = \"WRONG LOGIN OR PASSWORD\"\n elif 'spam' in repr(ex) or 'Spam' in repr(ex):\n exception = \"SPAM\"\n else:\n exception = \"BAD SMTP SERVER\"\n cur.execute(\n 'UPDATE queue SET status=? where id=?',\n (exception, email_id,)\n )\n conn.commit()\n conn.close()\n\n logger.warning(fail_log2.format(login, to_addr, email_id))\n return False\n",
"import logging\nimport sqlite3\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.application import MIMEApplication\nfrom email.mime.image import MIMEImage\nlogger = logging.getLogger('root')\n_IMG_HTML_NAME = '<image1>'\nfail_log = (\n '[send_email] Fail: message from {} to {} id {} not sent... Retrying: {}')\nfail_log2 = (\n '[send_email] Fail: message from {} to {} id {} not sent... End retrying')\n\n\ndef send_email(login, password, server, to_addr, html_text, subject,\n unsubscribe_link, retry_nums=1, retry_interval=5, image_path=None,\n testing=True, email_id=1):\n for i in range(retry_nums):\n conn = sqlite3.connect('db.sqlite')\n cur = conn.cursor()\n try:\n server = smtplib.SMTP_SSL(server, 465)\n server.login(login, password)\n server.auth_plain()\n msg = MIMEMultipart('related')\n msg['Subject'] = subject\n msg['From'] = login\n msg['To'] = to_addr\n msg['List-Unsubscribe'] = unsubscribe_link\n part = MIMEText('')\n msg.attach(part)\n msga = MIMEMultipart('alternative')\n msgText = MIMEText(html_text, 'html')\n msga.attach(msgText)\n msg.attach(msga)\n if image_path:\n assert _IMG_HTML_NAME in html_text\n fp = open(image_path, 'rb')\n msgImage = MIMEImage(fp.read())\n fp.close()\n msgImage.add_header('Content-ID', _IMG_HTML_NAME)\n msg.attach(msgImage)\n if not testing:\n server.sendmail(login, to_addr, msg.as_string())\n server.quit()\n logger.info('[send_email] Success: message from {} to {} sent'.\n format(login, to_addr, email_id))\n cur.execute('UPDATE queue SET status=\"SENT\" where id=?', (\n email_id,))\n conn.commit()\n conn.close()\n return True\n except Exception as ex:\n logger.warning(fail_log.format(login, to_addr, email_id, ex))\n exception = 'EXCEPTION'\n if 'password' in repr(ex) or 'Password' in repr(ex):\n exception = 'WRONG LOGIN OR PASSWORD'\n elif 'spam' in repr(ex) or 'Spam' in repr(ex):\n exception = 'SPAM'\n else:\n exception = 'BAD SMTP SERVER'\n cur.execute('UPDATE queue SET status=? where id=?', (exception,\n email_id))\n conn.commit()\n conn.close()\n logger.warning(fail_log2.format(login, to_addr, email_id))\n return False\n",
"<import token>\nlogger = logging.getLogger('root')\n_IMG_HTML_NAME = '<image1>'\nfail_log = (\n '[send_email] Fail: message from {} to {} id {} not sent... Retrying: {}')\nfail_log2 = (\n '[send_email] Fail: message from {} to {} id {} not sent... End retrying')\n\n\ndef send_email(login, password, server, to_addr, html_text, subject,\n unsubscribe_link, retry_nums=1, retry_interval=5, image_path=None,\n testing=True, email_id=1):\n for i in range(retry_nums):\n conn = sqlite3.connect('db.sqlite')\n cur = conn.cursor()\n try:\n server = smtplib.SMTP_SSL(server, 465)\n server.login(login, password)\n server.auth_plain()\n msg = MIMEMultipart('related')\n msg['Subject'] = subject\n msg['From'] = login\n msg['To'] = to_addr\n msg['List-Unsubscribe'] = unsubscribe_link\n part = MIMEText('')\n msg.attach(part)\n msga = MIMEMultipart('alternative')\n msgText = MIMEText(html_text, 'html')\n msga.attach(msgText)\n msg.attach(msga)\n if image_path:\n assert _IMG_HTML_NAME in html_text\n fp = open(image_path, 'rb')\n msgImage = MIMEImage(fp.read())\n fp.close()\n msgImage.add_header('Content-ID', _IMG_HTML_NAME)\n msg.attach(msgImage)\n if not testing:\n server.sendmail(login, to_addr, msg.as_string())\n server.quit()\n logger.info('[send_email] Success: message from {} to {} sent'.\n format(login, to_addr, email_id))\n cur.execute('UPDATE queue SET status=\"SENT\" where id=?', (\n email_id,))\n conn.commit()\n conn.close()\n return True\n except Exception as ex:\n logger.warning(fail_log.format(login, to_addr, email_id, ex))\n exception = 'EXCEPTION'\n if 'password' in repr(ex) or 'Password' in repr(ex):\n exception = 'WRONG LOGIN OR PASSWORD'\n elif 'spam' in repr(ex) or 'Spam' in repr(ex):\n exception = 'SPAM'\n else:\n exception = 'BAD SMTP SERVER'\n cur.execute('UPDATE queue SET status=? where id=?', (exception,\n email_id))\n conn.commit()\n conn.close()\n logger.warning(fail_log2.format(login, to_addr, email_id))\n return False\n",
"<import token>\n<assignment token>\n\n\ndef send_email(login, password, server, to_addr, html_text, subject,\n unsubscribe_link, retry_nums=1, retry_interval=5, image_path=None,\n testing=True, email_id=1):\n for i in range(retry_nums):\n conn = sqlite3.connect('db.sqlite')\n cur = conn.cursor()\n try:\n server = smtplib.SMTP_SSL(server, 465)\n server.login(login, password)\n server.auth_plain()\n msg = MIMEMultipart('related')\n msg['Subject'] = subject\n msg['From'] = login\n msg['To'] = to_addr\n msg['List-Unsubscribe'] = unsubscribe_link\n part = MIMEText('')\n msg.attach(part)\n msga = MIMEMultipart('alternative')\n msgText = MIMEText(html_text, 'html')\n msga.attach(msgText)\n msg.attach(msga)\n if image_path:\n assert _IMG_HTML_NAME in html_text\n fp = open(image_path, 'rb')\n msgImage = MIMEImage(fp.read())\n fp.close()\n msgImage.add_header('Content-ID', _IMG_HTML_NAME)\n msg.attach(msgImage)\n if not testing:\n server.sendmail(login, to_addr, msg.as_string())\n server.quit()\n logger.info('[send_email] Success: message from {} to {} sent'.\n format(login, to_addr, email_id))\n cur.execute('UPDATE queue SET status=\"SENT\" where id=?', (\n email_id,))\n conn.commit()\n conn.close()\n return True\n except Exception as ex:\n logger.warning(fail_log.format(login, to_addr, email_id, ex))\n exception = 'EXCEPTION'\n if 'password' in repr(ex) or 'Password' in repr(ex):\n exception = 'WRONG LOGIN OR PASSWORD'\n elif 'spam' in repr(ex) or 'Spam' in repr(ex):\n exception = 'SPAM'\n else:\n exception = 'BAD SMTP SERVER'\n cur.execute('UPDATE queue SET status=? where id=?', (exception,\n email_id))\n conn.commit()\n conn.close()\n logger.warning(fail_log2.format(login, to_addr, email_id))\n return False\n",
"<import token>\n<assignment token>\n<function token>\n"
] | false |
98,561 |
3a7387d657892e3027a8766b57164eeb636fc92c
|
import json
import os
import boto3
import logging
import smtoolkit as vcsm
import uuid
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
def process_payload(facerec):
try:
logger.debug('event.'+ os.environ['AWS_LAMBDA_FUNCTION_NAME'] + '.process_payload.trigger={}'.format(json.dumps(facerec)))
tablename = os.environ['SessionTable']
sfnarn = os.environ['WorkFlowArn']
visitorid = facerec['Visitor']['FaceId']
searchresult = vcsm.find_session(visitorid,tablename)
print(searchresult)
#Find Session
if 'Item' in searchresult and searchresult['Item'] != None:
print(searchresult)
sessionid = searchresult['Item']['SessionId']
hostnotiftoken = searchresult['Item']['HostNotificationToken']
hostarrivtoken = searchresult['Item']['HostArrivalToken']
remind_host = os.environ['ResponseUrl'] + "?"+ vcsm.generate_params('remind_host',hostnotiftoken)
vcsm.trigger_continue_workflow(remind_host)
else:
sfnexecid = "vc-session-" + str(uuid.uuid4())
facerec['SessionId'] = sfnexecid
vcsm.start_workflow_execution(sfnexecid,sfnarn,facerec)
except Exception as e:
print(e)
print("Exception error while checking for payloads")
raise e
def process_blankpayload(facerec):
try:
sfnarn = os.environ['WorkFlowArn']
logger.debug('event.'+ os.environ['AWS_LAMBDA_FUNCTION_NAME'] + '.process_blankpayload.trigger={}'.format(json.dumps(facerec)))
sfnexecid = "blank-vc-session-" + str(uuid.uuid4())
vcsm.start_workflow_execution(sfnexecid,sfnarn,facerec)
except Exception as e:
print(e)
print("Exception error while checking for payloads")
raise e
def lambda_handler(event, context):
try:
logger.debug('event.'+ os.environ['AWS_LAMBDA_FUNCTION_NAME'] + '.trigger={}'.format(json.dumps(event)))
for rec in event['Records']:
if rec['EventSource'] == "aws:sns":
facerec = rec['Sns']['Message']
payload = json.loads(facerec)
if 'Visitor' in payload:
if payload['Visitor'] != None:
process_payload(json.loads(facerec))
else:
process_blankpayload(json.loads(facerec))
result = facerec
logger.debug('event.'+ os.environ['AWS_LAMBDA_FUNCTION_NAME'] + '.result={}'.format(json.dumps(result)))
return result
except Exception as e:
print(e)
print("Exception error registering session")
raise e
|
[
"import json\nimport os\nimport boto3\nimport logging\nimport smtoolkit as vcsm\nimport uuid\n \nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\n\n\ndef process_payload(facerec):\n try:\n logger.debug('event.'+ os.environ['AWS_LAMBDA_FUNCTION_NAME'] + '.process_payload.trigger={}'.format(json.dumps(facerec)))\n tablename = os.environ['SessionTable']\n sfnarn = os.environ['WorkFlowArn']\n visitorid = facerec['Visitor']['FaceId']\n searchresult = vcsm.find_session(visitorid,tablename)\n print(searchresult)\n #Find Session\n \n if 'Item' in searchresult and searchresult['Item'] != None:\n print(searchresult)\n sessionid = searchresult['Item']['SessionId']\n hostnotiftoken = searchresult['Item']['HostNotificationToken']\n hostarrivtoken = searchresult['Item']['HostArrivalToken'] \n remind_host = os.environ['ResponseUrl'] + \"?\"+ vcsm.generate_params('remind_host',hostnotiftoken)\n vcsm.trigger_continue_workflow(remind_host)\n else:\n sfnexecid = \"vc-session-\" + str(uuid.uuid4())\n facerec['SessionId'] = sfnexecid\n vcsm.start_workflow_execution(sfnexecid,sfnarn,facerec)\n\n except Exception as e:\n print(e)\n print(\"Exception error while checking for payloads\")\n raise e\n\ndef process_blankpayload(facerec):\n try:\n sfnarn = os.environ['WorkFlowArn']\n logger.debug('event.'+ os.environ['AWS_LAMBDA_FUNCTION_NAME'] + '.process_blankpayload.trigger={}'.format(json.dumps(facerec)))\n sfnexecid = \"blank-vc-session-\" + str(uuid.uuid4())\n vcsm.start_workflow_execution(sfnexecid,sfnarn,facerec)\n\n except Exception as e:\n print(e)\n print(\"Exception error while checking for payloads\")\n raise e\n\ndef lambda_handler(event, context):\n \n try:\n logger.debug('event.'+ os.environ['AWS_LAMBDA_FUNCTION_NAME'] + '.trigger={}'.format(json.dumps(event)))\n \n for rec in event['Records']:\n if rec['EventSource'] == \"aws:sns\":\n facerec = rec['Sns']['Message']\n \n payload = json.loads(facerec)\n \n if 'Visitor' in payload:\n if payload['Visitor'] != None:\n process_payload(json.loads(facerec))\n else:\n process_blankpayload(json.loads(facerec))\n result = facerec\n logger.debug('event.'+ os.environ['AWS_LAMBDA_FUNCTION_NAME'] + '.result={}'.format(json.dumps(result)))\n return result\n except Exception as e:\n print(e)\n print(\"Exception error registering session\")\n raise e\n",
"import json\nimport os\nimport boto3\nimport logging\nimport smtoolkit as vcsm\nimport uuid\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\n\n\ndef process_payload(facerec):\n try:\n logger.debug('event.' + os.environ['AWS_LAMBDA_FUNCTION_NAME'] +\n '.process_payload.trigger={}'.format(json.dumps(facerec)))\n tablename = os.environ['SessionTable']\n sfnarn = os.environ['WorkFlowArn']\n visitorid = facerec['Visitor']['FaceId']\n searchresult = vcsm.find_session(visitorid, tablename)\n print(searchresult)\n if 'Item' in searchresult and searchresult['Item'] != None:\n print(searchresult)\n sessionid = searchresult['Item']['SessionId']\n hostnotiftoken = searchresult['Item']['HostNotificationToken']\n hostarrivtoken = searchresult['Item']['HostArrivalToken']\n remind_host = os.environ['ResponseUrl'\n ] + '?' + vcsm.generate_params('remind_host', hostnotiftoken)\n vcsm.trigger_continue_workflow(remind_host)\n else:\n sfnexecid = 'vc-session-' + str(uuid.uuid4())\n facerec['SessionId'] = sfnexecid\n vcsm.start_workflow_execution(sfnexecid, sfnarn, facerec)\n except Exception as e:\n print(e)\n print('Exception error while checking for payloads')\n raise e\n\n\ndef process_blankpayload(facerec):\n try:\n sfnarn = os.environ['WorkFlowArn']\n logger.debug('event.' + os.environ['AWS_LAMBDA_FUNCTION_NAME'] +\n '.process_blankpayload.trigger={}'.format(json.dumps(facerec)))\n sfnexecid = 'blank-vc-session-' + str(uuid.uuid4())\n vcsm.start_workflow_execution(sfnexecid, sfnarn, facerec)\n except Exception as e:\n print(e)\n print('Exception error while checking for payloads')\n raise e\n\n\ndef lambda_handler(event, context):\n try:\n logger.debug('event.' + os.environ['AWS_LAMBDA_FUNCTION_NAME'] +\n '.trigger={}'.format(json.dumps(event)))\n for rec in event['Records']:\n if rec['EventSource'] == 'aws:sns':\n facerec = rec['Sns']['Message']\n payload = json.loads(facerec)\n if 'Visitor' in payload:\n if payload['Visitor'] != None:\n process_payload(json.loads(facerec))\n else:\n process_blankpayload(json.loads(facerec))\n result = facerec\n logger.debug('event.' + os.environ['AWS_LAMBDA_FUNCTION_NAME'] +\n '.result={}'.format(json.dumps(result)))\n return result\n except Exception as e:\n print(e)\n print('Exception error registering session')\n raise e\n",
"<import token>\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\n\n\ndef process_payload(facerec):\n try:\n logger.debug('event.' + os.environ['AWS_LAMBDA_FUNCTION_NAME'] +\n '.process_payload.trigger={}'.format(json.dumps(facerec)))\n tablename = os.environ['SessionTable']\n sfnarn = os.environ['WorkFlowArn']\n visitorid = facerec['Visitor']['FaceId']\n searchresult = vcsm.find_session(visitorid, tablename)\n print(searchresult)\n if 'Item' in searchresult and searchresult['Item'] != None:\n print(searchresult)\n sessionid = searchresult['Item']['SessionId']\n hostnotiftoken = searchresult['Item']['HostNotificationToken']\n hostarrivtoken = searchresult['Item']['HostArrivalToken']\n remind_host = os.environ['ResponseUrl'\n ] + '?' + vcsm.generate_params('remind_host', hostnotiftoken)\n vcsm.trigger_continue_workflow(remind_host)\n else:\n sfnexecid = 'vc-session-' + str(uuid.uuid4())\n facerec['SessionId'] = sfnexecid\n vcsm.start_workflow_execution(sfnexecid, sfnarn, facerec)\n except Exception as e:\n print(e)\n print('Exception error while checking for payloads')\n raise e\n\n\ndef process_blankpayload(facerec):\n try:\n sfnarn = os.environ['WorkFlowArn']\n logger.debug('event.' + os.environ['AWS_LAMBDA_FUNCTION_NAME'] +\n '.process_blankpayload.trigger={}'.format(json.dumps(facerec)))\n sfnexecid = 'blank-vc-session-' + str(uuid.uuid4())\n vcsm.start_workflow_execution(sfnexecid, sfnarn, facerec)\n except Exception as e:\n print(e)\n print('Exception error while checking for payloads')\n raise e\n\n\ndef lambda_handler(event, context):\n try:\n logger.debug('event.' + os.environ['AWS_LAMBDA_FUNCTION_NAME'] +\n '.trigger={}'.format(json.dumps(event)))\n for rec in event['Records']:\n if rec['EventSource'] == 'aws:sns':\n facerec = rec['Sns']['Message']\n payload = json.loads(facerec)\n if 'Visitor' in payload:\n if payload['Visitor'] != None:\n process_payload(json.loads(facerec))\n else:\n process_blankpayload(json.loads(facerec))\n result = facerec\n logger.debug('event.' + os.environ['AWS_LAMBDA_FUNCTION_NAME'] +\n '.result={}'.format(json.dumps(result)))\n return result\n except Exception as e:\n print(e)\n print('Exception error registering session')\n raise e\n",
"<import token>\n<assignment token>\nlogger.setLevel(logging.DEBUG)\n\n\ndef process_payload(facerec):\n try:\n logger.debug('event.' + os.environ['AWS_LAMBDA_FUNCTION_NAME'] +\n '.process_payload.trigger={}'.format(json.dumps(facerec)))\n tablename = os.environ['SessionTable']\n sfnarn = os.environ['WorkFlowArn']\n visitorid = facerec['Visitor']['FaceId']\n searchresult = vcsm.find_session(visitorid, tablename)\n print(searchresult)\n if 'Item' in searchresult and searchresult['Item'] != None:\n print(searchresult)\n sessionid = searchresult['Item']['SessionId']\n hostnotiftoken = searchresult['Item']['HostNotificationToken']\n hostarrivtoken = searchresult['Item']['HostArrivalToken']\n remind_host = os.environ['ResponseUrl'\n ] + '?' + vcsm.generate_params('remind_host', hostnotiftoken)\n vcsm.trigger_continue_workflow(remind_host)\n else:\n sfnexecid = 'vc-session-' + str(uuid.uuid4())\n facerec['SessionId'] = sfnexecid\n vcsm.start_workflow_execution(sfnexecid, sfnarn, facerec)\n except Exception as e:\n print(e)\n print('Exception error while checking for payloads')\n raise e\n\n\ndef process_blankpayload(facerec):\n try:\n sfnarn = os.environ['WorkFlowArn']\n logger.debug('event.' + os.environ['AWS_LAMBDA_FUNCTION_NAME'] +\n '.process_blankpayload.trigger={}'.format(json.dumps(facerec)))\n sfnexecid = 'blank-vc-session-' + str(uuid.uuid4())\n vcsm.start_workflow_execution(sfnexecid, sfnarn, facerec)\n except Exception as e:\n print(e)\n print('Exception error while checking for payloads')\n raise e\n\n\ndef lambda_handler(event, context):\n try:\n logger.debug('event.' + os.environ['AWS_LAMBDA_FUNCTION_NAME'] +\n '.trigger={}'.format(json.dumps(event)))\n for rec in event['Records']:\n if rec['EventSource'] == 'aws:sns':\n facerec = rec['Sns']['Message']\n payload = json.loads(facerec)\n if 'Visitor' in payload:\n if payload['Visitor'] != None:\n process_payload(json.loads(facerec))\n else:\n process_blankpayload(json.loads(facerec))\n result = facerec\n logger.debug('event.' + os.environ['AWS_LAMBDA_FUNCTION_NAME'] +\n '.result={}'.format(json.dumps(result)))\n return result\n except Exception as e:\n print(e)\n print('Exception error registering session')\n raise e\n",
"<import token>\n<assignment token>\n<code token>\n\n\ndef process_payload(facerec):\n try:\n logger.debug('event.' + os.environ['AWS_LAMBDA_FUNCTION_NAME'] +\n '.process_payload.trigger={}'.format(json.dumps(facerec)))\n tablename = os.environ['SessionTable']\n sfnarn = os.environ['WorkFlowArn']\n visitorid = facerec['Visitor']['FaceId']\n searchresult = vcsm.find_session(visitorid, tablename)\n print(searchresult)\n if 'Item' in searchresult and searchresult['Item'] != None:\n print(searchresult)\n sessionid = searchresult['Item']['SessionId']\n hostnotiftoken = searchresult['Item']['HostNotificationToken']\n hostarrivtoken = searchresult['Item']['HostArrivalToken']\n remind_host = os.environ['ResponseUrl'\n ] + '?' + vcsm.generate_params('remind_host', hostnotiftoken)\n vcsm.trigger_continue_workflow(remind_host)\n else:\n sfnexecid = 'vc-session-' + str(uuid.uuid4())\n facerec['SessionId'] = sfnexecid\n vcsm.start_workflow_execution(sfnexecid, sfnarn, facerec)\n except Exception as e:\n print(e)\n print('Exception error while checking for payloads')\n raise e\n\n\ndef process_blankpayload(facerec):\n try:\n sfnarn = os.environ['WorkFlowArn']\n logger.debug('event.' + os.environ['AWS_LAMBDA_FUNCTION_NAME'] +\n '.process_blankpayload.trigger={}'.format(json.dumps(facerec)))\n sfnexecid = 'blank-vc-session-' + str(uuid.uuid4())\n vcsm.start_workflow_execution(sfnexecid, sfnarn, facerec)\n except Exception as e:\n print(e)\n print('Exception error while checking for payloads')\n raise e\n\n\ndef lambda_handler(event, context):\n try:\n logger.debug('event.' + os.environ['AWS_LAMBDA_FUNCTION_NAME'] +\n '.trigger={}'.format(json.dumps(event)))\n for rec in event['Records']:\n if rec['EventSource'] == 'aws:sns':\n facerec = rec['Sns']['Message']\n payload = json.loads(facerec)\n if 'Visitor' in payload:\n if payload['Visitor'] != None:\n process_payload(json.loads(facerec))\n else:\n process_blankpayload(json.loads(facerec))\n result = facerec\n logger.debug('event.' + os.environ['AWS_LAMBDA_FUNCTION_NAME'] +\n '.result={}'.format(json.dumps(result)))\n return result\n except Exception as e:\n print(e)\n print('Exception error registering session')\n raise e\n",
"<import token>\n<assignment token>\n<code token>\n\n\ndef process_payload(facerec):\n try:\n logger.debug('event.' + os.environ['AWS_LAMBDA_FUNCTION_NAME'] +\n '.process_payload.trigger={}'.format(json.dumps(facerec)))\n tablename = os.environ['SessionTable']\n sfnarn = os.environ['WorkFlowArn']\n visitorid = facerec['Visitor']['FaceId']\n searchresult = vcsm.find_session(visitorid, tablename)\n print(searchresult)\n if 'Item' in searchresult and searchresult['Item'] != None:\n print(searchresult)\n sessionid = searchresult['Item']['SessionId']\n hostnotiftoken = searchresult['Item']['HostNotificationToken']\n hostarrivtoken = searchresult['Item']['HostArrivalToken']\n remind_host = os.environ['ResponseUrl'\n ] + '?' + vcsm.generate_params('remind_host', hostnotiftoken)\n vcsm.trigger_continue_workflow(remind_host)\n else:\n sfnexecid = 'vc-session-' + str(uuid.uuid4())\n facerec['SessionId'] = sfnexecid\n vcsm.start_workflow_execution(sfnexecid, sfnarn, facerec)\n except Exception as e:\n print(e)\n print('Exception error while checking for payloads')\n raise e\n\n\ndef process_blankpayload(facerec):\n try:\n sfnarn = os.environ['WorkFlowArn']\n logger.debug('event.' + os.environ['AWS_LAMBDA_FUNCTION_NAME'] +\n '.process_blankpayload.trigger={}'.format(json.dumps(facerec)))\n sfnexecid = 'blank-vc-session-' + str(uuid.uuid4())\n vcsm.start_workflow_execution(sfnexecid, sfnarn, facerec)\n except Exception as e:\n print(e)\n print('Exception error while checking for payloads')\n raise e\n\n\n<function token>\n",
"<import token>\n<assignment token>\n<code token>\n\n\ndef process_payload(facerec):\n try:\n logger.debug('event.' + os.environ['AWS_LAMBDA_FUNCTION_NAME'] +\n '.process_payload.trigger={}'.format(json.dumps(facerec)))\n tablename = os.environ['SessionTable']\n sfnarn = os.environ['WorkFlowArn']\n visitorid = facerec['Visitor']['FaceId']\n searchresult = vcsm.find_session(visitorid, tablename)\n print(searchresult)\n if 'Item' in searchresult and searchresult['Item'] != None:\n print(searchresult)\n sessionid = searchresult['Item']['SessionId']\n hostnotiftoken = searchresult['Item']['HostNotificationToken']\n hostarrivtoken = searchresult['Item']['HostArrivalToken']\n remind_host = os.environ['ResponseUrl'\n ] + '?' + vcsm.generate_params('remind_host', hostnotiftoken)\n vcsm.trigger_continue_workflow(remind_host)\n else:\n sfnexecid = 'vc-session-' + str(uuid.uuid4())\n facerec['SessionId'] = sfnexecid\n vcsm.start_workflow_execution(sfnexecid, sfnarn, facerec)\n except Exception as e:\n print(e)\n print('Exception error while checking for payloads')\n raise e\n\n\n<function token>\n<function token>\n",
"<import token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n"
] | false |
98,562 |
7d977ba7218f1510ed6f4c78d013cb199d69ee8e
|
import sys
import yaml
import requests
try:
apikeys = yaml.safe_load(open("config/apikeys.conf"))["imgur"]
except:
print("Warning: invalid or nonexistant api key.", file=sys.stderr)
print("Skipping util.services.imgur", file=sys.stderr)
apikeys = None
else:
def upload(data):
headers = {"Authorization": "Client-ID %s" % apikeys["client_id"]}
res = requests.post(
"https://api.imgur.com/3/upload.json",
headers = headers,
data = {'image': data}
)
return res.json()
|
[
"import sys\nimport yaml\nimport requests\n\ntry:\n apikeys = yaml.safe_load(open(\"config/apikeys.conf\"))[\"imgur\"]\nexcept:\n print(\"Warning: invalid or nonexistant api key.\", file=sys.stderr)\n print(\"Skipping util.services.imgur\", file=sys.stderr)\n apikeys = None\nelse:\n def upload(data):\n headers = {\"Authorization\": \"Client-ID %s\" % apikeys[\"client_id\"]}\n\n res = requests.post(\n \"https://api.imgur.com/3/upload.json\", \n headers = headers,\n data = {'image': data}\n )\n return res.json()",
"import sys\nimport yaml\nimport requests\ntry:\n apikeys = yaml.safe_load(open('config/apikeys.conf'))['imgur']\nexcept:\n print('Warning: invalid or nonexistant api key.', file=sys.stderr)\n print('Skipping util.services.imgur', file=sys.stderr)\n apikeys = None\nelse:\n\n def upload(data):\n headers = {'Authorization': 'Client-ID %s' % apikeys['client_id']}\n res = requests.post('https://api.imgur.com/3/upload.json', headers=\n headers, data={'image': data})\n return res.json()\n",
"<import token>\ntry:\n apikeys = yaml.safe_load(open('config/apikeys.conf'))['imgur']\nexcept:\n print('Warning: invalid or nonexistant api key.', file=sys.stderr)\n print('Skipping util.services.imgur', file=sys.stderr)\n apikeys = None\nelse:\n\n def upload(data):\n headers = {'Authorization': 'Client-ID %s' % apikeys['client_id']}\n res = requests.post('https://api.imgur.com/3/upload.json', headers=\n headers, data={'image': data})\n return res.json()\n",
"<import token>\n<code token>\n"
] | false |
98,563 |
b4c4c0c7dfcc4a615c79c1377e82307c746ab571
|
r, g, b, n = map(int, input().split())
ans = 0
for i in range(n + 1):
if i * r > n:
break
for j in range(n + 1):
if i * r + j * g > n:
break
bnum = n - i * r - j * g
if bnum % b == 0:
ans += 1
print(ans)
|
[
"r, g, b, n = map(int, input().split())\n\nans = 0\n\nfor i in range(n + 1):\n if i * r > n:\n break\n for j in range(n + 1):\n if i * r + j * g > n:\n break\n bnum = n - i * r - j * g\n if bnum % b == 0:\n ans += 1\nprint(ans)\n",
"r, g, b, n = map(int, input().split())\nans = 0\nfor i in range(n + 1):\n if i * r > n:\n break\n for j in range(n + 1):\n if i * r + j * g > n:\n break\n bnum = n - i * r - j * g\n if bnum % b == 0:\n ans += 1\nprint(ans)\n",
"<assignment token>\nfor i in range(n + 1):\n if i * r > n:\n break\n for j in range(n + 1):\n if i * r + j * g > n:\n break\n bnum = n - i * r - j * g\n if bnum % b == 0:\n ans += 1\nprint(ans)\n",
"<assignment token>\n<code token>\n"
] | false |
98,564 |
f89a5296b0640d6bf49e544f2a19b710b5d558c1
|
# -*- coding: utf-8 -*-
"""
GalRotpy.py - a Python-based tool for parametrizing galaxy potential by rotation curve
Copyright (c) 2016 Andr\'es Granados
All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Created on Fri Jan 6 07:00:00 MST 2017
"""
from matplotlib.widgets import Slider, Button, RadioButtons, CheckButtons, TextBox # Matplotlib widgets
import matplotlib.pylab as plt # Plotting interface
import numpy as np # Array managing
from galpy.potential import MiyamotoNagaiPotential, NFWPotential, RazorThinExponentialDiskPotential, BurkertPotential # GALPY potentials
from galpy.potential import calcRotcurve # composed rotation curve calculation for plotting
from astropy import units # Physical/real units data managing
from astropy import table as Table # For fast and easy reading / writing with tables using numpy library
import emcee
import corner
import time
import pandas as pd
import multiprocessing as mp
from scipy.optimize import fsolve
np.warnings.filterwarnings('ignore')
#HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
#HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
# PART 1: Base code
#HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
#HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
# Here we load the needed data
tt=Table.Table.read('rot_curve.txt', format='ascii.tab') # Rotation curve
input_params=Table.Table.read('input_params.txt', format='ascii.tab') # Initial parameters
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
x_offset = 0.0 # It defines a radial coordinate offset as user input
r_0=1*units.kpc # units
v_0=220*units.km/units.s # units
# Real data:
r_data=tt['r']-x_offset # The txt file must contain the radial coordinate values in kpc
v_c_data=tt['vel'] # velocity in km/s
v_c_err_data = tt['e_vel'] # and velocity error in km/s
# This loop is needed since galpy fails when r=0 or very close to 0
for i in range(len(r_data)):
if r_data[i]<1e-3:
r_data[i]=1e-3
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initial parameters:
# Bulge potential
a1=input_params['a (kpc)'][0]
b1=input_params['b (kpc)'][0]
amp1=input_params['mass'][0]
# Thin disk potential
a2=input_params['a (kpc)'][1]
b2=input_params['b (kpc)'][1]
amp2=input_params['mass'][1]
# Thick disk potential
a3=input_params['a (kpc)'][2]
b3=input_params['b (kpc)'][2]
amp3=input_params['mass'][2]
# Dark Halo potential
a5=input_params['a (kpc)'][4]
amp5=input_params['mass'][4]
# Exponential disk potential
h_r=input_params['a (kpc)'][3]
amp4=input_params['mass'][3]
# Burkert potential
a6=input_params['a (kpc)'][5]
amp6=input_params['mass'][5]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Here we calculate the rotation curve for each of the potentials used
lista=np.linspace(0.001, 1.02*np.max(r_data), 10*len(r_data)) # radial coordinate for the rotation curve calculation
# Potentials definition using physical units (amplitude in Solar masses, scales in kpc and surface density in Solar masses / pc^2 )
MN_Bulge_p= MiyamotoNagaiPotential(amp=amp1*units.Msun,a=a1*units.kpc,b=b1*units.kpc,normalize=False,ro=r_0, vo=v_0)
MN_Thin_Disk_p= MiyamotoNagaiPotential(amp=amp2*units.Msun,a=a2*units.kpc,b=b2*units.kpc,normalize=False,ro=r_0, vo=v_0)
MN_Thick_Disk_p= MiyamotoNagaiPotential(amp=amp3*units.Msun,a=a3*units.kpc,b=b3*units.kpc,normalize=False,ro=r_0, vo=v_0)
EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4*(units.Msun/(units.pc**2)), hr=h_r*units.kpc, maxiter=20, tol=0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)
NFW_p = NFWPotential(amp=amp5*units.Msun, a=a5*units.kpc, normalize=False, ro=r_0, vo=v_0)
BK_p = BurkertPotential(amp=amp6*units.Msun/(units.kpc)**3, a=a6*units.kpc, normalize=False, ro=r_0, vo=v_0)
# Circular velocities in km/s
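# Note: the factor of 220 applied below is v_0 in km/s; it rescales the curves returned by
# calcRotcurve so that every velocity handled from here on is in km/s.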
MN_Bulge = calcRotcurve(MN_Bulge_p, lista, phi=None)*220
MN_Thin_Disk = calcRotcurve(MN_Thin_Disk_p, lista, phi=None)*220
MN_Thick_Disk = calcRotcurve(MN_Thick_Disk_p, lista, phi=None)*220
EX_Disk = calcRotcurve(EX_Disk_p, lista, phi=None)*220
NFW = calcRotcurve(NFW_p, lista, phi=None)*220
BK = calcRotcurve(BK_p, lista, phi=None)*220
# Circular velocity for the composition of the 6 potentials in km/s
v_circ_comp = calcRotcurve([MN_Bulge_p,MN_Thin_Disk_p,MN_Thick_Disk_p, EX_Disk_p, NFW_p, BK_p], lista, phi=None)*220
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Here we plot the different curves
fig = plt.figure(1)
ax = fig.add_axes((0.41, 0.1, 0.55, 0.85))
#ax.yaxis.set_ticks_position('both')
#ax.tick_params(axis='y', which='both', labelleft=True, labelright=True)
# Data
CV_galaxy = ax.errorbar(r_data, v_c_data, v_c_err_data, c='k', fmt='', ls='none')
CV_galaxy_dot = ax.scatter(r_data, v_c_data, c='k')
# A plot for each rotation curve with the colors indicated below
MN_b_plot, = ax.plot(lista, MN_Bulge, linestyle='--', c='gray')
MN_td_plot, = ax.plot(lista, MN_Thin_Disk, linestyle='--', c='purple')
MN_tkd_plot, = ax.plot(lista, MN_Thick_Disk, linestyle='--', c='blue')
EX_d_plot, = ax.plot(lista, EX_Disk, linestyle='--', c='cyan')
NFW_plot, = ax.plot(lista, NFW, linestyle='--', c='green')
BK_plot, = ax.plot(lista, BK, linestyle='--', c='orange')
# Composed rotation curve
v_circ_comp_plot, = ax.plot(lista, v_circ_comp, c='k')
ax.set_xlabel(r'$R(kpc)$', fontsize=20)
ax.set_ylabel(r'$v_c(km/s)$', fontsize=20)
ax.tick_params(axis='both', which='both', labelsize=15)
#HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
#HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
# PART 2: Interactive (Sliders) code
#HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
#HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
# Checkbox for selecting the potentials to compose the rotation curve
rax = plt.axes((0.07, 0.8, 0.21, 0.15))
check = CheckButtons(rax, ('MN Bulge (GRAY)', 'MN Thin Disc (PURPLE)', 'MN Thick Disc (BLUE)', 'Exp. Disc (CYAN)', 'NFW - Halo (GREEN)', 'Burkert - Halo (ORANGE)'), (True, True, True, True, True, True))
for r in check.rectangles: # Checkbox options-colors
r.set_facecolor("lavender")
r.set_edgecolor("black")
#r.set_alpha(0.2)
[ll.set_color("black") for l in check.lines for ll in l]
[ll.set_linewidth(2) for l in check.lines for ll in l]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Here we define the sliders for each potential
# Bulge - gray
MN_b_amp_ax = fig.add_axes((0.09,0.75,0.17,0.03))
MN_b_amp_s = Slider(MN_b_amp_ax, r"$M$($M_\odot$)", input_params['mass'][0]/(10**input_params['threshold_mass'][0]), input_params['mass'][0]*(10**input_params['threshold_mass'][0]), valinit=input_params['mass'][0], color='gray', valfmt='%1.3E')
MN_b_a_ax = fig.add_axes((0.09,0.72,0.17,0.03))
MN_b_a_s = Slider(MN_b_a_ax, "$a$ ($kpc$)", 0, 0.1*input_params['threshold_a'][0], valinit=input_params['a (kpc)'][0], color='gray')
MN_b_b_ax = fig.add_axes((0.09,0.69,0.17,0.03))
MN_b_b_s = Slider(MN_b_b_ax, "$b$ ($kpc$)", input_params['b (kpc)'][0]*(1-0.01*input_params['threshold_b'][0]), input_params['b (kpc)'][0]*(1+0.01*input_params['threshold_b'][0]), valinit=input_params['b (kpc)'][0], color='gray')
# Thin disk - purple
MN_td_amp_ax = fig.add_axes((0.09,0.63,0.17,0.03))
MN_td_amp_s = Slider(MN_td_amp_ax, r"$M$($M_\odot$)", input_params['mass'][1]/(10**input_params['threshold_mass'][1]), input_params['mass'][1]*(10**input_params['threshold_mass'][1]), valinit=input_params['mass'][1], color='purple', valfmt='%1.3E')
MN_td_a_ax = fig.add_axes((0.09,0.60,0.17,0.03))
MN_td_a_s = Slider(MN_td_a_ax, "$a$ ($kpc$)", input_params['a (kpc)'][1]*(1-0.01*input_params['threshold_a'][1]), input_params['a (kpc)'][1]*(1+0.01*input_params['threshold_a'][1]), valinit=input_params['a (kpc)'][1], color='purple')
MN_td_b_ax = fig.add_axes((0.09,0.57,0.17,0.03))
MN_td_b_s = Slider(MN_td_b_ax, "$b$ ($kpc$)", input_params['b (kpc)'][1]/(10**input_params['threshold_b'][1]), input_params['b (kpc)'][1]*(10**input_params['threshold_b'][1]), valinit=input_params['b (kpc)'][1], color='purple')
# Thick disk - Blue
MN_tkd_amp_ax = fig.add_axes((0.09,0.51,0.17,0.03))
MN_tkd_amp_s = Slider(MN_tkd_amp_ax, r"$M$($M_\odot$)", input_params['mass'][2]/(10**input_params['threshold_mass'][2]), input_params['mass'][2]*(10**input_params['threshold_mass'][2]), valinit=input_params['mass'][2], color='blue', valfmt='%1.3E')
MN_tkd_a_ax = fig.add_axes((0.09,0.48,0.17,0.03))
MN_tkd_a_s = Slider(MN_tkd_a_ax, "$a$ ($kpc$)", input_params['a (kpc)'][2]*(1-0.01*input_params['threshold_a'][2]), input_params['a (kpc)'][2]*(1+0.01*input_params['threshold_a'][2]), valinit=input_params['a (kpc)'][2], color='blue')
MN_tkd_b_ax = fig.add_axes((0.09,0.45,0.17,0.03))
MN_tkd_b_s = Slider(MN_tkd_b_ax, "$b$ ($kpc$)", input_params['b (kpc)'][2]/(10**input_params['threshold_b'][2]), input_params['b (kpc)'][2]*(10**input_params['threshold_b'][2]), valinit=input_params['b (kpc)'][2], color='blue')
# Exponential disk - Cyan
MN_ed_amp_ax = fig.add_axes((0.09,0.39,0.17,0.03))
MN_ed_amp_s = Slider(MN_ed_amp_ax, r"$\Sigma_0$($M_\odot/pc^2$)", input_params['mass'][3]/(10**input_params['threshold_mass'][3]), input_params['mass'][3]*(10**input_params['threshold_mass'][3]), valinit=input_params['mass'][3], color='cyan', valfmt='%1.3E')
MN_ed_a_ax = fig.add_axes((0.09,0.36,0.17,0.03))
MN_ed_a_s = Slider(MN_ed_a_ax, "$h_r$ ($kpc$)", input_params['a (kpc)'][3]*(1-0.01*input_params['threshold_a'][3]), input_params['a (kpc)'][3]*(1+0.01*input_params['threshold_a'][3]), valinit=input_params['a (kpc)'][3], color='cyan')
# NFW Halo - green
NFW_amp_ax = fig.add_axes((0.09,0.30,0.17,0.03))
NFW_amp_s = Slider(NFW_amp_ax, r"$M_0$($M_\odot$)", input_params['mass'][4]/(10*input_params['threshold_mass'][4]), input_params['mass'][4]*(10**input_params['threshold_mass'][4]), valinit=input_params['mass'][4], color='green', valfmt='%1.3E')
NFW_a_ax = fig.add_axes((0.09,0.27,0.17,0.03))
NFW_a_s = Slider(NFW_a_ax, "$a$ ($kpc$)", input_params['a (kpc)'][4]*(1-0.01*input_params['threshold_a'][4]), input_params['a (kpc)'][4]*(1+0.01*input_params['threshold_a'][4]), valinit=input_params['a (kpc)'][4], color='green')
# Burkert Halo - orange
BK_amp_ax = fig.add_axes((0.09,0.21,0.17,0.03))
BK_amp_s = Slider(BK_amp_ax, r"$\rho_0$($M_\odot/kpc^3$)", input_params['mass'][5]/(10*input_params['threshold_mass'][5]), input_params['mass'][5]*(10**input_params['threshold_mass'][5]), valinit=input_params['mass'][5], color='orange', valfmt='%1.3E')
BK_a_ax = fig.add_axes((0.09,0.18,0.17,0.03))
BK_a_s = Slider(BK_a_ax, "$a$ ($kpc$)", input_params['a (kpc)'][5]*(1-0.01*input_params['threshold_a'][5]), input_params['a (kpc)'][5]*(1+0.01*input_params['threshold_a'][5]), valinit=input_params['a (kpc)'][5], color='orange')
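# Note: the slider limits are built from the threshold_* columns of input_params.txt --
# either multiplicative windows of 10**threshold or percentage windows (threshold %) around
# the initial value, depending on the parameter.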
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Here we define the functions that set the new parameter values for each potential
# Bulge
def MN_b_amp_s_func(val):
if MN_b_plot.get_visible() == True:
global MN_Bulge_p, amp1, a1, b1
amp1=val*1
MN_Bulge_p = MiyamotoNagaiPotential(amp=val*units.Msun,a=a1*units.kpc,b=b1*units.kpc,normalize=False,ro=r_0, vo=v_0)
update_rot_curve()
def MN_b_a_s_func(val):
if MN_b_plot.get_visible() == True:
global MN_Bulge_p, amp1, a1, b1
a1=val*1
MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1*units.Msun,a=val*units.kpc,b=b1*units.kpc,normalize=False,ro=r_0, vo=v_0)
update_rot_curve()
def MN_b_b_s_func(val):
if MN_b_plot.get_visible() == True:
global MN_Bulge_p, amp1, a1, b1
b1=val*1
MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1*units.Msun,a=a1*units.kpc,b=val*units.kpc,normalize=False,ro=r_0, vo=v_0)
update_rot_curve()
# Thin disk
def MN_td_amp_s_func(val):
if MN_td_plot.get_visible() == True:
global MN_Thin_Disk_p, amp2, a2, b2
amp2=val*1
MN_Thin_Disk_p= MiyamotoNagaiPotential(amp=val*units.Msun,a=a2*units.kpc,b=b2*units.kpc,normalize=False,ro=r_0, vo=v_0)
update_rot_curve()
def MN_td_a_s_func(val):
if MN_td_plot.get_visible() == True:
global MN_Thin_Disk_p, amp2, a2, b2
a2=val*1
MN_Thin_Disk_p= MiyamotoNagaiPotential(amp=amp2*units.Msun,a=val*units.kpc,b=b2*units.kpc,normalize=False,ro=r_0, vo=v_0)
update_rot_curve()
def MN_td_b_s_func(val):
if MN_td_plot.get_visible() == True:
global MN_Thin_Disk_p, amp2, a2, b2
b2=val*1
MN_Thin_Disk_p= MiyamotoNagaiPotential(amp=amp2*units.Msun,a=a2*units.kpc,b=val*units.kpc,normalize=False,ro=r_0, vo=v_0)
update_rot_curve()
# Thick disk
def MN_tkd_amp_s_func(val):
if MN_tkd_plot.get_visible() == True:
global MN_Thick_Disk_p, amp3, a3, b3
amp3=val*1
MN_Thick_Disk_p= MiyamotoNagaiPotential(amp=val*units.Msun,a=a3*units.kpc,b=b3*units.kpc,normalize=False,ro=r_0, vo=v_0)
update_rot_curve()
def MN_tkd_a_s_func(val):
if MN_tkd_plot.get_visible() == True:
global MN_Thick_Disk_p, amp3, a3, b3
a3=val*1
MN_Thick_Disk_p= MiyamotoNagaiPotential(amp=amp3*units.Msun,a=val*units.kpc,b=b3*units.kpc,normalize=False,ro=r_0, vo=v_0)
update_rot_curve()
def MN_tkd_b_s_func(val):
if MN_tkd_plot.get_visible() == True:
global MN_Thick_Disk_p, amp3, a3, b3
b3=val*1
MN_Thick_Disk_p= MiyamotoNagaiPotential(amp=amp3*units.Msun,a=a3*units.kpc,b=val*units.kpc,normalize=False,ro=r_0, vo=v_0)
update_rot_curve()
# Exponential disk
def MN_ed_amp_s_func(val):
if EX_d_plot.get_visible() == True:
global EX_Disk_p, amp4,h_r
amp4=val*1
EX_Disk_p = RazorThinExponentialDiskPotential(amp=val*(units.Msun/(units.pc**2)), hr=h_r*units.kpc, maxiter=20, tol=0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)
update_rot_curve()
def MN_ed_a_s_func(val):
if EX_d_plot.get_visible() == True:
global EX_Disk_p, amp4,h_r
h_r=val*1
EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4*(units.Msun/(units.pc**2)), hr=val*units.kpc, maxiter=20, tol=0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)
update_rot_curve()
# NFW Halo
def NFW_amp_s_func(val):
if NFW_plot.get_visible() == True:
global NFW_p, amp5,a5
amp5=val*1
NFW_p = NFWPotential(amp=val*units.Msun, a=a5*units.kpc, normalize=False, ro=r_0, vo=v_0)
update_rot_curve()
def NFW_a_s_func(val):
if NFW_plot.get_visible() == True:
global NFW_p, amp5,a5
a5=val*1
NFW_p = NFWPotential(amp=amp5*units.Msun, a=val*units.kpc, normalize=False, ro=r_0, vo=v_0)
update_rot_curve()
# Burkert Halo
def BK_amp_s_func(val):
if BK_plot.get_visible() == True:
global BK_p, amp6,a6
amp6=val*1
BK_p = BurkertPotential(amp=val*units.Msun/(units.kpc)**3, a=a6*units.kpc, normalize=False, ro=r_0, vo=v_0)
update_rot_curve()
def BK_a_s_func(val):
if BK_plot.get_visible() == True:
global BK_p, amp6,a6
a6=val*1
BK_p = BurkertPotential(amp=amp6*units.Msun/(units.kpc)**3, a=val*units.kpc, normalize=False, ro=r_0, vo=v_0)
update_rot_curve()
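# Note: each slider callback above rebuilds only its own galpy potential with the new value and
# then redraws everything through update_rot_curve(); the get_visible() guard makes the sliders
# of a hidden component have no effect.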
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Here we define the function which updates the rotation curves of the selected potentials and of their composition
def update_rot_curve():
ax.clear()
global MN_b_plot, MN_Bulge_p, MN_Thin_Disk_p,MN_Thick_Disk_p, MN_td_plot,MN_tkd_plot, NFW_p, NFW_plot, EX_d_plot, EX_Disk_p, CV_galaxy, CV_galaxy_dot, BK_p, BK_plot
composite_pot_array=[]
ax.set_xlabel(r'$R(kpc)$', fontsize=20)
ax.set_ylabel(r'$v_c(km/s)$', fontsize=20)
ax.tick_params(axis='both', which='both', labelsize=15)
#ax.xaxis.set_major_locator(ticker.MultipleLocator(5))
ax.set_xlim([0, 1.02*r_data[-1]])
ax.set_ylim([0,np.max(v_c_data)*1.2])
if MN_b_plot.get_visible() == True:
MN_Bulge = calcRotcurve(MN_Bulge_p, lista, phi=None)*220
MN_b_plot, = ax.plot(lista, MN_Bulge, linestyle='--', c='gray')
composite_pot_array.append(MN_Bulge_p)
if MN_td_plot.get_visible() == True:
MN_Thin_Disk = calcRotcurve(MN_Thin_Disk_p, lista, phi=None)*220
MN_td_plot, = ax.plot(lista, MN_Thin_Disk, linestyle='--', c='purple')
composite_pot_array.append(MN_Thin_Disk_p)
if MN_tkd_plot.get_visible() == True:
MN_Thick_Disk = calcRotcurve(MN_Thick_Disk_p, lista, phi=None)*220
MN_tkd_plot, = ax.plot(lista, MN_Thick_Disk, linestyle='--', c='blue')
composite_pot_array.append(MN_Thick_Disk_p)
if NFW_plot.get_visible() == True:
NFW = calcRotcurve(NFW_p, lista, phi=None)*220
NFW_plot, = ax.plot(lista, NFW, linestyle='--', c='green')
composite_pot_array.append(NFW_p)
if EX_d_plot.get_visible() == True:
EX_Disk = calcRotcurve(EX_Disk_p, lista, phi=None)*220
EX_d_plot, = ax.plot(lista, EX_Disk, linestyle='--', c='cyan')
composite_pot_array.append(EX_Disk_p)
if BK_plot.get_visible() == True:
BK = calcRotcurve(BK_p, lista, phi=None)*220
BK_plot, = ax.plot(lista, BK, linestyle='--', c='orange')
composite_pot_array.append(BK_p)
CV_galaxy = ax.errorbar(r_data, v_c_data, v_c_err_data, c='k', fmt='', ls='none')
CV_galaxy_dot = ax.scatter(r_data, v_c_data, c='k')
v_circ_comp = calcRotcurve(composite_pot_array, lista, phi=None)*220
v_circ_comp_plot, = ax.plot(lista, v_circ_comp, c='k')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Here we connect each slider to its update function
MN_b_amp_s.on_changed(MN_b_amp_s_func)
MN_b_a_s.on_changed(MN_b_a_s_func)
MN_b_b_s.on_changed(MN_b_b_s_func)
MN_td_amp_s.on_changed(MN_td_amp_s_func)
MN_td_a_s.on_changed(MN_td_a_s_func)
MN_td_b_s.on_changed(MN_td_b_s_func)
MN_tkd_amp_s.on_changed(MN_tkd_amp_s_func)
MN_tkd_a_s.on_changed(MN_tkd_a_s_func)
MN_tkd_b_s.on_changed(MN_tkd_b_s_func)
NFW_amp_s.on_changed(NFW_amp_s_func)
NFW_a_s.on_changed(NFW_a_s_func)
BK_amp_s.on_changed(BK_amp_s_func)
BK_a_s.on_changed(BK_a_s_func)
MN_ed_amp_s.on_changed(MN_ed_amp_s_func)
MN_ed_a_s.on_changed(MN_ed_a_s_func)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Here we define the reset function and create the button which resets the sliders
def reset(event):
MN_b_amp_s.reset()
MN_b_a_s.reset()
MN_b_b_s.reset()
MN_td_amp_s.reset()
MN_td_a_s.reset()
MN_td_b_s.reset()
MN_tkd_amp_s.reset()
MN_tkd_a_s.reset()
MN_tkd_b_s.reset()
MN_ed_amp_s.reset()
MN_ed_a_s.reset()
NFW_amp_s.reset()
NFW_a_s.reset()
BK_amp_s.reset()
BK_a_s.reset()
axcolor="lavender"
resetax = fig.add_axes((0.07, 0.08, 0.08, 0.05))
button_reset = Button(resetax, 'Reset', color=axcolor)
button_reset.on_clicked(reset)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Enable/disable the selected potential for the composed rotation curve
def check_on_clicked(label):
if label == 'MN Bulge (GRAY)':
MN_b_plot.set_visible(not MN_b_plot.get_visible())
update_rot_curve()
elif label == 'MN Thin Disc (PURPLE)':
MN_td_plot.set_visible(not MN_td_plot.get_visible())
update_rot_curve()
elif label == 'MN Thick Disc (BLUE)':
MN_tkd_plot.set_visible(not MN_tkd_plot.get_visible())
update_rot_curve()
elif label == 'Exp. Disc (CYAN)':
EX_d_plot.set_visible(not EX_d_plot.get_visible())
update_rot_curve()
elif label == 'NFW - Halo (GREEN)':
NFW_plot.set_visible(not NFW_plot.get_visible())
update_rot_curve()
elif label == 'Burkert - Halo (ORANGE)':
BK_plot.set_visible(not BK_plot.get_visible())
update_rot_curve()
plt.draw()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Plotting all the curves
ax.set_xlabel(r'$R(kpc)$', fontsize=20)
ax.set_ylabel(r'$v_c(km/s)$', fontsize=20)
ax.tick_params(axis='both', which='both', labelsize=15)
#ax.xaxis.set_major_locator(ticker.MultipleLocator(5))
ax.set_xlim([0, np.max(lista)])
ax.set_ylim([0,np.max(v_c_data)*1.2])
check.on_clicked(check_on_clicked)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
Once you click on this button the principal window will close, so you can now enter the number of walkers
and the number of steps you want to use. Take into account that the number of walkers have to be even and at least
twice the dimension of the system (number of parameters to evaluate)
"""
axcolor="lavender"
resetax = fig.add_axes((0.20, 0.08, 0.08, 0.05))
button_start = Button(resetax, 'Start', color=axcolor)
def start(event):
plt.close(1)
button_start.on_clicked(start)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
plt.show()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Here we find the parameters that will be used as initial guess
chk=[]
chk.append(MN_b_plot.get_visible())
chk.append(MN_td_plot.get_visible())
chk.append(MN_tkd_plot.get_visible())
chk.append(EX_d_plot.get_visible())
chk.append(NFW_plot.get_visible())
chk.append(BK_plot.get_visible())
compnts = ['BULGE','THIN DISC','THICK DISC','EXP. DISC', 'DARK HALO', 'BURKERT HALO']
masses = [amp1, amp2, amp3, amp4, amp5, amp6]
aa = [a1, a2, a3, h_r, a5, a6]
bb = [b1, b2, b3, "None", "None", "None"]
init_parameters = Table.Table([compnts,masses, aa,bb, chk], names=('component', 'mass', 'a (kpc)', 'b (kpc)', 'checked'))
init_parameters.write('init_guess_params.txt', format='ascii.tab', overwrite=True)
#HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
#HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
# PART 3: MCMC (parameter determination) code
#HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
#HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
print ("\n#####################################################################")
print ("###################### GalRotpy ######################")
print ("#####################################################################\n\n")
def model(parameters, R):
global chk, para_labels, aa
para = {}
for i in range(len(para_labels)):
para[para_labels[i]] = parameters[i]
r_0=1*units.kpc
v_0=220*units.km/units.s
check_pot = []
if chk[0]==True:
if aa[0]==0.:
a1=0.
amp1=para["amp1"]; b1=para["b1"]
else:
amp1=para["amp1"]; a1=para["a1"]; b1=para["b1"]
MN_Bulge_p= MiyamotoNagaiPotential(amp=amp1*units.Msun,a=a1*units.kpc,b=b1*units.kpc,normalize=False,ro=r_0, vo=v_0)
check_pot.append(MN_Bulge_p)
if chk[1]==True:
amp2=para["amp2"]; a2=para["a2"]; b2=para["b2"]
MN_Thin_Disk_p= MiyamotoNagaiPotential(amp=amp2*units.Msun,a=a2*units.kpc,b=b2*units.kpc,normalize=False,ro=r_0, vo=v_0)
check_pot.append(MN_Thin_Disk_p)
if chk[2]==True:
amp3=para["amp3"]; a3=para["a3"]; b3=para["b3"]
MN_Thick_Disk_p= MiyamotoNagaiPotential(amp=amp3*units.Msun,a=a3*units.kpc,b=b3*units.kpc,normalize=False,ro=r_0, vo=v_0)
check_pot.append(MN_Thick_Disk_p)
if chk[3]==True:
amp4=para["amp4"]; h_r=para["h_r"]
EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4*(units.Msun/(units.pc**2)), hr=h_r*units.kpc, maxiter=20, tol=0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)
check_pot.append(EX_Disk_p)
if chk[4]==True:
amp5=para["amp5"]; a5=para["a5"]
NFW_p = NFWPotential(amp=amp5*units.Msun, a=a5*units.kpc, normalize=False, ro=r_0, vo=v_0)
check_pot.append(NFW_p)
if chk[5]==True:
amp6=para["amp6"]; a6=para["a6"]
BK_p = BurkertPotential(amp=amp6*units.Msun/(units.kpc)**3, a=a6*units.kpc, normalize=False, ro=r_0, vo=v_0)
check_pot.append(BK_p)
vc_total=calcRotcurve(check_pot, R, phi=None)*220
return vc_total
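# Note: model() is the forward model used by the MCMC -- it rebuilds the active potentials from the
# parameter vector (ordered as in para_labels, filled in below) and returns the composed circular
# velocity, in km/s, at the radii R.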
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Probability distributions
#ln Prior
def lnprior(parameters):
booL = []
for i in parameters:
if i>0.:
booL.append(True)
else:
booL.append(False)
if False in booL:
return -np.inf
else:
return 0.0
#ln Likelihood
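# ln L = -0.5 * sum_i ((v_i - v_model(r_i)) / sigma_i)^2  (Gaussian errors; the constant
# normalisation term is dropped since it does not affect the sampling)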
def lnlike(parameters, x, y, yerr):
Model = model(parameters, x)
return -0.5*(np.sum( ((y-Model)/yerr)**2))
#ln Posterior
def lnprob(parameters, x, y, yerr):
lp = lnprior(parameters)
Model = model(parameters, x)
if not np.isfinite(lp) or (True in np.isnan(Model)):
return -np.inf
else:
return lp + lnlike(parameters, x, y, yerr)
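# Note: the log-posterior is ln(prior) + ln(likelihood); parameter vectors with any non-positive
# entry, or for which the model evaluates to NaN, get -inf (zero posterior probability) and are
# therefore rejected by the sampler.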
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
Here the parameters associated to the selected models are defined, and also
the initial guesses are given.
"""
para_labels = []
labels = []
labels_log = []
para_in = []
if chk[0]==True:
if aa[0]==0.:
para_labels.append("b1"); para_in.append(bb[0]); labels.append(r"$b_B$"); labels_log.append(r"$\log(b_B)$")
para_labels.append("amp1"); para_in.append(masses[0]); labels.append(r"$M_B$"); labels_log.append(r"$\log(M_B)$")
else:
para_labels.append("a1"); para_in.append(aa[0]); labels.append(r"$a_B$"); labels_log.append(r"$\log(a_b)$")
para_labels.append("b1"); para_in.append(bb[0]); labels.append(r"$b_B$"); labels_log.append(r"$\log(b_b)$")
para_labels.append("amp1"); para_in.append(masses[0]); labels.append(r"$M_B$"); labels_log.append(r"$\log(M_b)$")
if chk[1]==True:
para_labels.append("a2"); para_in.append(aa[1]); labels.append(r"$a_{TD}$"); labels_log.append(r"$\log(a_{TD})$")
para_labels.append("b2"); para_in.append(bb[1]); labels.append(r"$b_{TD}$"); labels_log.append(r"$\log(b_{TD})$")
para_labels.append("amp2"); para_in.append(masses[1]); labels.append(r"$M_{TD}$"); labels_log.append(r"$\log(M_{TD})$")
if chk[2]==True:
para_labels.append("a3"); para_in.append(aa[2]); labels.append(r"$a_{TkD}$"); labels_log.append(r"$\log(a_{TkD})$")
para_labels.append("b3"); para_in.append(bb[2]); labels.append(r"$b_{TkD}$"); labels_log.append(r"$\log(b_{TkD})$")
para_labels.append("amp3"); para_in.append(masses[2]); labels.append(r"$M_{TkD}$"); labels_log.append(r"$\log(M_{TkD})$")
if chk[3]==True:
para_labels.append("h_r"); para_in.append(aa[3]); labels.append(r"$h_{r}$"); labels_log.append(r"$\log(h_{r})$")
para_labels.append("amp4"); para_in.append(masses[3]); labels.append(r"$\Sigma_{0}$"); labels_log.append(r"$\log(\Sigma_{0})$")
if chk[4]==True:
para_labels.append("a5"); para_in.append(aa[4]); labels.append(r"$a_{NFW}$"); labels_log.append(r"$\log(a_{NFW})$")
para_labels.append("amp5"); para_in.append(masses[4]); labels.append(r"$M_{0}$"); labels_log.append(r"$\log(M_{0})$")
if chk[5]==True:
para_labels.append("a6"); para_in.append(aa[5]); labels.append(r"$a_{Bk}$"); labels_log.append(r"$\log(a_{Bk})$")
para_labels.append("amp6"); para_in.append(masses[5]); labels.append(r"$\rho_{0}$"); labels_log.append(r"$\log(\rho_{0})$")
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Dimension
start = np.array(para_in)
ndim = len(start)
print ("Dimension: ", ndim, "\n")
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Cosmological overdensity
if chk[4]==True or chk[5]==True:
Delta_c = float(input("Enter the cosmological overdensity you want to use:\n"))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Nwalkers and Steps
nwalkers = int(input("\nEnter the number of walkers you want to use:\n"))
steps = int(input("\nEnter the number of steps you want the walkers to take:\n"))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Rotational Curve Model
pos_step = 1e-8
pos_in = [abs(start + pos_step*start*np.random.randn(ndim)+1e-9*np.random.randn(ndim)) for i in range(nwalkers)]
#pos_in = [abs(2.*start*np.random.rand(ndim)+0.01*np.random.rand(ndim)) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(r_data, v_c_data, v_c_err_data), threads=ndim*mp.cpu_count())
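# Note: the walkers start in a tight ball around the initial guess (relative scatter ~1e-8 plus a
# tiny absolute jitter); abs() keeps every starting point inside the positive-parameter prior, and
# args forwards the observed rotation curve to lnprob at each likelihood evaluation.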
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# perform MCMC
print ("\n#####################################################################\n")
Round = int(input("Enter the number of times you want GalRotpy to run:\n"))
if Round <=0:
print ("\nStart over...")
exit()
print ("\nRunning...\n")
time0 = time.time()
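# Note on the round scheme: when Round > 1, each intermediate run of `steps` steps only re-centres
# the walkers -- the medians of the second half of that chain seed the next run -- and the final run
# is 3x longer; only this last chain is used in the analysis below.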
if Round == 1:
p0, lp, _ = sampler.run_mcmc(pos_in, steps)
print ("It took ", (time.time()-time0)/60, "minutes\n")
if Round >1:
for j in range(Round-1):
ti=time.time()
PARA=[]
p0, lp, _ = sampler.run_mcmc(pos_in, steps)
SAMPLES = sampler.chain[:, int(0.5*steps):, :].reshape((-1, ndim))
for i in range(ndim):
mcmc = np.percentile(SAMPLES[:, i], [50.-0.5*68, 50., 50.+0.5*68])
PARA.append(mcmc[1])
p=np.array(PARA)
pos_in = [abs(p + pos_step*p*np.random.randn(ndim)+1e-8*np.random.randn(ndim)) for i in range(nwalkers)]
sampler.reset()
print("Run " + str(j+1) + " done")
print ("Time: ", (time.time()-ti)/60, "minutes\n")
ti=time.time()
if Round > 1:
steps=3*steps
p0, lp, _ = sampler.run_mcmc(pos_in, steps)
print("Run " + str(Round) + " done")
print ("Time: ", (time.time()-ti)/60, "minutes\n")
print ("It took ", (time.time()-time0)/60, "minutes\n")
print ("#####################################################################\n")
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Here we plot the chains for each parameter
fig = plt.figure(2)
ax = fig.add_axes((0.15, 0.3, 0.75, 0.6))
chain_steps = [i for i in range(len(sampler.chain[:,:,0].T))]
chain_W = []
for i in range(nwalkers):
chain_value = sampler.chain[:,:,0].T[:][:,i]
ax.plot(chain_steps, chain_value, '-', color='k', alpha=0.3)
ax.plot(chain_steps, len(chain_steps)*[start[0]], '-', color='r', lw=1)
ax.set_xlim(0, len(chain_steps)-1)
ax.set_xlabel(r"$Steps$", fontsize = 10)
ax.set_ylabel(labels[0], fontsize = 15)
class Index(object):
ind = 0
def next(self, event):
global ndim, start, chain_W, nwalkers, chain_steps
self.ind += 1
if self.ind >= ndim:
self.ind = 0
ax.clear()
#plt.subplots_adjust(bottom=0.2)
for i in range(nwalkers):
data_a = np.array(sampler.chain[:,:,self.ind].T)[:,i]
ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)
ax.plot(chain_steps, len(chain_steps)*[start[self.ind]], '-', color='r', lw=1)
ax.set_xlim(0, len(chain_steps)-1)
ax.set_xlabel(r"$Steps$", fontsize = 10)
ax.set_ylabel(labels[self.ind], fontsize = 15)
plt.tight_layout()
plt.draw()
def prev(self, event):
global ndim, start, chain_W, nwalkers, chain_steps
self.ind -= 1
if self.ind == -1:
self.ind = ndim-1
ax.clear()
#plt.subplots_adjust(bottom=0.2)
for i in range(nwalkers):
data_a = np.array(sampler.chain[:,:,self.ind].T)[:,i]
ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)
ax.plot(chain_steps, len(chain_steps)*[start[self.ind]], '-', color='r', lw=1)
ax.set_xlim(0, len(chain_steps)-1)
ax.set_xlabel(r"$Steps$", fontsize = 10)
ax.set_ylabel(labels[self.ind], fontsize = 15)
plt.tight_layout()
plt.draw()
axcolor="lavender"
callback = Index()
axprev = plt.axes([0.3, 0.05, 0.1, 0.075])
axnext = plt.axes([0.6, 0.05, 0.1, 0.075])
bnext = Button(axnext, 'Next', color=axcolor)
bnext.on_clicked(callback.next)
bprev = Button(axprev, 'Previous', color=axcolor)
bprev.on_clicked(callback.prev)
def burn(event):
plt.close()
resetax = fig.add_axes((0.45, 0.05, 0.1, 0.075))
button_reset = Button(resetax, 'Burn-in', color=axcolor)
button_reset.on_clicked(burn)
plt.show()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Nwalkers and Steps
burn_in = int(input("Enter the number of steps you want to burn-in:\n"))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Here we plot the confidence regions (corner plot)
print ("\n#####################################################################\n")
print ("Plotting...")
if burn_in == 0.:
samples = sampler.chain[:, :, :].reshape((-1, ndim))
else:
samples = sampler.chain[:, burn_in:, :].reshape((-1, ndim))
samples.shape
percentage = 0.68
fig = corner.corner(np.log10(samples), labels=labels_log, label_kwargs = {"fontsize": 21.5},
bins=50, use_math_text =True, color = "gray", max_n_ticks=3,#truth_color = "red", truths= np.log10(start),
smooth=1., levels=[1-np.exp(-0.5), 1-np.exp(-2.) ], quantiles = [0.5-0.5*percentage, 0.5, 0.5+0.5*percentage],
fill_contours=True, plot_datapoints=True)
axes = np.array(fig.axes).reshape((ndim, ndim))
for yi in range(ndim):
for xi in range(yi+1):
ax = axes[yi, xi]
ax.tick_params(axis='both', which='major', labelsize=14.5, pad=3, direction = "in")
fig.savefig("Conf_Regions.pdf",bbox_inches='tight',pad_inches=0.15)
#Here we obtain the quantities of interest, which will be included in a table as output
"""
aux1 = (1.*units.m).to(units.kpc)
aux2 = (1.*units.km).to(units.kpc)
aux3 = (1.*units.kg).to(units.Msun)
H_0 = (67.8/1000)*(aux2/(units.s*units.kpc)) # Planck 2016
G = 6.67408e-11*(aux1**3/(units.s**2*aux3))
rho_c = 3.*H_0**2/(8.*np.pi*G)
"""
H_0 = 2.1972483582604943e-18 #1 / s
G = 4.517103050001136e-39 #kpc^3 / (s^2 solMass)
rho_c = 127.5791469578729 #solMass / kpc^3
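# Note: these hard-coded values are the commented-out astropy computation above evaluated once:
# H_0 = 67.8 km/s/Mpc expressed in 1/s, G in kpc^3/(s^2 M_Sun), and the critical density
# rho_c = 3*H_0^2/(8*pi*G) ~ 127.6 M_Sun/kpc^3.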
# NFW
def eq_nfw(x, rho_0, rho_c):
global Delta_c
return (np.log(1+x)-(x/(1+x))-((Delta_c*rho_c)/(3.*rho_0))*x**3)
def mass_nfw(x, rho_0, a):
return (4.*np.pi*rho_0*a**3*(np.log(1+x)-(x/(1+x))))
# Burkert
def eq_b(x, rho_0, rho_c):
global Delta_c
return (2.*np.log(1+x)+np.log(1+x**2)-2.*np.arctan(x)-(4.*Delta_c*rho_c/(3.*rho_0))*x**3)
def mass_b(x, rho_0, a):
return (np.pi*rho_0*a**3*(2.*np.log(1+x)+np.log(1+x**2)-2.*np.arctan(x)))
# For both halo distributions we have x=r/a
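# Note: eq_nfw / eq_b define the concentration X = r_Delta/a implicitly by requiring that the mean
# density enclosed within r_Delta = X*a equals Delta_c*rho_c, i.e. M(X*a) = (4/3)*pi*Delta_c*rho_c*(X*a)^3,
# with M(r) = 4*pi*rho_0*a^3*[ln(1+x) - x/(1+x)] (NFW) and M(r) = pi*rho_0*a^3*[2*ln(1+x) + ln(1+x^2) - 2*arctan(x)] (Burkert).
# fsolve finds X below, and mass_nfw / mass_b then give the halo mass M_h = M(X*a).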
table_data = []
index = []
para = []
parap68=[]; paran68=[]
parap95=[]; paran95=[]
table_para = []
table_units = []
final_para_labels = []
fit_para = []
for i in range(ndim):
mcmc = np.percentile(samples[:, i], [50.-0.5*95, 50.-0.5*68, 50., 50.+0.5*68, 50.+0.5*95])
para.append(mcmc[2])
fit_para.append(mcmc[2])
parap68.append(mcmc[3]-mcmc[2])
paran68.append(mcmc[2]-mcmc[1])
parap95.append(mcmc[4]-mcmc[2])
paran95.append(mcmc[2]-mcmc[0])
final_para_labels.append(para_labels[i])
#Exponential Disc
if para_labels[i]=="h_r":
ed_h_r = np.array(samples[:, i])
if para_labels[i]=="amp4":
ed_sigma0 = np.array(samples[:, i])
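        # Total mass of the razor-thin exponential disc: M_star = 2*pi*Sigma_0*h_r^2, with h_r
        # converted from kpc to pc (factor 1000) to match Sigma_0 given in M_Sun/pc^2.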
M_disc = 2.*np.pi*ed_sigma0*(1000*ed_h_r)**2
mcmc = np.percentile(M_disc, [50.-0.5*95, 50.-0.5*68, 50., 50.+0.5*68, 50.+0.5*95])
para.append(mcmc[2])
parap68.append(mcmc[3]-mcmc[2])
paran68.append(mcmc[2]-mcmc[1])
parap95.append(mcmc[4]-mcmc[2])
paran95.append(mcmc[2]-mcmc[0])
final_para_labels.append("M_star")
#NFW
if para_labels[i]=="a5":
nfw_a = np.array(samples[:, i])
if para_labels[i]=="amp5":
nfw_M0 = np.array(samples[:, i])
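        # The NFW amplitude sampled here plays the role of M_0 = 4*pi*rho_0*a^3 (see the M_0 slider
        # label), so the characteristic density is recovered as rho_0 = M_0/(4*pi*a^3).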
rho_0 = nfw_M0/(4.*np.pi*nfw_a**3)
mcmc = np.percentile(rho_0, [50.-0.5*95, 50.-0.5*68, 50., 50.+0.5*68, 50.+0.5*95])
para.append(mcmc[2])
parap68.append(mcmc[3]-mcmc[2])
paran68.append(mcmc[2]-mcmc[1])
parap95.append(mcmc[4]-mcmc[2])
paran95.append(mcmc[2]-mcmc[0])
final_para_labels.append("rho_0_NFW")
# Concentration parameter (X)
X_nfw = []
for density in rho_0:
X_nfw.append(fsolve(eq_nfw, 100000., args=(density, rho_c))[0])
mcmc = np.percentile(np.array(X_nfw), [50.-0.5*95, 50.-0.5*68, 50., 50.+0.5*68, 50.+0.5*95])
para.append(mcmc[2])
parap68.append(mcmc[3]-mcmc[2])
paran68.append(mcmc[2]-mcmc[1])
parap95.append(mcmc[4]-mcmc[2])
paran95.append(mcmc[2]-mcmc[0])
final_para_labels.append("X_NFW")
# Halo Mass (M_h)
M_h_nfw = mass_nfw(np.array(X_nfw), rho_0, nfw_a)
mcmc = np.percentile(M_h_nfw, [50.-0.5*95, 50.-0.5*68, 50., 50.+0.5*68, 50.+0.5*95])
para.append(mcmc[2])
parap68.append(mcmc[3]-mcmc[2])
paran68.append(mcmc[2]-mcmc[1])
parap95.append(mcmc[4]-mcmc[2])
paran95.append(mcmc[2]-mcmc[0])
final_para_labels.append("M_h_NFW")
#Burkert
if para_labels[i]=="a6":
b_a = np.array(samples[:, i])
if para_labels[i]=="amp6":
# Concentration parameter (X)
X_b = []
for density in samples[:, i]: # Here samples[:, i] corresponds to rho_0
X_b.append(fsolve(eq_b, 100000., args=(density, rho_c))[0])
mcmc = np.percentile(np.array(X_b), [50.-0.5*95, 50.-0.5*68, 50., 50.+0.5*68, 50.+0.5*95])
para.append(mcmc[2])
parap68.append(mcmc[3]-mcmc[2])
paran68.append(mcmc[2]-mcmc[1])
parap95.append(mcmc[4]-mcmc[2])
paran95.append(mcmc[2]-mcmc[0])
final_para_labels.append("X_Bk")
# Halo Mass (M_h)
M_h_b= mass_b(np.array(X_b), samples[:, i], b_a)
mcmc = np.percentile(M_h_b, [50.-0.5*95, 50.-0.5*68, 50., 50.+0.5*68, 50.+0.5*95])
para.append(mcmc[2])
parap68.append(mcmc[3]-mcmc[2])
paran68.append(mcmc[2]-mcmc[1])
parap95.append(mcmc[4]-mcmc[2])
paran95.append(mcmc[2]-mcmc[0])
final_para_labels.append("M_h_Bk")
r=np.linspace(0.001, 1.02*np.amax(r_data),10000)
curva = model(fit_para, r)
Y_guess=model(start, r)
import warnings
warnings.filterwarnings('ignore')  # stdlib warnings module; np.warnings was removed in NumPy >= 1.24
plt.figure(figsize=(6, 6))
best_para = {}
for i in range(len(final_para_labels)):
best_para[final_para_labels[i]] = para[i]
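# Rebuild each enabled potential from the median ("best fit") parameters and plot its
# individual contribution with the same colours used in the interactive window.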
if chk[0]==True:
if aa[0]==0.:
a1=0.
amp1=best_para["amp1"]; b1=best_para["b1"]
else:
amp1=best_para["amp1"]; a1=best_para["a1"]; b1=best_para["b1"]
MN_Bulge_p= MiyamotoNagaiPotential(amp=amp1*units.Msun,a=a1*units.kpc,b=b1*units.kpc,normalize=False,ro=r_0, vo=v_0)
vc_b=calcRotcurve(MN_Bulge_p, r, phi=None)*220
plt.plot(r, vc_b, "--", color = "gray", label = r"Bulge")
if chk[1]==True:
amp2=best_para["amp2"]; a2=best_para["a2"]; b2=best_para["b2"]
MN_Thin_Disk_p= MiyamotoNagaiPotential(amp=amp2*units.Msun,a=a2*units.kpc,b=b2*units.kpc,normalize=False,ro=r_0, vo=v_0)
vc_td=calcRotcurve(MN_Thin_Disk_p, r, phi=None)*220
plt.plot(r, vc_td, "--", color = "purple", label = r"Thin Disk")
if chk[2]==True:
amp3=best_para["amp3"]; a3=best_para["a3"]; b3=best_para["b3"]
MN_Thick_Disk_p= MiyamotoNagaiPotential(amp=amp3*units.Msun,a=a3*units.kpc,b=b3*units.kpc,normalize=False,ro=r_0, vo=v_0)
vc_tkd=calcRotcurve(MN_Thick_Disk_p, r, phi=None)*220
plt.plot(r, vc_tkd, "--", color = "blue", label = r"Thick Disk")
if chk[3]==True:
amp4=best_para["amp4"]; h_r=best_para["h_r"]
EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4*(units.Msun/(units.pc**2)), hr=h_r*units.kpc, maxiter=20, tol=0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)
vc_exp=calcRotcurve(EX_Disk_p, r, phi=None)*220
plt.plot(r, vc_exp, "--", color = "cyan", label = r"Exp. Disk")
if chk[4]==True:
amp5=best_para["amp5"]; a5=best_para["a5"]
NFW_p = NFWPotential(amp=amp5*units.Msun, a=a5*units.kpc, normalize=False, ro=r_0, vo=v_0)
vc_nfw=calcRotcurve(NFW_p, r, phi=None)*220
plt.plot(r, vc_nfw, "--", color = "green", label = r"NFW - Halo")
if chk[5]==True:
amp6=best_para["amp6"]; a6=best_para["a6"]
BK_p = BurkertPotential(amp=amp6*units.Msun/(units.kpc)**3, a=a6*units.kpc, normalize=False, ro=r_0, vo=v_0)
vc_bk=calcRotcurve(BK_p, r, phi=None)*220
plt.plot(r, vc_bk, "--", color = "orange", label = r"Burkert - Halo")
#plt.plot(r, Y_guess, "-", color='blue', lw=1.5, label=r"Initial Guess")
plt.errorbar(r_data, v_c_data, yerr=v_c_err_data, fmt='ko', ecolor='black', ms=4, label = None)
plt.plot(r, curva, "-", color='red', lw=1.5, label=r"Best Fit")
plt.xlabel(r"$R(kpc)$",fontsize=20)
plt.ylabel(r"$V_c(km/s)$",fontsize=20)
plt.tick_params(axis='both', which='major', labelsize=15)
plt.xlim(0., 1.02*np.amax(r_data))
plt.ylim(0., 1.1*np.amax(v_c_data))
plt.tight_layout()
plt.legend(loc="lower right",fontsize=15)
plt.savefig("GalRotpy_fit.pdf")
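# At this point the script has produced Conf_Regions.pdf (corner plot) and
# GalRotpy_fit.pdf (best-fit rotation curve); final_params.txt is written below.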
print ("\n#####################################################################\n")
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Here we construct the table with the final results
if chk[0]==True:
if aa[0]==0.:
index.append(r"BULGE"); index.append(r"---")
table_para.append(r"b"); table_units.append(r"kpc")
table_para.append(r"M"); table_units.append(r"M_Sun")
else:
index.append(r"BULGE"); index.append(r"---"); index.append(r"---")
table_para.append(r"a"); table_units.append(r"kpc")
table_para.append(r"b"); table_units.append(r"kpc")
table_para.append(r"M"); table_units.append(r"M_Sun")
if chk[1]==True:
index.append(r"THIN DISK"); index.append(r"---"); index.append(r"---")
table_para.append(r"a"); table_units.append(r"kpc")
table_para.append(r"b"); table_units.append(r"kpc")
table_para.append(r"M"); table_units.append(r"M_Sun")
if chk[2]==True:
index.append(r"THICK DISK"); index.append(r"---"); index.append(r"---")
table_para.append(r"a"); table_units.append(r"kpc")
table_para.append(r"b"); table_units.append(r"kpc")
table_para.append(r"M"); table_units.append(r"M_Sun")
if chk[3]==True:
index.append(r"EXPONENTIAL DISK"); index.append(r"---"); index.append(r"---")
table_para.append(r"h_r"); table_units.append(r"kpc")
table_para.append(r"Sigma_0"); table_units.append(r"M_Sun/pc^2")
table_para.append(r"M"); table_units.append(r"M_Sun")
if chk[4]==True:
index.append(r"NFW HALO"); index.append(r"---"); index.append(r"---"); index.append(r"---"); index.append(r"---")
table_para.append(r"a"); table_units.append(r"kpc")
table_para.append(r"M_0"); table_units.append(r"M_Sun")
table_para.append(r"rho_0"); table_units.append(r"M_Sun/kpc^3")
table_para.append(r"X"); table_units.append(r"---")
table_para.append(r"M_h"); table_units.append(r"M_Sun")
if chk[5]==True:
index.append(r"BURKERT HALO"); index.append(r"---"); index.append(r"---"); index.append(r"---")
table_para.append(r"a"); table_units.append(r"kpc")
table_para.append(r"rho_0"); table_units.append(r"M_Sun/kpc^3")
table_para.append(r"X"); table_units.append(r"---")
table_para.append(r"M_h"); table_units.append(r"M_Sun")
for i in range(len(para)):
table_data.append([table_para[i], table_units[i], paran95[i], paran68[i], para[i], parap68[i], parap95[i]])
column_name = [r"PARAMETER", r"UNITS", r"95%(-)", r"68%(-)", r"FIT", r"68%(+)", r"95%(+)"]
table_p = pd.DataFrame(table_data, index=index, columns=column_name)
table_p.to_csv("final_params.txt", sep='\t', encoding='utf-8')
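# The table can be read back later with pandas, e.g. (sketch):
#   results = pd.read_csv("final_params.txt", sep='\t', index_col=0)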
print (table_p)
print ("\n#####################################################################")
print ("\nDone")
print ("\n#####################################################################\n")
|
[
"# -*- coding: utf-8 -*-\n\"\"\"\n GalRotpy.py - a Python-based tool for parametrizing galaxy potential by rotation curve\n\n Copyright (c) 2016 Andr\\'es Granados\n All rights reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining\n a copy of this software and associated documentation files (the\n \"Software\"), to deal in the Software without restriction, including\n without limitation the rights to use, copy, modify, merge, publish,\n distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so, subject to\n the following conditions:\n\n The above copyright notice and this permission notice shall be\n included in all copies or substantial portions of the Software.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. \n IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY \n CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, \n TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE \n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n Created on Fri Jan 6 07:00:00 MST 2017\n\"\"\"\n\nfrom matplotlib.widgets import Slider, Button, RadioButtons, CheckButtons, TextBox # Matplotlib widgets\nimport matplotlib.pylab as plt # Plotting interface\nimport numpy as np # Array managing\nfrom galpy.potential import MiyamotoNagaiPotential, NFWPotential, RazorThinExponentialDiskPotential, BurkertPotential # GALPY potentials\nfrom galpy.potential import calcRotcurve # composed rotation curve calculation for plotting\nfrom astropy import units # Physical/real units data managing\nfrom astropy import table as Table # For fast and easy reading / writing with tables using numpy library\nimport emcee\nimport corner\nimport time\nimport pandas as pd\nimport multiprocessing as mp\nfrom scipy.optimize import fsolve\n\nnp.warnings.filterwarnings('ignore')\n\n#HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH\n#HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH\n\n# PART 1: Base code\n\n#HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH\n#HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH\n\n# Here we load the needed data\n\ntt=Table.Table.read('rot_curve.txt', format='ascii.tab') # Rotation curve\n\ninput_params=Table.Table.read('input_params.txt', format='ascii.tab') # Initial parameters\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nx_offset = 0.0 # It defines a radial coordinate offset as user input\nr_0=1*units.kpc # units \nv_0=220*units.km/units.s # units \n\n# Real data:\nr_data=tt['r']-x_offset # The txt file must contain the radial coordinate values in kpc\nv_c_data=tt['vel'] # velocity in km/s\nv_c_err_data = tt['e_vel'] # and velocity error in km/s\n\n# This loop is needed since galpy fails when r=0 or very close to 0\nfor i in range(len(r_data)):\n if r_data[i]<1e-3:\n r_data[i]=1e-3\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Initial parameters:\n\n# Bulge potential \na1=input_params['a (kpc)'][0]\nb1=input_params['b (kpc)'][0]\namp1=input_params['mass'][0]\n\n# Thin disk potential\na2=input_params['a (kpc)'][1]\nb2=input_params['b (kpc)'][1]\namp2=input_params['mass'][1]\n\n# Thick disk potential\na3=input_params['a (kpc)'][2]\nb3=input_params['b (kpc)'][2]\namp3=input_params['mass'][2]\n\n# Dark Halo potential\na5=input_params['a (kpc)'][4]\namp5=input_params['mass'][4]\n\n# 
Eential disk potential\nh_r=input_params['a (kpc)'][3]\namp4=input_params['mass'][3]\n\n# Burkert potential\na6=input_params['a (kpc)'][5]\namp6=input_params['mass'][5]\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Here we calculate de rotation curve for each of the potentials used\n\nlista=np.linspace(0.001, 1.02*np.max(r_data), 10*len(r_data)) # radial coordinate for the rotation curve calculation\n\n# Potentials definition using physical units (amplitude in Solar masses, scales in kpc and surface density in Solar masses / pc^2 )\nMN_Bulge_p= MiyamotoNagaiPotential(amp=amp1*units.Msun,a=a1*units.kpc,b=b1*units.kpc,normalize=False,ro=r_0, vo=v_0)\nMN_Thin_Disk_p= MiyamotoNagaiPotential(amp=amp2*units.Msun,a=a2*units.kpc,b=b2*units.kpc,normalize=False,ro=r_0, vo=v_0)\nMN_Thick_Disk_p= MiyamotoNagaiPotential(amp=amp3*units.Msun,a=a3*units.kpc,b=b3*units.kpc,normalize=False,ro=r_0, vo=v_0)\nEX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4*(units.Msun/(units.pc**2)), hr=h_r*units.kpc, maxiter=20, tol=0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\nNFW_p = NFWPotential(amp=amp5*units.Msun, a=a5*units.kpc, normalize=False, ro=r_0, vo=v_0)\nBK_p = BurkertPotential(amp=amp6*units.Msun/(units.kpc)**3, a=a6*units.kpc, normalize=False, ro=r_0, vo=v_0)\n\n# Circular velocities in km/s\nMN_Bulge = calcRotcurve(MN_Bulge_p, lista, phi=None)*220\nMN_Thin_Disk = calcRotcurve(MN_Thin_Disk_p, lista, phi=None)*220\nMN_Thick_Disk = calcRotcurve(MN_Thick_Disk_p, lista, phi=None)*220\nEX_Disk = calcRotcurve(EX_Disk_p, lista, phi=None)*220\nNFW = calcRotcurve(NFW_p, lista, phi=None)*220\nBK = calcRotcurve(BK_p, lista, phi=None)*220\n\n# Circular velocity for the composition of 5 potentials in km/s\nv_circ_comp = calcRotcurve([MN_Bulge_p,MN_Thin_Disk_p,MN_Thick_Disk_p, EX_Disk_p, NFW_p, BK_p], lista, phi=None)*220\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Here we plot the different curves\n\nfig = plt.figure(1)\nax = fig.add_axes((0.41, 0.1, 0.55, 0.85))\n\n#ax.yaxis.set_ticks_position('both')\n#ax.tick_params(axis='y', which='both', labelleft=True, labelright=True)\n\n# Data\nCV_galaxy = ax.errorbar(r_data, v_c_data, v_c_err_data, c='k', fmt='', ls='none')\nCV_galaxy_dot = ax.scatter(r_data, v_c_data, c='k')\n\n# A plot for each rotation curve with the colors indicated below\nMN_b_plot, = ax.plot(lista, MN_Bulge, linestyle='--', c='gray')\nMN_td_plot, = ax.plot(lista, MN_Thin_Disk, linestyle='--', c='purple')\nMN_tkd_plot, = ax.plot(lista, MN_Thick_Disk, linestyle='--', c='blue')\nEX_d_plot, = ax.plot(lista, EX_Disk, linestyle='--', c='cyan')\nNFW_plot, = ax.plot(lista, NFW, linestyle='--', c='green')\nBK_plot, = ax.plot(lista, BK, linestyle='--', c='orange')\n\n# Composed rotation curve\nv_circ_comp_plot, = ax.plot(lista, v_circ_comp, c='k')\n\n\nax.set_xlabel(r'$R(kpc)$', fontsize=20)\nax.set_ylabel(r'$v_c(km/s)$', fontsize=20)\nax.tick_params(axis='both', which='both', labelsize=15)\n\n#HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH\n#HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH\n\n# PART 2: Interactive(Slides) code\n\n#HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH\n#HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH\n\n# Checkbox for selecting the potentials to compose the rotation\nrax = plt.axes((0.07, 0.8, 0.21, 0.15))\ncheck = CheckButtons(rax, ('MN Bulge (GRAY)', 'MN Thin Disc (PURPLE)', 'MN Thick Disc (BLUE)', 'Exp. 
Disc (CYAN)', 'NFW - Halo (GREEN)', 'Burkert - Halo (ORANGE)'), (True, True, True, True, True, True))\n\nfor r in check.rectangles: # Checkbox options-colors\n r.set_facecolor(\"lavender\") \n r.set_edgecolor(\"black\")\n #r.set_alpha(0.2) \n\n[ll.set_color(\"black\") for l in check.lines for ll in l]\n[ll.set_linewidth(2) for l in check.lines for ll in l]\n\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Here we define the sliders for each potential\n\n# Bulge - gray\nMN_b_amp_ax = fig.add_axes((0.09,0.75,0.17,0.03))\nMN_b_amp_s = Slider(MN_b_amp_ax, r\"$M$($M_\\odot$)\", input_params['mass'][0]/(10**input_params['threshold_mass'][0]), input_params['mass'][0]*(10**input_params['threshold_mass'][0]), valinit=input_params['mass'][0], color='gray', valfmt='%1.3E')\nMN_b_a_ax = fig.add_axes((0.09,0.72,0.17,0.03))\nMN_b_a_s = Slider(MN_b_a_ax, \"$a$ ($kpc$)\", 0, 0.1*input_params['threshold_a'][0], valinit=input_params['a (kpc)'][0], color='gray')\nMN_b_b_ax = fig.add_axes((0.09,0.69,0.17,0.03))\nMN_b_b_s = Slider(MN_b_b_ax, \"$b$ ($kpc$)\", input_params['b (kpc)'][0]*(1-0.01*input_params['threshold_b'][0]), input_params['b (kpc)'][0]*(1+0.01*input_params['threshold_b'][0]), valinit=input_params['b (kpc)'][0], color='gray')\n\n# Thin disk - purple\nMN_td_amp_ax = fig.add_axes((0.09,0.63,0.17,0.03))\nMN_td_amp_s = Slider(MN_td_amp_ax, r\"$M$($M_\\odot$)\", input_params['mass'][1]/(10**input_params['threshold_mass'][1]), input_params['mass'][1]*(10**input_params['threshold_mass'][1]), valinit=input_params['mass'][1], color='purple', valfmt='%1.3E')\nMN_td_a_ax = fig.add_axes((0.09,0.60,0.17,0.03))\nMN_td_a_s = Slider(MN_td_a_ax, \"$a$ ($kpc$)\", input_params['a (kpc)'][1]*(1-0.01*input_params['threshold_a'][1]), input_params['a (kpc)'][1]*(1+0.01*input_params['threshold_a'][1]), valinit=input_params['a (kpc)'][1], color='purple')\nMN_td_b_ax = fig.add_axes((0.09,0.57,0.17,0.03))\nMN_td_b_s = Slider(MN_td_b_ax, \"$b$ ($kpc$)\", input_params['b (kpc)'][1]/(10**input_params['threshold_b'][1]), input_params['b (kpc)'][1]*(10**input_params['threshold_b'][1]), valinit=input_params['b (kpc)'][1], color='purple')\n\n# Thick disk - Blue\nMN_tkd_amp_ax = fig.add_axes((0.09,0.51,0.17,0.03))\nMN_tkd_amp_s = Slider(MN_tkd_amp_ax, r\"$M$($M_\\odot$)\", input_params['mass'][2]/(10**input_params['threshold_mass'][2]), input_params['mass'][2]*(10**input_params['threshold_mass'][2]), valinit=input_params['mass'][2], color='blue', valfmt='%1.3E')\nMN_tkd_a_ax = fig.add_axes((0.09,0.48,0.17,0.03))\nMN_tkd_a_s = Slider(MN_tkd_a_ax, \"$a$ ($kpc$)\", input_params['a (kpc)'][2]*(1-0.01*input_params['threshold_a'][2]), input_params['a (kpc)'][2]*(1+0.01*input_params['threshold_a'][2]), valinit=input_params['a (kpc)'][2], color='blue')\nMN_tkd_b_ax = fig.add_axes((0.09,0.45,0.17,0.03))\nMN_tkd_b_s = Slider(MN_tkd_b_ax, \"$b$ ($kpc$)\", input_params['b (kpc)'][2]/(10**input_params['threshold_b'][2]), input_params['b (kpc)'][2]*(10**input_params['threshold_b'][2]), valinit=input_params['b (kpc)'][2], color='blue')\n\n# Exponential disk - Cyan\nMN_ed_amp_ax = fig.add_axes((0.09,0.39,0.17,0.03))\nMN_ed_amp_s = Slider(MN_ed_amp_ax, r\"$\\Sigma_0$($M_\\odot/pc^2$)\", input_params['mass'][3]/(10**input_params['threshold_mass'][3]), input_params['mass'][3]*(10**input_params['threshold_mass'][3]), valinit=input_params['mass'][3], color='cyan', valfmt='%1.3E')\nMN_ed_a_ax = fig.add_axes((0.09,0.36,0.17,0.03))\nMN_ed_a_s = Slider(MN_ed_a_ax, \"$h_r$ ($kpc$)\", input_params['a 
(kpc)'][3]*(1-0.01*input_params['threshold_a'][3]), input_params['a (kpc)'][3]*(1+0.01*input_params['threshold_a'][3]), valinit=input_params['a (kpc)'][3], color='cyan')\n\n# NFW Halo - green\nNFW_amp_ax = fig.add_axes((0.09,0.30,0.17,0.03))\nNFW_amp_s = Slider(NFW_amp_ax, r\"$M_0$($M_\\odot$)\", input_params['mass'][4]/(10*input_params['threshold_mass'][4]), input_params['mass'][4]*(10**input_params['threshold_mass'][4]), valinit=input_params['mass'][4], color='green', valfmt='%1.3E')\nNFW_a_ax = fig.add_axes((0.09,0.27,0.17,0.03))\nNFW_a_s = Slider(NFW_a_ax, \"$a$ ($kpc$)\", input_params['a (kpc)'][4]*(1-0.01*input_params['threshold_a'][4]), input_params['a (kpc)'][4]*(1+0.01*input_params['threshold_a'][4]), valinit=input_params['a (kpc)'][4], color='green')\n\n# Burkert Halo - orange\nBK_amp_ax = fig.add_axes((0.09,0.21,0.17,0.03))\nBK_amp_s = Slider(BK_amp_ax, r\"$\\rho_0$($M_\\odot/kpc^3$)\", input_params['mass'][5]/(10*input_params['threshold_mass'][5]), input_params['mass'][5]*(10**input_params['threshold_mass'][5]), valinit=input_params['mass'][5], color='orange', valfmt='%1.3E')\nBK_a_ax = fig.add_axes((0.09,0.18,0.17,0.03))\nBK_a_s = Slider(BK_a_ax, \"$a$ ($kpc$)\", input_params['a (kpc)'][5]*(1-0.01*input_params['threshold_a'][5]), input_params['a (kpc)'][5]*(1+0.01*input_params['threshold_a'][5]), valinit=input_params['a (kpc)'][5], color='orange')\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Here we define the function for setting new parameters for each potential\n\n# Bulge\ndef MN_b_amp_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n amp1=val*1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=val*units.Msun,a=a1*units.kpc,b=b1*units.kpc,normalize=False,ro=r_0, vo=v_0) \n update_rot_curve()\ndef MN_b_a_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n a1=val*1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1*units.Msun,a=val*units.kpc,b=b1*units.kpc,normalize=False,ro=r_0, vo=v_0) \n update_rot_curve()\ndef MN_b_b_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n b1=val*1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1*units.Msun,a=a1*units.kpc,b=val*units.kpc,normalize=False,ro=r_0, vo=v_0) \n update_rot_curve()\n \n# Thin disk\ndef MN_td_amp_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n amp2=val*1\n MN_Thin_Disk_p= MiyamotoNagaiPotential(amp=val*units.Msun,a=a2*units.kpc,b=b2*units.kpc,normalize=False,ro=r_0, vo=v_0) \n update_rot_curve()\ndef MN_td_a_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n a2=val*1\n MN_Thin_Disk_p= MiyamotoNagaiPotential(amp=amp2*units.Msun,a=val*units.kpc,b=b2*units.kpc,normalize=False,ro=r_0, vo=v_0) \n update_rot_curve()\ndef MN_td_b_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n b2=val*1\n MN_Thin_Disk_p= MiyamotoNagaiPotential(amp=amp2*units.Msun,a=a2*units.kpc,b=val*units.kpc,normalize=False,ro=r_0, vo=v_0) \n update_rot_curve()\n\n# Thick disk\ndef MN_tkd_amp_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n amp3=val*1\n MN_Thick_Disk_p= MiyamotoNagaiPotential(amp=val*units.Msun,a=a3*units.kpc,b=b3*units.kpc,normalize=False,ro=r_0, vo=v_0) \n update_rot_curve()\ndef MN_tkd_a_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n a3=val*1\n MN_Thick_Disk_p= 
MiyamotoNagaiPotential(amp=amp3*units.Msun,a=val*units.kpc,b=b3*units.kpc,normalize=False,ro=r_0, vo=v_0) \n update_rot_curve()\ndef MN_tkd_b_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n b3=val*1\n MN_Thick_Disk_p= MiyamotoNagaiPotential(amp=amp3*units.Msun,a=a3*units.kpc,b=val*units.kpc,normalize=False,ro=r_0, vo=v_0) \n update_rot_curve()\n \n# Exponential disk\ndef MN_ed_amp_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4,h_r\n amp4=val*1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=val*(units.Msun/(units.pc**2)), hr=h_r*units.kpc, maxiter=20, tol=0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve() \ndef MN_ed_a_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4,h_r\n h_r=val*1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4*(units.Msun/(units.pc**2)), hr=val*units.kpc, maxiter=20, tol=0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n \n# NFW Halo \ndef NFW_amp_s_func(val):\n if NFW_plot.get_visible() == True:\n global NFW_p, amp5,a5\n amp5=val*1\n NFW_p = NFWPotential(amp=val*units.Msun, a=a5*units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve() \ndef NFW_a_s_func(val):\n if NFW_plot.get_visible() == True:\n global NFW_p, amp5,a5\n a5=val*1\n NFW_p = NFWPotential(amp=amp5*units.Msun, a=val*units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n# Burkert Halo \ndef BK_amp_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6,a6\n amp6=val*1\n BK_p = BurkertPotential(amp=val*units.Msun/(units.kpc)**3, a=a6*units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\ndef BK_a_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6,a6\n a6=val*1\n BK_p = BurkertPotential(amp=amp6*units.Msun/(units.kpc)**3, a=val*units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Here we define the funcion which update the rotation curve for the selected and the composed potential\ndef update_rot_curve():\n\tax.clear()\n\tglobal MN_b_plot, MN_Bulge_p, MN_Thin_Disk_p,MN_Thick_Disk_p, MN_td_plot,MN_tkd_plot, NFW_p, NFW_plot, EX_d_plot, EX_Disk_p, CV_galaxy, CV_galaxy_dot, BK_p, BK_plot\n\tcomposite_pot_array=[]\n\tax.set_xlabel(r'$R(kpc)$', fontsize=20)\n\tax.set_ylabel(r'$v_c(km/s)$', fontsize=20)\n\tax.tick_params(axis='both', which='both', labelsize=15)\n\t#ax.xaxis.set_major_locator(ticker.MultipleLocator(5))\n\tax.set_xlim([0, 1.02*r_data[-1]])\n\tax.set_ylim([0,np.max(v_c_data)*1.2])\n \n\tif MN_b_plot.get_visible() == True:\n\t\tMN_Bulge = calcRotcurve(MN_Bulge_p, lista, phi=None)*220\n\t\tMN_b_plot, = ax.plot(lista, MN_Bulge, linestyle='--', c='gray')\n\t\tcomposite_pot_array.append(MN_Bulge_p)\n\tif MN_td_plot.get_visible() == True:\n\t\tMN_Thin_Disk = calcRotcurve(MN_Thin_Disk_p, lista, phi=None)*220\n\t\tMN_td_plot, = ax.plot(lista, MN_Thin_Disk, linestyle='--', c='purple')\n\t\tcomposite_pot_array.append(MN_Thin_Disk_p)\n\tif MN_tkd_plot.get_visible() == True:\n\t\tMN_Thick_Disk = calcRotcurve(MN_Thick_Disk_p, lista, phi=None)*220\n\t\tMN_tkd_plot, = ax.plot(lista, MN_Thick_Disk, linestyle='--', c='blue')\n\t\tcomposite_pot_array.append(MN_Thick_Disk_p)\n\tif NFW_plot.get_visible() == True:\n\t\tNFW = calcRotcurve(NFW_p, lista, phi=None)*220\n\t\tNFW_plot, = ax.plot(lista, NFW, linestyle='--', c='green')\n\t\tcomposite_pot_array.append(NFW_p)\n\tif EX_d_plot.get_visible() == 
True:\n\t\tEX_Disk = calcRotcurve(EX_Disk_p, lista, phi=None)*220\n\t\tEX_d_plot, = ax.plot(lista, EX_Disk, linestyle='--', c='cyan')\n\t\tcomposite_pot_array.append(EX_Disk_p)\n\tif BK_plot.get_visible() == True:\n\t\tBK = calcRotcurve(BK_p, lista, phi=None)*220\n\t\tBK_plot, = ax.plot(lista, BK, linestyle='--', c='orange')\n\t\tcomposite_pot_array.append(BK_p)\n\tCV_galaxy = ax.errorbar(r_data, v_c_data, v_c_err_data, c='k', fmt='', ls='none')\n\tCV_galaxy_dot = ax.scatter(r_data, v_c_data, c='k')\n\tv_circ_comp = calcRotcurve(composite_pot_array, lista, phi=None)*220\n\tv_circ_comp_plot, = ax.plot(lista, v_circ_comp, c='k')\n\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Here we define the sliders update functions\nMN_b_amp_s.on_changed(MN_b_amp_s_func)\nMN_b_a_s.on_changed(MN_b_a_s_func)\nMN_b_b_s.on_changed(MN_b_b_s_func)\nMN_td_amp_s.on_changed(MN_td_amp_s_func)\nMN_td_a_s.on_changed(MN_td_a_s_func)\nMN_td_b_s.on_changed(MN_td_b_s_func)\nMN_tkd_amp_s.on_changed(MN_tkd_amp_s_func)\nMN_tkd_a_s.on_changed(MN_tkd_a_s_func)\nMN_tkd_b_s.on_changed(MN_tkd_b_s_func)\nNFW_amp_s.on_changed(NFW_amp_s_func)\nNFW_a_s.on_changed(NFW_a_s_func)\nBK_amp_s.on_changed(BK_amp_s_func)\nBK_a_s.on_changed(BK_a_s_func)\nMN_ed_amp_s.on_changed(MN_ed_amp_s_func)\nMN_ed_a_s.on_changed(MN_ed_a_s_func)\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Here we define the function and create the button which reset the sliders \n\ndef reset(event):\n MN_b_amp_s.reset()\n MN_b_a_s.reset()\n MN_b_b_s.reset()\n MN_td_amp_s.reset()\n MN_td_a_s.reset() \n MN_td_b_s.reset()\n MN_tkd_amp_s.reset()\n MN_tkd_a_s.reset()\n MN_tkd_b_s.reset()\n MN_ed_amp_s.reset()\n MN_ed_a_s.reset()\n NFW_amp_s.reset()\n NFW_a_s.reset()\n BK_amp_s.reset()\n BK_a_s.reset()\n\naxcolor=\"lavender\"\nresetax = fig.add_axes((0.07, 0.08, 0.08, 0.05))\nbutton_reset = Button(resetax, 'Reset', color=axcolor)\nbutton_reset.on_clicked(reset)\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Enable/disable the selected potential for the composed rotation curve\ndef check_on_clicked(label):\n\n if label == 'MN Bulge (GRAY)':\n MN_b_plot.set_visible(not MN_b_plot.get_visible())\n update_rot_curve()\n elif label == 'MN Thin Disc (PURPLE)':\n MN_td_plot.set_visible(not MN_td_plot.get_visible())\n update_rot_curve()\n elif label == 'MN Thick Disc (BLUE)':\n MN_tkd_plot.set_visible(not MN_tkd_plot.get_visible())\n update_rot_curve()\n elif label == 'Exp. Disc (CYAN)':\n EX_d_plot.set_visible(not EX_d_plot.get_visible())\n update_rot_curve()\n elif label == 'NFW - Halo (GREEN)':\n NFW_plot.set_visible(not NFW_plot.get_visible())\n update_rot_curve()\n elif label == 'Burkert - Halo (ORANGE)':\n BK_plot.set_visible(not BK_plot.get_visible())\n update_rot_curve()\n plt.draw()\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Plotting all the curves\n\nax.set_xlabel(r'$R(kpc)$', fontsize=20)\nax.set_ylabel(r'$v_c(km/s)$', fontsize=20)\nax.tick_params(axis='both', which='both', labelsize=15)\n#ax.xaxis.set_major_locator(ticker.MultipleLocator(5))\nax.set_xlim([0, np.max(lista)])\nax.set_ylim([0,np.max(v_c_data)*1.2])\ncheck.on_clicked(check_on_clicked)\n\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\"\"\"\nOnce you click on this button the principal window will close, so you can now enter the number of walkers \nand the number of steps you want to use. 
Take into account that the number of walkers have to be even and at least\ntwice the dimension of the system (number of parameters to evaluate)\n\"\"\"\naxcolor=\"lavender\"\nresetax = fig.add_axes((0.20, 0.08, 0.08, 0.05))\nbutton_start = Button(resetax, 'Start', color=axcolor)\n \ndef start(event):\n\tplt.close(1)\n\nbutton_start.on_clicked(start)\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nplt.show()\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Here we find the parameters that will be used as initial guess \n\nchk=[]\nif MN_b_plot.get_visible() == True:\n chk.append(True)\nelse:\n chk.append(False)\nif MN_td_plot.get_visible() == True:\n chk.append(True)\nelse:\n chk.append(False)\nif MN_tkd_plot.get_visible() == True:\n chk.append(True)\nelse:\n chk.append(False)\nif EX_d_plot.get_visible() == True:\n chk.append(True)\nelse:\n chk.append(False)\nif NFW_plot.get_visible() == True:\n chk.append(True)\nelse:\n chk.append(False)\nif BK_plot.get_visible() == True:\n chk.append(True)\nelse:\n chk.append(False)\n\ncompnts = ['BULGE','THIN DISC','THICK DISC','EXP. DISC', 'DARK HALO', 'BURKERT HALO']\nmasses = [amp1, amp2, amp3, amp4, amp5, amp6]\naa = [a1, a2, a3, h_r, a5, a6]\nbb = [b1, b2, b3, \"None\", \"None\", \"None\"]\n\ninit_parameters = Table.Table([compnts,masses, aa,bb, chk], names=('component', 'mass', 'a (kpc)', 'b (kpc)', 'checked'))\ninit_parameters.write('init_guess_params.txt', format='ascii.tab', overwrite=True)\n\n#HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH\n#HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH\n\n# PART 3: MCMC(Parameters determination) code\n\n#HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH\n#HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH\n\nprint (\"\\n#####################################################################\")\nprint (\"###################### GalRotpy ######################\")\nprint (\"#####################################################################\\n\\n\")\n\ndef model(parameters, R):\n\tglobal chk, para_labels, aa\n \n\tpara = {}\n \n\tfor i in range(len(para_labels)):\n\t\tpara[para_labels[i]] = parameters[i]\n\t\t \t\n\tr_0=1*units.kpc\n\tv_0=220*units.km/units.s\n\n\tcheck_pot = []\n\t\n\tif chk[0]==True:\n\t\tif aa[0]==0.:\n\t\t\ta1=0.\n\t\t\tamp1=para[\"amp1\"]; b1=para[\"b1\"]\n\t\telse: \n\t\t\tamp1=para[\"amp1\"]; a1=para[\"a1\"]; b1=para[\"b1\"]\n\t\t\t\n\t\tMN_Bulge_p= MiyamotoNagaiPotential(amp=amp1*units.Msun,a=a1*units.kpc,b=b1*units.kpc,normalize=False,ro=r_0, vo=v_0)\n\t\tcheck_pot.append(MN_Bulge_p)\n\n\tif chk[1]==True:\n\t\tamp2=para[\"amp2\"]; a2=para[\"a2\"]; b2=para[\"b2\"]\n\t\tMN_Thin_Disk_p= MiyamotoNagaiPotential(amp=amp2*units.Msun,a=a2*units.kpc,b=b2*units.kpc,normalize=False,ro=r_0, vo=v_0)\n\t\tcheck_pot.append(MN_Thin_Disk_p)\n \n\tif chk[2]==True:\n\t\tamp3=para[\"amp3\"]; a3=para[\"a3\"]; b3=para[\"b3\"]\n\t\tMN_Thick_Disk_p= MiyamotoNagaiPotential(amp=amp3*units.Msun,a=a3*units.kpc,b=b3*units.kpc,normalize=False,ro=r_0, vo=v_0)\n\t\tcheck_pot.append(MN_Thick_Disk_p)\t\n\t\t\n\tif chk[3]==True:\n\t\tamp4=para[\"amp4\"]; h_r=para[\"h_r\"]\n\t\tEX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4*(units.Msun/(units.pc**2)), hr=h_r*units.kpc, maxiter=20, tol=0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n\t\tcheck_pot.append(EX_Disk_p)\n\t\t\n\tif chk[4]==True:\n\t\tamp5=para[\"amp5\"]; a5=para[\"a5\"]\n\t\tNFW_p = NFWPotential(amp=amp5*units.Msun, a=a5*units.kpc, normalize=False, ro=r_0, vo=v_0)\n\t\tcheck_pot.append(NFW_p)\n\t\t\n\tif 
chk[5]==True:\n\t\tamp6=para[\"amp6\"]; a6=para[\"a6\"]\n\t\tBK_p = BurkertPotential(amp=amp6*units.Msun/(units.kpc)**3, a=a6*units.kpc, normalize=False, ro=r_0, vo=v_0)\n\t\tcheck_pot.append(BK_p)\n\n\tvc_total=calcRotcurve(check_pot, R, phi=None)*220\n\treturn vc_total\n \n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \n#Probability distributions\n\n#ln Prior\ndef lnprior(parameters):\n\t\n\tbooL = []\n\t\n\tfor i in parameters:\n\t\tif i>0.:\n\t\t\tbooL.append(True)\n\t\telse:\n\t\t\tbooL.append(False)\n\t\t\t\n\tif False in booL:\n\t\treturn -np.inf\t\t\n\telse:\n\t\treturn 0.0\n \n#ln Likehood \ndef lnlike(parameters, x, y, yerr):\n Model = model(parameters, x)\n return -0.5*(np.sum( ((y-Model)/yerr)**2))\n\n#ln Posterior\ndef lnprob(parameters, x, y, yerr):\n lp = lnprior(parameters)\n Model = model(parameters, x)\n if not np.isfinite(lp) or (True in np.isnan(Model)):\n return -np.inf\n else:\n return lp + lnlike(parameters, x, y, yerr) \n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\"\"\"\nHere the parameters associated to the selected models are defined, and also\nthe initial guesses are given.\n\"\"\"\npara_labels = []\nlabels = []\nlabels_log = []\npara_in = []\n\nif chk[0]==True:\n\tif aa[0]==0.:\n\t\tpara_labels.append(\"b1\"); para_in.append(bb[0]);\t\t labels.append(r\"$b_B$\");\t\t labels_log.append(r\"$\\log(b_B)$\")\n\t\tpara_labels.append(\"amp1\"); para_in.append(masses[0]); labels.append(r\"$M_B$\");\t\t labels_log.append(r\"$\\log(M_B)$\")\n\telse: \n\t\tpara_labels.append(\"a1\"); para_in.append(aa[0]);\t\t labels.append(r\"$a_B$\");\t\t labels_log.append(r\"$\\log(a_b)$\")\t\n\t\tpara_labels.append(\"b1\"); para_in.append(bb[0]);\t\t labels.append(r\"$b_B$\");\t\t labels_log.append(r\"$\\log(b_b)$\")\n\t\tpara_labels.append(\"amp1\"); para_in.append(masses[0]); labels.append(r\"$M_B$\");\t\t labels_log.append(r\"$\\log(M_b)$\")\n\nif chk[1]==True:\n\tpara_labels.append(\"a2\"); para_in.append(aa[1]); \t labels.append(r\"$a_{TD}$\");\t labels_log.append(r\"$\\log(a_{TD})$\")\n\tpara_labels.append(\"b2\"); para_in.append(bb[1]); \t labels.append(r\"$b_{TD}$\");\t labels_log.append(r\"$\\log(b_{TD})$\")\n\tpara_labels.append(\"amp2\"); para_in.append(masses[1]);\t labels.append(r\"$M_{TD}$\");\t labels_log.append(r\"$\\log(M_{TD})$\")\n\nif chk[2]==True:\n\tpara_labels.append(\"a3\"); para_in.append(aa[2]);\t\t labels.append(r\"$a_{TkD}$\");\t labels_log.append(r\"$\\log(a_{TkD})$\")\t\t\n\tpara_labels.append(\"b3\"); para_in.append(bb[2]);\t\t labels.append(r\"$b_{TkD}$\");\t labels_log.append(r\"$\\log(b_{TkD})$\")\t\n\tpara_labels.append(\"amp3\"); para_in.append(masses[2]); labels.append(r\"$M_{TkD}$\");\t labels_log.append(r\"$\\log(M_{TkD})$\")\t\n\nif chk[3]==True:\n\tpara_labels.append(\"h_r\"); para_in.append(aa[3]);\t\t labels.append(r\"$h_{r}$\");\t\t labels_log.append(r\"$\\log(h_{r})$\")\t\n\tpara_labels.append(\"amp4\"); para_in.append(masses[3]);\t labels.append(r\"$\\Sigma_{0}$\"); labels_log.append(r\"$\\log(\\Sigma_{0})$\")\t\n\t\nif chk[4]==True:\n\tpara_labels.append(\"a5\"); para_in.append(aa[4]);\t\t labels.append(r\"$a_{NFW}$\");\t labels_log.append(r\"$\\log(a_{NFW})$\")\n\tpara_labels.append(\"amp5\"); para_in.append(masses[4]);\t labels.append(r\"$M_{0}$\");\t labels_log.append(r\"$\\log(M_{0})$\")\n\t\nif chk[5]==True:\n\tpara_labels.append(\"a6\"); para_in.append(aa[5]);\t\t labels.append(r\"$a_{Bk}$\");\t labels_log.append(r\"$\\log(a_{Bk})$\")\n\tpara_labels.append(\"amp6\"); para_in.append(masses[5]);\t 
labels.append(r\"$\\rho_{0}$\");\t labels_log.append(r\"$\\log(\\rho_{0})$\")\n \n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \n# Dimension\n\nstart = np.array(para_in)\nndim = len(start)\nprint (\"Dimension: \", ndim, \"\\n\")\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \n# Cosmological overdensity\n\nif chk[4]==True or chk[5]==True:\n\tDelta_c = float(input(\"Enter the cosmological overdensity you want to use:\\n\"))\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Nwalkers and Steps\n\nnwalkers = int(input(\"\\nEnter the number of walkers you want to use:\\n\"))\nsteps = int(input(\"\\nEnter the number of steps you want the walkers to take:\\n\"))\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \n# Rotational Curve Model\npos_step = 1e-8\npos_in = [abs(start + pos_step*start*np.random.randn(ndim)+1e-9*np.random.randn(ndim)) for i in range(nwalkers)]\n#pos_in = [abs(2.*start*np.random.rand(ndim)+0.01*np.random.rand(ndim)) for i in range(nwalkers)]\nsampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(r_data, v_c_data, v_c_err_data), threads=ndim*mp.cpu_count())\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# perform MCMC\n\nprint (\"\\n#####################################################################\\n\")\nRound = int(input(\"Enter the number of times you want GalRotpy to run:\\n\"))\nif Round <=0:\n\tprint (\"\\nStart over...\")\n\texit()\n\t\nprint (\"\\nRunning...\\n\")\ntime0 = time.time()\n\nif Round == 1:\n\tp0, lp, _ = sampler.run_mcmc(pos_in, steps)\n\tprint (\"It took \", (time.time()-time0)/60, \"minutes\\n\")\n\nif Round >1:\n\n\tfor j in range(Round-1):\n\t\tti=time.time()\n\t\tPARA=[]\n\t\tp0, lp, _ = sampler.run_mcmc(pos_in, steps)\n\t\tSAMPLES = sampler.chain[:, int(0.5*steps):, :].reshape((-1, ndim))\n\t\tfor i in range(ndim):\n\t\t\tmcmc = np.percentile(SAMPLES[:, i], [50.-0.5*68, 50., 50.+0.5*68])\n\t\t\tPARA.append(mcmc[1])\n\t\tp=np.array(PARA)\n\t\tpos_in = [abs(p + pos_step*p*np.random.randn(ndim)+1e-8*np.random.randn(ndim)) for i in range(nwalkers)]\n\t\tsampler.reset()\n\t\tprint(\"Run \" + str(j+1) + \" done\")\n\t\tprint (\"Time: \", (time.time()-ti)/60, \"minutes\\n\")\n\t\n\tti=time.time()\n\tif Round > 1:\n\t\tsteps=3*steps\n\tp0, lp, _ = sampler.run_mcmc(pos_in, steps)\n\tprint(\"Run \" + str(Round) + \" done\")\n\tprint (\"Time: \", (time.time()-ti)/60, \"minutes\\n\")\n\tprint (\"It took \", (time.time()-time0)/60, \"minutes\\n\")\n\nprint (\"#####################################################################\\n\")\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Here we plot the chains for each parameter\n\nfig = plt.figure(2)\nax = fig.add_axes((0.15, 0.3, 0.75, 0.6))\n\nchain_steps = [i for i in range(len(sampler.chain[:,:,0].T))]\nchain_W = []\nfor i in range(nwalkers):\n\tchain_value = sampler.chain[:,:,0].T[:][:,i]\n\tax.plot(chain_steps, chain_value, '-', color='k', alpha=0.3)\nax.plot(chain_steps, len(chain_steps)*[start[0]], '-', color='r', lw=1)\nax.set_xlim(0, len(chain_steps)-1)\nax.set_xlabel(r\"$Steps$\", fontsize = 10)\nax.set_ylabel(labels[0], fontsize = 15)\n\n\nclass Index(object):\n\n\tind = 0\n \n\tdef next(self, event):\n\t\tglobal ndim, start, chain_W, nwalkers, chain_steps\n\t\t\n\n\t\tself.ind += 1\n\t\tif self.ind >= ndim:\n\t\t\tself.ind = 0\t\n\t\tax.clear()\n\t\t#plt.subplots_adjust(bottom=0.2)\t\n\t\tfor i in range(nwalkers):\n\t\t\tdata_a = np.array(sampler.chain[:,:,self.ind].T)[:,i]\t\n\t\t\tax.plot(chain_steps, data_a, '-', color='k', 
alpha=0.3)\n\t\t\tax.plot(chain_steps, len(chain_steps)*[start[self.ind]], '-', color='r', lw=1)\n\t\tax.set_xlim(0, len(chain_steps)-1)\n\t\tax.set_xlabel(r\"$Steps$\", fontsize = 10)\n\t\tax.set_ylabel(labels[self.ind], fontsize = 15)\n\t\tplt.tight_layout()\n\t\tplt.draw()\n\n\tdef prev(self, event):\n\t\tglobal ndim, start, chain_W, nwalkers, chain_steps\n\t\t\n\n\t\tself.ind -= 1\n\t\tif self.ind == -1:\n\t\t\tself.ind = ndim-1\n\t\t\t\n\t\tax.clear()\n\t\t#plt.subplots_adjust(bottom=0.2)\t\n\t\tfor i in range(nwalkers):\n\t\t\tdata_a = np.array(sampler.chain[:,:,self.ind].T)[:,i]\t\n\t\t\tax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n\t\t\tax.plot(chain_steps, len(chain_steps)*[start[self.ind]], '-', color='r', lw=1)\n\t\tax.set_xlim(0, len(chain_steps)-1)\n\t\tax.set_xlabel(r\"$Steps$\", fontsize = 10)\n\t\tax.set_ylabel(labels[self.ind], fontsize = 15)\n\t\tplt.tight_layout()\n\t\tplt.draw()\n\t\t\naxcolor=\"lavender\"\ncallback = Index()\naxprev = plt.axes([0.3, 0.05, 0.1, 0.075])\naxnext = plt.axes([0.6, 0.05, 0.1, 0.075])\nbnext = Button(axnext, 'Next', color=axcolor)\nbnext.on_clicked(callback.next)\nbprev = Button(axprev, 'Previous', color=axcolor)\nbprev.on_clicked(callback.prev)\n\ndef burn(event):\n plt.close()\n\n\nresetax = fig.add_axes((0.45, 0.05, 0.1, 0.075))\nbutton_reset = Button(resetax, 'Burn-in', color=axcolor)\nbutton_reset.on_clicked(burn)\n\nplt.show()\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Nwalkers and Steps\n\nburn_in = int(input(\"Enter the number of steps you want to burn-in:\\n\"))\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Here we plot the region of confidence\n\nprint (\"\\n#####################################################################\\n\")\nprint (\"Plotting...\")\n\nif burn_in == 0.:\n\tsamples = sampler.chain[:, :, :].reshape((-1, ndim))\nelse:\n\tsamples = sampler.chain[:, burn_in:, :].reshape((-1, ndim))\n\t\nsamples.shape\n\npercentage = 0.68\n\nfig = corner.corner(np.log10(samples), labels=labels_log, label_kwargs = {\"fontsize\": 21.5},\n\t\t\t\t\t bins=50, use_math_text =True, color = \"gray\", max_n_ticks=3,#truth_color = \"red\", truths= np.log10(start), \n\t\t\t\t\t smooth=1., levels=[1-np.exp(-0.5), 1-np.exp(-2.) 
], quantiles = [0.5-0.5*percentage, 0.5, 0.5+0.5*percentage], \n fill_contours=True, plot_datapoints=True)\n\naxes = np.array(fig.axes).reshape((ndim, ndim))\n\nfor yi in range(ndim):\n\tfor xi in range(yi+1):\n\t\tax = axes[yi, xi]\n\t\tax.tick_params(axis='both', which='major', labelsize=14.5, pad=3, direction = \"in\")\n\t\t\nfig.savefig(\"Conf_Regions.pdf\",bbox_inches='tight',pad_inches=0.15)\n\n#Here we obtain the quantities of interest, which will be include in a table as output\n\n\"\"\"\naux1 = (1.*units.m).to(units.kpc)\naux2 = (1.*units.km).to(units.kpc)\naux3 = (1.*units.kg).to(units.Msun)\nH_0 = (67.8/1000)*(aux2/(units.s*units.kpc)) # Planck 2016\nG = 6.67408e-11*(aux1**3/(units.s**2*aux3))\nrho_c = 3.*H_0**2/(8.*np.pi*G)\n\"\"\"\n\nH_0 = 2.1972483582604943e-18 #1 / s\nG = 4.517103050001136e-39 #kpc^3 / (s^2 solMass)\nrho_c = 127.5791469578729 #solMass / kpc^3\n\n# NFW\ndef eq_nfw(x, rho_0, rho_c):\n\tglobal Delta_c\n\treturn (np.log(1+x)-(x/(1+x))-((Delta_c*rho_c)/(3.*rho_0))*x**3) \ndef mass_nfw(x, rho_0, a):\n\treturn (4.*np.pi*rho_0*a**3*(np.log(1+x)-(x/(1+x)))) \n\n# Burkert\ndef eq_b(x, rho_0, rho_c):\n\tglobal Delta_c\n\treturn (2.*np.log(1+x)+np.log(1+x**2)-2.*np.arctan(x)-(4.*Delta_c*rho_c/(3.*rho_0))*x**3) \ndef mass_b(x, rho_0, a):\n\treturn (np.pi*rho_0*a**3*(2.*np.log(1+x)+np.log(1+x**2)-2.*np.arctan(x))) \n# For both halo distributions we have x=r/a\n\ntable_data = []\nindex = []\npara = []\nparap68=[]; paran68=[]\nparap95=[]; paran95=[]\ntable_para = []\ntable_units = []\nfinal_para_labels = []\nfit_para = []\n\nfor i in range(ndim):\t\n\tmcmc = np.percentile(samples[:, i], [50.-0.5*95, 50.-0.5*68, 50., 50.+0.5*68, 50.+0.5*95])\n\tpara.append(mcmc[2])\n\tfit_para.append(mcmc[2]) \n\tparap68.append(mcmc[3]-mcmc[2])\n\tparan68.append(mcmc[2]-mcmc[1])\n\tparap95.append(mcmc[4]-mcmc[2])\n\tparan95.append(mcmc[2]-mcmc[0])\n\tfinal_para_labels.append(para_labels[i])\n\t\n\t#Exponential Disc\n\tif para_labels[i]==\"h_r\":\n\t\ted_h_r = np.array(samples[:, i])\n\tif para_labels[i]==\"amp4\":\n\t\ted_sigma0 = np.array(samples[:, i])\n\t\tM_disc = 2.*np.pi*ed_sigma0*(1000*ed_h_r)**2\n\t\tmcmc = np.percentile(M_disc, [50.-0.5*95, 50.-0.5*68, 50., 50.+0.5*68, 50.+0.5*95])\n\t\tpara.append(mcmc[2])\n\t\tparap68.append(mcmc[3]-mcmc[2])\n\t\tparan68.append(mcmc[2]-mcmc[1])\n\t\tparap95.append(mcmc[4]-mcmc[2])\n\t\tparan95.append(mcmc[2]-mcmc[0])\n\t\tfinal_para_labels.append(\"M_star\")\n\t\n\t#NFW\n\tif para_labels[i]==\"a5\":\n\t\tnfw_a = np.array(samples[:, i])\n\tif para_labels[i]==\"amp5\":\n\t\tnfw_M0 = np.array(samples[:, i])\n\t\trho_0 = nfw_M0/(4.*np.pi*nfw_a**3)\n\t\tmcmc = np.percentile(rho_0, [50.-0.5*95, 50.-0.5*68, 50., 50.+0.5*68, 50.+0.5*95])\n\t\tpara.append(mcmc[2])\n\t\tparap68.append(mcmc[3]-mcmc[2])\n\t\tparan68.append(mcmc[2]-mcmc[1])\n\t\tparap95.append(mcmc[4]-mcmc[2])\n\t\tparan95.append(mcmc[2]-mcmc[0])\n\t\tfinal_para_labels.append(\"rho_0_NFW\")\n\t\t\n\t\t# Concentration parameter (X)\n\t\tX_nfw = []\n\t\tfor density in rho_0:\n\t\t\tX_nfw.append(fsolve(eq_nfw, 100000., args=(density, rho_c))[0])\n\t\t\n\t\tmcmc = np.percentile(np.array(X_nfw), [50.-0.5*95, 50.-0.5*68, 50., 50.+0.5*68, 50.+0.5*95])\n\t\tpara.append(mcmc[2])\n\t\tparap68.append(mcmc[3]-mcmc[2])\n\t\tparan68.append(mcmc[2]-mcmc[1])\n\t\tparap95.append(mcmc[4]-mcmc[2])\n\t\tparan95.append(mcmc[2]-mcmc[0])\n\t\tfinal_para_labels.append(\"X_NFW\")\n\t\t\n\t\t\n\t\t\n\t\t# Halo Mass (M_h)\n\t\tM_h_nfw = mass_nfw(np.array(X_nfw), rho_0, nfw_a) \n\t\tmcmc = np.percentile(M_h_nfw, 
[50.-0.5*95, 50.-0.5*68, 50., 50.+0.5*68, 50.+0.5*95])\n\t\tpara.append(mcmc[2])\n\t\tparap68.append(mcmc[3]-mcmc[2])\n\t\tparan68.append(mcmc[2]-mcmc[1])\n\t\tparap95.append(mcmc[4]-mcmc[2])\n\t\tparan95.append(mcmc[2]-mcmc[0])\n\t\tfinal_para_labels.append(\"M_h_NFW\")\n\t\n\t#Burkert\n\tif para_labels[i]==\"a6\":\n\t\tb_a = np.array(samples[:, i])\n\tif para_labels[i]==\"amp6\":\n\t\t# Concentration parameter (X)\n\t\tX_b = []\n\t\tfor density in samples[:, i]: # Here samples[:, i] corresponds to rho_0\n\t\t\tX_b.append(fsolve(eq_b, 100000., args=(density, rho_c))[0])\n\n\t\tmcmc = np.percentile(np.array(X_b), [50.-0.5*95, 50.-0.5*68, 50., 50.+0.5*68, 50.+0.5*95])\n\t\tpara.append(mcmc[2])\n\t\tparap68.append(mcmc[3]-mcmc[2])\n\t\tparan68.append(mcmc[2]-mcmc[1])\n\t\tparap95.append(mcmc[4]-mcmc[2])\n\t\tparan95.append(mcmc[2]-mcmc[0])\n\t\tfinal_para_labels.append(\"X_Bk\")\n\t\t\n\t\t# Halo Mass (M_h)\n\t\tM_h_b= mass_b(np.array(X_b), samples[:, i], b_a) \n\t\tmcmc = np.percentile(M_h_b, [50.-0.5*95, 50.-0.5*68, 50., 50.+0.5*68, 50.+0.5*95])\n\t\tpara.append(mcmc[2])\n\t\tparap68.append(mcmc[3]-mcmc[2])\n\t\tparan68.append(mcmc[2]-mcmc[1])\n\t\tparap95.append(mcmc[4]-mcmc[2])\n\t\tparan95.append(mcmc[2]-mcmc[0])\n\t\tfinal_para_labels.append(\"M_h_Bk\")\n\t\n\nr=np.linspace(0.001, 1.02*np.amax(r_data),10000)\ncurva = model(fit_para, r)\nY_guess=model(start, r)\n \nnp.warnings.filterwarnings('ignore')\nplt.figure(figsize=(6, 6))\n\n\nbest_para = {}\n \nfor i in range(len(final_para_labels)):\n\tbest_para[final_para_labels[i]] = para[i]\n\t\t \t\nif chk[0]==True:\n\tif aa[0]==0.:\n\t\ta1=0.\n\t\tamp1=best_para[\"amp1\"]; b1=best_para[\"b1\"]\n\telse:\n\t\tamp1=best_para[\"amp1\"]; a1=best_para[\"a1\"]; b1=best_para[\"b1\"]\n\tMN_Bulge_p= MiyamotoNagaiPotential(amp=amp1*units.Msun,a=a1*units.kpc,b=b1*units.kpc,normalize=False,ro=r_0, vo=v_0)\n\tvc_b=calcRotcurve(MN_Bulge_p, r, phi=None)*220\n\tplt.plot(r, vc_b, \"--\", color = \"gray\", label = r\"Bulge\")\n\nif chk[1]==True:\n\tamp2=best_para[\"amp2\"]; a2=best_para[\"a2\"]; b2=best_para[\"b2\"]\n\tMN_Thin_Disk_p= MiyamotoNagaiPotential(amp=amp2*units.Msun,a=a2*units.kpc,b=b2*units.kpc,normalize=False,ro=r_0, vo=v_0)\n\tvc_td=calcRotcurve(MN_Thin_Disk_p, r, phi=None)*220\n\tplt.plot(r, vc_td, \"--\", color = \"purple\", label = r\"Thin Disk\")\n \nif chk[2]==True:\n\tamp3=best_para[\"amp3\"]; a3=best_para[\"a3\"]; b3=best_para[\"b3\"]\n\tMN_Thick_Disk_p= MiyamotoNagaiPotential(amp=amp3*units.Msun,a=a3*units.kpc,b=b3*units.kpc,normalize=False,ro=r_0, vo=v_0)\n\tvc_tkd=calcRotcurve(MN_Thick_Disk_p, r, phi=None)*220\n\tplt.plot(r, vc_tkd, \"--\", color = \"blue\", label = r\"Thick Disk\")\n\t\t\nif chk[3]==True:\n\tamp4=best_para[\"amp4\"]; h_r=best_para[\"h_r\"]\n\tEX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4*(units.Msun/(units.pc**2)), hr=h_r*units.kpc, maxiter=20, tol=0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n\tvc_exp=calcRotcurve(EX_Disk_p, r, phi=None)*220\n\tplt.plot(r, vc_exp, \"--\", color = \"cyan\", label = r\"Exp. 
Disk\")\n\t\t\nif chk[4]==True:\n\tamp5=best_para[\"amp5\"]; a5=best_para[\"a5\"]\n\tNFW_p = NFWPotential(amp=amp5*units.Msun, a=a5*units.kpc, normalize=False, ro=r_0, vo=v_0)\n\tvc_nfw=calcRotcurve(NFW_p, r, phi=None)*220\n\tplt.plot(r, vc_nfw, \"--\", color = \"green\", label = r\"NFW - Halo\")\n\t\t\nif chk[5]==True:\n\tamp6=best_para[\"amp6\"]; a6=best_para[\"a6\"]\n\tBK_p = BurkertPotential(amp=amp6*units.Msun/(units.kpc)**3, a=a6*units.kpc, normalize=False, ro=r_0, vo=v_0)\n\tvc_bk=calcRotcurve(BK_p, r, phi=None)*220\n\tplt.plot(r, vc_bk, \"--\", color = \"orange\", label = r\"Burkert - Halo\")\n\n#plt.plot(r, Y_guess, \"-\", color='blue', lw=1.5, label=r\"Initial Guess\")\nplt.errorbar(r_data, v_c_data, yerr=v_c_err_data, fmt='ko', ecolor='black', ms=4, label = None)\nplt.plot(r, curva, \"-\", color='red', lw=1.5, label=r\"Best Fit\")\nplt.xlabel(r\"$R(kpc)$\",fontsize=20)\nplt.ylabel(r\"$V_c(km/s)$\",fontsize=20)\nplt.tick_params(axis='both', which='major', labelsize=15)\nplt.xlim(0., 1.02*np.amax(r_data))\nplt.ylim(0., 1.1*np.amax(v_c_data))\nplt.tight_layout()\nplt.legend(loc=\"lower right\",fontsize=15)\nplt.savefig(\"GalRotpy_fit.pdf\")\n\nprint (\"\\n#####################################################################\\n\")\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Here we construct the table with the final results\n\nif chk[0]==True:\n\tif aa[0]==0.:\n\t\tindex.append(r\"BULGE\"); index.append(r\"---\")\n\t\ttable_para.append(r\"b\");\ttable_units.append(r\"kpc\")\n\t\ttable_para.append(r\"M\");\ttable_units.append(r\"M_Sun\")\n\telse:\n\t\tindex.append(r\"BULGE\"); index.append(r\"---\"); index.append(r\"---\")\n\t\ttable_para.append(r\"a\");\ttable_units.append(r\"kpc\")\n\t\ttable_para.append(r\"b\");\ttable_units.append(r\"kpc\")\n\t\ttable_para.append(r\"M\");\ttable_units.append(r\"M_Sun\")\n\t\nif chk[1]==True:\n\tindex.append(r\"THIN DISK\"); index.append(r\"---\"); index.append(r\"---\")\n\ttable_para.append(r\"a\");\ttable_units.append(r\"kpc\")\n\ttable_para.append(r\"b\");\ttable_units.append(r\"kpc\")\n\ttable_para.append(r\"M\");\ttable_units.append(r\"M_Sun\")\n\nif chk[2]==True:\n\tindex.append(r\"THICK DISK\"); index.append(r\"---\"); index.append(r\"---\")\n\ttable_para.append(r\"a\");\ttable_units.append(r\"kpc\")\n\ttable_para.append(r\"b\");\ttable_units.append(r\"kpc\")\n\ttable_para.append(r\"M\");\ttable_units.append(r\"M_Sun\")\n\nif chk[3]==True:\n\tindex.append(r\"EXPONENTIAL DISK\"); index.append(r\"---\"); index.append(r\"---\")\n\ttable_para.append(r\"h_r\");\t\ttable_units.append(r\"kpc\")\n\ttable_para.append(r\"Sigma_0\");\ttable_units.append(r\"M_Sun/pc^2\")\n\ttable_para.append(r\"M\");\ttable_units.append(r\"M_Sun\")\t\n\t\nif chk[4]==True:\n\tindex.append(r\"NFW HALO\"); index.append(r\"---\"); index.append(r\"---\"); index.append(r\"---\"); index.append(r\"---\")\n\ttable_para.append(r\"a\");\ttable_units.append(r\"kpc\")\n\ttable_para.append(r\"M_0\");\ttable_units.append(r\"M_Sun\")\n\ttable_para.append(r\"rho_0\");\ttable_units.append(r\"M_Sun/kpc^3\")\n\ttable_para.append(r\"X\");\ttable_units.append(r\"---\")\n\ttable_para.append(r\"M_h\");\ttable_units.append(r\"M_Sun\")\n\n\t\nif chk[5]==True:\n\tindex.append(r\"BURKERT HALO\"); index.append(r\"---\"); index.append(r\"---\"); 
index.append(r\"---\")\n\t\n\ttable_para.append(r\"a\");\t\ttable_units.append(r\"kpc\")\n\ttable_para.append(r\"rho_0\");\ttable_units.append(r\"M_Sun/kpc^3\")\n\ttable_para.append(r\"X\");\ttable_units.append(r\"---\")\n\ttable_para.append(r\"M_h\");\ttable_units.append(r\"M_Sun\")\n\n\t\nfor i in range(len(para)):\n\ttable_data.append([table_para[i], table_units[i], paran95[i], paran68[i], para[i], parap68[i], parap95[i]])\n\ncolumn_name = [r\"PARAMETER\", r\"UNITS\", r\"95%(-)\", r\"68%(-)\", r\"FIT\", r\"68%(+)\", r\"95%(+)\"]\t\ntable_p = pd.DataFrame(table_data, index=index, columns=column_name)\ntable_p.to_csv(\"final_params.txt\", sep='\\t', encoding='utf-8')\nprint (table_p)\nprint (\"\\n#####################################################################\")\nprint (\"\\nDone\")\nprint (\"\\n#####################################################################\\n\")\n\n",
"<docstring token>\nfrom matplotlib.widgets import Slider, Button, RadioButtons, CheckButtons, TextBox\nimport matplotlib.pylab as plt\nimport numpy as np\nfrom galpy.potential import MiyamotoNagaiPotential, NFWPotential, RazorThinExponentialDiskPotential, BurkertPotential\nfrom galpy.potential import calcRotcurve\nfrom astropy import units\nfrom astropy import table as Table\nimport emcee\nimport corner\nimport time\nimport pandas as pd\nimport multiprocessing as mp\nfrom scipy.optimize import fsolve\nnp.warnings.filterwarnings('ignore')\ntt = Table.Table.read('rot_curve.txt', format='ascii.tab')\ninput_params = Table.Table.read('input_params.txt', format='ascii.tab')\nx_offset = 0.0\nr_0 = 1 * units.kpc\nv_0 = 220 * units.km / units.s\nr_data = tt['r'] - x_offset\nv_c_data = tt['vel']\nv_c_err_data = tt['e_vel']\nfor i in range(len(r_data)):\n if r_data[i] < 0.001:\n r_data[i] = 0.001\na1 = input_params['a (kpc)'][0]\nb1 = input_params['b (kpc)'][0]\namp1 = input_params['mass'][0]\na2 = input_params['a (kpc)'][1]\nb2 = input_params['b (kpc)'][1]\namp2 = input_params['mass'][1]\na3 = input_params['a (kpc)'][2]\nb3 = input_params['b (kpc)'][2]\namp3 = input_params['mass'][2]\na5 = input_params['a (kpc)'][4]\namp5 = input_params['mass'][4]\nh_r = input_params['a (kpc)'][3]\namp4 = input_params['mass'][3]\na6 = input_params['a (kpc)'][5]\namp6 = input_params['mass'][5]\nlista = np.linspace(0.001, 1.02 * np.max(r_data), 10 * len(r_data))\nMN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 * units.kpc,\n b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\nMN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 * units\n .kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\nMN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=a3 *\n units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\nEX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4 * (units.Msun / \n units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=0.001, normalize=\n False, ro=r_0, vo=v_0, new=True, glorder=100)\nNFW_p = NFWPotential(amp=amp5 * units.Msun, a=a5 * units.kpc, normalize=\n False, ro=r_0, vo=v_0)\nBK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=a6 *\n units.kpc, normalize=False, ro=r_0, vo=v_0)\nMN_Bulge = calcRotcurve(MN_Bulge_p, lista, phi=None) * 220\nMN_Thin_Disk = calcRotcurve(MN_Thin_Disk_p, lista, phi=None) * 220\nMN_Thick_Disk = calcRotcurve(MN_Thick_Disk_p, lista, phi=None) * 220\nEX_Disk = calcRotcurve(EX_Disk_p, lista, phi=None) * 220\nNFW = calcRotcurve(NFW_p, lista, phi=None) * 220\nBK = calcRotcurve(BK_p, lista, phi=None) * 220\nv_circ_comp = calcRotcurve([MN_Bulge_p, MN_Thin_Disk_p, MN_Thick_Disk_p,\n EX_Disk_p, NFW_p, BK_p], lista, phi=None) * 220\nfig = plt.figure(1)\nax = fig.add_axes((0.41, 0.1, 0.55, 0.85))\nCV_galaxy = ax.errorbar(r_data, v_c_data, v_c_err_data, c='k', fmt='', ls=\n 'none')\nCV_galaxy_dot = ax.scatter(r_data, v_c_data, c='k')\nMN_b_plot, = ax.plot(lista, MN_Bulge, linestyle='--', c='gray')\nMN_td_plot, = ax.plot(lista, MN_Thin_Disk, linestyle='--', c='purple')\nMN_tkd_plot, = ax.plot(lista, MN_Thick_Disk, linestyle='--', c='blue')\nEX_d_plot, = ax.plot(lista, EX_Disk, linestyle='--', c='cyan')\nNFW_plot, = ax.plot(lista, NFW, linestyle='--', c='green')\nBK_plot, = ax.plot(lista, BK, linestyle='--', c='orange')\nv_circ_comp_plot, = ax.plot(lista, v_circ_comp, c='k')\nax.set_xlabel('$R(kpc)$', fontsize=20)\nax.set_ylabel('$v_c(km/s)$', fontsize=20)\nax.tick_params(axis='both', which='both', labelsize=15)\nrax = 
plt.axes((0.07, 0.8, 0.21, 0.15))\ncheck = CheckButtons(rax, ('MN Bulge (GRAY)', 'MN Thin Disc (PURPLE)',\n 'MN Thick Disc (BLUE)', 'Exp. Disc (CYAN)', 'NFW - Halo (GREEN)',\n 'Burkert - Halo (ORANGE)'), (True, True, True, True, True, True))\nfor r in check.rectangles:\n r.set_facecolor('lavender')\n r.set_edgecolor('black')\n[ll.set_color('black') for l in check.lines for ll in l]\n[ll.set_linewidth(2) for l in check.lines for ll in l]\nMN_b_amp_ax = fig.add_axes((0.09, 0.75, 0.17, 0.03))\nMN_b_amp_s = Slider(MN_b_amp_ax, '$M$($M_\\\\odot$)', input_params['mass'][0] /\n 10 ** input_params['threshold_mass'][0], input_params['mass'][0] * 10 **\n input_params['threshold_mass'][0], valinit=input_params['mass'][0],\n color='gray', valfmt='%1.3E')\nMN_b_a_ax = fig.add_axes((0.09, 0.72, 0.17, 0.03))\nMN_b_a_s = Slider(MN_b_a_ax, '$a$ ($kpc$)', 0, 0.1 * input_params[\n 'threshold_a'][0], valinit=input_params['a (kpc)'][0], color='gray')\nMN_b_b_ax = fig.add_axes((0.09, 0.69, 0.17, 0.03))\nMN_b_b_s = Slider(MN_b_b_ax, '$b$ ($kpc$)', input_params['b (kpc)'][0] * (1 -\n 0.01 * input_params['threshold_b'][0]), input_params['b (kpc)'][0] * (1 +\n 0.01 * input_params['threshold_b'][0]), valinit=input_params['b (kpc)']\n [0], color='gray')\nMN_td_amp_ax = fig.add_axes((0.09, 0.63, 0.17, 0.03))\nMN_td_amp_s = Slider(MN_td_amp_ax, '$M$($M_\\\\odot$)', input_params['mass'][\n 1] / 10 ** input_params['threshold_mass'][1], input_params['mass'][1] *\n 10 ** input_params['threshold_mass'][1], valinit=input_params['mass'][1\n ], color='purple', valfmt='%1.3E')\nMN_td_a_ax = fig.add_axes((0.09, 0.6, 0.17, 0.03))\nMN_td_a_s = Slider(MN_td_a_ax, '$a$ ($kpc$)', input_params['a (kpc)'][1] *\n (1 - 0.01 * input_params['threshold_a'][1]), input_params['a (kpc)'][1] *\n (1 + 0.01 * input_params['threshold_a'][1]), valinit=input_params[\n 'a (kpc)'][1], color='purple')\nMN_td_b_ax = fig.add_axes((0.09, 0.57, 0.17, 0.03))\nMN_td_b_s = Slider(MN_td_b_ax, '$b$ ($kpc$)', input_params['b (kpc)'][1] / \n 10 ** input_params['threshold_b'][1], input_params['b (kpc)'][1] * 10 **\n input_params['threshold_b'][1], valinit=input_params['b (kpc)'][1],\n color='purple')\nMN_tkd_amp_ax = fig.add_axes((0.09, 0.51, 0.17, 0.03))\nMN_tkd_amp_s = Slider(MN_tkd_amp_ax, '$M$($M_\\\\odot$)', input_params['mass'\n ][2] / 10 ** input_params['threshold_mass'][2], input_params['mass'][2] *\n 10 ** input_params['threshold_mass'][2], valinit=input_params['mass'][2\n ], color='blue', valfmt='%1.3E')\nMN_tkd_a_ax = fig.add_axes((0.09, 0.48, 0.17, 0.03))\nMN_tkd_a_s = Slider(MN_tkd_a_ax, '$a$ ($kpc$)', input_params['a (kpc)'][2] *\n (1 - 0.01 * input_params['threshold_a'][2]), input_params['a (kpc)'][2] *\n (1 + 0.01 * input_params['threshold_a'][2]), valinit=input_params[\n 'a (kpc)'][2], color='blue')\nMN_tkd_b_ax = fig.add_axes((0.09, 0.45, 0.17, 0.03))\nMN_tkd_b_s = Slider(MN_tkd_b_ax, '$b$ ($kpc$)', input_params['b (kpc)'][2] /\n 10 ** input_params['threshold_b'][2], input_params['b (kpc)'][2] * 10 **\n input_params['threshold_b'][2], valinit=input_params['b (kpc)'][2],\n color='blue')\nMN_ed_amp_ax = fig.add_axes((0.09, 0.39, 0.17, 0.03))\nMN_ed_amp_s = Slider(MN_ed_amp_ax, '$\\\\Sigma_0$($M_\\\\odot/pc^2$)', \n input_params['mass'][3] / 10 ** input_params['threshold_mass'][3], \n input_params['mass'][3] * 10 ** input_params['threshold_mass'][3],\n valinit=input_params['mass'][3], color='cyan', valfmt='%1.3E')\nMN_ed_a_ax = fig.add_axes((0.09, 0.36, 0.17, 0.03))\nMN_ed_a_s = Slider(MN_ed_a_ax, '$h_r$ ($kpc$)', input_params['a (kpc)'][3] *\n (1 - 
0.01 * input_params['threshold_a'][3]), input_params['a (kpc)'][3] *\n (1 + 0.01 * input_params['threshold_a'][3]), valinit=input_params[\n 'a (kpc)'][3], color='cyan')\nNFW_amp_ax = fig.add_axes((0.09, 0.3, 0.17, 0.03))\nNFW_amp_s = Slider(NFW_amp_ax, '$M_0$($M_\\\\odot$)', input_params['mass'][4] /\n (10 * input_params['threshold_mass'][4]), input_params['mass'][4] * 10 **\n input_params['threshold_mass'][4], valinit=input_params['mass'][4],\n color='green', valfmt='%1.3E')\nNFW_a_ax = fig.add_axes((0.09, 0.27, 0.17, 0.03))\nNFW_a_s = Slider(NFW_a_ax, '$a$ ($kpc$)', input_params['a (kpc)'][4] * (1 -\n 0.01 * input_params['threshold_a'][4]), input_params['a (kpc)'][4] * (1 +\n 0.01 * input_params['threshold_a'][4]), valinit=input_params['a (kpc)']\n [4], color='green')\nBK_amp_ax = fig.add_axes((0.09, 0.21, 0.17, 0.03))\nBK_amp_s = Slider(BK_amp_ax, '$\\\\rho_0$($M_\\\\odot/kpc^3$)', input_params[\n 'mass'][5] / (10 * input_params['threshold_mass'][5]), input_params[\n 'mass'][5] * 10 ** input_params['threshold_mass'][5], valinit=\n input_params['mass'][5], color='orange', valfmt='%1.3E')\nBK_a_ax = fig.add_axes((0.09, 0.18, 0.17, 0.03))\nBK_a_s = Slider(BK_a_ax, '$a$ ($kpc$)', input_params['a (kpc)'][5] * (1 - \n 0.01 * input_params['threshold_a'][5]), input_params['a (kpc)'][5] * (1 +\n 0.01 * input_params['threshold_a'][5]), valinit=input_params['a (kpc)']\n [5], color='orange')\n\n\ndef MN_b_amp_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n amp1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a1 *\n units.kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_b_a_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n a1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=val *\n units.kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_b_b_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n b1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_amp_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n amp2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_a_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n a2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=\n val * units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_b_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n b2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_tkd_amp_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n amp3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a3 *\n units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_tkd_a_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n a3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n val * 
units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_tkd_b_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n b3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_ed_amp_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n amp4 = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=val * (units.Msun /\n units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=0.001,\n normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\ndef MN_ed_a_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n h_r = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4 * (units.\n Msun / units.pc ** 2), hr=val * units.kpc, maxiter=20, tol=\n 0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\ndef NFW_amp_s_func(val):\n if NFW_plot.get_visible() == True:\n global NFW_p, amp5, a5\n amp5 = val * 1\n NFW_p = NFWPotential(amp=val * units.Msun, a=a5 * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef NFW_a_s_func(val):\n if NFW_plot.get_visible() == True:\n global NFW_p, amp5, a5\n a5 = val * 1\n NFW_p = NFWPotential(amp=amp5 * units.Msun, a=val * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef BK_amp_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n amp6 = val * 1\n BK_p = BurkertPotential(amp=val * units.Msun / units.kpc ** 3, a=a6 *\n units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef BK_a_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n a6 = val * 1\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef update_rot_curve():\n ax.clear()\n global MN_b_plot, MN_Bulge_p, MN_Thin_Disk_p, MN_Thick_Disk_p, MN_td_plot, MN_tkd_plot, NFW_p, NFW_plot, EX_d_plot, EX_Disk_p, CV_galaxy, CV_galaxy_dot, BK_p, BK_plot\n composite_pot_array = []\n ax.set_xlabel('$R(kpc)$', fontsize=20)\n ax.set_ylabel('$v_c(km/s)$', fontsize=20)\n ax.tick_params(axis='both', which='both', labelsize=15)\n ax.set_xlim([0, 1.02 * r_data[-1]])\n ax.set_ylim([0, np.max(v_c_data) * 1.2])\n if MN_b_plot.get_visible() == True:\n MN_Bulge = calcRotcurve(MN_Bulge_p, lista, phi=None) * 220\n MN_b_plot, = ax.plot(lista, MN_Bulge, linestyle='--', c='gray')\n composite_pot_array.append(MN_Bulge_p)\n if MN_td_plot.get_visible() == True:\n MN_Thin_Disk = calcRotcurve(MN_Thin_Disk_p, lista, phi=None) * 220\n MN_td_plot, = ax.plot(lista, MN_Thin_Disk, linestyle='--', c='purple')\n composite_pot_array.append(MN_Thin_Disk_p)\n if MN_tkd_plot.get_visible() == True:\n MN_Thick_Disk = calcRotcurve(MN_Thick_Disk_p, lista, phi=None) * 220\n MN_tkd_plot, = ax.plot(lista, MN_Thick_Disk, linestyle='--', c='blue')\n composite_pot_array.append(MN_Thick_Disk_p)\n if NFW_plot.get_visible() == True:\n NFW = calcRotcurve(NFW_p, lista, phi=None) * 220\n NFW_plot, = ax.plot(lista, NFW, linestyle='--', c='green')\n composite_pot_array.append(NFW_p)\n if EX_d_plot.get_visible() == True:\n EX_Disk = calcRotcurve(EX_Disk_p, lista, phi=None) * 220\n EX_d_plot, = ax.plot(lista, EX_Disk, linestyle='--', c='cyan')\n composite_pot_array.append(EX_Disk_p)\n if BK_plot.get_visible() == True:\n BK = calcRotcurve(BK_p, lista, 
phi=None) * 220\n BK_plot, = ax.plot(lista, BK, linestyle='--', c='orange')\n composite_pot_array.append(BK_p)\n CV_galaxy = ax.errorbar(r_data, v_c_data, v_c_err_data, c='k', fmt='',\n ls='none')\n CV_galaxy_dot = ax.scatter(r_data, v_c_data, c='k')\n v_circ_comp = calcRotcurve(composite_pot_array, lista, phi=None) * 220\n v_circ_comp_plot, = ax.plot(lista, v_circ_comp, c='k')\n\n\nMN_b_amp_s.on_changed(MN_b_amp_s_func)\nMN_b_a_s.on_changed(MN_b_a_s_func)\nMN_b_b_s.on_changed(MN_b_b_s_func)\nMN_td_amp_s.on_changed(MN_td_amp_s_func)\nMN_td_a_s.on_changed(MN_td_a_s_func)\nMN_td_b_s.on_changed(MN_td_b_s_func)\nMN_tkd_amp_s.on_changed(MN_tkd_amp_s_func)\nMN_tkd_a_s.on_changed(MN_tkd_a_s_func)\nMN_tkd_b_s.on_changed(MN_tkd_b_s_func)\nNFW_amp_s.on_changed(NFW_amp_s_func)\nNFW_a_s.on_changed(NFW_a_s_func)\nBK_amp_s.on_changed(BK_amp_s_func)\nBK_a_s.on_changed(BK_a_s_func)\nMN_ed_amp_s.on_changed(MN_ed_amp_s_func)\nMN_ed_a_s.on_changed(MN_ed_a_s_func)\n\n\ndef reset(event):\n MN_b_amp_s.reset()\n MN_b_a_s.reset()\n MN_b_b_s.reset()\n MN_td_amp_s.reset()\n MN_td_a_s.reset()\n MN_td_b_s.reset()\n MN_tkd_amp_s.reset()\n MN_tkd_a_s.reset()\n MN_tkd_b_s.reset()\n MN_ed_amp_s.reset()\n MN_ed_a_s.reset()\n NFW_amp_s.reset()\n NFW_a_s.reset()\n BK_amp_s.reset()\n BK_a_s.reset()\n\n\naxcolor = 'lavender'\nresetax = fig.add_axes((0.07, 0.08, 0.08, 0.05))\nbutton_reset = Button(resetax, 'Reset', color=axcolor)\nbutton_reset.on_clicked(reset)\n\n\ndef check_on_clicked(label):\n if label == 'MN Bulge (GRAY)':\n MN_b_plot.set_visible(not MN_b_plot.get_visible())\n update_rot_curve()\n elif label == 'MN Thin Disc (PURPLE)':\n MN_td_plot.set_visible(not MN_td_plot.get_visible())\n update_rot_curve()\n elif label == 'MN Thick Disc (BLUE)':\n MN_tkd_plot.set_visible(not MN_tkd_plot.get_visible())\n update_rot_curve()\n elif label == 'Exp. Disc (CYAN)':\n EX_d_plot.set_visible(not EX_d_plot.get_visible())\n update_rot_curve()\n elif label == 'NFW - Halo (GREEN)':\n NFW_plot.set_visible(not NFW_plot.get_visible())\n update_rot_curve()\n elif label == 'Burkert - Halo (ORANGE)':\n BK_plot.set_visible(not BK_plot.get_visible())\n update_rot_curve()\n plt.draw()\n\n\nax.set_xlabel('$R(kpc)$', fontsize=20)\nax.set_ylabel('$v_c(km/s)$', fontsize=20)\nax.tick_params(axis='both', which='both', labelsize=15)\nax.set_xlim([0, np.max(lista)])\nax.set_ylim([0, np.max(v_c_data) * 1.2])\ncheck.on_clicked(check_on_clicked)\n<docstring token>\naxcolor = 'lavender'\nresetax = fig.add_axes((0.2, 0.08, 0.08, 0.05))\nbutton_start = Button(resetax, 'Start', color=axcolor)\n\n\ndef start(event):\n plt.close(1)\n\n\nbutton_start.on_clicked(start)\nplt.show()\nchk = []\nif MN_b_plot.get_visible() == True:\n chk.append(True)\nelse:\n chk.append(False)\nif MN_td_plot.get_visible() == True:\n chk.append(True)\nelse:\n chk.append(False)\nif MN_tkd_plot.get_visible() == True:\n chk.append(True)\nelse:\n chk.append(False)\nif EX_d_plot.get_visible() == True:\n chk.append(True)\nelse:\n chk.append(False)\nif NFW_plot.get_visible() == True:\n chk.append(True)\nelse:\n chk.append(False)\nif BK_plot.get_visible() == True:\n chk.append(True)\nelse:\n chk.append(False)\ncompnts = ['BULGE', 'THIN DISC', 'THICK DISC', 'EXP. 
DISC', 'DARK HALO',\n 'BURKERT HALO']\nmasses = [amp1, amp2, amp3, amp4, amp5, amp6]\naa = [a1, a2, a3, h_r, a5, a6]\nbb = [b1, b2, b3, 'None', 'None', 'None']\ninit_parameters = Table.Table([compnts, masses, aa, bb, chk], names=(\n 'component', 'mass', 'a (kpc)', 'b (kpc)', 'checked'))\ninit_parameters.write('init_guess_params.txt', format='ascii.tab',\n overwrite=True)\nprint(\n \"\"\"\n#####################################################################\"\"\"\n )\nprint('###################### GalRotpy ######################')\nprint(\n '#####################################################################\\n\\n'\n )\n\n\ndef model(parameters, R):\n global chk, para_labels, aa\n para = {}\n for i in range(len(para_labels)):\n para[para_labels[i]] = parameters[i]\n r_0 = 1 * units.kpc\n v_0 = 220 * units.km / units.s\n check_pot = []\n if chk[0] == True:\n if aa[0] == 0.0:\n a1 = 0.0\n amp1 = para['amp1']\n b1 = para['b1']\n else:\n amp1 = para['amp1']\n a1 = para['a1']\n b1 = para['b1']\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Bulge_p)\n if chk[1] == True:\n amp2 = para['amp2']\n a2 = para['a2']\n b2 = para['b2']\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Thin_Disk_p)\n if chk[2] == True:\n amp3 = para['amp3']\n a3 = para['a3']\n b3 = para['b3']\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Thick_Disk_p)\n if chk[3] == True:\n amp4 = para['amp4']\n h_r = para['h_r']\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4 * (units.\n Msun / units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=\n 0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n check_pot.append(EX_Disk_p)\n if chk[4] == True:\n amp5 = para['amp5']\n a5 = para['a5']\n NFW_p = NFWPotential(amp=amp5 * units.Msun, a=a5 * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n check_pot.append(NFW_p)\n if chk[5] == True:\n amp6 = para['amp6']\n a6 = para['a6']\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n a6 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(BK_p)\n vc_total = calcRotcurve(check_pot, R, phi=None) * 220\n return vc_total\n\n\ndef lnprior(parameters):\n booL = []\n for i in parameters:\n if i > 0.0:\n booL.append(True)\n else:\n booL.append(False)\n if False in booL:\n return -np.inf\n else:\n return 0.0\n\n\ndef lnlike(parameters, x, y, yerr):\n Model = model(parameters, x)\n return -0.5 * np.sum(((y - Model) / yerr) ** 2)\n\n\ndef lnprob(parameters, x, y, yerr):\n lp = lnprior(parameters)\n Model = model(parameters, x)\n if not np.isfinite(lp) or True in np.isnan(Model):\n return -np.inf\n else:\n return lp + lnlike(parameters, x, y, yerr)\n\n\n<docstring token>\npara_labels = []\nlabels = []\nlabels_log = []\npara_in = []\nif chk[0] == True:\n if aa[0] == 0.0:\n para_labels.append('b1')\n para_in.append(bb[0])\n labels.append('$b_B$')\n labels_log.append('$\\\\log(b_B)$')\n para_labels.append('amp1')\n para_in.append(masses[0])\n labels.append('$M_B$')\n labels_log.append('$\\\\log(M_B)$')\n else:\n para_labels.append('a1')\n para_in.append(aa[0])\n labels.append('$a_B$')\n labels_log.append('$\\\\log(a_b)$')\n para_labels.append('b1')\n para_in.append(bb[0])\n labels.append('$b_B$')\n labels_log.append('$\\\\log(b_b)$')\n 
para_labels.append('amp1')\n para_in.append(masses[0])\n labels.append('$M_B$')\n labels_log.append('$\\\\log(M_b)$')\nif chk[1] == True:\n para_labels.append('a2')\n para_in.append(aa[1])\n labels.append('$a_{TD}$')\n labels_log.append('$\\\\log(a_{TD})$')\n para_labels.append('b2')\n para_in.append(bb[1])\n labels.append('$b_{TD}$')\n labels_log.append('$\\\\log(b_{TD})$')\n para_labels.append('amp2')\n para_in.append(masses[1])\n labels.append('$M_{TD}$')\n labels_log.append('$\\\\log(M_{TD})$')\nif chk[2] == True:\n para_labels.append('a3')\n para_in.append(aa[2])\n labels.append('$a_{TkD}$')\n labels_log.append('$\\\\log(a_{TkD})$')\n para_labels.append('b3')\n para_in.append(bb[2])\n labels.append('$b_{TkD}$')\n labels_log.append('$\\\\log(b_{TkD})$')\n para_labels.append('amp3')\n para_in.append(masses[2])\n labels.append('$M_{TkD}$')\n labels_log.append('$\\\\log(M_{TkD})$')\nif chk[3] == True:\n para_labels.append('h_r')\n para_in.append(aa[3])\n labels.append('$h_{r}$')\n labels_log.append('$\\\\log(h_{r})$')\n para_labels.append('amp4')\n para_in.append(masses[3])\n labels.append('$\\\\Sigma_{0}$')\n labels_log.append('$\\\\log(\\\\Sigma_{0})$')\nif chk[4] == True:\n para_labels.append('a5')\n para_in.append(aa[4])\n labels.append('$a_{NFW}$')\n labels_log.append('$\\\\log(a_{NFW})$')\n para_labels.append('amp5')\n para_in.append(masses[4])\n labels.append('$M_{0}$')\n labels_log.append('$\\\\log(M_{0})$')\nif chk[5] == True:\n para_labels.append('a6')\n para_in.append(aa[5])\n labels.append('$a_{Bk}$')\n labels_log.append('$\\\\log(a_{Bk})$')\n para_labels.append('amp6')\n para_in.append(masses[5])\n labels.append('$\\\\rho_{0}$')\n labels_log.append('$\\\\log(\\\\rho_{0})$')\nstart = np.array(para_in)\nndim = len(start)\nprint('Dimension: ', ndim, '\\n')\nif chk[4] == True or chk[5] == True:\n Delta_c = float(input(\n 'Enter the cosmological overdensity you want to use:\\n'))\nnwalkers = int(input(\"\"\"\nEnter the number of walkers you want to use:\n\"\"\"))\nsteps = int(input(\n \"\"\"\nEnter the number of steps you want the walkers to take:\n\"\"\"))\npos_step = 1e-08\npos_in = [abs(start + pos_step * start * np.random.randn(ndim) + 1e-09 * np\n .random.randn(ndim)) for i in range(nwalkers)]\nsampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(r_data,\n v_c_data, v_c_err_data), threads=ndim * mp.cpu_count())\nprint(\n \"\"\"\n#####################################################################\n\"\"\"\n )\nRound = int(input('Enter the number of times you want GalRotpy to run:\\n'))\nif Round <= 0:\n print('\\nStart over...')\n exit()\nprint('\\nRunning...\\n')\ntime0 = time.time()\nif Round == 1:\n p0, lp, _ = sampler.run_mcmc(pos_in, steps)\n print('It took ', (time.time() - time0) / 60, 'minutes\\n')\nif Round > 1:\n for j in range(Round - 1):\n ti = time.time()\n PARA = []\n p0, lp, _ = sampler.run_mcmc(pos_in, steps)\n SAMPLES = sampler.chain[:, int(0.5 * steps):, :].reshape((-1, ndim))\n for i in range(ndim):\n mcmc = np.percentile(SAMPLES[:, i], [50.0 - 0.5 * 68, 50.0, \n 50.0 + 0.5 * 68])\n PARA.append(mcmc[1])\n p = np.array(PARA)\n pos_in = [abs(p + pos_step * p * np.random.randn(ndim) + 1e-08 * np\n .random.randn(ndim)) for i in range(nwalkers)]\n sampler.reset()\n print('Run ' + str(j + 1) + ' done')\n print('Time: ', (time.time() - ti) / 60, 'minutes\\n')\n ti = time.time()\n if Round > 1:\n steps = 3 * steps\n p0, lp, _ = sampler.run_mcmc(pos_in, steps)\n print('Run ' + str(Round) + ' done')\n print('Time: ', (time.time() - ti) / 60, 'minutes\\n')\n 
print('It took ', (time.time() - time0) / 60, 'minutes\\n')\nprint('#####################################################################\\n'\n )\nfig = plt.figure(2)\nax = fig.add_axes((0.15, 0.3, 0.75, 0.6))\nchain_steps = [i for i in range(len(sampler.chain[:, :, 0].T))]\nchain_W = []\nfor i in range(nwalkers):\n chain_value = sampler.chain[:, :, 0].T[:][:, i]\n ax.plot(chain_steps, chain_value, '-', color='k', alpha=0.3)\nax.plot(chain_steps, len(chain_steps) * [start[0]], '-', color='r', lw=1)\nax.set_xlim(0, len(chain_steps) - 1)\nax.set_xlabel('$Steps$', fontsize=10)\nax.set_ylabel(labels[0], fontsize=15)\n\n\nclass Index(object):\n ind = 0\n\n def next(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind += 1\n if self.ind >= ndim:\n self.ind = 0\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n def prev(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind -= 1\n if self.ind == -1:\n self.ind = ndim - 1\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n\naxcolor = 'lavender'\ncallback = Index()\naxprev = plt.axes([0.3, 0.05, 0.1, 0.075])\naxnext = plt.axes([0.6, 0.05, 0.1, 0.075])\nbnext = Button(axnext, 'Next', color=axcolor)\nbnext.on_clicked(callback.next)\nbprev = Button(axprev, 'Previous', color=axcolor)\nbprev.on_clicked(callback.prev)\n\n\ndef burn(event):\n plt.close()\n\n\nresetax = fig.add_axes((0.45, 0.05, 0.1, 0.075))\nbutton_reset = Button(resetax, 'Burn-in', color=axcolor)\nbutton_reset.on_clicked(burn)\nplt.show()\nburn_in = int(input('Enter the number of steps you want to burn-in:\\n'))\nprint(\n \"\"\"\n#####################################################################\n\"\"\"\n )\nprint('Plotting...')\nif burn_in == 0.0:\n samples = sampler.chain[:, :, :].reshape((-1, ndim))\nelse:\n samples = sampler.chain[:, burn_in:, :].reshape((-1, ndim))\nsamples.shape\npercentage = 0.68\nfig = corner.corner(np.log10(samples), labels=labels_log, label_kwargs={\n 'fontsize': 21.5}, bins=50, use_math_text=True, color='gray',\n max_n_ticks=3, smooth=1.0, levels=[1 - np.exp(-0.5), 1 - np.exp(-2.0)],\n quantiles=[0.5 - 0.5 * percentage, 0.5, 0.5 + 0.5 * percentage],\n fill_contours=True, plot_datapoints=True)\naxes = np.array(fig.axes).reshape((ndim, ndim))\nfor yi in range(ndim):\n for xi in range(yi + 1):\n ax = axes[yi, xi]\n ax.tick_params(axis='both', which='major', labelsize=14.5, pad=3,\n direction='in')\nfig.savefig('Conf_Regions.pdf', bbox_inches='tight', pad_inches=0.15)\n<docstring token>\nH_0 = 2.1972483582604943e-18\nG = 4.517103050001136e-39\nrho_c = 127.5791469578729\n\n\ndef eq_nfw(x, rho_0, rho_c):\n global Delta_c\n return np.log(1 + x) - x / (1 + x) - Delta_c * rho_c / (3.0 * rho_0\n ) * x ** 3\n\n\ndef mass_nfw(x, rho_0, a):\n return 4.0 * np.pi * rho_0 * a ** 3 * (np.log(1 + x) - x / (1 + x))\n\n\ndef eq_b(x, 
rho_0, rho_c):\n global Delta_c\n return 2.0 * np.log(1 + x) + np.log(1 + x ** 2) - 2.0 * np.arctan(x\n ) - 4.0 * Delta_c * rho_c / (3.0 * rho_0) * x ** 3\n\n\ndef mass_b(x, rho_0, a):\n return np.pi * rho_0 * a ** 3 * (2.0 * np.log(1 + x) + np.log(1 + x ** \n 2) - 2.0 * np.arctan(x))\n\n\ntable_data = []\nindex = []\npara = []\nparap68 = []\nparan68 = []\nparap95 = []\nparan95 = []\ntable_para = []\ntable_units = []\nfinal_para_labels = []\nfit_para = []\nfor i in range(ndim):\n mcmc = np.percentile(samples[:, i], [50.0 - 0.5 * 95, 50.0 - 0.5 * 68, \n 50.0, 50.0 + 0.5 * 68, 50.0 + 0.5 * 95])\n para.append(mcmc[2])\n fit_para.append(mcmc[2])\n parap68.append(mcmc[3] - mcmc[2])\n paran68.append(mcmc[2] - mcmc[1])\n parap95.append(mcmc[4] - mcmc[2])\n paran95.append(mcmc[2] - mcmc[0])\n final_para_labels.append(para_labels[i])\n if para_labels[i] == 'h_r':\n ed_h_r = np.array(samples[:, i])\n if para_labels[i] == 'amp4':\n ed_sigma0 = np.array(samples[:, i])\n M_disc = 2.0 * np.pi * ed_sigma0 * (1000 * ed_h_r) ** 2\n mcmc = np.percentile(M_disc, [50.0 - 0.5 * 95, 50.0 - 0.5 * 68, \n 50.0, 50.0 + 0.5 * 68, 50.0 + 0.5 * 95])\n para.append(mcmc[2])\n parap68.append(mcmc[3] - mcmc[2])\n paran68.append(mcmc[2] - mcmc[1])\n parap95.append(mcmc[4] - mcmc[2])\n paran95.append(mcmc[2] - mcmc[0])\n final_para_labels.append('M_star')\n if para_labels[i] == 'a5':\n nfw_a = np.array(samples[:, i])\n if para_labels[i] == 'amp5':\n nfw_M0 = np.array(samples[:, i])\n rho_0 = nfw_M0 / (4.0 * np.pi * nfw_a ** 3)\n mcmc = np.percentile(rho_0, [50.0 - 0.5 * 95, 50.0 - 0.5 * 68, 50.0,\n 50.0 + 0.5 * 68, 50.0 + 0.5 * 95])\n para.append(mcmc[2])\n parap68.append(mcmc[3] - mcmc[2])\n paran68.append(mcmc[2] - mcmc[1])\n parap95.append(mcmc[4] - mcmc[2])\n paran95.append(mcmc[2] - mcmc[0])\n final_para_labels.append('rho_0_NFW')\n X_nfw = []\n for density in rho_0:\n X_nfw.append(fsolve(eq_nfw, 100000.0, args=(density, rho_c))[0])\n mcmc = np.percentile(np.array(X_nfw), [50.0 - 0.5 * 95, 50.0 - 0.5 *\n 68, 50.0, 50.0 + 0.5 * 68, 50.0 + 0.5 * 95])\n para.append(mcmc[2])\n parap68.append(mcmc[3] - mcmc[2])\n paran68.append(mcmc[2] - mcmc[1])\n parap95.append(mcmc[4] - mcmc[2])\n paran95.append(mcmc[2] - mcmc[0])\n final_para_labels.append('X_NFW')\n M_h_nfw = mass_nfw(np.array(X_nfw), rho_0, nfw_a)\n mcmc = np.percentile(M_h_nfw, [50.0 - 0.5 * 95, 50.0 - 0.5 * 68, \n 50.0, 50.0 + 0.5 * 68, 50.0 + 0.5 * 95])\n para.append(mcmc[2])\n parap68.append(mcmc[3] - mcmc[2])\n paran68.append(mcmc[2] - mcmc[1])\n parap95.append(mcmc[4] - mcmc[2])\n paran95.append(mcmc[2] - mcmc[0])\n final_para_labels.append('M_h_NFW')\n if para_labels[i] == 'a6':\n b_a = np.array(samples[:, i])\n if para_labels[i] == 'amp6':\n X_b = []\n for density in samples[:, i]:\n X_b.append(fsolve(eq_b, 100000.0, args=(density, rho_c))[0])\n mcmc = np.percentile(np.array(X_b), [50.0 - 0.5 * 95, 50.0 - 0.5 * \n 68, 50.0, 50.0 + 0.5 * 68, 50.0 + 0.5 * 95])\n para.append(mcmc[2])\n parap68.append(mcmc[3] - mcmc[2])\n paran68.append(mcmc[2] - mcmc[1])\n parap95.append(mcmc[4] - mcmc[2])\n paran95.append(mcmc[2] - mcmc[0])\n final_para_labels.append('X_Bk')\n M_h_b = mass_b(np.array(X_b), samples[:, i], b_a)\n mcmc = np.percentile(M_h_b, [50.0 - 0.5 * 95, 50.0 - 0.5 * 68, 50.0,\n 50.0 + 0.5 * 68, 50.0 + 0.5 * 95])\n para.append(mcmc[2])\n parap68.append(mcmc[3] - mcmc[2])\n paran68.append(mcmc[2] - mcmc[1])\n parap95.append(mcmc[4] - mcmc[2])\n paran95.append(mcmc[2] - mcmc[0])\n final_para_labels.append('M_h_Bk')\nr = np.linspace(0.001, 1.02 * np.amax(r_data), 
10000)\ncurva = model(fit_para, r)\nY_guess = model(start, r)\nnp.warnings.filterwarnings('ignore')\nplt.figure(figsize=(6, 6))\nbest_para = {}\nfor i in range(len(final_para_labels)):\n best_para[final_para_labels[i]] = para[i]\nif chk[0] == True:\n if aa[0] == 0.0:\n a1 = 0.0\n amp1 = best_para['amp1']\n b1 = best_para['b1']\n else:\n amp1 = best_para['amp1']\n a1 = best_para['a1']\n b1 = best_para['b1']\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 * units\n .kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n vc_b = calcRotcurve(MN_Bulge_p, r, phi=None) * 220\n plt.plot(r, vc_b, '--', color='gray', label='Bulge')\nif chk[1] == True:\n amp2 = best_para['amp2']\n a2 = best_para['a2']\n b2 = best_para['b2']\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n vc_td = calcRotcurve(MN_Thin_Disk_p, r, phi=None) * 220\n plt.plot(r, vc_td, '--', color='purple', label='Thin Disk')\nif chk[2] == True:\n amp3 = best_para['amp3']\n a3 = best_para['a3']\n b3 = best_para['b3']\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=a3 *\n units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n vc_tkd = calcRotcurve(MN_Thick_Disk_p, r, phi=None) * 220\n plt.plot(r, vc_tkd, '--', color='blue', label='Thick Disk')\nif chk[3] == True:\n amp4 = best_para['amp4']\n h_r = best_para['h_r']\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4 * (units.Msun / \n units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=0.001,\n normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n vc_exp = calcRotcurve(EX_Disk_p, r, phi=None) * 220\n plt.plot(r, vc_exp, '--', color='cyan', label='Exp. Disk')\nif chk[4] == True:\n amp5 = best_para['amp5']\n a5 = best_para['a5']\n NFW_p = NFWPotential(amp=amp5 * units.Msun, a=a5 * units.kpc, normalize\n =False, ro=r_0, vo=v_0)\n vc_nfw = calcRotcurve(NFW_p, r, phi=None) * 220\n plt.plot(r, vc_nfw, '--', color='green', label='NFW - Halo')\nif chk[5] == True:\n amp6 = best_para['amp6']\n a6 = best_para['a6']\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=a6 *\n units.kpc, normalize=False, ro=r_0, vo=v_0)\n vc_bk = calcRotcurve(BK_p, r, phi=None) * 220\n plt.plot(r, vc_bk, '--', color='orange', label='Burkert - Halo')\nplt.errorbar(r_data, v_c_data, yerr=v_c_err_data, fmt='ko', ecolor='black',\n ms=4, label=None)\nplt.plot(r, curva, '-', color='red', lw=1.5, label='Best Fit')\nplt.xlabel('$R(kpc)$', fontsize=20)\nplt.ylabel('$V_c(km/s)$', fontsize=20)\nplt.tick_params(axis='both', which='major', labelsize=15)\nplt.xlim(0.0, 1.02 * np.amax(r_data))\nplt.ylim(0.0, 1.1 * np.amax(v_c_data))\nplt.tight_layout()\nplt.legend(loc='lower right', fontsize=15)\nplt.savefig('GalRotpy_fit.pdf')\nprint(\n \"\"\"\n#####################################################################\n\"\"\"\n )\nif chk[0] == True:\n if aa[0] == 0.0:\n index.append('BULGE')\n index.append('---')\n table_para.append('b')\n table_units.append('kpc')\n table_para.append('M')\n table_units.append('M_Sun')\n else:\n index.append('BULGE')\n index.append('---')\n index.append('---')\n table_para.append('a')\n table_units.append('kpc')\n table_para.append('b')\n table_units.append('kpc')\n table_para.append('M')\n table_units.append('M_Sun')\nif chk[1] == True:\n index.append('THIN DISK')\n index.append('---')\n index.append('---')\n table_para.append('a')\n table_units.append('kpc')\n table_para.append('b')\n table_units.append('kpc')\n table_para.append('M')\n 
table_units.append('M_Sun')\nif chk[2] == True:\n index.append('THICK DISK')\n index.append('---')\n index.append('---')\n table_para.append('a')\n table_units.append('kpc')\n table_para.append('b')\n table_units.append('kpc')\n table_para.append('M')\n table_units.append('M_Sun')\nif chk[3] == True:\n index.append('EXPONENTIAL DISK')\n index.append('---')\n index.append('---')\n table_para.append('h_r')\n table_units.append('kpc')\n table_para.append('Sigma_0')\n table_units.append('M_Sun/pc^2')\n table_para.append('M')\n table_units.append('M_Sun')\nif chk[4] == True:\n index.append('NFW HALO')\n index.append('---')\n index.append('---')\n index.append('---')\n index.append('---')\n table_para.append('a')\n table_units.append('kpc')\n table_para.append('M_0')\n table_units.append('M_Sun')\n table_para.append('rho_0')\n table_units.append('M_Sun/kpc^3')\n table_para.append('X')\n table_units.append('---')\n table_para.append('M_h')\n table_units.append('M_Sun')\nif chk[5] == True:\n index.append('BURKERT HALO')\n index.append('---')\n index.append('---')\n index.append('---')\n table_para.append('a')\n table_units.append('kpc')\n table_para.append('rho_0')\n table_units.append('M_Sun/kpc^3')\n table_para.append('X')\n table_units.append('---')\n table_para.append('M_h')\n table_units.append('M_Sun')\nfor i in range(len(para)):\n table_data.append([table_para[i], table_units[i], paran95[i], paran68[i\n ], para[i], parap68[i], parap95[i]])\ncolumn_name = ['PARAMETER', 'UNITS', '95%(-)', '68%(-)', 'FIT', '68%(+)',\n '95%(+)']\ntable_p = pd.DataFrame(table_data, index=index, columns=column_name)\ntable_p.to_csv('final_params.txt', sep='\\t', encoding='utf-8')\nprint(table_p)\nprint(\n \"\"\"\n#####################################################################\"\"\"\n )\nprint('\\nDone')\nprint(\n \"\"\"\n#####################################################################\n\"\"\"\n )\n",
"<docstring token>\n<import token>\nnp.warnings.filterwarnings('ignore')\ntt = Table.Table.read('rot_curve.txt', format='ascii.tab')\ninput_params = Table.Table.read('input_params.txt', format='ascii.tab')\nx_offset = 0.0\nr_0 = 1 * units.kpc\nv_0 = 220 * units.km / units.s\nr_data = tt['r'] - x_offset\nv_c_data = tt['vel']\nv_c_err_data = tt['e_vel']\nfor i in range(len(r_data)):\n if r_data[i] < 0.001:\n r_data[i] = 0.001\na1 = input_params['a (kpc)'][0]\nb1 = input_params['b (kpc)'][0]\namp1 = input_params['mass'][0]\na2 = input_params['a (kpc)'][1]\nb2 = input_params['b (kpc)'][1]\namp2 = input_params['mass'][1]\na3 = input_params['a (kpc)'][2]\nb3 = input_params['b (kpc)'][2]\namp3 = input_params['mass'][2]\na5 = input_params['a (kpc)'][4]\namp5 = input_params['mass'][4]\nh_r = input_params['a (kpc)'][3]\namp4 = input_params['mass'][3]\na6 = input_params['a (kpc)'][5]\namp6 = input_params['mass'][5]\nlista = np.linspace(0.001, 1.02 * np.max(r_data), 10 * len(r_data))\nMN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 * units.kpc,\n b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\nMN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 * units\n .kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\nMN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=a3 *\n units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\nEX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4 * (units.Msun / \n units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=0.001, normalize=\n False, ro=r_0, vo=v_0, new=True, glorder=100)\nNFW_p = NFWPotential(amp=amp5 * units.Msun, a=a5 * units.kpc, normalize=\n False, ro=r_0, vo=v_0)\nBK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=a6 *\n units.kpc, normalize=False, ro=r_0, vo=v_0)\nMN_Bulge = calcRotcurve(MN_Bulge_p, lista, phi=None) * 220\nMN_Thin_Disk = calcRotcurve(MN_Thin_Disk_p, lista, phi=None) * 220\nMN_Thick_Disk = calcRotcurve(MN_Thick_Disk_p, lista, phi=None) * 220\nEX_Disk = calcRotcurve(EX_Disk_p, lista, phi=None) * 220\nNFW = calcRotcurve(NFW_p, lista, phi=None) * 220\nBK = calcRotcurve(BK_p, lista, phi=None) * 220\nv_circ_comp = calcRotcurve([MN_Bulge_p, MN_Thin_Disk_p, MN_Thick_Disk_p,\n EX_Disk_p, NFW_p, BK_p], lista, phi=None) * 220\nfig = plt.figure(1)\nax = fig.add_axes((0.41, 0.1, 0.55, 0.85))\nCV_galaxy = ax.errorbar(r_data, v_c_data, v_c_err_data, c='k', fmt='', ls=\n 'none')\nCV_galaxy_dot = ax.scatter(r_data, v_c_data, c='k')\nMN_b_plot, = ax.plot(lista, MN_Bulge, linestyle='--', c='gray')\nMN_td_plot, = ax.plot(lista, MN_Thin_Disk, linestyle='--', c='purple')\nMN_tkd_plot, = ax.plot(lista, MN_Thick_Disk, linestyle='--', c='blue')\nEX_d_plot, = ax.plot(lista, EX_Disk, linestyle='--', c='cyan')\nNFW_plot, = ax.plot(lista, NFW, linestyle='--', c='green')\nBK_plot, = ax.plot(lista, BK, linestyle='--', c='orange')\nv_circ_comp_plot, = ax.plot(lista, v_circ_comp, c='k')\nax.set_xlabel('$R(kpc)$', fontsize=20)\nax.set_ylabel('$v_c(km/s)$', fontsize=20)\nax.tick_params(axis='both', which='both', labelsize=15)\nrax = plt.axes((0.07, 0.8, 0.21, 0.15))\ncheck = CheckButtons(rax, ('MN Bulge (GRAY)', 'MN Thin Disc (PURPLE)',\n 'MN Thick Disc (BLUE)', 'Exp. 
Disc (CYAN)', 'NFW - Halo (GREEN)',\n 'Burkert - Halo (ORANGE)'), (True, True, True, True, True, True))\nfor r in check.rectangles:\n r.set_facecolor('lavender')\n r.set_edgecolor('black')\n[ll.set_color('black') for l in check.lines for ll in l]\n[ll.set_linewidth(2) for l in check.lines for ll in l]\nMN_b_amp_ax = fig.add_axes((0.09, 0.75, 0.17, 0.03))\nMN_b_amp_s = Slider(MN_b_amp_ax, '$M$($M_\\\\odot$)', input_params['mass'][0] /\n 10 ** input_params['threshold_mass'][0], input_params['mass'][0] * 10 **\n input_params['threshold_mass'][0], valinit=input_params['mass'][0],\n color='gray', valfmt='%1.3E')\nMN_b_a_ax = fig.add_axes((0.09, 0.72, 0.17, 0.03))\nMN_b_a_s = Slider(MN_b_a_ax, '$a$ ($kpc$)', 0, 0.1 * input_params[\n 'threshold_a'][0], valinit=input_params['a (kpc)'][0], color='gray')\nMN_b_b_ax = fig.add_axes((0.09, 0.69, 0.17, 0.03))\nMN_b_b_s = Slider(MN_b_b_ax, '$b$ ($kpc$)', input_params['b (kpc)'][0] * (1 -\n 0.01 * input_params['threshold_b'][0]), input_params['b (kpc)'][0] * (1 +\n 0.01 * input_params['threshold_b'][0]), valinit=input_params['b (kpc)']\n [0], color='gray')\nMN_td_amp_ax = fig.add_axes((0.09, 0.63, 0.17, 0.03))\nMN_td_amp_s = Slider(MN_td_amp_ax, '$M$($M_\\\\odot$)', input_params['mass'][\n 1] / 10 ** input_params['threshold_mass'][1], input_params['mass'][1] *\n 10 ** input_params['threshold_mass'][1], valinit=input_params['mass'][1\n ], color='purple', valfmt='%1.3E')\nMN_td_a_ax = fig.add_axes((0.09, 0.6, 0.17, 0.03))\nMN_td_a_s = Slider(MN_td_a_ax, '$a$ ($kpc$)', input_params['a (kpc)'][1] *\n (1 - 0.01 * input_params['threshold_a'][1]), input_params['a (kpc)'][1] *\n (1 + 0.01 * input_params['threshold_a'][1]), valinit=input_params[\n 'a (kpc)'][1], color='purple')\nMN_td_b_ax = fig.add_axes((0.09, 0.57, 0.17, 0.03))\nMN_td_b_s = Slider(MN_td_b_ax, '$b$ ($kpc$)', input_params['b (kpc)'][1] / \n 10 ** input_params['threshold_b'][1], input_params['b (kpc)'][1] * 10 **\n input_params['threshold_b'][1], valinit=input_params['b (kpc)'][1],\n color='purple')\nMN_tkd_amp_ax = fig.add_axes((0.09, 0.51, 0.17, 0.03))\nMN_tkd_amp_s = Slider(MN_tkd_amp_ax, '$M$($M_\\\\odot$)', input_params['mass'\n ][2] / 10 ** input_params['threshold_mass'][2], input_params['mass'][2] *\n 10 ** input_params['threshold_mass'][2], valinit=input_params['mass'][2\n ], color='blue', valfmt='%1.3E')\nMN_tkd_a_ax = fig.add_axes((0.09, 0.48, 0.17, 0.03))\nMN_tkd_a_s = Slider(MN_tkd_a_ax, '$a$ ($kpc$)', input_params['a (kpc)'][2] *\n (1 - 0.01 * input_params['threshold_a'][2]), input_params['a (kpc)'][2] *\n (1 + 0.01 * input_params['threshold_a'][2]), valinit=input_params[\n 'a (kpc)'][2], color='blue')\nMN_tkd_b_ax = fig.add_axes((0.09, 0.45, 0.17, 0.03))\nMN_tkd_b_s = Slider(MN_tkd_b_ax, '$b$ ($kpc$)', input_params['b (kpc)'][2] /\n 10 ** input_params['threshold_b'][2], input_params['b (kpc)'][2] * 10 **\n input_params['threshold_b'][2], valinit=input_params['b (kpc)'][2],\n color='blue')\nMN_ed_amp_ax = fig.add_axes((0.09, 0.39, 0.17, 0.03))\nMN_ed_amp_s = Slider(MN_ed_amp_ax, '$\\\\Sigma_0$($M_\\\\odot/pc^2$)', \n input_params['mass'][3] / 10 ** input_params['threshold_mass'][3], \n input_params['mass'][3] * 10 ** input_params['threshold_mass'][3],\n valinit=input_params['mass'][3], color='cyan', valfmt='%1.3E')\nMN_ed_a_ax = fig.add_axes((0.09, 0.36, 0.17, 0.03))\nMN_ed_a_s = Slider(MN_ed_a_ax, '$h_r$ ($kpc$)', input_params['a (kpc)'][3] *\n (1 - 0.01 * input_params['threshold_a'][3]), input_params['a (kpc)'][3] *\n (1 + 0.01 * input_params['threshold_a'][3]), 
valinit=input_params[\n 'a (kpc)'][3], color='cyan')\nNFW_amp_ax = fig.add_axes((0.09, 0.3, 0.17, 0.03))\nNFW_amp_s = Slider(NFW_amp_ax, '$M_0$($M_\\\\odot$)', input_params['mass'][4] /\n (10 * input_params['threshold_mass'][4]), input_params['mass'][4] * 10 **\n input_params['threshold_mass'][4], valinit=input_params['mass'][4],\n color='green', valfmt='%1.3E')\nNFW_a_ax = fig.add_axes((0.09, 0.27, 0.17, 0.03))\nNFW_a_s = Slider(NFW_a_ax, '$a$ ($kpc$)', input_params['a (kpc)'][4] * (1 -\n 0.01 * input_params['threshold_a'][4]), input_params['a (kpc)'][4] * (1 +\n 0.01 * input_params['threshold_a'][4]), valinit=input_params['a (kpc)']\n [4], color='green')\nBK_amp_ax = fig.add_axes((0.09, 0.21, 0.17, 0.03))\nBK_amp_s = Slider(BK_amp_ax, '$\\\\rho_0$($M_\\\\odot/kpc^3$)', input_params[\n 'mass'][5] / (10 * input_params['threshold_mass'][5]), input_params[\n 'mass'][5] * 10 ** input_params['threshold_mass'][5], valinit=\n input_params['mass'][5], color='orange', valfmt='%1.3E')\nBK_a_ax = fig.add_axes((0.09, 0.18, 0.17, 0.03))\nBK_a_s = Slider(BK_a_ax, '$a$ ($kpc$)', input_params['a (kpc)'][5] * (1 - \n 0.01 * input_params['threshold_a'][5]), input_params['a (kpc)'][5] * (1 +\n 0.01 * input_params['threshold_a'][5]), valinit=input_params['a (kpc)']\n [5], color='orange')\n\n\ndef MN_b_amp_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n amp1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a1 *\n units.kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_b_a_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n a1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=val *\n units.kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_b_b_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n b1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_amp_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n amp2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_a_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n a2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=\n val * units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_b_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n b2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_tkd_amp_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n amp3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a3 *\n units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_tkd_a_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n a3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n val * units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_tkd_b_s_func(val):\n if 
MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n b3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_ed_amp_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n amp4 = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=val * (units.Msun /\n units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=0.001,\n normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\ndef MN_ed_a_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n h_r = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4 * (units.\n Msun / units.pc ** 2), hr=val * units.kpc, maxiter=20, tol=\n 0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\ndef NFW_amp_s_func(val):\n if NFW_plot.get_visible() == True:\n global NFW_p, amp5, a5\n amp5 = val * 1\n NFW_p = NFWPotential(amp=val * units.Msun, a=a5 * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef NFW_a_s_func(val):\n if NFW_plot.get_visible() == True:\n global NFW_p, amp5, a5\n a5 = val * 1\n NFW_p = NFWPotential(amp=amp5 * units.Msun, a=val * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef BK_amp_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n amp6 = val * 1\n BK_p = BurkertPotential(amp=val * units.Msun / units.kpc ** 3, a=a6 *\n units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef BK_a_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n a6 = val * 1\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef update_rot_curve():\n ax.clear()\n global MN_b_plot, MN_Bulge_p, MN_Thin_Disk_p, MN_Thick_Disk_p, MN_td_plot, MN_tkd_plot, NFW_p, NFW_plot, EX_d_plot, EX_Disk_p, CV_galaxy, CV_galaxy_dot, BK_p, BK_plot\n composite_pot_array = []\n ax.set_xlabel('$R(kpc)$', fontsize=20)\n ax.set_ylabel('$v_c(km/s)$', fontsize=20)\n ax.tick_params(axis='both', which='both', labelsize=15)\n ax.set_xlim([0, 1.02 * r_data[-1]])\n ax.set_ylim([0, np.max(v_c_data) * 1.2])\n if MN_b_plot.get_visible() == True:\n MN_Bulge = calcRotcurve(MN_Bulge_p, lista, phi=None) * 220\n MN_b_plot, = ax.plot(lista, MN_Bulge, linestyle='--', c='gray')\n composite_pot_array.append(MN_Bulge_p)\n if MN_td_plot.get_visible() == True:\n MN_Thin_Disk = calcRotcurve(MN_Thin_Disk_p, lista, phi=None) * 220\n MN_td_plot, = ax.plot(lista, MN_Thin_Disk, linestyle='--', c='purple')\n composite_pot_array.append(MN_Thin_Disk_p)\n if MN_tkd_plot.get_visible() == True:\n MN_Thick_Disk = calcRotcurve(MN_Thick_Disk_p, lista, phi=None) * 220\n MN_tkd_plot, = ax.plot(lista, MN_Thick_Disk, linestyle='--', c='blue')\n composite_pot_array.append(MN_Thick_Disk_p)\n if NFW_plot.get_visible() == True:\n NFW = calcRotcurve(NFW_p, lista, phi=None) * 220\n NFW_plot, = ax.plot(lista, NFW, linestyle='--', c='green')\n composite_pot_array.append(NFW_p)\n if EX_d_plot.get_visible() == True:\n EX_Disk = calcRotcurve(EX_Disk_p, lista, phi=None) * 220\n EX_d_plot, = ax.plot(lista, EX_Disk, linestyle='--', c='cyan')\n composite_pot_array.append(EX_Disk_p)\n if BK_plot.get_visible() == True:\n BK = calcRotcurve(BK_p, lista, phi=None) * 220\n BK_plot, = ax.plot(lista, BK, linestyle='--', c='orange')\n composite_pot_array.append(BK_p)\n CV_galaxy = 
ax.errorbar(r_data, v_c_data, v_c_err_data, c='k', fmt='',\n ls='none')\n CV_galaxy_dot = ax.scatter(r_data, v_c_data, c='k')\n v_circ_comp = calcRotcurve(composite_pot_array, lista, phi=None) * 220\n v_circ_comp_plot, = ax.plot(lista, v_circ_comp, c='k')\n\n\nMN_b_amp_s.on_changed(MN_b_amp_s_func)\nMN_b_a_s.on_changed(MN_b_a_s_func)\nMN_b_b_s.on_changed(MN_b_b_s_func)\nMN_td_amp_s.on_changed(MN_td_amp_s_func)\nMN_td_a_s.on_changed(MN_td_a_s_func)\nMN_td_b_s.on_changed(MN_td_b_s_func)\nMN_tkd_amp_s.on_changed(MN_tkd_amp_s_func)\nMN_tkd_a_s.on_changed(MN_tkd_a_s_func)\nMN_tkd_b_s.on_changed(MN_tkd_b_s_func)\nNFW_amp_s.on_changed(NFW_amp_s_func)\nNFW_a_s.on_changed(NFW_a_s_func)\nBK_amp_s.on_changed(BK_amp_s_func)\nBK_a_s.on_changed(BK_a_s_func)\nMN_ed_amp_s.on_changed(MN_ed_amp_s_func)\nMN_ed_a_s.on_changed(MN_ed_a_s_func)\n\n\ndef reset(event):\n MN_b_amp_s.reset()\n MN_b_a_s.reset()\n MN_b_b_s.reset()\n MN_td_amp_s.reset()\n MN_td_a_s.reset()\n MN_td_b_s.reset()\n MN_tkd_amp_s.reset()\n MN_tkd_a_s.reset()\n MN_tkd_b_s.reset()\n MN_ed_amp_s.reset()\n MN_ed_a_s.reset()\n NFW_amp_s.reset()\n NFW_a_s.reset()\n BK_amp_s.reset()\n BK_a_s.reset()\n\n\naxcolor = 'lavender'\nresetax = fig.add_axes((0.07, 0.08, 0.08, 0.05))\nbutton_reset = Button(resetax, 'Reset', color=axcolor)\nbutton_reset.on_clicked(reset)\n\n\ndef check_on_clicked(label):\n if label == 'MN Bulge (GRAY)':\n MN_b_plot.set_visible(not MN_b_plot.get_visible())\n update_rot_curve()\n elif label == 'MN Thin Disc (PURPLE)':\n MN_td_plot.set_visible(not MN_td_plot.get_visible())\n update_rot_curve()\n elif label == 'MN Thick Disc (BLUE)':\n MN_tkd_plot.set_visible(not MN_tkd_plot.get_visible())\n update_rot_curve()\n elif label == 'Exp. Disc (CYAN)':\n EX_d_plot.set_visible(not EX_d_plot.get_visible())\n update_rot_curve()\n elif label == 'NFW - Halo (GREEN)':\n NFW_plot.set_visible(not NFW_plot.get_visible())\n update_rot_curve()\n elif label == 'Burkert - Halo (ORANGE)':\n BK_plot.set_visible(not BK_plot.get_visible())\n update_rot_curve()\n plt.draw()\n\n\nax.set_xlabel('$R(kpc)$', fontsize=20)\nax.set_ylabel('$v_c(km/s)$', fontsize=20)\nax.tick_params(axis='both', which='both', labelsize=15)\nax.set_xlim([0, np.max(lista)])\nax.set_ylim([0, np.max(v_c_data) * 1.2])\ncheck.on_clicked(check_on_clicked)\n<docstring token>\naxcolor = 'lavender'\nresetax = fig.add_axes((0.2, 0.08, 0.08, 0.05))\nbutton_start = Button(resetax, 'Start', color=axcolor)\n\n\ndef start(event):\n plt.close(1)\n\n\nbutton_start.on_clicked(start)\nplt.show()\nchk = []\nif MN_b_plot.get_visible() == True:\n chk.append(True)\nelse:\n chk.append(False)\nif MN_td_plot.get_visible() == True:\n chk.append(True)\nelse:\n chk.append(False)\nif MN_tkd_plot.get_visible() == True:\n chk.append(True)\nelse:\n chk.append(False)\nif EX_d_plot.get_visible() == True:\n chk.append(True)\nelse:\n chk.append(False)\nif NFW_plot.get_visible() == True:\n chk.append(True)\nelse:\n chk.append(False)\nif BK_plot.get_visible() == True:\n chk.append(True)\nelse:\n chk.append(False)\ncompnts = ['BULGE', 'THIN DISC', 'THICK DISC', 'EXP. 
DISC', 'DARK HALO',\n 'BURKERT HALO']\nmasses = [amp1, amp2, amp3, amp4, amp5, amp6]\naa = [a1, a2, a3, h_r, a5, a6]\nbb = [b1, b2, b3, 'None', 'None', 'None']\ninit_parameters = Table.Table([compnts, masses, aa, bb, chk], names=(\n 'component', 'mass', 'a (kpc)', 'b (kpc)', 'checked'))\ninit_parameters.write('init_guess_params.txt', format='ascii.tab',\n overwrite=True)\nprint(\n \"\"\"\n#####################################################################\"\"\"\n )\nprint('###################### GalRotpy ######################')\nprint(\n '#####################################################################\\n\\n'\n )\n\n\ndef model(parameters, R):\n global chk, para_labels, aa\n para = {}\n for i in range(len(para_labels)):\n para[para_labels[i]] = parameters[i]\n r_0 = 1 * units.kpc\n v_0 = 220 * units.km / units.s\n check_pot = []\n if chk[0] == True:\n if aa[0] == 0.0:\n a1 = 0.0\n amp1 = para['amp1']\n b1 = para['b1']\n else:\n amp1 = para['amp1']\n a1 = para['a1']\n b1 = para['b1']\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Bulge_p)\n if chk[1] == True:\n amp2 = para['amp2']\n a2 = para['a2']\n b2 = para['b2']\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Thin_Disk_p)\n if chk[2] == True:\n amp3 = para['amp3']\n a3 = para['a3']\n b3 = para['b3']\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Thick_Disk_p)\n if chk[3] == True:\n amp4 = para['amp4']\n h_r = para['h_r']\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4 * (units.\n Msun / units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=\n 0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n check_pot.append(EX_Disk_p)\n if chk[4] == True:\n amp5 = para['amp5']\n a5 = para['a5']\n NFW_p = NFWPotential(amp=amp5 * units.Msun, a=a5 * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n check_pot.append(NFW_p)\n if chk[5] == True:\n amp6 = para['amp6']\n a6 = para['a6']\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n a6 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(BK_p)\n vc_total = calcRotcurve(check_pot, R, phi=None) * 220\n return vc_total\n\n\ndef lnprior(parameters):\n booL = []\n for i in parameters:\n if i > 0.0:\n booL.append(True)\n else:\n booL.append(False)\n if False in booL:\n return -np.inf\n else:\n return 0.0\n\n\ndef lnlike(parameters, x, y, yerr):\n Model = model(parameters, x)\n return -0.5 * np.sum(((y - Model) / yerr) ** 2)\n\n\ndef lnprob(parameters, x, y, yerr):\n lp = lnprior(parameters)\n Model = model(parameters, x)\n if not np.isfinite(lp) or True in np.isnan(Model):\n return -np.inf\n else:\n return lp + lnlike(parameters, x, y, yerr)\n\n\n<docstring token>\npara_labels = []\nlabels = []\nlabels_log = []\npara_in = []\nif chk[0] == True:\n if aa[0] == 0.0:\n para_labels.append('b1')\n para_in.append(bb[0])\n labels.append('$b_B$')\n labels_log.append('$\\\\log(b_B)$')\n para_labels.append('amp1')\n para_in.append(masses[0])\n labels.append('$M_B$')\n labels_log.append('$\\\\log(M_B)$')\n else:\n para_labels.append('a1')\n para_in.append(aa[0])\n labels.append('$a_B$')\n labels_log.append('$\\\\log(a_b)$')\n para_labels.append('b1')\n para_in.append(bb[0])\n labels.append('$b_B$')\n labels_log.append('$\\\\log(b_b)$')\n 
para_labels.append('amp1')\n para_in.append(masses[0])\n labels.append('$M_B$')\n labels_log.append('$\\\\log(M_b)$')\nif chk[1] == True:\n para_labels.append('a2')\n para_in.append(aa[1])\n labels.append('$a_{TD}$')\n labels_log.append('$\\\\log(a_{TD})$')\n para_labels.append('b2')\n para_in.append(bb[1])\n labels.append('$b_{TD}$')\n labels_log.append('$\\\\log(b_{TD})$')\n para_labels.append('amp2')\n para_in.append(masses[1])\n labels.append('$M_{TD}$')\n labels_log.append('$\\\\log(M_{TD})$')\nif chk[2] == True:\n para_labels.append('a3')\n para_in.append(aa[2])\n labels.append('$a_{TkD}$')\n labels_log.append('$\\\\log(a_{TkD})$')\n para_labels.append('b3')\n para_in.append(bb[2])\n labels.append('$b_{TkD}$')\n labels_log.append('$\\\\log(b_{TkD})$')\n para_labels.append('amp3')\n para_in.append(masses[2])\n labels.append('$M_{TkD}$')\n labels_log.append('$\\\\log(M_{TkD})$')\nif chk[3] == True:\n para_labels.append('h_r')\n para_in.append(aa[3])\n labels.append('$h_{r}$')\n labels_log.append('$\\\\log(h_{r})$')\n para_labels.append('amp4')\n para_in.append(masses[3])\n labels.append('$\\\\Sigma_{0}$')\n labels_log.append('$\\\\log(\\\\Sigma_{0})$')\nif chk[4] == True:\n para_labels.append('a5')\n para_in.append(aa[4])\n labels.append('$a_{NFW}$')\n labels_log.append('$\\\\log(a_{NFW})$')\n para_labels.append('amp5')\n para_in.append(masses[4])\n labels.append('$M_{0}$')\n labels_log.append('$\\\\log(M_{0})$')\nif chk[5] == True:\n para_labels.append('a6')\n para_in.append(aa[5])\n labels.append('$a_{Bk}$')\n labels_log.append('$\\\\log(a_{Bk})$')\n para_labels.append('amp6')\n para_in.append(masses[5])\n labels.append('$\\\\rho_{0}$')\n labels_log.append('$\\\\log(\\\\rho_{0})$')\nstart = np.array(para_in)\nndim = len(start)\nprint('Dimension: ', ndim, '\\n')\nif chk[4] == True or chk[5] == True:\n Delta_c = float(input(\n 'Enter the cosmological overdensity you want to use:\\n'))\nnwalkers = int(input(\"\"\"\nEnter the number of walkers you want to use:\n\"\"\"))\nsteps = int(input(\n \"\"\"\nEnter the number of steps you want the walkers to take:\n\"\"\"))\npos_step = 1e-08\npos_in = [abs(start + pos_step * start * np.random.randn(ndim) + 1e-09 * np\n .random.randn(ndim)) for i in range(nwalkers)]\nsampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(r_data,\n v_c_data, v_c_err_data), threads=ndim * mp.cpu_count())\nprint(\n \"\"\"\n#####################################################################\n\"\"\"\n )\nRound = int(input('Enter the number of times you want GalRotpy to run:\\n'))\nif Round <= 0:\n print('\\nStart over...')\n exit()\nprint('\\nRunning...\\n')\ntime0 = time.time()\nif Round == 1:\n p0, lp, _ = sampler.run_mcmc(pos_in, steps)\n print('It took ', (time.time() - time0) / 60, 'minutes\\n')\nif Round > 1:\n for j in range(Round - 1):\n ti = time.time()\n PARA = []\n p0, lp, _ = sampler.run_mcmc(pos_in, steps)\n SAMPLES = sampler.chain[:, int(0.5 * steps):, :].reshape((-1, ndim))\n for i in range(ndim):\n mcmc = np.percentile(SAMPLES[:, i], [50.0 - 0.5 * 68, 50.0, \n 50.0 + 0.5 * 68])\n PARA.append(mcmc[1])\n p = np.array(PARA)\n pos_in = [abs(p + pos_step * p * np.random.randn(ndim) + 1e-08 * np\n .random.randn(ndim)) for i in range(nwalkers)]\n sampler.reset()\n print('Run ' + str(j + 1) + ' done')\n print('Time: ', (time.time() - ti) / 60, 'minutes\\n')\n ti = time.time()\n if Round > 1:\n steps = 3 * steps\n p0, lp, _ = sampler.run_mcmc(pos_in, steps)\n print('Run ' + str(Round) + ' done')\n print('Time: ', (time.time() - ti) / 60, 'minutes\\n')\n 
print('It took ', (time.time() - time0) / 60, 'minutes\\n')\nprint('#####################################################################\\n'\n )\nfig = plt.figure(2)\nax = fig.add_axes((0.15, 0.3, 0.75, 0.6))\nchain_steps = [i for i in range(len(sampler.chain[:, :, 0].T))]\nchain_W = []\nfor i in range(nwalkers):\n chain_value = sampler.chain[:, :, 0].T[:][:, i]\n ax.plot(chain_steps, chain_value, '-', color='k', alpha=0.3)\nax.plot(chain_steps, len(chain_steps) * [start[0]], '-', color='r', lw=1)\nax.set_xlim(0, len(chain_steps) - 1)\nax.set_xlabel('$Steps$', fontsize=10)\nax.set_ylabel(labels[0], fontsize=15)\n\n\nclass Index(object):\n ind = 0\n\n def next(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind += 1\n if self.ind >= ndim:\n self.ind = 0\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n def prev(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind -= 1\n if self.ind == -1:\n self.ind = ndim - 1\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n\naxcolor = 'lavender'\ncallback = Index()\naxprev = plt.axes([0.3, 0.05, 0.1, 0.075])\naxnext = plt.axes([0.6, 0.05, 0.1, 0.075])\nbnext = Button(axnext, 'Next', color=axcolor)\nbnext.on_clicked(callback.next)\nbprev = Button(axprev, 'Previous', color=axcolor)\nbprev.on_clicked(callback.prev)\n\n\ndef burn(event):\n plt.close()\n\n\nresetax = fig.add_axes((0.45, 0.05, 0.1, 0.075))\nbutton_reset = Button(resetax, 'Burn-in', color=axcolor)\nbutton_reset.on_clicked(burn)\nplt.show()\nburn_in = int(input('Enter the number of steps you want to burn-in:\\n'))\nprint(\n \"\"\"\n#####################################################################\n\"\"\"\n )\nprint('Plotting...')\nif burn_in == 0.0:\n samples = sampler.chain[:, :, :].reshape((-1, ndim))\nelse:\n samples = sampler.chain[:, burn_in:, :].reshape((-1, ndim))\nsamples.shape\npercentage = 0.68\nfig = corner.corner(np.log10(samples), labels=labels_log, label_kwargs={\n 'fontsize': 21.5}, bins=50, use_math_text=True, color='gray',\n max_n_ticks=3, smooth=1.0, levels=[1 - np.exp(-0.5), 1 - np.exp(-2.0)],\n quantiles=[0.5 - 0.5 * percentage, 0.5, 0.5 + 0.5 * percentage],\n fill_contours=True, plot_datapoints=True)\naxes = np.array(fig.axes).reshape((ndim, ndim))\nfor yi in range(ndim):\n for xi in range(yi + 1):\n ax = axes[yi, xi]\n ax.tick_params(axis='both', which='major', labelsize=14.5, pad=3,\n direction='in')\nfig.savefig('Conf_Regions.pdf', bbox_inches='tight', pad_inches=0.15)\n<docstring token>\nH_0 = 2.1972483582604943e-18\nG = 4.517103050001136e-39\nrho_c = 127.5791469578729\n\n\ndef eq_nfw(x, rho_0, rho_c):\n global Delta_c\n return np.log(1 + x) - x / (1 + x) - Delta_c * rho_c / (3.0 * rho_0\n ) * x ** 3\n\n\ndef mass_nfw(x, rho_0, a):\n return 4.0 * np.pi * rho_0 * a ** 3 * (np.log(1 + x) - x / (1 + x))\n\n\ndef eq_b(x, 
rho_0, rho_c):\n global Delta_c\n return 2.0 * np.log(1 + x) + np.log(1 + x ** 2) - 2.0 * np.arctan(x\n ) - 4.0 * Delta_c * rho_c / (3.0 * rho_0) * x ** 3\n\n\ndef mass_b(x, rho_0, a):\n return np.pi * rho_0 * a ** 3 * (2.0 * np.log(1 + x) + np.log(1 + x ** \n 2) - 2.0 * np.arctan(x))\n\n\ntable_data = []\nindex = []\npara = []\nparap68 = []\nparan68 = []\nparap95 = []\nparan95 = []\ntable_para = []\ntable_units = []\nfinal_para_labels = []\nfit_para = []\nfor i in range(ndim):\n mcmc = np.percentile(samples[:, i], [50.0 - 0.5 * 95, 50.0 - 0.5 * 68, \n 50.0, 50.0 + 0.5 * 68, 50.0 + 0.5 * 95])\n para.append(mcmc[2])\n fit_para.append(mcmc[2])\n parap68.append(mcmc[3] - mcmc[2])\n paran68.append(mcmc[2] - mcmc[1])\n parap95.append(mcmc[4] - mcmc[2])\n paran95.append(mcmc[2] - mcmc[0])\n final_para_labels.append(para_labels[i])\n if para_labels[i] == 'h_r':\n ed_h_r = np.array(samples[:, i])\n if para_labels[i] == 'amp4':\n ed_sigma0 = np.array(samples[:, i])\n M_disc = 2.0 * np.pi * ed_sigma0 * (1000 * ed_h_r) ** 2\n mcmc = np.percentile(M_disc, [50.0 - 0.5 * 95, 50.0 - 0.5 * 68, \n 50.0, 50.0 + 0.5 * 68, 50.0 + 0.5 * 95])\n para.append(mcmc[2])\n parap68.append(mcmc[3] - mcmc[2])\n paran68.append(mcmc[2] - mcmc[1])\n parap95.append(mcmc[4] - mcmc[2])\n paran95.append(mcmc[2] - mcmc[0])\n final_para_labels.append('M_star')\n if para_labels[i] == 'a5':\n nfw_a = np.array(samples[:, i])\n if para_labels[i] == 'amp5':\n nfw_M0 = np.array(samples[:, i])\n rho_0 = nfw_M0 / (4.0 * np.pi * nfw_a ** 3)\n mcmc = np.percentile(rho_0, [50.0 - 0.5 * 95, 50.0 - 0.5 * 68, 50.0,\n 50.0 + 0.5 * 68, 50.0 + 0.5 * 95])\n para.append(mcmc[2])\n parap68.append(mcmc[3] - mcmc[2])\n paran68.append(mcmc[2] - mcmc[1])\n parap95.append(mcmc[4] - mcmc[2])\n paran95.append(mcmc[2] - mcmc[0])\n final_para_labels.append('rho_0_NFW')\n X_nfw = []\n for density in rho_0:\n X_nfw.append(fsolve(eq_nfw, 100000.0, args=(density, rho_c))[0])\n mcmc = np.percentile(np.array(X_nfw), [50.0 - 0.5 * 95, 50.0 - 0.5 *\n 68, 50.0, 50.0 + 0.5 * 68, 50.0 + 0.5 * 95])\n para.append(mcmc[2])\n parap68.append(mcmc[3] - mcmc[2])\n paran68.append(mcmc[2] - mcmc[1])\n parap95.append(mcmc[4] - mcmc[2])\n paran95.append(mcmc[2] - mcmc[0])\n final_para_labels.append('X_NFW')\n M_h_nfw = mass_nfw(np.array(X_nfw), rho_0, nfw_a)\n mcmc = np.percentile(M_h_nfw, [50.0 - 0.5 * 95, 50.0 - 0.5 * 68, \n 50.0, 50.0 + 0.5 * 68, 50.0 + 0.5 * 95])\n para.append(mcmc[2])\n parap68.append(mcmc[3] - mcmc[2])\n paran68.append(mcmc[2] - mcmc[1])\n parap95.append(mcmc[4] - mcmc[2])\n paran95.append(mcmc[2] - mcmc[0])\n final_para_labels.append('M_h_NFW')\n if para_labels[i] == 'a6':\n b_a = np.array(samples[:, i])\n if para_labels[i] == 'amp6':\n X_b = []\n for density in samples[:, i]:\n X_b.append(fsolve(eq_b, 100000.0, args=(density, rho_c))[0])\n mcmc = np.percentile(np.array(X_b), [50.0 - 0.5 * 95, 50.0 - 0.5 * \n 68, 50.0, 50.0 + 0.5 * 68, 50.0 + 0.5 * 95])\n para.append(mcmc[2])\n parap68.append(mcmc[3] - mcmc[2])\n paran68.append(mcmc[2] - mcmc[1])\n parap95.append(mcmc[4] - mcmc[2])\n paran95.append(mcmc[2] - mcmc[0])\n final_para_labels.append('X_Bk')\n M_h_b = mass_b(np.array(X_b), samples[:, i], b_a)\n mcmc = np.percentile(M_h_b, [50.0 - 0.5 * 95, 50.0 - 0.5 * 68, 50.0,\n 50.0 + 0.5 * 68, 50.0 + 0.5 * 95])\n para.append(mcmc[2])\n parap68.append(mcmc[3] - mcmc[2])\n paran68.append(mcmc[2] - mcmc[1])\n parap95.append(mcmc[4] - mcmc[2])\n paran95.append(mcmc[2] - mcmc[0])\n final_para_labels.append('M_h_Bk')\nr = np.linspace(0.001, 1.02 * np.amax(r_data), 
10000)\ncurva = model(fit_para, r)\nY_guess = model(start, r)\nnp.warnings.filterwarnings('ignore')\nplt.figure(figsize=(6, 6))\nbest_para = {}\nfor i in range(len(final_para_labels)):\n best_para[final_para_labels[i]] = para[i]\nif chk[0] == True:\n if aa[0] == 0.0:\n a1 = 0.0\n amp1 = best_para['amp1']\n b1 = best_para['b1']\n else:\n amp1 = best_para['amp1']\n a1 = best_para['a1']\n b1 = best_para['b1']\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 * units\n .kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n vc_b = calcRotcurve(MN_Bulge_p, r, phi=None) * 220\n plt.plot(r, vc_b, '--', color='gray', label='Bulge')\nif chk[1] == True:\n amp2 = best_para['amp2']\n a2 = best_para['a2']\n b2 = best_para['b2']\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n vc_td = calcRotcurve(MN_Thin_Disk_p, r, phi=None) * 220\n plt.plot(r, vc_td, '--', color='purple', label='Thin Disk')\nif chk[2] == True:\n amp3 = best_para['amp3']\n a3 = best_para['a3']\n b3 = best_para['b3']\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=a3 *\n units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n vc_tkd = calcRotcurve(MN_Thick_Disk_p, r, phi=None) * 220\n plt.plot(r, vc_tkd, '--', color='blue', label='Thick Disk')\nif chk[3] == True:\n amp4 = best_para['amp4']\n h_r = best_para['h_r']\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4 * (units.Msun / \n units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=0.001,\n normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n vc_exp = calcRotcurve(EX_Disk_p, r, phi=None) * 220\n plt.plot(r, vc_exp, '--', color='cyan', label='Exp. Disk')\nif chk[4] == True:\n amp5 = best_para['amp5']\n a5 = best_para['a5']\n NFW_p = NFWPotential(amp=amp5 * units.Msun, a=a5 * units.kpc, normalize\n =False, ro=r_0, vo=v_0)\n vc_nfw = calcRotcurve(NFW_p, r, phi=None) * 220\n plt.plot(r, vc_nfw, '--', color='green', label='NFW - Halo')\nif chk[5] == True:\n amp6 = best_para['amp6']\n a6 = best_para['a6']\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=a6 *\n units.kpc, normalize=False, ro=r_0, vo=v_0)\n vc_bk = calcRotcurve(BK_p, r, phi=None) * 220\n plt.plot(r, vc_bk, '--', color='orange', label='Burkert - Halo')\nplt.errorbar(r_data, v_c_data, yerr=v_c_err_data, fmt='ko', ecolor='black',\n ms=4, label=None)\nplt.plot(r, curva, '-', color='red', lw=1.5, label='Best Fit')\nplt.xlabel('$R(kpc)$', fontsize=20)\nplt.ylabel('$V_c(km/s)$', fontsize=20)\nplt.tick_params(axis='both', which='major', labelsize=15)\nplt.xlim(0.0, 1.02 * np.amax(r_data))\nplt.ylim(0.0, 1.1 * np.amax(v_c_data))\nplt.tight_layout()\nplt.legend(loc='lower right', fontsize=15)\nplt.savefig('GalRotpy_fit.pdf')\nprint(\n \"\"\"\n#####################################################################\n\"\"\"\n )\nif chk[0] == True:\n if aa[0] == 0.0:\n index.append('BULGE')\n index.append('---')\n table_para.append('b')\n table_units.append('kpc')\n table_para.append('M')\n table_units.append('M_Sun')\n else:\n index.append('BULGE')\n index.append('---')\n index.append('---')\n table_para.append('a')\n table_units.append('kpc')\n table_para.append('b')\n table_units.append('kpc')\n table_para.append('M')\n table_units.append('M_Sun')\nif chk[1] == True:\n index.append('THIN DISK')\n index.append('---')\n index.append('---')\n table_para.append('a')\n table_units.append('kpc')\n table_para.append('b')\n table_units.append('kpc')\n table_para.append('M')\n 
table_units.append('M_Sun')\nif chk[2] == True:\n index.append('THICK DISK')\n index.append('---')\n index.append('---')\n table_para.append('a')\n table_units.append('kpc')\n table_para.append('b')\n table_units.append('kpc')\n table_para.append('M')\n table_units.append('M_Sun')\nif chk[3] == True:\n index.append('EXPONENTIAL DISK')\n index.append('---')\n index.append('---')\n table_para.append('h_r')\n table_units.append('kpc')\n table_para.append('Sigma_0')\n table_units.append('M_Sun/pc^2')\n table_para.append('M')\n table_units.append('M_Sun')\nif chk[4] == True:\n index.append('NFW HALO')\n index.append('---')\n index.append('---')\n index.append('---')\n index.append('---')\n table_para.append('a')\n table_units.append('kpc')\n table_para.append('M_0')\n table_units.append('M_Sun')\n table_para.append('rho_0')\n table_units.append('M_Sun/kpc^3')\n table_para.append('X')\n table_units.append('---')\n table_para.append('M_h')\n table_units.append('M_Sun')\nif chk[5] == True:\n index.append('BURKERT HALO')\n index.append('---')\n index.append('---')\n index.append('---')\n table_para.append('a')\n table_units.append('kpc')\n table_para.append('rho_0')\n table_units.append('M_Sun/kpc^3')\n table_para.append('X')\n table_units.append('---')\n table_para.append('M_h')\n table_units.append('M_Sun')\nfor i in range(len(para)):\n table_data.append([table_para[i], table_units[i], paran95[i], paran68[i\n ], para[i], parap68[i], parap95[i]])\ncolumn_name = ['PARAMETER', 'UNITS', '95%(-)', '68%(-)', 'FIT', '68%(+)',\n '95%(+)']\ntable_p = pd.DataFrame(table_data, index=index, columns=column_name)\ntable_p.to_csv('final_params.txt', sep='\\t', encoding='utf-8')\nprint(table_p)\nprint(\n \"\"\"\n#####################################################################\"\"\"\n )\nprint('\\nDone')\nprint(\n \"\"\"\n#####################################################################\n\"\"\"\n )\n",
"<docstring token>\n<import token>\nnp.warnings.filterwarnings('ignore')\n<assignment token>\nfor i in range(len(r_data)):\n if r_data[i] < 0.001:\n r_data[i] = 0.001\n<assignment token>\nax.set_xlabel('$R(kpc)$', fontsize=20)\nax.set_ylabel('$v_c(km/s)$', fontsize=20)\nax.tick_params(axis='both', which='both', labelsize=15)\n<assignment token>\nfor r in check.rectangles:\n r.set_facecolor('lavender')\n r.set_edgecolor('black')\n[ll.set_color('black') for l in check.lines for ll in l]\n[ll.set_linewidth(2) for l in check.lines for ll in l]\n<assignment token>\n\n\ndef MN_b_amp_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n amp1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a1 *\n units.kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_b_a_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n a1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=val *\n units.kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_b_b_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n b1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_amp_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n amp2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_a_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n a2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=\n val * units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_b_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n b2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_tkd_amp_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n amp3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a3 *\n units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_tkd_a_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n a3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n val * units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_tkd_b_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n b3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_ed_amp_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n amp4 = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=val * (units.Msun /\n units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=0.001,\n normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\ndef MN_ed_a_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n h_r = val * 1\n EX_Disk_p = 
RazorThinExponentialDiskPotential(amp=amp4 * (units.\n Msun / units.pc ** 2), hr=val * units.kpc, maxiter=20, tol=\n 0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\ndef NFW_amp_s_func(val):\n if NFW_plot.get_visible() == True:\n global NFW_p, amp5, a5\n amp5 = val * 1\n NFW_p = NFWPotential(amp=val * units.Msun, a=a5 * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef NFW_a_s_func(val):\n if NFW_plot.get_visible() == True:\n global NFW_p, amp5, a5\n a5 = val * 1\n NFW_p = NFWPotential(amp=amp5 * units.Msun, a=val * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef BK_amp_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n amp6 = val * 1\n BK_p = BurkertPotential(amp=val * units.Msun / units.kpc ** 3, a=a6 *\n units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef BK_a_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n a6 = val * 1\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef update_rot_curve():\n ax.clear()\n global MN_b_plot, MN_Bulge_p, MN_Thin_Disk_p, MN_Thick_Disk_p, MN_td_plot, MN_tkd_plot, NFW_p, NFW_plot, EX_d_plot, EX_Disk_p, CV_galaxy, CV_galaxy_dot, BK_p, BK_plot\n composite_pot_array = []\n ax.set_xlabel('$R(kpc)$', fontsize=20)\n ax.set_ylabel('$v_c(km/s)$', fontsize=20)\n ax.tick_params(axis='both', which='both', labelsize=15)\n ax.set_xlim([0, 1.02 * r_data[-1]])\n ax.set_ylim([0, np.max(v_c_data) * 1.2])\n if MN_b_plot.get_visible() == True:\n MN_Bulge = calcRotcurve(MN_Bulge_p, lista, phi=None) * 220\n MN_b_plot, = ax.plot(lista, MN_Bulge, linestyle='--', c='gray')\n composite_pot_array.append(MN_Bulge_p)\n if MN_td_plot.get_visible() == True:\n MN_Thin_Disk = calcRotcurve(MN_Thin_Disk_p, lista, phi=None) * 220\n MN_td_plot, = ax.plot(lista, MN_Thin_Disk, linestyle='--', c='purple')\n composite_pot_array.append(MN_Thin_Disk_p)\n if MN_tkd_plot.get_visible() == True:\n MN_Thick_Disk = calcRotcurve(MN_Thick_Disk_p, lista, phi=None) * 220\n MN_tkd_plot, = ax.plot(lista, MN_Thick_Disk, linestyle='--', c='blue')\n composite_pot_array.append(MN_Thick_Disk_p)\n if NFW_plot.get_visible() == True:\n NFW = calcRotcurve(NFW_p, lista, phi=None) * 220\n NFW_plot, = ax.plot(lista, NFW, linestyle='--', c='green')\n composite_pot_array.append(NFW_p)\n if EX_d_plot.get_visible() == True:\n EX_Disk = calcRotcurve(EX_Disk_p, lista, phi=None) * 220\n EX_d_plot, = ax.plot(lista, EX_Disk, linestyle='--', c='cyan')\n composite_pot_array.append(EX_Disk_p)\n if BK_plot.get_visible() == True:\n BK = calcRotcurve(BK_p, lista, phi=None) * 220\n BK_plot, = ax.plot(lista, BK, linestyle='--', c='orange')\n composite_pot_array.append(BK_p)\n CV_galaxy = ax.errorbar(r_data, v_c_data, v_c_err_data, c='k', fmt='',\n ls='none')\n CV_galaxy_dot = ax.scatter(r_data, v_c_data, c='k')\n v_circ_comp = calcRotcurve(composite_pot_array, lista, phi=None) * 220\n v_circ_comp_plot, = ax.plot(lista, v_circ_comp, 
c='k')\n\n\nMN_b_amp_s.on_changed(MN_b_amp_s_func)\nMN_b_a_s.on_changed(MN_b_a_s_func)\nMN_b_b_s.on_changed(MN_b_b_s_func)\nMN_td_amp_s.on_changed(MN_td_amp_s_func)\nMN_td_a_s.on_changed(MN_td_a_s_func)\nMN_td_b_s.on_changed(MN_td_b_s_func)\nMN_tkd_amp_s.on_changed(MN_tkd_amp_s_func)\nMN_tkd_a_s.on_changed(MN_tkd_a_s_func)\nMN_tkd_b_s.on_changed(MN_tkd_b_s_func)\nNFW_amp_s.on_changed(NFW_amp_s_func)\nNFW_a_s.on_changed(NFW_a_s_func)\nBK_amp_s.on_changed(BK_amp_s_func)\nBK_a_s.on_changed(BK_a_s_func)\nMN_ed_amp_s.on_changed(MN_ed_amp_s_func)\nMN_ed_a_s.on_changed(MN_ed_a_s_func)\n\n\ndef reset(event):\n MN_b_amp_s.reset()\n MN_b_a_s.reset()\n MN_b_b_s.reset()\n MN_td_amp_s.reset()\n MN_td_a_s.reset()\n MN_td_b_s.reset()\n MN_tkd_amp_s.reset()\n MN_tkd_a_s.reset()\n MN_tkd_b_s.reset()\n MN_ed_amp_s.reset()\n MN_ed_a_s.reset()\n NFW_amp_s.reset()\n NFW_a_s.reset()\n BK_amp_s.reset()\n BK_a_s.reset()\n\n\n<assignment token>\nbutton_reset.on_clicked(reset)\n\n\ndef check_on_clicked(label):\n if label == 'MN Bulge (GRAY)':\n MN_b_plot.set_visible(not MN_b_plot.get_visible())\n update_rot_curve()\n elif label == 'MN Thin Disc (PURPLE)':\n MN_td_plot.set_visible(not MN_td_plot.get_visible())\n update_rot_curve()\n elif label == 'MN Thick Disc (BLUE)':\n MN_tkd_plot.set_visible(not MN_tkd_plot.get_visible())\n update_rot_curve()\n elif label == 'Exp. Disc (CYAN)':\n EX_d_plot.set_visible(not EX_d_plot.get_visible())\n update_rot_curve()\n elif label == 'NFW - Halo (GREEN)':\n NFW_plot.set_visible(not NFW_plot.get_visible())\n update_rot_curve()\n elif label == 'Burkert - Halo (ORANGE)':\n BK_plot.set_visible(not BK_plot.get_visible())\n update_rot_curve()\n plt.draw()\n\n\nax.set_xlabel('$R(kpc)$', fontsize=20)\nax.set_ylabel('$v_c(km/s)$', fontsize=20)\nax.tick_params(axis='both', which='both', labelsize=15)\nax.set_xlim([0, np.max(lista)])\nax.set_ylim([0, np.max(v_c_data) * 1.2])\ncheck.on_clicked(check_on_clicked)\n<docstring token>\n<assignment token>\n\n\ndef start(event):\n plt.close(1)\n\n\nbutton_start.on_clicked(start)\nplt.show()\n<assignment token>\nif MN_b_plot.get_visible() == True:\n chk.append(True)\nelse:\n chk.append(False)\nif MN_td_plot.get_visible() == True:\n chk.append(True)\nelse:\n chk.append(False)\nif MN_tkd_plot.get_visible() == True:\n chk.append(True)\nelse:\n chk.append(False)\nif EX_d_plot.get_visible() == True:\n chk.append(True)\nelse:\n chk.append(False)\nif NFW_plot.get_visible() == True:\n chk.append(True)\nelse:\n chk.append(False)\nif BK_plot.get_visible() == True:\n chk.append(True)\nelse:\n chk.append(False)\n<assignment token>\ninit_parameters.write('init_guess_params.txt', format='ascii.tab',\n overwrite=True)\nprint(\n \"\"\"\n#####################################################################\"\"\"\n )\nprint('###################### GalRotpy ######################')\nprint(\n '#####################################################################\\n\\n'\n )\n\n\ndef model(parameters, R):\n global chk, para_labels, aa\n para = {}\n for i in range(len(para_labels)):\n para[para_labels[i]] = parameters[i]\n r_0 = 1 * units.kpc\n v_0 = 220 * units.km / units.s\n check_pot = []\n if chk[0] == True:\n if aa[0] == 0.0:\n a1 = 0.0\n amp1 = para['amp1']\n b1 = para['b1']\n else:\n amp1 = para['amp1']\n a1 = para['a1']\n b1 = para['b1']\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Bulge_p)\n if chk[1] == True:\n amp2 = para['amp2']\n a2 = para['a2']\n b2 = 
para['b2']\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Thin_Disk_p)\n if chk[2] == True:\n amp3 = para['amp3']\n a3 = para['a3']\n b3 = para['b3']\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Thick_Disk_p)\n if chk[3] == True:\n amp4 = para['amp4']\n h_r = para['h_r']\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4 * (units.\n Msun / units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=\n 0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n check_pot.append(EX_Disk_p)\n if chk[4] == True:\n amp5 = para['amp5']\n a5 = para['a5']\n NFW_p = NFWPotential(amp=amp5 * units.Msun, a=a5 * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n check_pot.append(NFW_p)\n if chk[5] == True:\n amp6 = para['amp6']\n a6 = para['a6']\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n a6 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(BK_p)\n vc_total = calcRotcurve(check_pot, R, phi=None) * 220\n return vc_total\n\n\ndef lnprior(parameters):\n booL = []\n for i in parameters:\n if i > 0.0:\n booL.append(True)\n else:\n booL.append(False)\n if False in booL:\n return -np.inf\n else:\n return 0.0\n\n\ndef lnlike(parameters, x, y, yerr):\n Model = model(parameters, x)\n return -0.5 * np.sum(((y - Model) / yerr) ** 2)\n\n\ndef lnprob(parameters, x, y, yerr):\n lp = lnprior(parameters)\n Model = model(parameters, x)\n if not np.isfinite(lp) or True in np.isnan(Model):\n return -np.inf\n else:\n return lp + lnlike(parameters, x, y, yerr)\n\n\n<docstring token>\n<assignment token>\nif chk[0] == True:\n if aa[0] == 0.0:\n para_labels.append('b1')\n para_in.append(bb[0])\n labels.append('$b_B$')\n labels_log.append('$\\\\log(b_B)$')\n para_labels.append('amp1')\n para_in.append(masses[0])\n labels.append('$M_B$')\n labels_log.append('$\\\\log(M_B)$')\n else:\n para_labels.append('a1')\n para_in.append(aa[0])\n labels.append('$a_B$')\n labels_log.append('$\\\\log(a_b)$')\n para_labels.append('b1')\n para_in.append(bb[0])\n labels.append('$b_B$')\n labels_log.append('$\\\\log(b_b)$')\n para_labels.append('amp1')\n para_in.append(masses[0])\n labels.append('$M_B$')\n labels_log.append('$\\\\log(M_b)$')\nif chk[1] == True:\n para_labels.append('a2')\n para_in.append(aa[1])\n labels.append('$a_{TD}$')\n labels_log.append('$\\\\log(a_{TD})$')\n para_labels.append('b2')\n para_in.append(bb[1])\n labels.append('$b_{TD}$')\n labels_log.append('$\\\\log(b_{TD})$')\n para_labels.append('amp2')\n para_in.append(masses[1])\n labels.append('$M_{TD}$')\n labels_log.append('$\\\\log(M_{TD})$')\nif chk[2] == True:\n para_labels.append('a3')\n para_in.append(aa[2])\n labels.append('$a_{TkD}$')\n labels_log.append('$\\\\log(a_{TkD})$')\n para_labels.append('b3')\n para_in.append(bb[2])\n labels.append('$b_{TkD}$')\n labels_log.append('$\\\\log(b_{TkD})$')\n para_labels.append('amp3')\n para_in.append(masses[2])\n labels.append('$M_{TkD}$')\n labels_log.append('$\\\\log(M_{TkD})$')\nif chk[3] == True:\n para_labels.append('h_r')\n para_in.append(aa[3])\n labels.append('$h_{r}$')\n labels_log.append('$\\\\log(h_{r})$')\n para_labels.append('amp4')\n para_in.append(masses[3])\n labels.append('$\\\\Sigma_{0}$')\n labels_log.append('$\\\\log(\\\\Sigma_{0})$')\nif chk[4] == True:\n para_labels.append('a5')\n para_in.append(aa[4])\n labels.append('$a_{NFW}$')\n 
labels_log.append('$\\\\log(a_{NFW})$')\n para_labels.append('amp5')\n para_in.append(masses[4])\n labels.append('$M_{0}$')\n labels_log.append('$\\\\log(M_{0})$')\nif chk[5] == True:\n para_labels.append('a6')\n para_in.append(aa[5])\n labels.append('$a_{Bk}$')\n labels_log.append('$\\\\log(a_{Bk})$')\n para_labels.append('amp6')\n para_in.append(masses[5])\n labels.append('$\\\\rho_{0}$')\n labels_log.append('$\\\\log(\\\\rho_{0})$')\n<assignment token>\nprint('Dimension: ', ndim, '\\n')\nif chk[4] == True or chk[5] == True:\n Delta_c = float(input(\n 'Enter the cosmological overdensity you want to use:\\n'))\n<assignment token>\nprint(\n \"\"\"\n#####################################################################\n\"\"\"\n )\n<assignment token>\nif Round <= 0:\n print('\\nStart over...')\n exit()\nprint('\\nRunning...\\n')\n<assignment token>\nif Round == 1:\n p0, lp, _ = sampler.run_mcmc(pos_in, steps)\n print('It took ', (time.time() - time0) / 60, 'minutes\\n')\nif Round > 1:\n for j in range(Round - 1):\n ti = time.time()\n PARA = []\n p0, lp, _ = sampler.run_mcmc(pos_in, steps)\n SAMPLES = sampler.chain[:, int(0.5 * steps):, :].reshape((-1, ndim))\n for i in range(ndim):\n mcmc = np.percentile(SAMPLES[:, i], [50.0 - 0.5 * 68, 50.0, \n 50.0 + 0.5 * 68])\n PARA.append(mcmc[1])\n p = np.array(PARA)\n pos_in = [abs(p + pos_step * p * np.random.randn(ndim) + 1e-08 * np\n .random.randn(ndim)) for i in range(nwalkers)]\n sampler.reset()\n print('Run ' + str(j + 1) + ' done')\n print('Time: ', (time.time() - ti) / 60, 'minutes\\n')\n ti = time.time()\n if Round > 1:\n steps = 3 * steps\n p0, lp, _ = sampler.run_mcmc(pos_in, steps)\n print('Run ' + str(Round) + ' done')\n print('Time: ', (time.time() - ti) / 60, 'minutes\\n')\n print('It took ', (time.time() - time0) / 60, 'minutes\\n')\nprint('#####################################################################\\n'\n )\n<assignment token>\nfor i in range(nwalkers):\n chain_value = sampler.chain[:, :, 0].T[:][:, i]\n ax.plot(chain_steps, chain_value, '-', color='k', alpha=0.3)\nax.plot(chain_steps, len(chain_steps) * [start[0]], '-', color='r', lw=1)\nax.set_xlim(0, len(chain_steps) - 1)\nax.set_xlabel('$Steps$', fontsize=10)\nax.set_ylabel(labels[0], fontsize=15)\n\n\nclass Index(object):\n ind = 0\n\n def next(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind += 1\n if self.ind >= ndim:\n self.ind = 0\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n def prev(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind -= 1\n if self.ind == -1:\n self.ind = ndim - 1\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n\n<assignment token>\nbnext.on_clicked(callback.next)\n<assignment token>\nbprev.on_clicked(callback.prev)\n\n\ndef burn(event):\n plt.close()\n\n\n<assignment 
token>\nbutton_reset.on_clicked(burn)\nplt.show()\n<assignment token>\nprint(\n \"\"\"\n#####################################################################\n\"\"\"\n )\nprint('Plotting...')\nif burn_in == 0.0:\n samples = sampler.chain[:, :, :].reshape((-1, ndim))\nelse:\n samples = sampler.chain[:, burn_in:, :].reshape((-1, ndim))\nsamples.shape\n<assignment token>\nfor yi in range(ndim):\n for xi in range(yi + 1):\n ax = axes[yi, xi]\n ax.tick_params(axis='both', which='major', labelsize=14.5, pad=3,\n direction='in')\nfig.savefig('Conf_Regions.pdf', bbox_inches='tight', pad_inches=0.15)\n<docstring token>\n<assignment token>\n\n\ndef eq_nfw(x, rho_0, rho_c):\n global Delta_c\n return np.log(1 + x) - x / (1 + x) - Delta_c * rho_c / (3.0 * rho_0\n ) * x ** 3\n\n\ndef mass_nfw(x, rho_0, a):\n return 4.0 * np.pi * rho_0 * a ** 3 * (np.log(1 + x) - x / (1 + x))\n\n\ndef eq_b(x, rho_0, rho_c):\n global Delta_c\n return 2.0 * np.log(1 + x) + np.log(1 + x ** 2) - 2.0 * np.arctan(x\n ) - 4.0 * Delta_c * rho_c / (3.0 * rho_0) * x ** 3\n\n\ndef mass_b(x, rho_0, a):\n return np.pi * rho_0 * a ** 3 * (2.0 * np.log(1 + x) + np.log(1 + x ** \n 2) - 2.0 * np.arctan(x))\n\n\n<assignment token>\nfor i in range(ndim):\n mcmc = np.percentile(samples[:, i], [50.0 - 0.5 * 95, 50.0 - 0.5 * 68, \n 50.0, 50.0 + 0.5 * 68, 50.0 + 0.5 * 95])\n para.append(mcmc[2])\n fit_para.append(mcmc[2])\n parap68.append(mcmc[3] - mcmc[2])\n paran68.append(mcmc[2] - mcmc[1])\n parap95.append(mcmc[4] - mcmc[2])\n paran95.append(mcmc[2] - mcmc[0])\n final_para_labels.append(para_labels[i])\n if para_labels[i] == 'h_r':\n ed_h_r = np.array(samples[:, i])\n if para_labels[i] == 'amp4':\n ed_sigma0 = np.array(samples[:, i])\n M_disc = 2.0 * np.pi * ed_sigma0 * (1000 * ed_h_r) ** 2\n mcmc = np.percentile(M_disc, [50.0 - 0.5 * 95, 50.0 - 0.5 * 68, \n 50.0, 50.0 + 0.5 * 68, 50.0 + 0.5 * 95])\n para.append(mcmc[2])\n parap68.append(mcmc[3] - mcmc[2])\n paran68.append(mcmc[2] - mcmc[1])\n parap95.append(mcmc[4] - mcmc[2])\n paran95.append(mcmc[2] - mcmc[0])\n final_para_labels.append('M_star')\n if para_labels[i] == 'a5':\n nfw_a = np.array(samples[:, i])\n if para_labels[i] == 'amp5':\n nfw_M0 = np.array(samples[:, i])\n rho_0 = nfw_M0 / (4.0 * np.pi * nfw_a ** 3)\n mcmc = np.percentile(rho_0, [50.0 - 0.5 * 95, 50.0 - 0.5 * 68, 50.0,\n 50.0 + 0.5 * 68, 50.0 + 0.5 * 95])\n para.append(mcmc[2])\n parap68.append(mcmc[3] - mcmc[2])\n paran68.append(mcmc[2] - mcmc[1])\n parap95.append(mcmc[4] - mcmc[2])\n paran95.append(mcmc[2] - mcmc[0])\n final_para_labels.append('rho_0_NFW')\n X_nfw = []\n for density in rho_0:\n X_nfw.append(fsolve(eq_nfw, 100000.0, args=(density, rho_c))[0])\n mcmc = np.percentile(np.array(X_nfw), [50.0 - 0.5 * 95, 50.0 - 0.5 *\n 68, 50.0, 50.0 + 0.5 * 68, 50.0 + 0.5 * 95])\n para.append(mcmc[2])\n parap68.append(mcmc[3] - mcmc[2])\n paran68.append(mcmc[2] - mcmc[1])\n parap95.append(mcmc[4] - mcmc[2])\n paran95.append(mcmc[2] - mcmc[0])\n final_para_labels.append('X_NFW')\n M_h_nfw = mass_nfw(np.array(X_nfw), rho_0, nfw_a)\n mcmc = np.percentile(M_h_nfw, [50.0 - 0.5 * 95, 50.0 - 0.5 * 68, \n 50.0, 50.0 + 0.5 * 68, 50.0 + 0.5 * 95])\n para.append(mcmc[2])\n parap68.append(mcmc[3] - mcmc[2])\n paran68.append(mcmc[2] - mcmc[1])\n parap95.append(mcmc[4] - mcmc[2])\n paran95.append(mcmc[2] - mcmc[0])\n final_para_labels.append('M_h_NFW')\n if para_labels[i] == 'a6':\n b_a = np.array(samples[:, i])\n if para_labels[i] == 'amp6':\n X_b = []\n for density in samples[:, i]:\n X_b.append(fsolve(eq_b, 100000.0, args=(density, 
rho_c))[0])\n mcmc = np.percentile(np.array(X_b), [50.0 - 0.5 * 95, 50.0 - 0.5 * \n 68, 50.0, 50.0 + 0.5 * 68, 50.0 + 0.5 * 95])\n para.append(mcmc[2])\n parap68.append(mcmc[3] - mcmc[2])\n paran68.append(mcmc[2] - mcmc[1])\n parap95.append(mcmc[4] - mcmc[2])\n paran95.append(mcmc[2] - mcmc[0])\n final_para_labels.append('X_Bk')\n M_h_b = mass_b(np.array(X_b), samples[:, i], b_a)\n mcmc = np.percentile(M_h_b, [50.0 - 0.5 * 95, 50.0 - 0.5 * 68, 50.0,\n 50.0 + 0.5 * 68, 50.0 + 0.5 * 95])\n para.append(mcmc[2])\n parap68.append(mcmc[3] - mcmc[2])\n paran68.append(mcmc[2] - mcmc[1])\n parap95.append(mcmc[4] - mcmc[2])\n paran95.append(mcmc[2] - mcmc[0])\n final_para_labels.append('M_h_Bk')\n<assignment token>\nnp.warnings.filterwarnings('ignore')\nplt.figure(figsize=(6, 6))\n<assignment token>\nfor i in range(len(final_para_labels)):\n best_para[final_para_labels[i]] = para[i]\nif chk[0] == True:\n if aa[0] == 0.0:\n a1 = 0.0\n amp1 = best_para['amp1']\n b1 = best_para['b1']\n else:\n amp1 = best_para['amp1']\n a1 = best_para['a1']\n b1 = best_para['b1']\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 * units\n .kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n vc_b = calcRotcurve(MN_Bulge_p, r, phi=None) * 220\n plt.plot(r, vc_b, '--', color='gray', label='Bulge')\nif chk[1] == True:\n amp2 = best_para['amp2']\n a2 = best_para['a2']\n b2 = best_para['b2']\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n vc_td = calcRotcurve(MN_Thin_Disk_p, r, phi=None) * 220\n plt.plot(r, vc_td, '--', color='purple', label='Thin Disk')\nif chk[2] == True:\n amp3 = best_para['amp3']\n a3 = best_para['a3']\n b3 = best_para['b3']\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=a3 *\n units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n vc_tkd = calcRotcurve(MN_Thick_Disk_p, r, phi=None) * 220\n plt.plot(r, vc_tkd, '--', color='blue', label='Thick Disk')\nif chk[3] == True:\n amp4 = best_para['amp4']\n h_r = best_para['h_r']\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4 * (units.Msun / \n units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=0.001,\n normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n vc_exp = calcRotcurve(EX_Disk_p, r, phi=None) * 220\n plt.plot(r, vc_exp, '--', color='cyan', label='Exp. 
Disk')\nif chk[4] == True:\n amp5 = best_para['amp5']\n a5 = best_para['a5']\n NFW_p = NFWPotential(amp=amp5 * units.Msun, a=a5 * units.kpc, normalize\n =False, ro=r_0, vo=v_0)\n vc_nfw = calcRotcurve(NFW_p, r, phi=None) * 220\n plt.plot(r, vc_nfw, '--', color='green', label='NFW - Halo')\nif chk[5] == True:\n amp6 = best_para['amp6']\n a6 = best_para['a6']\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=a6 *\n units.kpc, normalize=False, ro=r_0, vo=v_0)\n vc_bk = calcRotcurve(BK_p, r, phi=None) * 220\n plt.plot(r, vc_bk, '--', color='orange', label='Burkert - Halo')\nplt.errorbar(r_data, v_c_data, yerr=v_c_err_data, fmt='ko', ecolor='black',\n ms=4, label=None)\nplt.plot(r, curva, '-', color='red', lw=1.5, label='Best Fit')\nplt.xlabel('$R(kpc)$', fontsize=20)\nplt.ylabel('$V_c(km/s)$', fontsize=20)\nplt.tick_params(axis='both', which='major', labelsize=15)\nplt.xlim(0.0, 1.02 * np.amax(r_data))\nplt.ylim(0.0, 1.1 * np.amax(v_c_data))\nplt.tight_layout()\nplt.legend(loc='lower right', fontsize=15)\nplt.savefig('GalRotpy_fit.pdf')\nprint(\n \"\"\"\n#####################################################################\n\"\"\"\n )\nif chk[0] == True:\n if aa[0] == 0.0:\n index.append('BULGE')\n index.append('---')\n table_para.append('b')\n table_units.append('kpc')\n table_para.append('M')\n table_units.append('M_Sun')\n else:\n index.append('BULGE')\n index.append('---')\n index.append('---')\n table_para.append('a')\n table_units.append('kpc')\n table_para.append('b')\n table_units.append('kpc')\n table_para.append('M')\n table_units.append('M_Sun')\nif chk[1] == True:\n index.append('THIN DISK')\n index.append('---')\n index.append('---')\n table_para.append('a')\n table_units.append('kpc')\n table_para.append('b')\n table_units.append('kpc')\n table_para.append('M')\n table_units.append('M_Sun')\nif chk[2] == True:\n index.append('THICK DISK')\n index.append('---')\n index.append('---')\n table_para.append('a')\n table_units.append('kpc')\n table_para.append('b')\n table_units.append('kpc')\n table_para.append('M')\n table_units.append('M_Sun')\nif chk[3] == True:\n index.append('EXPONENTIAL DISK')\n index.append('---')\n index.append('---')\n table_para.append('h_r')\n table_units.append('kpc')\n table_para.append('Sigma_0')\n table_units.append('M_Sun/pc^2')\n table_para.append('M')\n table_units.append('M_Sun')\nif chk[4] == True:\n index.append('NFW HALO')\n index.append('---')\n index.append('---')\n index.append('---')\n index.append('---')\n table_para.append('a')\n table_units.append('kpc')\n table_para.append('M_0')\n table_units.append('M_Sun')\n table_para.append('rho_0')\n table_units.append('M_Sun/kpc^3')\n table_para.append('X')\n table_units.append('---')\n table_para.append('M_h')\n table_units.append('M_Sun')\nif chk[5] == True:\n index.append('BURKERT HALO')\n index.append('---')\n index.append('---')\n index.append('---')\n table_para.append('a')\n table_units.append('kpc')\n table_para.append('rho_0')\n table_units.append('M_Sun/kpc^3')\n table_para.append('X')\n table_units.append('---')\n table_para.append('M_h')\n table_units.append('M_Sun')\nfor i in range(len(para)):\n table_data.append([table_para[i], table_units[i], paran95[i], paran68[i\n ], para[i], parap68[i], parap95[i]])\n<assignment token>\ntable_p.to_csv('final_params.txt', sep='\\t', encoding='utf-8')\nprint(table_p)\nprint(\n \"\"\"\n#####################################################################\"\"\"\n )\nprint('\\nDone')\nprint(\n 
\"\"\"\n#####################################################################\n\"\"\"\n )\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef MN_b_amp_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n amp1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a1 *\n units.kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_b_a_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n a1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=val *\n units.kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_b_b_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n b1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_amp_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n amp2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_a_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n a2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=\n val * units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_b_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n b2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_tkd_amp_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n amp3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a3 *\n units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_tkd_a_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n a3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n val * units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_tkd_b_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n b3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_ed_amp_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n amp4 = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=val * (units.Msun /\n units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=0.001,\n normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\ndef MN_ed_a_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n h_r = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4 * (units.\n Msun / units.pc ** 2), hr=val * units.kpc, maxiter=20, tol=\n 0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\ndef NFW_amp_s_func(val):\n if NFW_plot.get_visible() == True:\n global NFW_p, amp5, a5\n amp5 = val * 1\n NFW_p = NFWPotential(amp=val * units.Msun, a=a5 * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n 
update_rot_curve()\n\n\ndef NFW_a_s_func(val):\n if NFW_plot.get_visible() == True:\n global NFW_p, amp5, a5\n a5 = val * 1\n NFW_p = NFWPotential(amp=amp5 * units.Msun, a=val * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef BK_amp_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n amp6 = val * 1\n BK_p = BurkertPotential(amp=val * units.Msun / units.kpc ** 3, a=a6 *\n units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef BK_a_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n a6 = val * 1\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef update_rot_curve():\n ax.clear()\n global MN_b_plot, MN_Bulge_p, MN_Thin_Disk_p, MN_Thick_Disk_p, MN_td_plot, MN_tkd_plot, NFW_p, NFW_plot, EX_d_plot, EX_Disk_p, CV_galaxy, CV_galaxy_dot, BK_p, BK_plot\n composite_pot_array = []\n ax.set_xlabel('$R(kpc)$', fontsize=20)\n ax.set_ylabel('$v_c(km/s)$', fontsize=20)\n ax.tick_params(axis='both', which='both', labelsize=15)\n ax.set_xlim([0, 1.02 * r_data[-1]])\n ax.set_ylim([0, np.max(v_c_data) * 1.2])\n if MN_b_plot.get_visible() == True:\n MN_Bulge = calcRotcurve(MN_Bulge_p, lista, phi=None) * 220\n MN_b_plot, = ax.plot(lista, MN_Bulge, linestyle='--', c='gray')\n composite_pot_array.append(MN_Bulge_p)\n if MN_td_plot.get_visible() == True:\n MN_Thin_Disk = calcRotcurve(MN_Thin_Disk_p, lista, phi=None) * 220\n MN_td_plot, = ax.plot(lista, MN_Thin_Disk, linestyle='--', c='purple')\n composite_pot_array.append(MN_Thin_Disk_p)\n if MN_tkd_plot.get_visible() == True:\n MN_Thick_Disk = calcRotcurve(MN_Thick_Disk_p, lista, phi=None) * 220\n MN_tkd_plot, = ax.plot(lista, MN_Thick_Disk, linestyle='--', c='blue')\n composite_pot_array.append(MN_Thick_Disk_p)\n if NFW_plot.get_visible() == True:\n NFW = calcRotcurve(NFW_p, lista, phi=None) * 220\n NFW_plot, = ax.plot(lista, NFW, linestyle='--', c='green')\n composite_pot_array.append(NFW_p)\n if EX_d_plot.get_visible() == True:\n EX_Disk = calcRotcurve(EX_Disk_p, lista, phi=None) * 220\n EX_d_plot, = ax.plot(lista, EX_Disk, linestyle='--', c='cyan')\n composite_pot_array.append(EX_Disk_p)\n if BK_plot.get_visible() == True:\n BK = calcRotcurve(BK_p, lista, phi=None) * 220\n BK_plot, = ax.plot(lista, BK, linestyle='--', c='orange')\n composite_pot_array.append(BK_p)\n CV_galaxy = ax.errorbar(r_data, v_c_data, v_c_err_data, c='k', fmt='',\n ls='none')\n CV_galaxy_dot = ax.scatter(r_data, v_c_data, c='k')\n v_circ_comp = calcRotcurve(composite_pot_array, lista, phi=None) * 220\n v_circ_comp_plot, = ax.plot(lista, v_circ_comp, c='k')\n\n\n<code token>\n\n\ndef reset(event):\n MN_b_amp_s.reset()\n MN_b_a_s.reset()\n MN_b_b_s.reset()\n MN_td_amp_s.reset()\n MN_td_a_s.reset()\n MN_td_b_s.reset()\n MN_tkd_amp_s.reset()\n MN_tkd_a_s.reset()\n MN_tkd_b_s.reset()\n MN_ed_amp_s.reset()\n MN_ed_a_s.reset()\n NFW_amp_s.reset()\n NFW_a_s.reset()\n BK_amp_s.reset()\n BK_a_s.reset()\n\n\n<assignment token>\n<code token>\n\n\ndef check_on_clicked(label):\n if label == 'MN Bulge (GRAY)':\n MN_b_plot.set_visible(not MN_b_plot.get_visible())\n update_rot_curve()\n elif label == 'MN Thin Disc (PURPLE)':\n MN_td_plot.set_visible(not MN_td_plot.get_visible())\n update_rot_curve()\n elif label == 'MN Thick Disc (BLUE)':\n MN_tkd_plot.set_visible(not MN_tkd_plot.get_visible())\n update_rot_curve()\n elif label == 'Exp. 
Disc (CYAN)':\n EX_d_plot.set_visible(not EX_d_plot.get_visible())\n update_rot_curve()\n elif label == 'NFW - Halo (GREEN)':\n NFW_plot.set_visible(not NFW_plot.get_visible())\n update_rot_curve()\n elif label == 'Burkert - Halo (ORANGE)':\n BK_plot.set_visible(not BK_plot.get_visible())\n update_rot_curve()\n plt.draw()\n\n\n<code token>\n<docstring token>\n<assignment token>\n\n\ndef start(event):\n plt.close(1)\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef model(parameters, R):\n global chk, para_labels, aa\n para = {}\n for i in range(len(para_labels)):\n para[para_labels[i]] = parameters[i]\n r_0 = 1 * units.kpc\n v_0 = 220 * units.km / units.s\n check_pot = []\n if chk[0] == True:\n if aa[0] == 0.0:\n a1 = 0.0\n amp1 = para['amp1']\n b1 = para['b1']\n else:\n amp1 = para['amp1']\n a1 = para['a1']\n b1 = para['b1']\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Bulge_p)\n if chk[1] == True:\n amp2 = para['amp2']\n a2 = para['a2']\n b2 = para['b2']\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Thin_Disk_p)\n if chk[2] == True:\n amp3 = para['amp3']\n a3 = para['a3']\n b3 = para['b3']\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Thick_Disk_p)\n if chk[3] == True:\n amp4 = para['amp4']\n h_r = para['h_r']\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4 * (units.\n Msun / units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=\n 0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n check_pot.append(EX_Disk_p)\n if chk[4] == True:\n amp5 = para['amp5']\n a5 = para['a5']\n NFW_p = NFWPotential(amp=amp5 * units.Msun, a=a5 * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n check_pot.append(NFW_p)\n if chk[5] == True:\n amp6 = para['amp6']\n a6 = para['a6']\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n a6 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(BK_p)\n vc_total = calcRotcurve(check_pot, R, phi=None) * 220\n return vc_total\n\n\ndef lnprior(parameters):\n booL = []\n for i in parameters:\n if i > 0.0:\n booL.append(True)\n else:\n booL.append(False)\n if False in booL:\n return -np.inf\n else:\n return 0.0\n\n\ndef lnlike(parameters, x, y, yerr):\n Model = model(parameters, x)\n return -0.5 * np.sum(((y - Model) / yerr) ** 2)\n\n\ndef lnprob(parameters, x, y, yerr):\n lp = lnprior(parameters)\n Model = model(parameters, x)\n if not np.isfinite(lp) or True in np.isnan(Model):\n return -np.inf\n else:\n return lp + lnlike(parameters, x, y, yerr)\n\n\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Index(object):\n ind = 0\n\n def next(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind += 1\n if self.ind >= ndim:\n self.ind = 0\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n 
ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n def prev(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind -= 1\n if self.ind == -1:\n self.ind = ndim - 1\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef burn(event):\n plt.close()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n<assignment token>\n\n\ndef eq_nfw(x, rho_0, rho_c):\n global Delta_c\n return np.log(1 + x) - x / (1 + x) - Delta_c * rho_c / (3.0 * rho_0\n ) * x ** 3\n\n\ndef mass_nfw(x, rho_0, a):\n return 4.0 * np.pi * rho_0 * a ** 3 * (np.log(1 + x) - x / (1 + x))\n\n\ndef eq_b(x, rho_0, rho_c):\n global Delta_c\n return 2.0 * np.log(1 + x) + np.log(1 + x ** 2) - 2.0 * np.arctan(x\n ) - 4.0 * Delta_c * rho_c / (3.0 * rho_0) * x ** 3\n\n\ndef mass_b(x, rho_0, a):\n return np.pi * rho_0 * a ** 3 * (2.0 * np.log(1 + x) + np.log(1 + x ** \n 2) - 2.0 * np.arctan(x))\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef MN_b_amp_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n amp1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a1 *\n units.kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_b_a_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n a1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=val *\n units.kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_b_b_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n b1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_amp_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n amp2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_a_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n a2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=\n val * units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_b_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n b2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_tkd_amp_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n amp3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a3 *\n units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_tkd_a_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n a3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n val * units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_tkd_b_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n b3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_ed_amp_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n amp4 = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=val * (units.Msun /\n units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=0.001,\n normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\ndef MN_ed_a_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n h_r = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4 * (units.\n Msun / units.pc ** 2), hr=val * units.kpc, maxiter=20, tol=\n 0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\ndef NFW_amp_s_func(val):\n if NFW_plot.get_visible() == True:\n global NFW_p, amp5, a5\n amp5 = val * 1\n NFW_p = NFWPotential(amp=val * units.Msun, a=a5 * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n 
update_rot_curve()\n\n\ndef NFW_a_s_func(val):\n if NFW_plot.get_visible() == True:\n global NFW_p, amp5, a5\n a5 = val * 1\n NFW_p = NFWPotential(amp=amp5 * units.Msun, a=val * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef BK_amp_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n amp6 = val * 1\n BK_p = BurkertPotential(amp=val * units.Msun / units.kpc ** 3, a=a6 *\n units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef BK_a_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n a6 = val * 1\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef update_rot_curve():\n ax.clear()\n global MN_b_plot, MN_Bulge_p, MN_Thin_Disk_p, MN_Thick_Disk_p, MN_td_plot, MN_tkd_plot, NFW_p, NFW_plot, EX_d_plot, EX_Disk_p, CV_galaxy, CV_galaxy_dot, BK_p, BK_plot\n composite_pot_array = []\n ax.set_xlabel('$R(kpc)$', fontsize=20)\n ax.set_ylabel('$v_c(km/s)$', fontsize=20)\n ax.tick_params(axis='both', which='both', labelsize=15)\n ax.set_xlim([0, 1.02 * r_data[-1]])\n ax.set_ylim([0, np.max(v_c_data) * 1.2])\n if MN_b_plot.get_visible() == True:\n MN_Bulge = calcRotcurve(MN_Bulge_p, lista, phi=None) * 220\n MN_b_plot, = ax.plot(lista, MN_Bulge, linestyle='--', c='gray')\n composite_pot_array.append(MN_Bulge_p)\n if MN_td_plot.get_visible() == True:\n MN_Thin_Disk = calcRotcurve(MN_Thin_Disk_p, lista, phi=None) * 220\n MN_td_plot, = ax.plot(lista, MN_Thin_Disk, linestyle='--', c='purple')\n composite_pot_array.append(MN_Thin_Disk_p)\n if MN_tkd_plot.get_visible() == True:\n MN_Thick_Disk = calcRotcurve(MN_Thick_Disk_p, lista, phi=None) * 220\n MN_tkd_plot, = ax.plot(lista, MN_Thick_Disk, linestyle='--', c='blue')\n composite_pot_array.append(MN_Thick_Disk_p)\n if NFW_plot.get_visible() == True:\n NFW = calcRotcurve(NFW_p, lista, phi=None) * 220\n NFW_plot, = ax.plot(lista, NFW, linestyle='--', c='green')\n composite_pot_array.append(NFW_p)\n if EX_d_plot.get_visible() == True:\n EX_Disk = calcRotcurve(EX_Disk_p, lista, phi=None) * 220\n EX_d_plot, = ax.plot(lista, EX_Disk, linestyle='--', c='cyan')\n composite_pot_array.append(EX_Disk_p)\n if BK_plot.get_visible() == True:\n BK = calcRotcurve(BK_p, lista, phi=None) * 220\n BK_plot, = ax.plot(lista, BK, linestyle='--', c='orange')\n composite_pot_array.append(BK_p)\n CV_galaxy = ax.errorbar(r_data, v_c_data, v_c_err_data, c='k', fmt='',\n ls='none')\n CV_galaxy_dot = ax.scatter(r_data, v_c_data, c='k')\n v_circ_comp = calcRotcurve(composite_pot_array, lista, phi=None) * 220\n v_circ_comp_plot, = ax.plot(lista, v_circ_comp, c='k')\n\n\n<code token>\n\n\ndef reset(event):\n MN_b_amp_s.reset()\n MN_b_a_s.reset()\n MN_b_b_s.reset()\n MN_td_amp_s.reset()\n MN_td_a_s.reset()\n MN_td_b_s.reset()\n MN_tkd_amp_s.reset()\n MN_tkd_a_s.reset()\n MN_tkd_b_s.reset()\n MN_ed_amp_s.reset()\n MN_ed_a_s.reset()\n NFW_amp_s.reset()\n NFW_a_s.reset()\n BK_amp_s.reset()\n BK_a_s.reset()\n\n\n<assignment token>\n<code token>\n\n\ndef check_on_clicked(label):\n if label == 'MN Bulge (GRAY)':\n MN_b_plot.set_visible(not MN_b_plot.get_visible())\n update_rot_curve()\n elif label == 'MN Thin Disc (PURPLE)':\n MN_td_plot.set_visible(not MN_td_plot.get_visible())\n update_rot_curve()\n elif label == 'MN Thick Disc (BLUE)':\n MN_tkd_plot.set_visible(not MN_tkd_plot.get_visible())\n update_rot_curve()\n elif label == 'Exp. 
Disc (CYAN)':\n EX_d_plot.set_visible(not EX_d_plot.get_visible())\n update_rot_curve()\n elif label == 'NFW - Halo (GREEN)':\n NFW_plot.set_visible(not NFW_plot.get_visible())\n update_rot_curve()\n elif label == 'Burkert - Halo (ORANGE)':\n BK_plot.set_visible(not BK_plot.get_visible())\n update_rot_curve()\n plt.draw()\n\n\n<code token>\n<docstring token>\n<assignment token>\n\n\ndef start(event):\n plt.close(1)\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef model(parameters, R):\n global chk, para_labels, aa\n para = {}\n for i in range(len(para_labels)):\n para[para_labels[i]] = parameters[i]\n r_0 = 1 * units.kpc\n v_0 = 220 * units.km / units.s\n check_pot = []\n if chk[0] == True:\n if aa[0] == 0.0:\n a1 = 0.0\n amp1 = para['amp1']\n b1 = para['b1']\n else:\n amp1 = para['amp1']\n a1 = para['a1']\n b1 = para['b1']\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Bulge_p)\n if chk[1] == True:\n amp2 = para['amp2']\n a2 = para['a2']\n b2 = para['b2']\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Thin_Disk_p)\n if chk[2] == True:\n amp3 = para['amp3']\n a3 = para['a3']\n b3 = para['b3']\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Thick_Disk_p)\n if chk[3] == True:\n amp4 = para['amp4']\n h_r = para['h_r']\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4 * (units.\n Msun / units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=\n 0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n check_pot.append(EX_Disk_p)\n if chk[4] == True:\n amp5 = para['amp5']\n a5 = para['a5']\n NFW_p = NFWPotential(amp=amp5 * units.Msun, a=a5 * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n check_pot.append(NFW_p)\n if chk[5] == True:\n amp6 = para['amp6']\n a6 = para['a6']\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n a6 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(BK_p)\n vc_total = calcRotcurve(check_pot, R, phi=None) * 220\n return vc_total\n\n\ndef lnprior(parameters):\n booL = []\n for i in parameters:\n if i > 0.0:\n booL.append(True)\n else:\n booL.append(False)\n if False in booL:\n return -np.inf\n else:\n return 0.0\n\n\ndef lnlike(parameters, x, y, yerr):\n Model = model(parameters, x)\n return -0.5 * np.sum(((y - Model) / yerr) ** 2)\n\n\ndef lnprob(parameters, x, y, yerr):\n lp = lnprior(parameters)\n Model = model(parameters, x)\n if not np.isfinite(lp) or True in np.isnan(Model):\n return -np.inf\n else:\n return lp + lnlike(parameters, x, y, yerr)\n\n\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Index(object):\n ind = 0\n\n def next(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind += 1\n if self.ind >= ndim:\n self.ind = 0\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n 
ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n def prev(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind -= 1\n if self.ind == -1:\n self.ind = ndim - 1\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef burn(event):\n plt.close()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n\n\ndef mass_nfw(x, rho_0, a):\n return 4.0 * np.pi * rho_0 * a ** 3 * (np.log(1 + x) - x / (1 + x))\n\n\ndef eq_b(x, rho_0, rho_c):\n global Delta_c\n return 2.0 * np.log(1 + x) + np.log(1 + x ** 2) - 2.0 * np.arctan(x\n ) - 4.0 * Delta_c * rho_c / (3.0 * rho_0) * x ** 3\n\n\ndef mass_b(x, rho_0, a):\n return np.pi * rho_0 * a ** 3 * (2.0 * np.log(1 + x) + np.log(1 + x ** \n 2) - 2.0 * np.arctan(x))\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef MN_b_amp_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n amp1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a1 *\n units.kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_b_a_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n a1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=val *\n units.kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_b_b_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n b1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_amp_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n amp2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_a_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n a2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=\n val * units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_b_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n b2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_tkd_amp_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n amp3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a3 *\n units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_tkd_a_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n a3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n val * units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_tkd_b_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n b3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_ed_amp_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n amp4 = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=val * (units.Msun /\n units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=0.001,\n normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\ndef MN_ed_a_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n h_r = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4 * (units.\n Msun / units.pc ** 2), hr=val * units.kpc, maxiter=20, tol=\n 0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\ndef NFW_amp_s_func(val):\n if NFW_plot.get_visible() == True:\n global NFW_p, amp5, a5\n amp5 = val * 1\n NFW_p = NFWPotential(amp=val * units.Msun, a=a5 * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n 
update_rot_curve()\n\n\ndef NFW_a_s_func(val):\n if NFW_plot.get_visible() == True:\n global NFW_p, amp5, a5\n a5 = val * 1\n NFW_p = NFWPotential(amp=amp5 * units.Msun, a=val * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef BK_amp_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n amp6 = val * 1\n BK_p = BurkertPotential(amp=val * units.Msun / units.kpc ** 3, a=a6 *\n units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef BK_a_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n a6 = val * 1\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef update_rot_curve():\n ax.clear()\n global MN_b_plot, MN_Bulge_p, MN_Thin_Disk_p, MN_Thick_Disk_p, MN_td_plot, MN_tkd_plot, NFW_p, NFW_plot, EX_d_plot, EX_Disk_p, CV_galaxy, CV_galaxy_dot, BK_p, BK_plot\n composite_pot_array = []\n ax.set_xlabel('$R(kpc)$', fontsize=20)\n ax.set_ylabel('$v_c(km/s)$', fontsize=20)\n ax.tick_params(axis='both', which='both', labelsize=15)\n ax.set_xlim([0, 1.02 * r_data[-1]])\n ax.set_ylim([0, np.max(v_c_data) * 1.2])\n if MN_b_plot.get_visible() == True:\n MN_Bulge = calcRotcurve(MN_Bulge_p, lista, phi=None) * 220\n MN_b_plot, = ax.plot(lista, MN_Bulge, linestyle='--', c='gray')\n composite_pot_array.append(MN_Bulge_p)\n if MN_td_plot.get_visible() == True:\n MN_Thin_Disk = calcRotcurve(MN_Thin_Disk_p, lista, phi=None) * 220\n MN_td_plot, = ax.plot(lista, MN_Thin_Disk, linestyle='--', c='purple')\n composite_pot_array.append(MN_Thin_Disk_p)\n if MN_tkd_plot.get_visible() == True:\n MN_Thick_Disk = calcRotcurve(MN_Thick_Disk_p, lista, phi=None) * 220\n MN_tkd_plot, = ax.plot(lista, MN_Thick_Disk, linestyle='--', c='blue')\n composite_pot_array.append(MN_Thick_Disk_p)\n if NFW_plot.get_visible() == True:\n NFW = calcRotcurve(NFW_p, lista, phi=None) * 220\n NFW_plot, = ax.plot(lista, NFW, linestyle='--', c='green')\n composite_pot_array.append(NFW_p)\n if EX_d_plot.get_visible() == True:\n EX_Disk = calcRotcurve(EX_Disk_p, lista, phi=None) * 220\n EX_d_plot, = ax.plot(lista, EX_Disk, linestyle='--', c='cyan')\n composite_pot_array.append(EX_Disk_p)\n if BK_plot.get_visible() == True:\n BK = calcRotcurve(BK_p, lista, phi=None) * 220\n BK_plot, = ax.plot(lista, BK, linestyle='--', c='orange')\n composite_pot_array.append(BK_p)\n CV_galaxy = ax.errorbar(r_data, v_c_data, v_c_err_data, c='k', fmt='',\n ls='none')\n CV_galaxy_dot = ax.scatter(r_data, v_c_data, c='k')\n v_circ_comp = calcRotcurve(composite_pot_array, lista, phi=None) * 220\n v_circ_comp_plot, = ax.plot(lista, v_circ_comp, c='k')\n\n\n<code token>\n\n\ndef reset(event):\n MN_b_amp_s.reset()\n MN_b_a_s.reset()\n MN_b_b_s.reset()\n MN_td_amp_s.reset()\n MN_td_a_s.reset()\n MN_td_b_s.reset()\n MN_tkd_amp_s.reset()\n MN_tkd_a_s.reset()\n MN_tkd_b_s.reset()\n MN_ed_amp_s.reset()\n MN_ed_a_s.reset()\n NFW_amp_s.reset()\n NFW_a_s.reset()\n BK_amp_s.reset()\n BK_a_s.reset()\n\n\n<assignment token>\n<code token>\n\n\ndef check_on_clicked(label):\n if label == 'MN Bulge (GRAY)':\n MN_b_plot.set_visible(not MN_b_plot.get_visible())\n update_rot_curve()\n elif label == 'MN Thin Disc (PURPLE)':\n MN_td_plot.set_visible(not MN_td_plot.get_visible())\n update_rot_curve()\n elif label == 'MN Thick Disc (BLUE)':\n MN_tkd_plot.set_visible(not MN_tkd_plot.get_visible())\n update_rot_curve()\n elif label == 'Exp. 
Disc (CYAN)':\n EX_d_plot.set_visible(not EX_d_plot.get_visible())\n update_rot_curve()\n elif label == 'NFW - Halo (GREEN)':\n NFW_plot.set_visible(not NFW_plot.get_visible())\n update_rot_curve()\n elif label == 'Burkert - Halo (ORANGE)':\n BK_plot.set_visible(not BK_plot.get_visible())\n update_rot_curve()\n plt.draw()\n\n\n<code token>\n<docstring token>\n<assignment token>\n\n\ndef start(event):\n plt.close(1)\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef model(parameters, R):\n global chk, para_labels, aa\n para = {}\n for i in range(len(para_labels)):\n para[para_labels[i]] = parameters[i]\n r_0 = 1 * units.kpc\n v_0 = 220 * units.km / units.s\n check_pot = []\n if chk[0] == True:\n if aa[0] == 0.0:\n a1 = 0.0\n amp1 = para['amp1']\n b1 = para['b1']\n else:\n amp1 = para['amp1']\n a1 = para['a1']\n b1 = para['b1']\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Bulge_p)\n if chk[1] == True:\n amp2 = para['amp2']\n a2 = para['a2']\n b2 = para['b2']\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Thin_Disk_p)\n if chk[2] == True:\n amp3 = para['amp3']\n a3 = para['a3']\n b3 = para['b3']\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Thick_Disk_p)\n if chk[3] == True:\n amp4 = para['amp4']\n h_r = para['h_r']\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4 * (units.\n Msun / units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=\n 0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n check_pot.append(EX_Disk_p)\n if chk[4] == True:\n amp5 = para['amp5']\n a5 = para['a5']\n NFW_p = NFWPotential(amp=amp5 * units.Msun, a=a5 * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n check_pot.append(NFW_p)\n if chk[5] == True:\n amp6 = para['amp6']\n a6 = para['a6']\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n a6 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(BK_p)\n vc_total = calcRotcurve(check_pot, R, phi=None) * 220\n return vc_total\n\n\ndef lnprior(parameters):\n booL = []\n for i in parameters:\n if i > 0.0:\n booL.append(True)\n else:\n booL.append(False)\n if False in booL:\n return -np.inf\n else:\n return 0.0\n\n\ndef lnlike(parameters, x, y, yerr):\n Model = model(parameters, x)\n return -0.5 * np.sum(((y - Model) / yerr) ** 2)\n\n\ndef lnprob(parameters, x, y, yerr):\n lp = lnprior(parameters)\n Model = model(parameters, x)\n if not np.isfinite(lp) or True in np.isnan(Model):\n return -np.inf\n else:\n return lp + lnlike(parameters, x, y, yerr)\n\n\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Index(object):\n ind = 0\n\n def next(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind += 1\n if self.ind >= ndim:\n self.ind = 0\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n 
ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n def prev(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind -= 1\n if self.ind == -1:\n self.ind = ndim - 1\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef burn(event):\n plt.close()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n\n\ndef mass_nfw(x, rho_0, a):\n return 4.0 * np.pi * rho_0 * a ** 3 * (np.log(1 + x) - x / (1 + x))\n\n\ndef eq_b(x, rho_0, rho_c):\n global Delta_c\n return 2.0 * np.log(1 + x) + np.log(1 + x ** 2) - 2.0 * np.arctan(x\n ) - 4.0 * Delta_c * rho_c / (3.0 * rho_0) * x ** 3\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef MN_b_amp_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n amp1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a1 *\n units.kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_b_a_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n a1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=val *\n units.kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_b_b_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n b1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_amp_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n amp2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_a_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n a2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=\n val * units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_b_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n b2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n\n\ndef MN_tkd_a_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n a3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n val * units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_tkd_b_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n b3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_ed_amp_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n amp4 = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=val * (units.Msun /\n units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=0.001,\n normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\ndef MN_ed_a_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n h_r = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4 * (units.\n Msun / units.pc ** 2), hr=val * units.kpc, maxiter=20, tol=\n 0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\ndef NFW_amp_s_func(val):\n if NFW_plot.get_visible() == True:\n global NFW_p, amp5, a5\n amp5 = val * 1\n NFW_p = NFWPotential(amp=val * units.Msun, a=a5 * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef NFW_a_s_func(val):\n if NFW_plot.get_visible() == True:\n global NFW_p, amp5, a5\n a5 = val * 1\n NFW_p = NFWPotential(amp=amp5 * units.Msun, a=val * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef 
BK_amp_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n amp6 = val * 1\n BK_p = BurkertPotential(amp=val * units.Msun / units.kpc ** 3, a=a6 *\n units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef BK_a_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n a6 = val * 1\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef update_rot_curve():\n ax.clear()\n global MN_b_plot, MN_Bulge_p, MN_Thin_Disk_p, MN_Thick_Disk_p, MN_td_plot, MN_tkd_plot, NFW_p, NFW_plot, EX_d_plot, EX_Disk_p, CV_galaxy, CV_galaxy_dot, BK_p, BK_plot\n composite_pot_array = []\n ax.set_xlabel('$R(kpc)$', fontsize=20)\n ax.set_ylabel('$v_c(km/s)$', fontsize=20)\n ax.tick_params(axis='both', which='both', labelsize=15)\n ax.set_xlim([0, 1.02 * r_data[-1]])\n ax.set_ylim([0, np.max(v_c_data) * 1.2])\n if MN_b_plot.get_visible() == True:\n MN_Bulge = calcRotcurve(MN_Bulge_p, lista, phi=None) * 220\n MN_b_plot, = ax.plot(lista, MN_Bulge, linestyle='--', c='gray')\n composite_pot_array.append(MN_Bulge_p)\n if MN_td_plot.get_visible() == True:\n MN_Thin_Disk = calcRotcurve(MN_Thin_Disk_p, lista, phi=None) * 220\n MN_td_plot, = ax.plot(lista, MN_Thin_Disk, linestyle='--', c='purple')\n composite_pot_array.append(MN_Thin_Disk_p)\n if MN_tkd_plot.get_visible() == True:\n MN_Thick_Disk = calcRotcurve(MN_Thick_Disk_p, lista, phi=None) * 220\n MN_tkd_plot, = ax.plot(lista, MN_Thick_Disk, linestyle='--', c='blue')\n composite_pot_array.append(MN_Thick_Disk_p)\n if NFW_plot.get_visible() == True:\n NFW = calcRotcurve(NFW_p, lista, phi=None) * 220\n NFW_plot, = ax.plot(lista, NFW, linestyle='--', c='green')\n composite_pot_array.append(NFW_p)\n if EX_d_plot.get_visible() == True:\n EX_Disk = calcRotcurve(EX_Disk_p, lista, phi=None) * 220\n EX_d_plot, = ax.plot(lista, EX_Disk, linestyle='--', c='cyan')\n composite_pot_array.append(EX_Disk_p)\n if BK_plot.get_visible() == True:\n BK = calcRotcurve(BK_p, lista, phi=None) * 220\n BK_plot, = ax.plot(lista, BK, linestyle='--', c='orange')\n composite_pot_array.append(BK_p)\n CV_galaxy = ax.errorbar(r_data, v_c_data, v_c_err_data, c='k', fmt='',\n ls='none')\n CV_galaxy_dot = ax.scatter(r_data, v_c_data, c='k')\n v_circ_comp = calcRotcurve(composite_pot_array, lista, phi=None) * 220\n v_circ_comp_plot, = ax.plot(lista, v_circ_comp, c='k')\n\n\n<code token>\n\n\ndef reset(event):\n MN_b_amp_s.reset()\n MN_b_a_s.reset()\n MN_b_b_s.reset()\n MN_td_amp_s.reset()\n MN_td_a_s.reset()\n MN_td_b_s.reset()\n MN_tkd_amp_s.reset()\n MN_tkd_a_s.reset()\n MN_tkd_b_s.reset()\n MN_ed_amp_s.reset()\n MN_ed_a_s.reset()\n NFW_amp_s.reset()\n NFW_a_s.reset()\n BK_amp_s.reset()\n BK_a_s.reset()\n\n\n<assignment token>\n<code token>\n\n\ndef check_on_clicked(label):\n if label == 'MN Bulge (GRAY)':\n MN_b_plot.set_visible(not MN_b_plot.get_visible())\n update_rot_curve()\n elif label == 'MN Thin Disc (PURPLE)':\n MN_td_plot.set_visible(not MN_td_plot.get_visible())\n update_rot_curve()\n elif label == 'MN Thick Disc (BLUE)':\n MN_tkd_plot.set_visible(not MN_tkd_plot.get_visible())\n update_rot_curve()\n elif label == 'Exp. 
Disc (CYAN)':\n EX_d_plot.set_visible(not EX_d_plot.get_visible())\n update_rot_curve()\n elif label == 'NFW - Halo (GREEN)':\n NFW_plot.set_visible(not NFW_plot.get_visible())\n update_rot_curve()\n elif label == 'Burkert - Halo (ORANGE)':\n BK_plot.set_visible(not BK_plot.get_visible())\n update_rot_curve()\n plt.draw()\n\n\n<code token>\n<docstring token>\n<assignment token>\n\n\ndef start(event):\n plt.close(1)\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef model(parameters, R):\n global chk, para_labels, aa\n para = {}\n for i in range(len(para_labels)):\n para[para_labels[i]] = parameters[i]\n r_0 = 1 * units.kpc\n v_0 = 220 * units.km / units.s\n check_pot = []\n if chk[0] == True:\n if aa[0] == 0.0:\n a1 = 0.0\n amp1 = para['amp1']\n b1 = para['b1']\n else:\n amp1 = para['amp1']\n a1 = para['a1']\n b1 = para['b1']\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Bulge_p)\n if chk[1] == True:\n amp2 = para['amp2']\n a2 = para['a2']\n b2 = para['b2']\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Thin_Disk_p)\n if chk[2] == True:\n amp3 = para['amp3']\n a3 = para['a3']\n b3 = para['b3']\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Thick_Disk_p)\n if chk[3] == True:\n amp4 = para['amp4']\n h_r = para['h_r']\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4 * (units.\n Msun / units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=\n 0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n check_pot.append(EX_Disk_p)\n if chk[4] == True:\n amp5 = para['amp5']\n a5 = para['a5']\n NFW_p = NFWPotential(amp=amp5 * units.Msun, a=a5 * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n check_pot.append(NFW_p)\n if chk[5] == True:\n amp6 = para['amp6']\n a6 = para['a6']\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n a6 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(BK_p)\n vc_total = calcRotcurve(check_pot, R, phi=None) * 220\n return vc_total\n\n\ndef lnprior(parameters):\n booL = []\n for i in parameters:\n if i > 0.0:\n booL.append(True)\n else:\n booL.append(False)\n if False in booL:\n return -np.inf\n else:\n return 0.0\n\n\ndef lnlike(parameters, x, y, yerr):\n Model = model(parameters, x)\n return -0.5 * np.sum(((y - Model) / yerr) ** 2)\n\n\ndef lnprob(parameters, x, y, yerr):\n lp = lnprior(parameters)\n Model = model(parameters, x)\n if not np.isfinite(lp) or True in np.isnan(Model):\n return -np.inf\n else:\n return lp + lnlike(parameters, x, y, yerr)\n\n\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Index(object):\n ind = 0\n\n def next(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind += 1\n if self.ind >= ndim:\n self.ind = 0\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n 
ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n def prev(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind -= 1\n if self.ind == -1:\n self.ind = ndim - 1\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef burn(event):\n plt.close()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n\n\ndef mass_nfw(x, rho_0, a):\n return 4.0 * np.pi * rho_0 * a ** 3 * (np.log(1 + x) - x / (1 + x))\n\n\ndef eq_b(x, rho_0, rho_c):\n global Delta_c\n return 2.0 * np.log(1 + x) + np.log(1 + x ** 2) - 2.0 * np.arctan(x\n ) - 4.0 * Delta_c * rho_c / (3.0 * rho_0) * x ** 3\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef MN_b_amp_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n amp1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a1 *\n units.kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n\n\ndef MN_b_b_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n b1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_amp_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n amp2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_a_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n a2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=\n val * units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_b_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n b2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n\n\ndef MN_tkd_a_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n a3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n val * units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_tkd_b_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n b3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_ed_amp_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n amp4 = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=val * (units.Msun /\n units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=0.001,\n normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\ndef MN_ed_a_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n h_r = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4 * (units.\n Msun / units.pc ** 2), hr=val * units.kpc, maxiter=20, tol=\n 0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\ndef NFW_amp_s_func(val):\n if NFW_plot.get_visible() == True:\n global NFW_p, amp5, a5\n amp5 = val * 1\n NFW_p = NFWPotential(amp=val * units.Msun, a=a5 * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef NFW_a_s_func(val):\n if NFW_plot.get_visible() == True:\n global NFW_p, amp5, a5\n a5 = val * 1\n NFW_p = NFWPotential(amp=amp5 * units.Msun, a=val * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef BK_amp_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n amp6 = val * 1\n BK_p = BurkertPotential(amp=val * units.Msun / units.kpc ** 3, a=a6 *\n units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef 
BK_a_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n a6 = val * 1\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef update_rot_curve():\n ax.clear()\n global MN_b_plot, MN_Bulge_p, MN_Thin_Disk_p, MN_Thick_Disk_p, MN_td_plot, MN_tkd_plot, NFW_p, NFW_plot, EX_d_plot, EX_Disk_p, CV_galaxy, CV_galaxy_dot, BK_p, BK_plot\n composite_pot_array = []\n ax.set_xlabel('$R(kpc)$', fontsize=20)\n ax.set_ylabel('$v_c(km/s)$', fontsize=20)\n ax.tick_params(axis='both', which='both', labelsize=15)\n ax.set_xlim([0, 1.02 * r_data[-1]])\n ax.set_ylim([0, np.max(v_c_data) * 1.2])\n if MN_b_plot.get_visible() == True:\n MN_Bulge = calcRotcurve(MN_Bulge_p, lista, phi=None) * 220\n MN_b_plot, = ax.plot(lista, MN_Bulge, linestyle='--', c='gray')\n composite_pot_array.append(MN_Bulge_p)\n if MN_td_plot.get_visible() == True:\n MN_Thin_Disk = calcRotcurve(MN_Thin_Disk_p, lista, phi=None) * 220\n MN_td_plot, = ax.plot(lista, MN_Thin_Disk, linestyle='--', c='purple')\n composite_pot_array.append(MN_Thin_Disk_p)\n if MN_tkd_plot.get_visible() == True:\n MN_Thick_Disk = calcRotcurve(MN_Thick_Disk_p, lista, phi=None) * 220\n MN_tkd_plot, = ax.plot(lista, MN_Thick_Disk, linestyle='--', c='blue')\n composite_pot_array.append(MN_Thick_Disk_p)\n if NFW_plot.get_visible() == True:\n NFW = calcRotcurve(NFW_p, lista, phi=None) * 220\n NFW_plot, = ax.plot(lista, NFW, linestyle='--', c='green')\n composite_pot_array.append(NFW_p)\n if EX_d_plot.get_visible() == True:\n EX_Disk = calcRotcurve(EX_Disk_p, lista, phi=None) * 220\n EX_d_plot, = ax.plot(lista, EX_Disk, linestyle='--', c='cyan')\n composite_pot_array.append(EX_Disk_p)\n if BK_plot.get_visible() == True:\n BK = calcRotcurve(BK_p, lista, phi=None) * 220\n BK_plot, = ax.plot(lista, BK, linestyle='--', c='orange')\n composite_pot_array.append(BK_p)\n CV_galaxy = ax.errorbar(r_data, v_c_data, v_c_err_data, c='k', fmt='',\n ls='none')\n CV_galaxy_dot = ax.scatter(r_data, v_c_data, c='k')\n v_circ_comp = calcRotcurve(composite_pot_array, lista, phi=None) * 220\n v_circ_comp_plot, = ax.plot(lista, v_circ_comp, c='k')\n\n\n<code token>\n\n\ndef reset(event):\n MN_b_amp_s.reset()\n MN_b_a_s.reset()\n MN_b_b_s.reset()\n MN_td_amp_s.reset()\n MN_td_a_s.reset()\n MN_td_b_s.reset()\n MN_tkd_amp_s.reset()\n MN_tkd_a_s.reset()\n MN_tkd_b_s.reset()\n MN_ed_amp_s.reset()\n MN_ed_a_s.reset()\n NFW_amp_s.reset()\n NFW_a_s.reset()\n BK_amp_s.reset()\n BK_a_s.reset()\n\n\n<assignment token>\n<code token>\n\n\ndef check_on_clicked(label):\n if label == 'MN Bulge (GRAY)':\n MN_b_plot.set_visible(not MN_b_plot.get_visible())\n update_rot_curve()\n elif label == 'MN Thin Disc (PURPLE)':\n MN_td_plot.set_visible(not MN_td_plot.get_visible())\n update_rot_curve()\n elif label == 'MN Thick Disc (BLUE)':\n MN_tkd_plot.set_visible(not MN_tkd_plot.get_visible())\n update_rot_curve()\n elif label == 'Exp. 
Disc (CYAN)':\n EX_d_plot.set_visible(not EX_d_plot.get_visible())\n update_rot_curve()\n elif label == 'NFW - Halo (GREEN)':\n NFW_plot.set_visible(not NFW_plot.get_visible())\n update_rot_curve()\n elif label == 'Burkert - Halo (ORANGE)':\n BK_plot.set_visible(not BK_plot.get_visible())\n update_rot_curve()\n plt.draw()\n\n\n<code token>\n<docstring token>\n<assignment token>\n\n\ndef start(event):\n plt.close(1)\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef model(parameters, R):\n global chk, para_labels, aa\n para = {}\n for i in range(len(para_labels)):\n para[para_labels[i]] = parameters[i]\n r_0 = 1 * units.kpc\n v_0 = 220 * units.km / units.s\n check_pot = []\n if chk[0] == True:\n if aa[0] == 0.0:\n a1 = 0.0\n amp1 = para['amp1']\n b1 = para['b1']\n else:\n amp1 = para['amp1']\n a1 = para['a1']\n b1 = para['b1']\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Bulge_p)\n if chk[1] == True:\n amp2 = para['amp2']\n a2 = para['a2']\n b2 = para['b2']\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Thin_Disk_p)\n if chk[2] == True:\n amp3 = para['amp3']\n a3 = para['a3']\n b3 = para['b3']\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Thick_Disk_p)\n if chk[3] == True:\n amp4 = para['amp4']\n h_r = para['h_r']\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4 * (units.\n Msun / units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=\n 0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n check_pot.append(EX_Disk_p)\n if chk[4] == True:\n amp5 = para['amp5']\n a5 = para['a5']\n NFW_p = NFWPotential(amp=amp5 * units.Msun, a=a5 * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n check_pot.append(NFW_p)\n if chk[5] == True:\n amp6 = para['amp6']\n a6 = para['a6']\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n a6 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(BK_p)\n vc_total = calcRotcurve(check_pot, R, phi=None) * 220\n return vc_total\n\n\ndef lnprior(parameters):\n booL = []\n for i in parameters:\n if i > 0.0:\n booL.append(True)\n else:\n booL.append(False)\n if False in booL:\n return -np.inf\n else:\n return 0.0\n\n\ndef lnlike(parameters, x, y, yerr):\n Model = model(parameters, x)\n return -0.5 * np.sum(((y - Model) / yerr) ** 2)\n\n\ndef lnprob(parameters, x, y, yerr):\n lp = lnprior(parameters)\n Model = model(parameters, x)\n if not np.isfinite(lp) or True in np.isnan(Model):\n return -np.inf\n else:\n return lp + lnlike(parameters, x, y, yerr)\n\n\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Index(object):\n ind = 0\n\n def next(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind += 1\n if self.ind >= ndim:\n self.ind = 0\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n 
ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n def prev(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind -= 1\n if self.ind == -1:\n self.ind = ndim - 1\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef burn(event):\n plt.close()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n\n\ndef mass_nfw(x, rho_0, a):\n return 4.0 * np.pi * rho_0 * a ** 3 * (np.log(1 + x) - x / (1 + x))\n\n\ndef eq_b(x, rho_0, rho_c):\n global Delta_c\n return 2.0 * np.log(1 + x) + np.log(1 + x ** 2) - 2.0 * np.arctan(x\n ) - 4.0 * Delta_c * rho_c / (3.0 * rho_0) * x ** 3\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef MN_b_amp_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n amp1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a1 *\n units.kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n\n\ndef MN_b_b_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n b1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_amp_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n amp2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_a_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n a2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=\n val * units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_b_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n b2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n\n\ndef MN_tkd_a_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n a3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n val * units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_tkd_b_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n b3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_ed_amp_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n amp4 = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=val * (units.Msun /\n units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=0.001,\n normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\ndef MN_ed_a_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n h_r = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4 * (units.\n Msun / units.pc ** 2), hr=val * units.kpc, maxiter=20, tol=\n 0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\ndef NFW_amp_s_func(val):\n if NFW_plot.get_visible() == True:\n global NFW_p, amp5, a5\n amp5 = val * 1\n NFW_p = NFWPotential(amp=val * units.Msun, a=a5 * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef NFW_a_s_func(val):\n if NFW_plot.get_visible() == True:\n global NFW_p, amp5, a5\n a5 = val * 1\n NFW_p = NFWPotential(amp=amp5 * units.Msun, a=val * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef BK_amp_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n amp6 = val * 1\n BK_p = BurkertPotential(amp=val * units.Msun / units.kpc ** 3, a=a6 *\n units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef 
BK_a_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n a6 = val * 1\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef update_rot_curve():\n ax.clear()\n global MN_b_plot, MN_Bulge_p, MN_Thin_Disk_p, MN_Thick_Disk_p, MN_td_plot, MN_tkd_plot, NFW_p, NFW_plot, EX_d_plot, EX_Disk_p, CV_galaxy, CV_galaxy_dot, BK_p, BK_plot\n composite_pot_array = []\n ax.set_xlabel('$R(kpc)$', fontsize=20)\n ax.set_ylabel('$v_c(km/s)$', fontsize=20)\n ax.tick_params(axis='both', which='both', labelsize=15)\n ax.set_xlim([0, 1.02 * r_data[-1]])\n ax.set_ylim([0, np.max(v_c_data) * 1.2])\n if MN_b_plot.get_visible() == True:\n MN_Bulge = calcRotcurve(MN_Bulge_p, lista, phi=None) * 220\n MN_b_plot, = ax.plot(lista, MN_Bulge, linestyle='--', c='gray')\n composite_pot_array.append(MN_Bulge_p)\n if MN_td_plot.get_visible() == True:\n MN_Thin_Disk = calcRotcurve(MN_Thin_Disk_p, lista, phi=None) * 220\n MN_td_plot, = ax.plot(lista, MN_Thin_Disk, linestyle='--', c='purple')\n composite_pot_array.append(MN_Thin_Disk_p)\n if MN_tkd_plot.get_visible() == True:\n MN_Thick_Disk = calcRotcurve(MN_Thick_Disk_p, lista, phi=None) * 220\n MN_tkd_plot, = ax.plot(lista, MN_Thick_Disk, linestyle='--', c='blue')\n composite_pot_array.append(MN_Thick_Disk_p)\n if NFW_plot.get_visible() == True:\n NFW = calcRotcurve(NFW_p, lista, phi=None) * 220\n NFW_plot, = ax.plot(lista, NFW, linestyle='--', c='green')\n composite_pot_array.append(NFW_p)\n if EX_d_plot.get_visible() == True:\n EX_Disk = calcRotcurve(EX_Disk_p, lista, phi=None) * 220\n EX_d_plot, = ax.plot(lista, EX_Disk, linestyle='--', c='cyan')\n composite_pot_array.append(EX_Disk_p)\n if BK_plot.get_visible() == True:\n BK = calcRotcurve(BK_p, lista, phi=None) * 220\n BK_plot, = ax.plot(lista, BK, linestyle='--', c='orange')\n composite_pot_array.append(BK_p)\n CV_galaxy = ax.errorbar(r_data, v_c_data, v_c_err_data, c='k', fmt='',\n ls='none')\n CV_galaxy_dot = ax.scatter(r_data, v_c_data, c='k')\n v_circ_comp = calcRotcurve(composite_pot_array, lista, phi=None) * 220\n v_circ_comp_plot, = ax.plot(lista, v_circ_comp, c='k')\n\n\n<code token>\n\n\ndef reset(event):\n MN_b_amp_s.reset()\n MN_b_a_s.reset()\n MN_b_b_s.reset()\n MN_td_amp_s.reset()\n MN_td_a_s.reset()\n MN_td_b_s.reset()\n MN_tkd_amp_s.reset()\n MN_tkd_a_s.reset()\n MN_tkd_b_s.reset()\n MN_ed_amp_s.reset()\n MN_ed_a_s.reset()\n NFW_amp_s.reset()\n NFW_a_s.reset()\n BK_amp_s.reset()\n BK_a_s.reset()\n\n\n<assignment token>\n<code token>\n\n\ndef check_on_clicked(label):\n if label == 'MN Bulge (GRAY)':\n MN_b_plot.set_visible(not MN_b_plot.get_visible())\n update_rot_curve()\n elif label == 'MN Thin Disc (PURPLE)':\n MN_td_plot.set_visible(not MN_td_plot.get_visible())\n update_rot_curve()\n elif label == 'MN Thick Disc (BLUE)':\n MN_tkd_plot.set_visible(not MN_tkd_plot.get_visible())\n update_rot_curve()\n elif label == 'Exp. 
Disc (CYAN)':\n EX_d_plot.set_visible(not EX_d_plot.get_visible())\n update_rot_curve()\n elif label == 'NFW - Halo (GREEN)':\n NFW_plot.set_visible(not NFW_plot.get_visible())\n update_rot_curve()\n elif label == 'Burkert - Halo (ORANGE)':\n BK_plot.set_visible(not BK_plot.get_visible())\n update_rot_curve()\n plt.draw()\n\n\n<code token>\n<docstring token>\n<assignment token>\n\n\ndef start(event):\n plt.close(1)\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef model(parameters, R):\n global chk, para_labels, aa\n para = {}\n for i in range(len(para_labels)):\n para[para_labels[i]] = parameters[i]\n r_0 = 1 * units.kpc\n v_0 = 220 * units.km / units.s\n check_pot = []\n if chk[0] == True:\n if aa[0] == 0.0:\n a1 = 0.0\n amp1 = para['amp1']\n b1 = para['b1']\n else:\n amp1 = para['amp1']\n a1 = para['a1']\n b1 = para['b1']\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Bulge_p)\n if chk[1] == True:\n amp2 = para['amp2']\n a2 = para['a2']\n b2 = para['b2']\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Thin_Disk_p)\n if chk[2] == True:\n amp3 = para['amp3']\n a3 = para['a3']\n b3 = para['b3']\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Thick_Disk_p)\n if chk[3] == True:\n amp4 = para['amp4']\n h_r = para['h_r']\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4 * (units.\n Msun / units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=\n 0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n check_pot.append(EX_Disk_p)\n if chk[4] == True:\n amp5 = para['amp5']\n a5 = para['a5']\n NFW_p = NFWPotential(amp=amp5 * units.Msun, a=a5 * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n check_pot.append(NFW_p)\n if chk[5] == True:\n amp6 = para['amp6']\n a6 = para['a6']\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n a6 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(BK_p)\n vc_total = calcRotcurve(check_pot, R, phi=None) * 220\n return vc_total\n\n\n<function token>\n\n\ndef lnlike(parameters, x, y, yerr):\n Model = model(parameters, x)\n return -0.5 * np.sum(((y - Model) / yerr) ** 2)\n\n\ndef lnprob(parameters, x, y, yerr):\n lp = lnprior(parameters)\n Model = model(parameters, x)\n if not np.isfinite(lp) or True in np.isnan(Model):\n return -np.inf\n else:\n return lp + lnlike(parameters, x, y, yerr)\n\n\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Index(object):\n ind = 0\n\n def next(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind += 1\n if self.ind >= ndim:\n self.ind = 0\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n def prev(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n 
self.ind -= 1\n if self.ind == -1:\n self.ind = ndim - 1\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef burn(event):\n plt.close()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n\n\ndef mass_nfw(x, rho_0, a):\n return 4.0 * np.pi * rho_0 * a ** 3 * (np.log(1 + x) - x / (1 + x))\n\n\ndef eq_b(x, rho_0, rho_c):\n global Delta_c\n return 2.0 * np.log(1 + x) + np.log(1 + x ** 2) - 2.0 * np.arctan(x\n ) - 4.0 * Delta_c * rho_c / (3.0 * rho_0) * x ** 3\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef MN_b_amp_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n amp1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a1 *\n units.kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n\n\ndef MN_b_b_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n b1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_amp_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n amp2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_a_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n a2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=\n val * units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_b_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n b2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n\n\ndef MN_tkd_a_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n a3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n val * units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_tkd_b_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n b3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_ed_amp_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n amp4 = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=val * (units.Msun /\n units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=0.001,\n normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\ndef MN_ed_a_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n h_r = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4 * (units.\n Msun / units.pc ** 2), hr=val * units.kpc, maxiter=20, tol=\n 0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\ndef NFW_amp_s_func(val):\n if NFW_plot.get_visible() == True:\n global NFW_p, amp5, a5\n amp5 = val * 1\n NFW_p = NFWPotential(amp=val * units.Msun, a=a5 * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef NFW_a_s_func(val):\n if NFW_plot.get_visible() == True:\n global NFW_p, amp5, a5\n a5 = val * 1\n NFW_p = NFWPotential(amp=amp5 * units.Msun, a=val * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef BK_amp_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n amp6 = val * 1\n BK_p = BurkertPotential(amp=val * units.Msun / units.kpc ** 3, a=a6 *\n units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef 
BK_a_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n a6 = val * 1\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef update_rot_curve():\n ax.clear()\n global MN_b_plot, MN_Bulge_p, MN_Thin_Disk_p, MN_Thick_Disk_p, MN_td_plot, MN_tkd_plot, NFW_p, NFW_plot, EX_d_plot, EX_Disk_p, CV_galaxy, CV_galaxy_dot, BK_p, BK_plot\n composite_pot_array = []\n ax.set_xlabel('$R(kpc)$', fontsize=20)\n ax.set_ylabel('$v_c(km/s)$', fontsize=20)\n ax.tick_params(axis='both', which='both', labelsize=15)\n ax.set_xlim([0, 1.02 * r_data[-1]])\n ax.set_ylim([0, np.max(v_c_data) * 1.2])\n if MN_b_plot.get_visible() == True:\n MN_Bulge = calcRotcurve(MN_Bulge_p, lista, phi=None) * 220\n MN_b_plot, = ax.plot(lista, MN_Bulge, linestyle='--', c='gray')\n composite_pot_array.append(MN_Bulge_p)\n if MN_td_plot.get_visible() == True:\n MN_Thin_Disk = calcRotcurve(MN_Thin_Disk_p, lista, phi=None) * 220\n MN_td_plot, = ax.plot(lista, MN_Thin_Disk, linestyle='--', c='purple')\n composite_pot_array.append(MN_Thin_Disk_p)\n if MN_tkd_plot.get_visible() == True:\n MN_Thick_Disk = calcRotcurve(MN_Thick_Disk_p, lista, phi=None) * 220\n MN_tkd_plot, = ax.plot(lista, MN_Thick_Disk, linestyle='--', c='blue')\n composite_pot_array.append(MN_Thick_Disk_p)\n if NFW_plot.get_visible() == True:\n NFW = calcRotcurve(NFW_p, lista, phi=None) * 220\n NFW_plot, = ax.plot(lista, NFW, linestyle='--', c='green')\n composite_pot_array.append(NFW_p)\n if EX_d_plot.get_visible() == True:\n EX_Disk = calcRotcurve(EX_Disk_p, lista, phi=None) * 220\n EX_d_plot, = ax.plot(lista, EX_Disk, linestyle='--', c='cyan')\n composite_pot_array.append(EX_Disk_p)\n if BK_plot.get_visible() == True:\n BK = calcRotcurve(BK_p, lista, phi=None) * 220\n BK_plot, = ax.plot(lista, BK, linestyle='--', c='orange')\n composite_pot_array.append(BK_p)\n CV_galaxy = ax.errorbar(r_data, v_c_data, v_c_err_data, c='k', fmt='',\n ls='none')\n CV_galaxy_dot = ax.scatter(r_data, v_c_data, c='k')\n v_circ_comp = calcRotcurve(composite_pot_array, lista, phi=None) * 220\n v_circ_comp_plot, = ax.plot(lista, v_circ_comp, c='k')\n\n\n<code token>\n\n\ndef reset(event):\n MN_b_amp_s.reset()\n MN_b_a_s.reset()\n MN_b_b_s.reset()\n MN_td_amp_s.reset()\n MN_td_a_s.reset()\n MN_td_b_s.reset()\n MN_tkd_amp_s.reset()\n MN_tkd_a_s.reset()\n MN_tkd_b_s.reset()\n MN_ed_amp_s.reset()\n MN_ed_a_s.reset()\n NFW_amp_s.reset()\n NFW_a_s.reset()\n BK_amp_s.reset()\n BK_a_s.reset()\n\n\n<assignment token>\n<code token>\n<function token>\n<code token>\n<docstring token>\n<assignment token>\n\n\ndef start(event):\n plt.close(1)\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef model(parameters, R):\n global chk, para_labels, aa\n para = {}\n for i in range(len(para_labels)):\n para[para_labels[i]] = parameters[i]\n r_0 = 1 * units.kpc\n v_0 = 220 * units.km / units.s\n check_pot = []\n if chk[0] == True:\n if aa[0] == 0.0:\n a1 = 0.0\n amp1 = para['amp1']\n b1 = para['b1']\n else:\n amp1 = para['amp1']\n a1 = para['a1']\n b1 = para['b1']\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Bulge_p)\n if chk[1] == True:\n amp2 = para['amp2']\n a2 = para['a2']\n b2 = para['b2']\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n 
check_pot.append(MN_Thin_Disk_p)\n if chk[2] == True:\n amp3 = para['amp3']\n a3 = para['a3']\n b3 = para['b3']\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Thick_Disk_p)\n if chk[3] == True:\n amp4 = para['amp4']\n h_r = para['h_r']\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4 * (units.\n Msun / units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=\n 0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n check_pot.append(EX_Disk_p)\n if chk[4] == True:\n amp5 = para['amp5']\n a5 = para['a5']\n NFW_p = NFWPotential(amp=amp5 * units.Msun, a=a5 * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n check_pot.append(NFW_p)\n if chk[5] == True:\n amp6 = para['amp6']\n a6 = para['a6']\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n a6 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(BK_p)\n vc_total = calcRotcurve(check_pot, R, phi=None) * 220\n return vc_total\n\n\n<function token>\n\n\ndef lnlike(parameters, x, y, yerr):\n Model = model(parameters, x)\n return -0.5 * np.sum(((y - Model) / yerr) ** 2)\n\n\ndef lnprob(parameters, x, y, yerr):\n lp = lnprior(parameters)\n Model = model(parameters, x)\n if not np.isfinite(lp) or True in np.isnan(Model):\n return -np.inf\n else:\n return lp + lnlike(parameters, x, y, yerr)\n\n\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Index(object):\n ind = 0\n\n def next(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind += 1\n if self.ind >= ndim:\n self.ind = 0\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n def prev(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind -= 1\n if self.ind == -1:\n self.ind = ndim - 1\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef burn(event):\n plt.close()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n\n\ndef mass_nfw(x, rho_0, a):\n return 4.0 * np.pi * rho_0 * a ** 3 * (np.log(1 + x) - x / (1 + x))\n\n\ndef eq_b(x, rho_0, rho_c):\n global Delta_c\n return 2.0 * np.log(1 + x) + np.log(1 + x ** 2) - 2.0 * np.arctan(x\n ) - 4.0 * Delta_c * rho_c / (3.0 * rho_0) * x ** 3\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef MN_b_b_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n b1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_amp_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n amp2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_a_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n a2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=\n val * units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_b_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n b2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n\n\ndef MN_tkd_a_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n a3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n val * units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_tkd_b_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n b3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_ed_amp_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n amp4 = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=val * (units.Msun /\n units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=0.001,\n normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\ndef MN_ed_a_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n h_r = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4 * (units.\n Msun / units.pc ** 2), hr=val * units.kpc, maxiter=20, tol=\n 0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\ndef NFW_amp_s_func(val):\n if NFW_plot.get_visible() == True:\n global NFW_p, amp5, a5\n amp5 = val * 1\n NFW_p = NFWPotential(amp=val * units.Msun, a=a5 * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef NFW_a_s_func(val):\n if NFW_plot.get_visible() == True:\n global NFW_p, amp5, a5\n a5 = val * 1\n NFW_p = NFWPotential(amp=amp5 * units.Msun, a=val * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef BK_amp_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n amp6 = val * 1\n BK_p = BurkertPotential(amp=val * units.Msun / units.kpc ** 3, a=a6 *\n units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef BK_a_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n a6 = val * 1\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef update_rot_curve():\n 
ax.clear()\n global MN_b_plot, MN_Bulge_p, MN_Thin_Disk_p, MN_Thick_Disk_p, MN_td_plot, MN_tkd_plot, NFW_p, NFW_plot, EX_d_plot, EX_Disk_p, CV_galaxy, CV_galaxy_dot, BK_p, BK_plot\n composite_pot_array = []\n ax.set_xlabel('$R(kpc)$', fontsize=20)\n ax.set_ylabel('$v_c(km/s)$', fontsize=20)\n ax.tick_params(axis='both', which='both', labelsize=15)\n ax.set_xlim([0, 1.02 * r_data[-1]])\n ax.set_ylim([0, np.max(v_c_data) * 1.2])\n if MN_b_plot.get_visible() == True:\n MN_Bulge = calcRotcurve(MN_Bulge_p, lista, phi=None) * 220\n MN_b_plot, = ax.plot(lista, MN_Bulge, linestyle='--', c='gray')\n composite_pot_array.append(MN_Bulge_p)\n if MN_td_plot.get_visible() == True:\n MN_Thin_Disk = calcRotcurve(MN_Thin_Disk_p, lista, phi=None) * 220\n MN_td_plot, = ax.plot(lista, MN_Thin_Disk, linestyle='--', c='purple')\n composite_pot_array.append(MN_Thin_Disk_p)\n if MN_tkd_plot.get_visible() == True:\n MN_Thick_Disk = calcRotcurve(MN_Thick_Disk_p, lista, phi=None) * 220\n MN_tkd_plot, = ax.plot(lista, MN_Thick_Disk, linestyle='--', c='blue')\n composite_pot_array.append(MN_Thick_Disk_p)\n if NFW_plot.get_visible() == True:\n NFW = calcRotcurve(NFW_p, lista, phi=None) * 220\n NFW_plot, = ax.plot(lista, NFW, linestyle='--', c='green')\n composite_pot_array.append(NFW_p)\n if EX_d_plot.get_visible() == True:\n EX_Disk = calcRotcurve(EX_Disk_p, lista, phi=None) * 220\n EX_d_plot, = ax.plot(lista, EX_Disk, linestyle='--', c='cyan')\n composite_pot_array.append(EX_Disk_p)\n if BK_plot.get_visible() == True:\n BK = calcRotcurve(BK_p, lista, phi=None) * 220\n BK_plot, = ax.plot(lista, BK, linestyle='--', c='orange')\n composite_pot_array.append(BK_p)\n CV_galaxy = ax.errorbar(r_data, v_c_data, v_c_err_data, c='k', fmt='',\n ls='none')\n CV_galaxy_dot = ax.scatter(r_data, v_c_data, c='k')\n v_circ_comp = calcRotcurve(composite_pot_array, lista, phi=None) * 220\n v_circ_comp_plot, = ax.plot(lista, v_circ_comp, c='k')\n\n\n<code token>\n\n\ndef reset(event):\n MN_b_amp_s.reset()\n MN_b_a_s.reset()\n MN_b_b_s.reset()\n MN_td_amp_s.reset()\n MN_td_a_s.reset()\n MN_td_b_s.reset()\n MN_tkd_amp_s.reset()\n MN_tkd_a_s.reset()\n MN_tkd_b_s.reset()\n MN_ed_amp_s.reset()\n MN_ed_a_s.reset()\n NFW_amp_s.reset()\n NFW_a_s.reset()\n BK_amp_s.reset()\n BK_a_s.reset()\n\n\n<assignment token>\n<code token>\n<function token>\n<code token>\n<docstring token>\n<assignment token>\n\n\ndef start(event):\n plt.close(1)\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef model(parameters, R):\n global chk, para_labels, aa\n para = {}\n for i in range(len(para_labels)):\n para[para_labels[i]] = parameters[i]\n r_0 = 1 * units.kpc\n v_0 = 220 * units.km / units.s\n check_pot = []\n if chk[0] == True:\n if aa[0] == 0.0:\n a1 = 0.0\n amp1 = para['amp1']\n b1 = para['b1']\n else:\n amp1 = para['amp1']\n a1 = para['a1']\n b1 = para['b1']\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Bulge_p)\n if chk[1] == True:\n amp2 = para['amp2']\n a2 = para['a2']\n b2 = para['b2']\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Thin_Disk_p)\n if chk[2] == True:\n amp3 = para['amp3']\n a3 = para['a3']\n b3 = para['b3']\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Thick_Disk_p)\n if 
chk[3] == True:\n amp4 = para['amp4']\n h_r = para['h_r']\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4 * (units.\n Msun / units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=\n 0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n check_pot.append(EX_Disk_p)\n if chk[4] == True:\n amp5 = para['amp5']\n a5 = para['a5']\n NFW_p = NFWPotential(amp=amp5 * units.Msun, a=a5 * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n check_pot.append(NFW_p)\n if chk[5] == True:\n amp6 = para['amp6']\n a6 = para['a6']\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n a6 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(BK_p)\n vc_total = calcRotcurve(check_pot, R, phi=None) * 220\n return vc_total\n\n\n<function token>\n\n\ndef lnlike(parameters, x, y, yerr):\n Model = model(parameters, x)\n return -0.5 * np.sum(((y - Model) / yerr) ** 2)\n\n\ndef lnprob(parameters, x, y, yerr):\n lp = lnprior(parameters)\n Model = model(parameters, x)\n if not np.isfinite(lp) or True in np.isnan(Model):\n return -np.inf\n else:\n return lp + lnlike(parameters, x, y, yerr)\n\n\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Index(object):\n ind = 0\n\n def next(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind += 1\n if self.ind >= ndim:\n self.ind = 0\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n def prev(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind -= 1\n if self.ind == -1:\n self.ind = ndim - 1\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef burn(event):\n plt.close()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n\n\ndef mass_nfw(x, rho_0, a):\n return 4.0 * np.pi * rho_0 * a ** 3 * (np.log(1 + x) - x / (1 + x))\n\n\ndef eq_b(x, rho_0, rho_c):\n global Delta_c\n return 2.0 * np.log(1 + x) + np.log(1 + x ** 2) - 2.0 * np.arctan(x\n ) - 4.0 * Delta_c * rho_c / (3.0 * rho_0) * x ** 3\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef MN_b_b_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n b1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_amp_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n amp2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_a_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n a2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=\n val * units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_b_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n b2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n\n\ndef MN_tkd_a_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n a3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n val * units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_tkd_b_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n b3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_ed_amp_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n amp4 = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=val * (units.Msun /\n units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=0.001,\n normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\ndef MN_ed_a_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n h_r = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4 * (units.\n Msun / units.pc ** 2), hr=val * units.kpc, maxiter=20, tol=\n 0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\ndef NFW_amp_s_func(val):\n if NFW_plot.get_visible() == True:\n global NFW_p, amp5, a5\n amp5 = val * 1\n NFW_p = NFWPotential(amp=val * units.Msun, a=a5 * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n\n\ndef BK_amp_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n amp6 = val * 1\n BK_p = BurkertPotential(amp=val * units.Msun / units.kpc ** 3, a=a6 *\n units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef BK_a_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n a6 = val * 1\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef update_rot_curve():\n ax.clear()\n global MN_b_plot, MN_Bulge_p, MN_Thin_Disk_p, MN_Thick_Disk_p, MN_td_plot, MN_tkd_plot, NFW_p, NFW_plot, EX_d_plot, EX_Disk_p, CV_galaxy, CV_galaxy_dot, BK_p, BK_plot\n composite_pot_array = []\n 
ax.set_xlabel('$R(kpc)$', fontsize=20)\n ax.set_ylabel('$v_c(km/s)$', fontsize=20)\n ax.tick_params(axis='both', which='both', labelsize=15)\n ax.set_xlim([0, 1.02 * r_data[-1]])\n ax.set_ylim([0, np.max(v_c_data) * 1.2])\n if MN_b_plot.get_visible() == True:\n MN_Bulge = calcRotcurve(MN_Bulge_p, lista, phi=None) * 220\n MN_b_plot, = ax.plot(lista, MN_Bulge, linestyle='--', c='gray')\n composite_pot_array.append(MN_Bulge_p)\n if MN_td_plot.get_visible() == True:\n MN_Thin_Disk = calcRotcurve(MN_Thin_Disk_p, lista, phi=None) * 220\n MN_td_plot, = ax.plot(lista, MN_Thin_Disk, linestyle='--', c='purple')\n composite_pot_array.append(MN_Thin_Disk_p)\n if MN_tkd_plot.get_visible() == True:\n MN_Thick_Disk = calcRotcurve(MN_Thick_Disk_p, lista, phi=None) * 220\n MN_tkd_plot, = ax.plot(lista, MN_Thick_Disk, linestyle='--', c='blue')\n composite_pot_array.append(MN_Thick_Disk_p)\n if NFW_plot.get_visible() == True:\n NFW = calcRotcurve(NFW_p, lista, phi=None) * 220\n NFW_plot, = ax.plot(lista, NFW, linestyle='--', c='green')\n composite_pot_array.append(NFW_p)\n if EX_d_plot.get_visible() == True:\n EX_Disk = calcRotcurve(EX_Disk_p, lista, phi=None) * 220\n EX_d_plot, = ax.plot(lista, EX_Disk, linestyle='--', c='cyan')\n composite_pot_array.append(EX_Disk_p)\n if BK_plot.get_visible() == True:\n BK = calcRotcurve(BK_p, lista, phi=None) * 220\n BK_plot, = ax.plot(lista, BK, linestyle='--', c='orange')\n composite_pot_array.append(BK_p)\n CV_galaxy = ax.errorbar(r_data, v_c_data, v_c_err_data, c='k', fmt='',\n ls='none')\n CV_galaxy_dot = ax.scatter(r_data, v_c_data, c='k')\n v_circ_comp = calcRotcurve(composite_pot_array, lista, phi=None) * 220\n v_circ_comp_plot, = ax.plot(lista, v_circ_comp, c='k')\n\n\n<code token>\n\n\ndef reset(event):\n MN_b_amp_s.reset()\n MN_b_a_s.reset()\n MN_b_b_s.reset()\n MN_td_amp_s.reset()\n MN_td_a_s.reset()\n MN_td_b_s.reset()\n MN_tkd_amp_s.reset()\n MN_tkd_a_s.reset()\n MN_tkd_b_s.reset()\n MN_ed_amp_s.reset()\n MN_ed_a_s.reset()\n NFW_amp_s.reset()\n NFW_a_s.reset()\n BK_amp_s.reset()\n BK_a_s.reset()\n\n\n<assignment token>\n<code token>\n<function token>\n<code token>\n<docstring token>\n<assignment token>\n\n\ndef start(event):\n plt.close(1)\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef model(parameters, R):\n global chk, para_labels, aa\n para = {}\n for i in range(len(para_labels)):\n para[para_labels[i]] = parameters[i]\n r_0 = 1 * units.kpc\n v_0 = 220 * units.km / units.s\n check_pot = []\n if chk[0] == True:\n if aa[0] == 0.0:\n a1 = 0.0\n amp1 = para['amp1']\n b1 = para['b1']\n else:\n amp1 = para['amp1']\n a1 = para['a1']\n b1 = para['b1']\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Bulge_p)\n if chk[1] == True:\n amp2 = para['amp2']\n a2 = para['a2']\n b2 = para['b2']\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Thin_Disk_p)\n if chk[2] == True:\n amp3 = para['amp3']\n a3 = para['a3']\n b3 = para['b3']\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Thick_Disk_p)\n if chk[3] == True:\n amp4 = para['amp4']\n h_r = para['h_r']\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4 * (units.\n Msun / units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=\n 0.001, 
normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n check_pot.append(EX_Disk_p)\n if chk[4] == True:\n amp5 = para['amp5']\n a5 = para['a5']\n NFW_p = NFWPotential(amp=amp5 * units.Msun, a=a5 * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n check_pot.append(NFW_p)\n if chk[5] == True:\n amp6 = para['amp6']\n a6 = para['a6']\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n a6 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(BK_p)\n vc_total = calcRotcurve(check_pot, R, phi=None) * 220\n return vc_total\n\n\n<function token>\n\n\ndef lnlike(parameters, x, y, yerr):\n Model = model(parameters, x)\n return -0.5 * np.sum(((y - Model) / yerr) ** 2)\n\n\ndef lnprob(parameters, x, y, yerr):\n lp = lnprior(parameters)\n Model = model(parameters, x)\n if not np.isfinite(lp) or True in np.isnan(Model):\n return -np.inf\n else:\n return lp + lnlike(parameters, x, y, yerr)\n\n\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Index(object):\n ind = 0\n\n def next(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind += 1\n if self.ind >= ndim:\n self.ind = 0\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n def prev(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind -= 1\n if self.ind == -1:\n self.ind = ndim - 1\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef burn(event):\n plt.close()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n\n\ndef mass_nfw(x, rho_0, a):\n return 4.0 * np.pi * rho_0 * a ** 3 * (np.log(1 + x) - x / (1 + x))\n\n\ndef eq_b(x, rho_0, rho_c):\n global Delta_c\n return 2.0 * np.log(1 + x) + np.log(1 + x ** 2) - 2.0 * np.arctan(x\n ) - 4.0 * Delta_c * rho_c / (3.0 * rho_0) * x ** 3\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef MN_b_b_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n b1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_amp_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n amp2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_a_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n a2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=\n val * units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_b_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n b2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n\n\ndef MN_tkd_a_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n a3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n val * units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_tkd_b_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n b3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_ed_amp_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n amp4 = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=val * (units.Msun /\n units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=0.001,\n normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\ndef MN_ed_a_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n h_r = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4 * (units.\n Msun / units.pc ** 2), hr=val * units.kpc, maxiter=20, tol=\n 0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\ndef NFW_amp_s_func(val):\n if NFW_plot.get_visible() == True:\n global NFW_p, amp5, a5\n amp5 = val * 1\n NFW_p = NFWPotential(amp=val * units.Msun, a=a5 * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n\n\ndef BK_amp_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n amp6 = val * 1\n BK_p = BurkertPotential(amp=val * units.Msun / units.kpc ** 3, a=a6 *\n units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef BK_a_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n a6 = val * 1\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef update_rot_curve():\n ax.clear()\n global MN_b_plot, MN_Bulge_p, MN_Thin_Disk_p, MN_Thick_Disk_p, MN_td_plot, MN_tkd_plot, NFW_p, NFW_plot, EX_d_plot, EX_Disk_p, CV_galaxy, CV_galaxy_dot, BK_p, BK_plot\n composite_pot_array = []\n 
ax.set_xlabel('$R(kpc)$', fontsize=20)\n ax.set_ylabel('$v_c(km/s)$', fontsize=20)\n ax.tick_params(axis='both', which='both', labelsize=15)\n ax.set_xlim([0, 1.02 * r_data[-1]])\n ax.set_ylim([0, np.max(v_c_data) * 1.2])\n if MN_b_plot.get_visible() == True:\n MN_Bulge = calcRotcurve(MN_Bulge_p, lista, phi=None) * 220\n MN_b_plot, = ax.plot(lista, MN_Bulge, linestyle='--', c='gray')\n composite_pot_array.append(MN_Bulge_p)\n if MN_td_plot.get_visible() == True:\n MN_Thin_Disk = calcRotcurve(MN_Thin_Disk_p, lista, phi=None) * 220\n MN_td_plot, = ax.plot(lista, MN_Thin_Disk, linestyle='--', c='purple')\n composite_pot_array.append(MN_Thin_Disk_p)\n if MN_tkd_plot.get_visible() == True:\n MN_Thick_Disk = calcRotcurve(MN_Thick_Disk_p, lista, phi=None) * 220\n MN_tkd_plot, = ax.plot(lista, MN_Thick_Disk, linestyle='--', c='blue')\n composite_pot_array.append(MN_Thick_Disk_p)\n if NFW_plot.get_visible() == True:\n NFW = calcRotcurve(NFW_p, lista, phi=None) * 220\n NFW_plot, = ax.plot(lista, NFW, linestyle='--', c='green')\n composite_pot_array.append(NFW_p)\n if EX_d_plot.get_visible() == True:\n EX_Disk = calcRotcurve(EX_Disk_p, lista, phi=None) * 220\n EX_d_plot, = ax.plot(lista, EX_Disk, linestyle='--', c='cyan')\n composite_pot_array.append(EX_Disk_p)\n if BK_plot.get_visible() == True:\n BK = calcRotcurve(BK_p, lista, phi=None) * 220\n BK_plot, = ax.plot(lista, BK, linestyle='--', c='orange')\n composite_pot_array.append(BK_p)\n CV_galaxy = ax.errorbar(r_data, v_c_data, v_c_err_data, c='k', fmt='',\n ls='none')\n CV_galaxy_dot = ax.scatter(r_data, v_c_data, c='k')\n v_circ_comp = calcRotcurve(composite_pot_array, lista, phi=None) * 220\n v_circ_comp_plot, = ax.plot(lista, v_circ_comp, c='k')\n\n\n<code token>\n\n\ndef reset(event):\n MN_b_amp_s.reset()\n MN_b_a_s.reset()\n MN_b_b_s.reset()\n MN_td_amp_s.reset()\n MN_td_a_s.reset()\n MN_td_b_s.reset()\n MN_tkd_amp_s.reset()\n MN_tkd_a_s.reset()\n MN_tkd_b_s.reset()\n MN_ed_amp_s.reset()\n MN_ed_a_s.reset()\n NFW_amp_s.reset()\n NFW_a_s.reset()\n BK_amp_s.reset()\n BK_a_s.reset()\n\n\n<assignment token>\n<code token>\n<function token>\n<code token>\n<docstring token>\n<assignment token>\n\n\ndef start(event):\n plt.close(1)\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef model(parameters, R):\n global chk, para_labels, aa\n para = {}\n for i in range(len(para_labels)):\n para[para_labels[i]] = parameters[i]\n r_0 = 1 * units.kpc\n v_0 = 220 * units.km / units.s\n check_pot = []\n if chk[0] == True:\n if aa[0] == 0.0:\n a1 = 0.0\n amp1 = para['amp1']\n b1 = para['b1']\n else:\n amp1 = para['amp1']\n a1 = para['a1']\n b1 = para['b1']\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Bulge_p)\n if chk[1] == True:\n amp2 = para['amp2']\n a2 = para['a2']\n b2 = para['b2']\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Thin_Disk_p)\n if chk[2] == True:\n amp3 = para['amp3']\n a3 = para['a3']\n b3 = para['b3']\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Thick_Disk_p)\n if chk[3] == True:\n amp4 = para['amp4']\n h_r = para['h_r']\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4 * (units.\n Msun / units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=\n 0.001, 
normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n check_pot.append(EX_Disk_p)\n if chk[4] == True:\n amp5 = para['amp5']\n a5 = para['a5']\n NFW_p = NFWPotential(amp=amp5 * units.Msun, a=a5 * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n check_pot.append(NFW_p)\n if chk[5] == True:\n amp6 = para['amp6']\n a6 = para['a6']\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n a6 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(BK_p)\n vc_total = calcRotcurve(check_pot, R, phi=None) * 220\n return vc_total\n\n\n<function token>\n\n\ndef lnlike(parameters, x, y, yerr):\n Model = model(parameters, x)\n return -0.5 * np.sum(((y - Model) / yerr) ** 2)\n\n\n<function token>\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Index(object):\n ind = 0\n\n def next(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind += 1\n if self.ind >= ndim:\n self.ind = 0\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n def prev(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind -= 1\n if self.ind == -1:\n self.ind = ndim - 1\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef burn(event):\n plt.close()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n\n\ndef mass_nfw(x, rho_0, a):\n return 4.0 * np.pi * rho_0 * a ** 3 * (np.log(1 + x) - x / (1 + x))\n\n\ndef eq_b(x, rho_0, rho_c):\n global Delta_c\n return 2.0 * np.log(1 + x) + np.log(1 + x ** 2) - 2.0 * np.arctan(x\n ) - 4.0 * Delta_c * rho_c / (3.0 * rho_0) * x ** 3\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef MN_b_b_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n b1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_amp_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n amp2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_a_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n a2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=\n val * units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_b_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n b2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n<function token>\n\n\ndef MN_tkd_b_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n b3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_ed_amp_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n amp4 = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=val * (units.Msun /\n units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=0.001,\n normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\ndef MN_ed_a_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n h_r = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4 * (units.\n Msun / units.pc ** 2), hr=val * units.kpc, maxiter=20, tol=\n 0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\ndef NFW_amp_s_func(val):\n if NFW_plot.get_visible() == True:\n global NFW_p, amp5, a5\n amp5 = val * 1\n NFW_p = NFWPotential(amp=val * units.Msun, a=a5 * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n\n\ndef BK_amp_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n amp6 = val * 1\n BK_p = BurkertPotential(amp=val * units.Msun / units.kpc ** 3, a=a6 *\n units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef BK_a_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n a6 = val * 1\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef update_rot_curve():\n ax.clear()\n global MN_b_plot, MN_Bulge_p, MN_Thin_Disk_p, MN_Thick_Disk_p, MN_td_plot, MN_tkd_plot, NFW_p, NFW_plot, EX_d_plot, EX_Disk_p, CV_galaxy, CV_galaxy_dot, BK_p, BK_plot\n composite_pot_array = []\n ax.set_xlabel('$R(kpc)$', fontsize=20)\n ax.set_ylabel('$v_c(km/s)$', fontsize=20)\n ax.tick_params(axis='both', which='both', labelsize=15)\n ax.set_xlim([0, 1.02 * r_data[-1]])\n ax.set_ylim([0, np.max(v_c_data) * 1.2])\n if MN_b_plot.get_visible() == True:\n MN_Bulge = 
calcRotcurve(MN_Bulge_p, lista, phi=None) * 220\n MN_b_plot, = ax.plot(lista, MN_Bulge, linestyle='--', c='gray')\n composite_pot_array.append(MN_Bulge_p)\n if MN_td_plot.get_visible() == True:\n MN_Thin_Disk = calcRotcurve(MN_Thin_Disk_p, lista, phi=None) * 220\n MN_td_plot, = ax.plot(lista, MN_Thin_Disk, linestyle='--', c='purple')\n composite_pot_array.append(MN_Thin_Disk_p)\n if MN_tkd_plot.get_visible() == True:\n MN_Thick_Disk = calcRotcurve(MN_Thick_Disk_p, lista, phi=None) * 220\n MN_tkd_plot, = ax.plot(lista, MN_Thick_Disk, linestyle='--', c='blue')\n composite_pot_array.append(MN_Thick_Disk_p)\n if NFW_plot.get_visible() == True:\n NFW = calcRotcurve(NFW_p, lista, phi=None) * 220\n NFW_plot, = ax.plot(lista, NFW, linestyle='--', c='green')\n composite_pot_array.append(NFW_p)\n if EX_d_plot.get_visible() == True:\n EX_Disk = calcRotcurve(EX_Disk_p, lista, phi=None) * 220\n EX_d_plot, = ax.plot(lista, EX_Disk, linestyle='--', c='cyan')\n composite_pot_array.append(EX_Disk_p)\n if BK_plot.get_visible() == True:\n BK = calcRotcurve(BK_p, lista, phi=None) * 220\n BK_plot, = ax.plot(lista, BK, linestyle='--', c='orange')\n composite_pot_array.append(BK_p)\n CV_galaxy = ax.errorbar(r_data, v_c_data, v_c_err_data, c='k', fmt='',\n ls='none')\n CV_galaxy_dot = ax.scatter(r_data, v_c_data, c='k')\n v_circ_comp = calcRotcurve(composite_pot_array, lista, phi=None) * 220\n v_circ_comp_plot, = ax.plot(lista, v_circ_comp, c='k')\n\n\n<code token>\n\n\ndef reset(event):\n MN_b_amp_s.reset()\n MN_b_a_s.reset()\n MN_b_b_s.reset()\n MN_td_amp_s.reset()\n MN_td_a_s.reset()\n MN_td_b_s.reset()\n MN_tkd_amp_s.reset()\n MN_tkd_a_s.reset()\n MN_tkd_b_s.reset()\n MN_ed_amp_s.reset()\n MN_ed_a_s.reset()\n NFW_amp_s.reset()\n NFW_a_s.reset()\n BK_amp_s.reset()\n BK_a_s.reset()\n\n\n<assignment token>\n<code token>\n<function token>\n<code token>\n<docstring token>\n<assignment token>\n\n\ndef start(event):\n plt.close(1)\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef model(parameters, R):\n global chk, para_labels, aa\n para = {}\n for i in range(len(para_labels)):\n para[para_labels[i]] = parameters[i]\n r_0 = 1 * units.kpc\n v_0 = 220 * units.km / units.s\n check_pot = []\n if chk[0] == True:\n if aa[0] == 0.0:\n a1 = 0.0\n amp1 = para['amp1']\n b1 = para['b1']\n else:\n amp1 = para['amp1']\n a1 = para['a1']\n b1 = para['b1']\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Bulge_p)\n if chk[1] == True:\n amp2 = para['amp2']\n a2 = para['a2']\n b2 = para['b2']\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Thin_Disk_p)\n if chk[2] == True:\n amp3 = para['amp3']\n a3 = para['a3']\n b3 = para['b3']\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Thick_Disk_p)\n if chk[3] == True:\n amp4 = para['amp4']\n h_r = para['h_r']\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4 * (units.\n Msun / units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=\n 0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n check_pot.append(EX_Disk_p)\n if chk[4] == True:\n amp5 = para['amp5']\n a5 = para['a5']\n NFW_p = NFWPotential(amp=amp5 * units.Msun, a=a5 * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n check_pot.append(NFW_p)\n if chk[5] == 
True:\n amp6 = para['amp6']\n a6 = para['a6']\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n a6 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(BK_p)\n vc_total = calcRotcurve(check_pot, R, phi=None) * 220\n return vc_total\n\n\n<function token>\n\n\ndef lnlike(parameters, x, y, yerr):\n Model = model(parameters, x)\n return -0.5 * np.sum(((y - Model) / yerr) ** 2)\n\n\n<function token>\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Index(object):\n ind = 0\n\n def next(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind += 1\n if self.ind >= ndim:\n self.ind = 0\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n def prev(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind -= 1\n if self.ind == -1:\n self.ind = ndim - 1\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef burn(event):\n plt.close()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n\n\ndef mass_nfw(x, rho_0, a):\n return 4.0 * np.pi * rho_0 * a ** 3 * (np.log(1 + x) - x / (1 + x))\n\n\ndef eq_b(x, rho_0, rho_c):\n global Delta_c\n return 2.0 * np.log(1 + x) + np.log(1 + x ** 2) - 2.0 * np.arctan(x\n ) - 4.0 * Delta_c * rho_c / (3.0 * rho_0) * x ** 3\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef MN_b_b_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n b1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_amp_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n amp2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_a_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n a2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=\n val * units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_b_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n b2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n<function token>\n\n\ndef MN_tkd_b_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n b3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_ed_amp_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n amp4 = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=val * (units.Msun /\n units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=0.001,\n normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\ndef MN_ed_a_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n h_r = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4 * (units.\n Msun / units.pc ** 2), hr=val * units.kpc, maxiter=20, tol=\n 0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\ndef NFW_amp_s_func(val):\n if NFW_plot.get_visible() == True:\n global NFW_p, amp5, a5\n amp5 = val * 1\n NFW_p = NFWPotential(amp=val * units.Msun, a=a5 * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n\n\ndef BK_amp_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n amp6 = val * 1\n BK_p = BurkertPotential(amp=val * units.Msun / units.kpc ** 3, a=a6 *\n units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef BK_a_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n a6 = val * 1\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef update_rot_curve():\n ax.clear()\n global MN_b_plot, MN_Bulge_p, MN_Thin_Disk_p, MN_Thick_Disk_p, MN_td_plot, MN_tkd_plot, NFW_p, NFW_plot, EX_d_plot, EX_Disk_p, CV_galaxy, CV_galaxy_dot, BK_p, BK_plot\n composite_pot_array = []\n ax.set_xlabel('$R(kpc)$', fontsize=20)\n ax.set_ylabel('$v_c(km/s)$', fontsize=20)\n ax.tick_params(axis='both', which='both', labelsize=15)\n ax.set_xlim([0, 1.02 * r_data[-1]])\n ax.set_ylim([0, np.max(v_c_data) * 1.2])\n if MN_b_plot.get_visible() == True:\n MN_Bulge = 
calcRotcurve(MN_Bulge_p, lista, phi=None) * 220\n MN_b_plot, = ax.plot(lista, MN_Bulge, linestyle='--', c='gray')\n composite_pot_array.append(MN_Bulge_p)\n if MN_td_plot.get_visible() == True:\n MN_Thin_Disk = calcRotcurve(MN_Thin_Disk_p, lista, phi=None) * 220\n MN_td_plot, = ax.plot(lista, MN_Thin_Disk, linestyle='--', c='purple')\n composite_pot_array.append(MN_Thin_Disk_p)\n if MN_tkd_plot.get_visible() == True:\n MN_Thick_Disk = calcRotcurve(MN_Thick_Disk_p, lista, phi=None) * 220\n MN_tkd_plot, = ax.plot(lista, MN_Thick_Disk, linestyle='--', c='blue')\n composite_pot_array.append(MN_Thick_Disk_p)\n if NFW_plot.get_visible() == True:\n NFW = calcRotcurve(NFW_p, lista, phi=None) * 220\n NFW_plot, = ax.plot(lista, NFW, linestyle='--', c='green')\n composite_pot_array.append(NFW_p)\n if EX_d_plot.get_visible() == True:\n EX_Disk = calcRotcurve(EX_Disk_p, lista, phi=None) * 220\n EX_d_plot, = ax.plot(lista, EX_Disk, linestyle='--', c='cyan')\n composite_pot_array.append(EX_Disk_p)\n if BK_plot.get_visible() == True:\n BK = calcRotcurve(BK_p, lista, phi=None) * 220\n BK_plot, = ax.plot(lista, BK, linestyle='--', c='orange')\n composite_pot_array.append(BK_p)\n CV_galaxy = ax.errorbar(r_data, v_c_data, v_c_err_data, c='k', fmt='',\n ls='none')\n CV_galaxy_dot = ax.scatter(r_data, v_c_data, c='k')\n v_circ_comp = calcRotcurve(composite_pot_array, lista, phi=None) * 220\n v_circ_comp_plot, = ax.plot(lista, v_circ_comp, c='k')\n\n\n<code token>\n\n\ndef reset(event):\n MN_b_amp_s.reset()\n MN_b_a_s.reset()\n MN_b_b_s.reset()\n MN_td_amp_s.reset()\n MN_td_a_s.reset()\n MN_td_b_s.reset()\n MN_tkd_amp_s.reset()\n MN_tkd_a_s.reset()\n MN_tkd_b_s.reset()\n MN_ed_amp_s.reset()\n MN_ed_a_s.reset()\n NFW_amp_s.reset()\n NFW_a_s.reset()\n BK_amp_s.reset()\n BK_a_s.reset()\n\n\n<assignment token>\n<code token>\n<function token>\n<code token>\n<docstring token>\n<assignment token>\n\n\ndef start(event):\n plt.close(1)\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef model(parameters, R):\n global chk, para_labels, aa\n para = {}\n for i in range(len(para_labels)):\n para[para_labels[i]] = parameters[i]\n r_0 = 1 * units.kpc\n v_0 = 220 * units.km / units.s\n check_pot = []\n if chk[0] == True:\n if aa[0] == 0.0:\n a1 = 0.0\n amp1 = para['amp1']\n b1 = para['b1']\n else:\n amp1 = para['amp1']\n a1 = para['a1']\n b1 = para['b1']\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Bulge_p)\n if chk[1] == True:\n amp2 = para['amp2']\n a2 = para['a2']\n b2 = para['b2']\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Thin_Disk_p)\n if chk[2] == True:\n amp3 = para['amp3']\n a3 = para['a3']\n b3 = para['b3']\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Thick_Disk_p)\n if chk[3] == True:\n amp4 = para['amp4']\n h_r = para['h_r']\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4 * (units.\n Msun / units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=\n 0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n check_pot.append(EX_Disk_p)\n if chk[4] == True:\n amp5 = para['amp5']\n a5 = para['a5']\n NFW_p = NFWPotential(amp=amp5 * units.Msun, a=a5 * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n check_pot.append(NFW_p)\n if chk[5] == 
True:\n amp6 = para['amp6']\n a6 = para['a6']\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n a6 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(BK_p)\n vc_total = calcRotcurve(check_pot, R, phi=None) * 220\n return vc_total\n\n\n<function token>\n<function token>\n<function token>\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Index(object):\n ind = 0\n\n def next(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind += 1\n if self.ind >= ndim:\n self.ind = 0\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n def prev(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind -= 1\n if self.ind == -1:\n self.ind = ndim - 1\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef burn(event):\n plt.close()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n\n\ndef mass_nfw(x, rho_0, a):\n return 4.0 * np.pi * rho_0 * a ** 3 * (np.log(1 + x) - x / (1 + x))\n\n\ndef eq_b(x, rho_0, rho_c):\n global Delta_c\n return 2.0 * np.log(1 + x) + np.log(1 + x ** 2) - 2.0 * np.arctan(x\n ) - 4.0 * Delta_c * rho_c / (3.0 * rho_0) * x ** 3\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef MN_b_b_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n b1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_amp_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n amp2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_a_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n a2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=\n val * units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_b_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n b2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n<function token>\n\n\ndef MN_tkd_b_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n b3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_ed_amp_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n amp4 = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=val * (units.Msun /\n units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=0.001,\n normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\n<function token>\n\n\ndef NFW_amp_s_func(val):\n if NFW_plot.get_visible() == True:\n global NFW_p, amp5, a5\n amp5 = val * 1\n NFW_p = NFWPotential(amp=val * units.Msun, a=a5 * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n\n\ndef BK_amp_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n amp6 = val * 1\n BK_p = BurkertPotential(amp=val * units.Msun / units.kpc ** 3, a=a6 *\n units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef BK_a_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n a6 = val * 1\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef update_rot_curve():\n ax.clear()\n global MN_b_plot, MN_Bulge_p, MN_Thin_Disk_p, MN_Thick_Disk_p, MN_td_plot, MN_tkd_plot, NFW_p, NFW_plot, EX_d_plot, EX_Disk_p, CV_galaxy, CV_galaxy_dot, BK_p, BK_plot\n composite_pot_array = []\n ax.set_xlabel('$R(kpc)$', fontsize=20)\n ax.set_ylabel('$v_c(km/s)$', fontsize=20)\n ax.tick_params(axis='both', which='both', labelsize=15)\n ax.set_xlim([0, 1.02 * r_data[-1]])\n ax.set_ylim([0, np.max(v_c_data) * 1.2])\n if MN_b_plot.get_visible() == True:\n MN_Bulge = calcRotcurve(MN_Bulge_p, lista, phi=None) * 220\n MN_b_plot, = ax.plot(lista, MN_Bulge, linestyle='--', c='gray')\n composite_pot_array.append(MN_Bulge_p)\n if MN_td_plot.get_visible() == True:\n MN_Thin_Disk = calcRotcurve(MN_Thin_Disk_p, lista, phi=None) * 220\n MN_td_plot, = ax.plot(lista, 
MN_Thin_Disk, linestyle='--', c='purple')\n composite_pot_array.append(MN_Thin_Disk_p)\n if MN_tkd_plot.get_visible() == True:\n MN_Thick_Disk = calcRotcurve(MN_Thick_Disk_p, lista, phi=None) * 220\n MN_tkd_plot, = ax.plot(lista, MN_Thick_Disk, linestyle='--', c='blue')\n composite_pot_array.append(MN_Thick_Disk_p)\n if NFW_plot.get_visible() == True:\n NFW = calcRotcurve(NFW_p, lista, phi=None) * 220\n NFW_plot, = ax.plot(lista, NFW, linestyle='--', c='green')\n composite_pot_array.append(NFW_p)\n if EX_d_plot.get_visible() == True:\n EX_Disk = calcRotcurve(EX_Disk_p, lista, phi=None) * 220\n EX_d_plot, = ax.plot(lista, EX_Disk, linestyle='--', c='cyan')\n composite_pot_array.append(EX_Disk_p)\n if BK_plot.get_visible() == True:\n BK = calcRotcurve(BK_p, lista, phi=None) * 220\n BK_plot, = ax.plot(lista, BK, linestyle='--', c='orange')\n composite_pot_array.append(BK_p)\n CV_galaxy = ax.errorbar(r_data, v_c_data, v_c_err_data, c='k', fmt='',\n ls='none')\n CV_galaxy_dot = ax.scatter(r_data, v_c_data, c='k')\n v_circ_comp = calcRotcurve(composite_pot_array, lista, phi=None) * 220\n v_circ_comp_plot, = ax.plot(lista, v_circ_comp, c='k')\n\n\n<code token>\n\n\ndef reset(event):\n MN_b_amp_s.reset()\n MN_b_a_s.reset()\n MN_b_b_s.reset()\n MN_td_amp_s.reset()\n MN_td_a_s.reset()\n MN_td_b_s.reset()\n MN_tkd_amp_s.reset()\n MN_tkd_a_s.reset()\n MN_tkd_b_s.reset()\n MN_ed_amp_s.reset()\n MN_ed_a_s.reset()\n NFW_amp_s.reset()\n NFW_a_s.reset()\n BK_amp_s.reset()\n BK_a_s.reset()\n\n\n<assignment token>\n<code token>\n<function token>\n<code token>\n<docstring token>\n<assignment token>\n\n\ndef start(event):\n plt.close(1)\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef model(parameters, R):\n global chk, para_labels, aa\n para = {}\n for i in range(len(para_labels)):\n para[para_labels[i]] = parameters[i]\n r_0 = 1 * units.kpc\n v_0 = 220 * units.km / units.s\n check_pot = []\n if chk[0] == True:\n if aa[0] == 0.0:\n a1 = 0.0\n amp1 = para['amp1']\n b1 = para['b1']\n else:\n amp1 = para['amp1']\n a1 = para['a1']\n b1 = para['b1']\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Bulge_p)\n if chk[1] == True:\n amp2 = para['amp2']\n a2 = para['a2']\n b2 = para['b2']\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Thin_Disk_p)\n if chk[2] == True:\n amp3 = para['amp3']\n a3 = para['a3']\n b3 = para['b3']\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Thick_Disk_p)\n if chk[3] == True:\n amp4 = para['amp4']\n h_r = para['h_r']\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4 * (units.\n Msun / units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=\n 0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n check_pot.append(EX_Disk_p)\n if chk[4] == True:\n amp5 = para['amp5']\n a5 = para['a5']\n NFW_p = NFWPotential(amp=amp5 * units.Msun, a=a5 * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n check_pot.append(NFW_p)\n if chk[5] == True:\n amp6 = para['amp6']\n a6 = para['a6']\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n a6 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(BK_p)\n vc_total = calcRotcurve(check_pot, R, phi=None) * 220\n return vc_total\n\n\n<function 
token>\n<function token>\n<function token>\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Index(object):\n ind = 0\n\n def next(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind += 1\n if self.ind >= ndim:\n self.ind = 0\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n def prev(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind -= 1\n if self.ind == -1:\n self.ind = ndim - 1\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef burn(event):\n plt.close()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n\n\ndef mass_nfw(x, rho_0, a):\n return 4.0 * np.pi * rho_0 * a ** 3 * (np.log(1 + x) - x / (1 + x))\n\n\ndef eq_b(x, rho_0, rho_c):\n global Delta_c\n return 2.0 * np.log(1 + x) + np.log(1 + x ** 2) - 2.0 * np.arctan(x\n ) - 4.0 * Delta_c * rho_c / (3.0 * rho_0) * x ** 3\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef MN_b_b_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n b1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_amp_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n amp2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_a_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n a2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=\n val * units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_b_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n b2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n<function token>\n\n\ndef MN_tkd_b_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n b3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_ed_amp_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n amp4 = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=val * (units.Msun /\n units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=0.001,\n normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\n<function token>\n\n\ndef NFW_amp_s_func(val):\n if NFW_plot.get_visible() == True:\n global NFW_p, amp5, a5\n amp5 = val * 1\n NFW_p = NFWPotential(amp=val * units.Msun, a=a5 * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n\n\ndef BK_amp_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n amp6 = val * 1\n BK_p = BurkertPotential(amp=val * units.Msun / units.kpc ** 3, a=a6 *\n units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef BK_a_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n a6 = val * 1\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef update_rot_curve():\n ax.clear()\n global MN_b_plot, MN_Bulge_p, MN_Thin_Disk_p, MN_Thick_Disk_p, MN_td_plot, MN_tkd_plot, NFW_p, NFW_plot, EX_d_plot, EX_Disk_p, CV_galaxy, CV_galaxy_dot, BK_p, BK_plot\n composite_pot_array = []\n ax.set_xlabel('$R(kpc)$', fontsize=20)\n ax.set_ylabel('$v_c(km/s)$', fontsize=20)\n ax.tick_params(axis='both', which='both', labelsize=15)\n ax.set_xlim([0, 1.02 * r_data[-1]])\n ax.set_ylim([0, np.max(v_c_data) * 1.2])\n if MN_b_plot.get_visible() == True:\n MN_Bulge = calcRotcurve(MN_Bulge_p, lista, phi=None) * 220\n MN_b_plot, = ax.plot(lista, MN_Bulge, linestyle='--', c='gray')\n composite_pot_array.append(MN_Bulge_p)\n if MN_td_plot.get_visible() == True:\n MN_Thin_Disk = calcRotcurve(MN_Thin_Disk_p, lista, phi=None) * 220\n MN_td_plot, = ax.plot(lista, 
MN_Thin_Disk, linestyle='--', c='purple')\n composite_pot_array.append(MN_Thin_Disk_p)\n if MN_tkd_plot.get_visible() == True:\n MN_Thick_Disk = calcRotcurve(MN_Thick_Disk_p, lista, phi=None) * 220\n MN_tkd_plot, = ax.plot(lista, MN_Thick_Disk, linestyle='--', c='blue')\n composite_pot_array.append(MN_Thick_Disk_p)\n if NFW_plot.get_visible() == True:\n NFW = calcRotcurve(NFW_p, lista, phi=None) * 220\n NFW_plot, = ax.plot(lista, NFW, linestyle='--', c='green')\n composite_pot_array.append(NFW_p)\n if EX_d_plot.get_visible() == True:\n EX_Disk = calcRotcurve(EX_Disk_p, lista, phi=None) * 220\n EX_d_plot, = ax.plot(lista, EX_Disk, linestyle='--', c='cyan')\n composite_pot_array.append(EX_Disk_p)\n if BK_plot.get_visible() == True:\n BK = calcRotcurve(BK_p, lista, phi=None) * 220\n BK_plot, = ax.plot(lista, BK, linestyle='--', c='orange')\n composite_pot_array.append(BK_p)\n CV_galaxy = ax.errorbar(r_data, v_c_data, v_c_err_data, c='k', fmt='',\n ls='none')\n CV_galaxy_dot = ax.scatter(r_data, v_c_data, c='k')\n v_circ_comp = calcRotcurve(composite_pot_array, lista, phi=None) * 220\n v_circ_comp_plot, = ax.plot(lista, v_circ_comp, c='k')\n\n\n<code token>\n\n\ndef reset(event):\n MN_b_amp_s.reset()\n MN_b_a_s.reset()\n MN_b_b_s.reset()\n MN_td_amp_s.reset()\n MN_td_a_s.reset()\n MN_td_b_s.reset()\n MN_tkd_amp_s.reset()\n MN_tkd_a_s.reset()\n MN_tkd_b_s.reset()\n MN_ed_amp_s.reset()\n MN_ed_a_s.reset()\n NFW_amp_s.reset()\n NFW_a_s.reset()\n BK_amp_s.reset()\n BK_a_s.reset()\n\n\n<assignment token>\n<code token>\n<function token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef model(parameters, R):\n global chk, para_labels, aa\n para = {}\n for i in range(len(para_labels)):\n para[para_labels[i]] = parameters[i]\n r_0 = 1 * units.kpc\n v_0 = 220 * units.km / units.s\n check_pot = []\n if chk[0] == True:\n if aa[0] == 0.0:\n a1 = 0.0\n amp1 = para['amp1']\n b1 = para['b1']\n else:\n amp1 = para['amp1']\n a1 = para['a1']\n b1 = para['b1']\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=b1 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Bulge_p)\n if chk[1] == True:\n amp2 = para['amp2']\n a2 = para['a2']\n b2 = para['b2']\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Thin_Disk_p)\n if chk[2] == True:\n amp3 = para['amp3']\n a3 = para['a3']\n b3 = para['b3']\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=b3 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(MN_Thick_Disk_p)\n if chk[3] == True:\n amp4 = para['amp4']\n h_r = para['h_r']\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=amp4 * (units.\n Msun / units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=\n 0.001, normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n check_pot.append(EX_Disk_p)\n if chk[4] == True:\n amp5 = para['amp5']\n a5 = para['a5']\n NFW_p = NFWPotential(amp=amp5 * units.Msun, a=a5 * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n check_pot.append(NFW_p)\n if chk[5] == True:\n amp6 = para['amp6']\n a6 = para['a6']\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n a6 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n check_pot.append(BK_p)\n vc_total = calcRotcurve(check_pot, R, phi=None) * 220\n return vc_total\n\n\n<function token>\n<function token>\n<function 
token>\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Index(object):\n ind = 0\n\n def next(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind += 1\n if self.ind >= ndim:\n self.ind = 0\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n def prev(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind -= 1\n if self.ind == -1:\n self.ind = ndim - 1\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef burn(event):\n plt.close()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n\n\ndef mass_nfw(x, rho_0, a):\n return 4.0 * np.pi * rho_0 * a ** 3 * (np.log(1 + x) - x / (1 + x))\n\n\ndef eq_b(x, rho_0, rho_c):\n global Delta_c\n return 2.0 * np.log(1 + x) + np.log(1 + x ** 2) - 2.0 * np.arctan(x\n ) - 4.0 * Delta_c * rho_c / (3.0 * rho_0) * x ** 3\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef MN_b_b_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n b1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_amp_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n amp2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_a_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n a2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=\n val * units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_b_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n b2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n<function token>\n\n\ndef MN_tkd_b_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n b3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_ed_amp_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n amp4 = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=val * (units.Msun /\n units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=0.001,\n normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\n<function token>\n\n\ndef NFW_amp_s_func(val):\n if NFW_plot.get_visible() == True:\n global NFW_p, amp5, a5\n amp5 = val * 1\n NFW_p = NFWPotential(amp=val * units.Msun, a=a5 * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n\n\ndef BK_amp_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n amp6 = val * 1\n BK_p = BurkertPotential(amp=val * units.Msun / units.kpc ** 3, a=a6 *\n units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef BK_a_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n a6 = val * 1\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef update_rot_curve():\n ax.clear()\n global MN_b_plot, MN_Bulge_p, MN_Thin_Disk_p, MN_Thick_Disk_p, MN_td_plot, MN_tkd_plot, NFW_p, NFW_plot, EX_d_plot, EX_Disk_p, CV_galaxy, CV_galaxy_dot, BK_p, BK_plot\n composite_pot_array = []\n ax.set_xlabel('$R(kpc)$', fontsize=20)\n ax.set_ylabel('$v_c(km/s)$', fontsize=20)\n ax.tick_params(axis='both', which='both', labelsize=15)\n ax.set_xlim([0, 1.02 * r_data[-1]])\n ax.set_ylim([0, np.max(v_c_data) * 1.2])\n if MN_b_plot.get_visible() == True:\n MN_Bulge = calcRotcurve(MN_Bulge_p, lista, phi=None) * 220\n MN_b_plot, = ax.plot(lista, MN_Bulge, linestyle='--', c='gray')\n composite_pot_array.append(MN_Bulge_p)\n if MN_td_plot.get_visible() == True:\n MN_Thin_Disk = calcRotcurve(MN_Thin_Disk_p, lista, phi=None) * 220\n MN_td_plot, = ax.plot(lista, 
MN_Thin_Disk, linestyle='--', c='purple')\n composite_pot_array.append(MN_Thin_Disk_p)\n if MN_tkd_plot.get_visible() == True:\n MN_Thick_Disk = calcRotcurve(MN_Thick_Disk_p, lista, phi=None) * 220\n MN_tkd_plot, = ax.plot(lista, MN_Thick_Disk, linestyle='--', c='blue')\n composite_pot_array.append(MN_Thick_Disk_p)\n if NFW_plot.get_visible() == True:\n NFW = calcRotcurve(NFW_p, lista, phi=None) * 220\n NFW_plot, = ax.plot(lista, NFW, linestyle='--', c='green')\n composite_pot_array.append(NFW_p)\n if EX_d_plot.get_visible() == True:\n EX_Disk = calcRotcurve(EX_Disk_p, lista, phi=None) * 220\n EX_d_plot, = ax.plot(lista, EX_Disk, linestyle='--', c='cyan')\n composite_pot_array.append(EX_Disk_p)\n if BK_plot.get_visible() == True:\n BK = calcRotcurve(BK_p, lista, phi=None) * 220\n BK_plot, = ax.plot(lista, BK, linestyle='--', c='orange')\n composite_pot_array.append(BK_p)\n CV_galaxy = ax.errorbar(r_data, v_c_data, v_c_err_data, c='k', fmt='',\n ls='none')\n CV_galaxy_dot = ax.scatter(r_data, v_c_data, c='k')\n v_circ_comp = calcRotcurve(composite_pot_array, lista, phi=None) * 220\n v_circ_comp_plot, = ax.plot(lista, v_circ_comp, c='k')\n\n\n<code token>\n\n\ndef reset(event):\n MN_b_amp_s.reset()\n MN_b_a_s.reset()\n MN_b_b_s.reset()\n MN_td_amp_s.reset()\n MN_td_a_s.reset()\n MN_td_b_s.reset()\n MN_tkd_amp_s.reset()\n MN_tkd_a_s.reset()\n MN_tkd_b_s.reset()\n MN_ed_amp_s.reset()\n MN_ed_a_s.reset()\n NFW_amp_s.reset()\n NFW_a_s.reset()\n BK_amp_s.reset()\n BK_a_s.reset()\n\n\n<assignment token>\n<code token>\n<function token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Index(object):\n ind = 0\n\n def next(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind += 1\n if self.ind >= ndim:\n self.ind = 0\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n def prev(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind -= 1\n if self.ind == -1:\n self.ind = ndim - 1\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef burn(event):\n plt.close()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n\n\ndef mass_nfw(x, rho_0, a):\n return 4.0 * np.pi * rho_0 * a ** 3 * (np.log(1 + x) - x / (1 + x))\n\n\ndef eq_b(x, rho_0, rho_c):\n global Delta_c\n return 2.0 * 
np.log(1 + x) + np.log(1 + x ** 2) - 2.0 * np.arctan(x\n ) - 4.0 * Delta_c * rho_c / (3.0 * rho_0) * x ** 3\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef MN_b_b_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n b1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_amp_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n amp2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_a_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n a2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=\n val * units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_b_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n b2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n<function token>\n\n\ndef MN_tkd_b_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n b3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_ed_amp_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n amp4 = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=val * (units.Msun /\n units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=0.001,\n normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\n<function token>\n\n\ndef NFW_amp_s_func(val):\n if NFW_plot.get_visible() == True:\n global NFW_p, amp5, a5\n amp5 = val * 1\n NFW_p = NFWPotential(amp=val * units.Msun, a=a5 * units.kpc,\n normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n\n\ndef BK_amp_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n amp6 = val * 1\n BK_p = BurkertPotential(amp=val * units.Msun / units.kpc ** 3, a=a6 *\n units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef BK_a_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n a6 = val * 1\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef update_rot_curve():\n ax.clear()\n global MN_b_plot, MN_Bulge_p, MN_Thin_Disk_p, MN_Thick_Disk_p, MN_td_plot, MN_tkd_plot, NFW_p, NFW_plot, EX_d_plot, EX_Disk_p, CV_galaxy, CV_galaxy_dot, BK_p, BK_plot\n composite_pot_array = []\n ax.set_xlabel('$R(kpc)$', fontsize=20)\n ax.set_ylabel('$v_c(km/s)$', fontsize=20)\n ax.tick_params(axis='both', which='both', labelsize=15)\n ax.set_xlim([0, 1.02 * r_data[-1]])\n ax.set_ylim([0, np.max(v_c_data) * 1.2])\n if MN_b_plot.get_visible() == True:\n MN_Bulge = calcRotcurve(MN_Bulge_p, lista, phi=None) * 220\n MN_b_plot, = ax.plot(lista, MN_Bulge, linestyle='--', c='gray')\n composite_pot_array.append(MN_Bulge_p)\n if MN_td_plot.get_visible() == True:\n MN_Thin_Disk = calcRotcurve(MN_Thin_Disk_p, lista, phi=None) * 220\n MN_td_plot, = ax.plot(lista, 
MN_Thin_Disk, linestyle='--', c='purple')\n composite_pot_array.append(MN_Thin_Disk_p)\n if MN_tkd_plot.get_visible() == True:\n MN_Thick_Disk = calcRotcurve(MN_Thick_Disk_p, lista, phi=None) * 220\n MN_tkd_plot, = ax.plot(lista, MN_Thick_Disk, linestyle='--', c='blue')\n composite_pot_array.append(MN_Thick_Disk_p)\n if NFW_plot.get_visible() == True:\n NFW = calcRotcurve(NFW_p, lista, phi=None) * 220\n NFW_plot, = ax.plot(lista, NFW, linestyle='--', c='green')\n composite_pot_array.append(NFW_p)\n if EX_d_plot.get_visible() == True:\n EX_Disk = calcRotcurve(EX_Disk_p, lista, phi=None) * 220\n EX_d_plot, = ax.plot(lista, EX_Disk, linestyle='--', c='cyan')\n composite_pot_array.append(EX_Disk_p)\n if BK_plot.get_visible() == True:\n BK = calcRotcurve(BK_p, lista, phi=None) * 220\n BK_plot, = ax.plot(lista, BK, linestyle='--', c='orange')\n composite_pot_array.append(BK_p)\n CV_galaxy = ax.errorbar(r_data, v_c_data, v_c_err_data, c='k', fmt='',\n ls='none')\n CV_galaxy_dot = ax.scatter(r_data, v_c_data, c='k')\n v_circ_comp = calcRotcurve(composite_pot_array, lista, phi=None) * 220\n v_circ_comp_plot, = ax.plot(lista, v_circ_comp, c='k')\n\n\n<code token>\n\n\ndef reset(event):\n MN_b_amp_s.reset()\n MN_b_a_s.reset()\n MN_b_b_s.reset()\n MN_td_amp_s.reset()\n MN_td_a_s.reset()\n MN_td_b_s.reset()\n MN_tkd_amp_s.reset()\n MN_tkd_a_s.reset()\n MN_tkd_b_s.reset()\n MN_ed_amp_s.reset()\n MN_ed_a_s.reset()\n NFW_amp_s.reset()\n NFW_a_s.reset()\n BK_amp_s.reset()\n BK_a_s.reset()\n\n\n<assignment token>\n<code token>\n<function token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Index(object):\n ind = 0\n\n def next(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind += 1\n if self.ind >= ndim:\n self.ind = 0\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n def prev(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind -= 1\n if self.ind == -1:\n self.ind = ndim - 1\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef burn(event):\n plt.close()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n\n\ndef mass_nfw(x, rho_0, a):\n return 4.0 * np.pi * rho_0 * a ** 3 * (np.log(1 + x) - x / (1 + x))\n\n\n<function token>\n<function token>\n<assignment 
token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef MN_b_b_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n b1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_amp_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n amp2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_a_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n a2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=\n val * units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_b_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n b2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n<function token>\n\n\ndef MN_tkd_b_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n b3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_ed_amp_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n amp4 = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=val * (units.Msun /\n units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=0.001,\n normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef BK_amp_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n amp6 = val * 1\n BK_p = BurkertPotential(amp=val * units.Msun / units.kpc ** 3, a=a6 *\n units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef BK_a_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n a6 = val * 1\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef update_rot_curve():\n ax.clear()\n global MN_b_plot, MN_Bulge_p, MN_Thin_Disk_p, MN_Thick_Disk_p, MN_td_plot, MN_tkd_plot, NFW_p, NFW_plot, EX_d_plot, EX_Disk_p, CV_galaxy, CV_galaxy_dot, BK_p, BK_plot\n composite_pot_array = []\n ax.set_xlabel('$R(kpc)$', fontsize=20)\n ax.set_ylabel('$v_c(km/s)$', fontsize=20)\n ax.tick_params(axis='both', which='both', labelsize=15)\n ax.set_xlim([0, 1.02 * r_data[-1]])\n ax.set_ylim([0, np.max(v_c_data) * 1.2])\n if MN_b_plot.get_visible() == True:\n MN_Bulge = calcRotcurve(MN_Bulge_p, lista, phi=None) * 220\n MN_b_plot, = ax.plot(lista, MN_Bulge, linestyle='--', c='gray')\n composite_pot_array.append(MN_Bulge_p)\n if MN_td_plot.get_visible() == True:\n MN_Thin_Disk = calcRotcurve(MN_Thin_Disk_p, lista, phi=None) * 220\n MN_td_plot, = ax.plot(lista, MN_Thin_Disk, linestyle='--', c='purple')\n composite_pot_array.append(MN_Thin_Disk_p)\n if MN_tkd_plot.get_visible() == True:\n MN_Thick_Disk = calcRotcurve(MN_Thick_Disk_p, lista, phi=None) * 220\n MN_tkd_plot, = 
ax.plot(lista, MN_Thick_Disk, linestyle='--', c='blue')\n composite_pot_array.append(MN_Thick_Disk_p)\n if NFW_plot.get_visible() == True:\n NFW = calcRotcurve(NFW_p, lista, phi=None) * 220\n NFW_plot, = ax.plot(lista, NFW, linestyle='--', c='green')\n composite_pot_array.append(NFW_p)\n if EX_d_plot.get_visible() == True:\n EX_Disk = calcRotcurve(EX_Disk_p, lista, phi=None) * 220\n EX_d_plot, = ax.plot(lista, EX_Disk, linestyle='--', c='cyan')\n composite_pot_array.append(EX_Disk_p)\n if BK_plot.get_visible() == True:\n BK = calcRotcurve(BK_p, lista, phi=None) * 220\n BK_plot, = ax.plot(lista, BK, linestyle='--', c='orange')\n composite_pot_array.append(BK_p)\n CV_galaxy = ax.errorbar(r_data, v_c_data, v_c_err_data, c='k', fmt='',\n ls='none')\n CV_galaxy_dot = ax.scatter(r_data, v_c_data, c='k')\n v_circ_comp = calcRotcurve(composite_pot_array, lista, phi=None) * 220\n v_circ_comp_plot, = ax.plot(lista, v_circ_comp, c='k')\n\n\n<code token>\n\n\ndef reset(event):\n MN_b_amp_s.reset()\n MN_b_a_s.reset()\n MN_b_b_s.reset()\n MN_td_amp_s.reset()\n MN_td_a_s.reset()\n MN_td_b_s.reset()\n MN_tkd_amp_s.reset()\n MN_tkd_a_s.reset()\n MN_tkd_b_s.reset()\n MN_ed_amp_s.reset()\n MN_ed_a_s.reset()\n NFW_amp_s.reset()\n NFW_a_s.reset()\n BK_amp_s.reset()\n BK_a_s.reset()\n\n\n<assignment token>\n<code token>\n<function token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Index(object):\n ind = 0\n\n def next(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind += 1\n if self.ind >= ndim:\n self.ind = 0\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n def prev(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind -= 1\n if self.ind == -1:\n self.ind = ndim - 1\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef burn(event):\n plt.close()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n\n\ndef mass_nfw(x, rho_0, a):\n return 4.0 * np.pi * rho_0 * a ** 3 * (np.log(1 + x) - x / (1 + x))\n\n\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef MN_b_b_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n b1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_amp_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n amp2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_a_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n a2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=\n val * units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_b_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n b2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n<function token>\n\n\ndef MN_tkd_b_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n b3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_ed_amp_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n amp4 = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=val * (units.Msun /\n units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=0.001,\n normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef BK_amp_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n amp6 = val * 1\n BK_p = BurkertPotential(amp=val * units.Msun / units.kpc ** 3, a=a6 *\n units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef BK_a_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n a6 = val * 1\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef update_rot_curve():\n ax.clear()\n global MN_b_plot, MN_Bulge_p, MN_Thin_Disk_p, MN_Thick_Disk_p, MN_td_plot, MN_tkd_plot, NFW_p, NFW_plot, EX_d_plot, EX_Disk_p, CV_galaxy, CV_galaxy_dot, BK_p, BK_plot\n composite_pot_array = []\n ax.set_xlabel('$R(kpc)$', fontsize=20)\n ax.set_ylabel('$v_c(km/s)$', fontsize=20)\n ax.tick_params(axis='both', which='both', labelsize=15)\n ax.set_xlim([0, 1.02 * r_data[-1]])\n ax.set_ylim([0, np.max(v_c_data) * 1.2])\n if MN_b_plot.get_visible() == True:\n MN_Bulge = calcRotcurve(MN_Bulge_p, lista, phi=None) * 220\n MN_b_plot, = ax.plot(lista, MN_Bulge, linestyle='--', c='gray')\n composite_pot_array.append(MN_Bulge_p)\n if MN_td_plot.get_visible() == True:\n MN_Thin_Disk = calcRotcurve(MN_Thin_Disk_p, lista, phi=None) * 220\n MN_td_plot, = ax.plot(lista, MN_Thin_Disk, linestyle='--', c='purple')\n composite_pot_array.append(MN_Thin_Disk_p)\n if MN_tkd_plot.get_visible() == True:\n MN_Thick_Disk = calcRotcurve(MN_Thick_Disk_p, lista, phi=None) * 220\n MN_tkd_plot, = 
ax.plot(lista, MN_Thick_Disk, linestyle='--', c='blue')\n composite_pot_array.append(MN_Thick_Disk_p)\n if NFW_plot.get_visible() == True:\n NFW = calcRotcurve(NFW_p, lista, phi=None) * 220\n NFW_plot, = ax.plot(lista, NFW, linestyle='--', c='green')\n composite_pot_array.append(NFW_p)\n if EX_d_plot.get_visible() == True:\n EX_Disk = calcRotcurve(EX_Disk_p, lista, phi=None) * 220\n EX_d_plot, = ax.plot(lista, EX_Disk, linestyle='--', c='cyan')\n composite_pot_array.append(EX_Disk_p)\n if BK_plot.get_visible() == True:\n BK = calcRotcurve(BK_p, lista, phi=None) * 220\n BK_plot, = ax.plot(lista, BK, linestyle='--', c='orange')\n composite_pot_array.append(BK_p)\n CV_galaxy = ax.errorbar(r_data, v_c_data, v_c_err_data, c='k', fmt='',\n ls='none')\n CV_galaxy_dot = ax.scatter(r_data, v_c_data, c='k')\n v_circ_comp = calcRotcurve(composite_pot_array, lista, phi=None) * 220\n v_circ_comp_plot, = ax.plot(lista, v_circ_comp, c='k')\n\n\n<code token>\n\n\ndef reset(event):\n MN_b_amp_s.reset()\n MN_b_a_s.reset()\n MN_b_b_s.reset()\n MN_td_amp_s.reset()\n MN_td_a_s.reset()\n MN_td_b_s.reset()\n MN_tkd_amp_s.reset()\n MN_tkd_a_s.reset()\n MN_tkd_b_s.reset()\n MN_ed_amp_s.reset()\n MN_ed_a_s.reset()\n NFW_amp_s.reset()\n NFW_a_s.reset()\n BK_amp_s.reset()\n BK_a_s.reset()\n\n\n<assignment token>\n<code token>\n<function token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Index(object):\n ind = 0\n\n def next(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind += 1\n if self.ind >= ndim:\n self.ind = 0\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n def prev(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind -= 1\n if self.ind == -1:\n self.ind = ndim - 1\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n\n\ndef mass_nfw(x, rho_0, a):\n return 4.0 * np.pi * rho_0 * a ** 3 * (np.log(1 + x) - x / (1 + x))\n\n\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef MN_b_b_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n b1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_amp_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n amp2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_a_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n a2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=\n val * units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_b_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n b2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n<function token>\n\n\ndef MN_tkd_b_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n b3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_ed_amp_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n amp4 = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=val * (units.Msun /\n units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=0.001,\n normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef BK_amp_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n amp6 = val * 1\n BK_p = BurkertPotential(amp=val * units.Msun / units.kpc ** 3, a=a6 *\n units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef BK_a_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n a6 = val * 1\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n<code token>\n\n\ndef reset(event):\n MN_b_amp_s.reset()\n MN_b_a_s.reset()\n MN_b_b_s.reset()\n MN_td_amp_s.reset()\n MN_td_a_s.reset()\n MN_td_b_s.reset()\n MN_tkd_amp_s.reset()\n MN_tkd_a_s.reset()\n MN_tkd_b_s.reset()\n MN_ed_amp_s.reset()\n MN_ed_a_s.reset()\n NFW_amp_s.reset()\n NFW_a_s.reset()\n BK_amp_s.reset()\n BK_a_s.reset()\n\n\n<assignment token>\n<code token>\n<function token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Index(object):\n ind = 0\n\n def next(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind += 1\n if self.ind >= ndim:\n self.ind = 
0\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n def prev(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind -= 1\n if self.ind == -1:\n self.ind = ndim - 1\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n\n\ndef mass_nfw(x, rho_0, a):\n return 4.0 * np.pi * rho_0 * a ** 3 * (np.log(1 + x) - x / (1 + x))\n\n\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef MN_b_b_s_func(val):\n if MN_b_plot.get_visible() == True:\n global MN_Bulge_p, amp1, a1, b1\n b1 = val * 1\n MN_Bulge_p = MiyamotoNagaiPotential(amp=amp1 * units.Msun, a=a1 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_amp_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n amp2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_a_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n a2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=\n val * units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_b_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n b2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n<function token>\n\n\ndef MN_tkd_b_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n b3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_ed_amp_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n amp4 = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=val * (units.Msun /\n units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=0.001,\n normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef BK_amp_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n amp6 = val * 1\n BK_p = BurkertPotential(amp=val * units.Msun / units.kpc ** 3, a=a6 *\n units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef BK_a_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n a6 = val * 1\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n<code token>\n\n\ndef reset(event):\n MN_b_amp_s.reset()\n MN_b_a_s.reset()\n MN_b_b_s.reset()\n MN_td_amp_s.reset()\n MN_td_a_s.reset()\n MN_td_b_s.reset()\n MN_tkd_amp_s.reset()\n MN_tkd_a_s.reset()\n MN_tkd_b_s.reset()\n MN_ed_amp_s.reset()\n MN_ed_a_s.reset()\n NFW_amp_s.reset()\n NFW_a_s.reset()\n BK_amp_s.reset()\n BK_a_s.reset()\n\n\n<assignment token>\n<code token>\n<function token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Index(object):\n ind = 0\n\n def next(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind += 1\n if self.ind >= ndim:\n self.ind = 
0\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n def prev(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind -= 1\n if self.ind == -1:\n self.ind = ndim - 1\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef MN_td_amp_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n amp2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=val * units.Msun, a=a2 *\n units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_a_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n a2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=\n val * units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_b_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n b2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n<function token>\n\n\ndef MN_tkd_b_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n b3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_ed_amp_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n amp4 = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=val * (units.Msun /\n units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=0.001,\n normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef BK_amp_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n amp6 = val * 1\n BK_p = BurkertPotential(amp=val * units.Msun / units.kpc ** 3, a=a6 *\n units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef BK_a_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n a6 = val * 1\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n<code token>\n\n\ndef reset(event):\n MN_b_amp_s.reset()\n MN_b_a_s.reset()\n MN_b_b_s.reset()\n MN_td_amp_s.reset()\n MN_td_a_s.reset()\n MN_td_b_s.reset()\n MN_tkd_amp_s.reset()\n MN_tkd_a_s.reset()\n MN_tkd_b_s.reset()\n MN_ed_amp_s.reset()\n MN_ed_a_s.reset()\n NFW_amp_s.reset()\n NFW_a_s.reset()\n BK_amp_s.reset()\n BK_a_s.reset()\n\n\n<assignment token>\n<code token>\n<function token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Index(object):\n ind = 0\n\n def next(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind += 1\n if self.ind >= ndim:\n self.ind = 0\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n 
ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n def prev(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind -= 1\n if self.ind == -1:\n self.ind = ndim - 1\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef MN_td_a_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n a2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=\n val * units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_b_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n b2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n<function token>\n\n\ndef MN_tkd_b_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n b3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_ed_amp_s_func(val):\n if EX_d_plot.get_visible() == True:\n global EX_Disk_p, amp4, h_r\n amp4 = val * 1\n EX_Disk_p = RazorThinExponentialDiskPotential(amp=val * (units.Msun /\n units.pc ** 2), hr=h_r * units.kpc, maxiter=20, tol=0.001,\n normalize=False, ro=r_0, vo=v_0, new=True, glorder=100)\n update_rot_curve()\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef BK_amp_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n amp6 = val * 1\n BK_p = BurkertPotential(amp=val * units.Msun / units.kpc ** 3, a=a6 *\n units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef BK_a_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n a6 = val * 1\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n<code token>\n\n\ndef reset(event):\n MN_b_amp_s.reset()\n MN_b_a_s.reset()\n MN_b_b_s.reset()\n MN_td_amp_s.reset()\n MN_td_a_s.reset()\n MN_td_b_s.reset()\n MN_tkd_amp_s.reset()\n MN_tkd_a_s.reset()\n MN_tkd_b_s.reset()\n MN_ed_amp_s.reset()\n MN_ed_a_s.reset()\n NFW_amp_s.reset()\n NFW_a_s.reset()\n BK_amp_s.reset()\n BK_a_s.reset()\n\n\n<assignment token>\n<code token>\n<function token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Index(object):\n ind = 0\n\n def next(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind += 1\n if self.ind >= ndim:\n self.ind = 0\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n def prev(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind -= 1\n if self.ind 
== -1:\n self.ind = ndim - 1\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef MN_td_a_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n a2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=\n val * units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef MN_td_b_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n b2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=a2 *\n units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n<function token>\n\n\ndef MN_tkd_b_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n b3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef BK_amp_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n amp6 = val * 1\n BK_p = BurkertPotential(amp=val * units.Msun / units.kpc ** 3, a=a6 *\n units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef BK_a_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n a6 = val * 1\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n<code token>\n\n\ndef reset(event):\n MN_b_amp_s.reset()\n MN_b_a_s.reset()\n MN_b_b_s.reset()\n MN_td_amp_s.reset()\n MN_td_a_s.reset()\n MN_td_b_s.reset()\n MN_tkd_amp_s.reset()\n MN_tkd_a_s.reset()\n MN_tkd_b_s.reset()\n MN_ed_amp_s.reset()\n MN_ed_a_s.reset()\n NFW_amp_s.reset()\n NFW_a_s.reset()\n BK_amp_s.reset()\n BK_a_s.reset()\n\n\n<assignment token>\n<code token>\n<function token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Index(object):\n ind = 0\n\n def next(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind += 1\n if self.ind >= ndim:\n self.ind = 0\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n def prev(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind -= 1\n if self.ind == -1:\n self.ind = ndim - 1\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n 
ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef MN_td_a_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n a2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=\n val * units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef MN_tkd_b_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n b3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef BK_amp_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n amp6 = val * 1\n BK_p = BurkertPotential(amp=val * units.Msun / units.kpc ** 3, a=a6 *\n units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef BK_a_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n a6 = val * 1\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n<code token>\n\n\ndef reset(event):\n MN_b_amp_s.reset()\n MN_b_a_s.reset()\n MN_b_b_s.reset()\n MN_td_amp_s.reset()\n MN_td_a_s.reset()\n MN_td_b_s.reset()\n MN_tkd_amp_s.reset()\n MN_tkd_a_s.reset()\n MN_tkd_b_s.reset()\n MN_ed_amp_s.reset()\n MN_ed_a_s.reset()\n NFW_amp_s.reset()\n NFW_a_s.reset()\n BK_amp_s.reset()\n BK_a_s.reset()\n\n\n<assignment token>\n<code token>\n<function token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Index(object):\n ind = 0\n\n def next(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind += 1\n if self.ind >= ndim:\n self.ind = 0\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n def prev(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind -= 1\n if self.ind == -1:\n self.ind = ndim - 1\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<assignment 
token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef MN_td_a_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n a2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=\n val * units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef MN_tkd_b_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n b3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef BK_amp_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n amp6 = val * 1\n BK_p = BurkertPotential(amp=val * units.Msun / units.kpc ** 3, a=a6 *\n units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\ndef BK_a_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n a6 = val * 1\n BK_p = BurkertPotential(amp=amp6 * units.Msun / units.kpc ** 3, a=\n val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<function token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Index(object):\n ind = 0\n\n def next(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind += 1\n if self.ind >= ndim:\n self.ind = 0\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n def prev(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind -= 1\n if self.ind == -1:\n self.ind = ndim - 1\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef MN_td_a_s_func(val):\n if MN_td_plot.get_visible() == True:\n global MN_Thin_Disk_p, amp2, a2, b2\n a2 = val * 1\n MN_Thin_Disk_p = MiyamotoNagaiPotential(amp=amp2 * units.Msun, a=\n val * units.kpc, b=b2 * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef MN_tkd_b_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n b3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef BK_amp_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n amp6 = val * 1\n BK_p = BurkertPotential(amp=val * units.Msun / units.kpc ** 3, a=a6 *\n units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n<function token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<function token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Index(object):\n ind = 0\n\n def next(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind += 1\n if self.ind >= ndim:\n self.ind = 0\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n def prev(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind -= 1\n if self.ind == -1:\n self.ind = ndim - 1\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef MN_tkd_b_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n b3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef BK_amp_s_func(val):\n if BK_plot.get_visible() == True:\n global BK_p, amp6, a6\n amp6 = val * 1\n BK_p = BurkertPotential(amp=val * units.Msun / units.kpc ** 3, a=a6 *\n units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n<function token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<function token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Index(object):\n ind = 0\n\n def next(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind += 1\n if self.ind >= ndim:\n self.ind = 0\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n def prev(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind -= 1\n if self.ind == -1:\n self.ind = ndim - 1\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef MN_tkd_b_s_func(val):\n if MN_tkd_plot.get_visible() == True:\n global MN_Thick_Disk_p, amp3, a3, b3\n b3 = val * 1\n MN_Thick_Disk_p = MiyamotoNagaiPotential(amp=amp3 * units.Msun, a=\n a3 * units.kpc, b=val * units.kpc, normalize=False, ro=r_0, vo=v_0)\n update_rot_curve()\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<function token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Index(object):\n ind = 0\n\n def next(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind += 1\n if self.ind >= ndim:\n self.ind = 0\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n def prev(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind -= 1\n if self.ind == -1:\n self.ind = ndim - 1\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<function token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Index(object):\n ind = 0\n\n def next(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind += 1\n if self.ind >= ndim:\n self.ind = 0\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n def prev(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind -= 1\n if self.ind == -1:\n self.ind = ndim - 1\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<function token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Index(object):\n <assignment token>\n\n def next(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind += 1\n if self.ind >= ndim:\n self.ind = 0\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n def prev(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind -= 1\n if self.ind == -1:\n self.ind = ndim - 1\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<function token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Index(object):\n <assignment token>\n <function token>\n\n def prev(self, event):\n global ndim, start, chain_W, nwalkers, chain_steps\n self.ind -= 1\n if self.ind == -1:\n self.ind = ndim - 1\n ax.clear()\n for i in range(nwalkers):\n data_a = np.array(sampler.chain[:, :, self.ind].T)[:, i]\n ax.plot(chain_steps, data_a, '-', color='k', alpha=0.3)\n ax.plot(chain_steps, len(chain_steps) * [start[self.ind]], '-',\n color='r', lw=1)\n ax.set_xlim(0, len(chain_steps) - 1)\n ax.set_xlabel('$Steps$', fontsize=10)\n ax.set_ylabel(labels[self.ind], fontsize=15)\n plt.tight_layout()\n plt.draw()\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<function token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass Index(object):\n <assignment token>\n <function token>\n <function token>\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<function token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,565 |
79ae60c4d295469f1ff5cc5f338d6511ae9428ad
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class User(models.Model):
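    # extended profile record tied one-to-one to the imported auth User (whose name this model shadows)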
    user = models.OneToOneField(User, null=True, on_delete=models.CASCADE)
    username = models.CharField(max_length=200, null=True)
    fullname = models.CharField(max_length=200, null=True)
    email = models.CharField(max_length=200, null=True)
    phone = models.CharField(max_length=200, null=True)
    address = models.CharField(max_length=200, null=True)
    bio = models.CharField(max_length=500, null=True, blank=True)
    profile_pic = models.ImageField(null=True, blank=True)
    date_created = models.DateTimeField(auto_now_add=True, null=True)
    def __str__(self):
        return self.username
|
[
"from django.db import models\nfrom django.contrib.auth.models import User\n# Create your models here.\n\nclass User(models.Model):\n user = models.OneToOneField(User, null=True, on_delete=models.CASCADE)\n username = models.CharField(max_length=200, null=True)\n fullname = models.CharField(max_length=200, null=True)\n email = models.CharField(max_length=200, null=True)\n phone = models.CharField(max_length=200, null=True)\n address = models.CharField(max_length=200, null=True)\n bio = models.CharField(max_length=500, null=True, blank=True)\n profile_pic = models.ImageField(null=True, blank=True)\n date_created = models.DateTimeField(auto_now_add=True, null=True)\n\n \n def __str__(self):\n return self.username\n",
"from django.db import models\nfrom django.contrib.auth.models import User\n\n\nclass User(models.Model):\n user = models.OneToOneField(User, null=True, on_delete=models.CASCADE)\n username = models.CharField(max_length=200, null=True)\n fullname = models.CharField(max_length=200, null=True)\n email = models.CharField(max_length=200, null=True)\n phone = models.CharField(max_length=200, null=True)\n address = models.CharField(max_length=200, null=True)\n bio = models.CharField(max_length=500, null=True, blank=True)\n profile_pic = models.ImageField(null=True, blank=True)\n date_created = models.DateTimeField(auto_now_add=True, null=True)\n\n def __str__(self):\n return self.username\n",
"<import token>\n\n\nclass User(models.Model):\n user = models.OneToOneField(User, null=True, on_delete=models.CASCADE)\n username = models.CharField(max_length=200, null=True)\n fullname = models.CharField(max_length=200, null=True)\n email = models.CharField(max_length=200, null=True)\n phone = models.CharField(max_length=200, null=True)\n address = models.CharField(max_length=200, null=True)\n bio = models.CharField(max_length=500, null=True, blank=True)\n profile_pic = models.ImageField(null=True, blank=True)\n date_created = models.DateTimeField(auto_now_add=True, null=True)\n\n def __str__(self):\n return self.username\n",
"<import token>\n\n\nclass User(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __str__(self):\n return self.username\n",
"<import token>\n\n\nclass User(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
98,566 |
93287a2fd737a8848425535a4318814e4758e138
|
#!/usr/bin/env python
# coding: utf-8
# In[19]:
import os
import csv
# In[20]:
import us_state_abbrev as usabb
from datetime import datetime
# In[70]:
employee_data = os.path.join("employee_data.csv")
with open(employee_data,"r") as file, open('new_employee_data.csv', 'w') as writer:
    csv_reader_employee = csv.reader(file, delimiter=",")
    next(csv_reader_employee,None)
    writer.write('Emp ID,First Name,Last Name,DOB,SSN,State')
    for row in csv_reader_employee:
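        # per row: keep Emp ID, split "First Last" into two name columns,
        # rewrite the DOB as DD/MM/YYYY, mask all but the last SSN group,
        # and map the full state name to its two-letter abbreviation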
        writer.write('\n')
        new_row = [row[0]]
        new_row += row[1].split()
        new_row.append(datetime.strptime(row[2], "%Y-%m-%d").strftime('%d/%m/%Y'))
        new_row.append('***-**-' + row[3].split('-')[-1])
        new_row.append(usabb.us_state_abbrev.get(row[4]))
        writer.write(','.join(new_row))
|
[
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[19]:\n\n\nimport os\nimport csv\n\n\n# In[20]:\n\n\nimport us_state_abbrev as usabb\nfrom datetime import datetime\n\n\n# In[70]:\n\n\nemployee_data = os.path.join(\"employee_data.csv\")\nwith open(employee_data,\"r\") as file, open('new_employee_data.csv', 'w') as writer:\n csv_reader_employee = csv.reader(file, delimiter=\",\")\n next(csv_reader_employee,None)\n writer.write('Emp ID,First Name,Last Name,DOB,SSN,State')\n for row in csv_reader_employee:\n writer.write('\\n')\n new_row = [row[0]]\n new_row += row[1].split()\n new_row.append(datetime.strptime(row[2], \"%Y-%m-%d\").strftime('%d/%m/%Y'))\n new_row.append('***-**-' + row[3].split('-')[-1])\n new_row.append(usabb.us_state_abbrev.get(row[4]))\n writer.write(','.join(new_row))\n\n",
"import os\nimport csv\nimport us_state_abbrev as usabb\nfrom datetime import datetime\nemployee_data = os.path.join('employee_data.csv')\nwith open(employee_data, 'r') as file, open('new_employee_data.csv', 'w'\n ) as writer:\n csv_reader_employee = csv.reader(file, delimiter=',')\n next(csv_reader_employee, None)\n writer.write('Emp ID,First Name,Last Name,DOB,SSN,State')\n for row in csv_reader_employee:\n writer.write('\\n')\n new_row = [row[0]]\n new_row += row[1].split()\n new_row.append(datetime.strptime(row[2], '%Y-%m-%d').strftime(\n '%d/%m/%Y'))\n new_row.append('***-**-' + row[3].split('-')[-1])\n new_row.append(usabb.us_state_abbrev.get(row[4]))\n writer.write(','.join(new_row))\n",
"<import token>\nemployee_data = os.path.join('employee_data.csv')\nwith open(employee_data, 'r') as file, open('new_employee_data.csv', 'w'\n ) as writer:\n csv_reader_employee = csv.reader(file, delimiter=',')\n next(csv_reader_employee, None)\n writer.write('Emp ID,First Name,Last Name,DOB,SSN,State')\n for row in csv_reader_employee:\n writer.write('\\n')\n new_row = [row[0]]\n new_row += row[1].split()\n new_row.append(datetime.strptime(row[2], '%Y-%m-%d').strftime(\n '%d/%m/%Y'))\n new_row.append('***-**-' + row[3].split('-')[-1])\n new_row.append(usabb.us_state_abbrev.get(row[4]))\n writer.write(','.join(new_row))\n",
"<import token>\n<assignment token>\nwith open(employee_data, 'r') as file, open('new_employee_data.csv', 'w'\n ) as writer:\n csv_reader_employee = csv.reader(file, delimiter=',')\n next(csv_reader_employee, None)\n writer.write('Emp ID,First Name,Last Name,DOB,SSN,State')\n for row in csv_reader_employee:\n writer.write('\\n')\n new_row = [row[0]]\n new_row += row[1].split()\n new_row.append(datetime.strptime(row[2], '%Y-%m-%d').strftime(\n '%d/%m/%Y'))\n new_row.append('***-**-' + row[3].split('-')[-1])\n new_row.append(usabb.us_state_abbrev.get(row[4]))\n writer.write(','.join(new_row))\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
98,567 |
a56cd5205e09e9f9e35794d58ec7923468654f9b
|
# (c) 2019 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible_collections.community.network.tests.unit.compat.mock import patch
from ansible_collections.community.network.plugins.modules.network.cloudengine import ce_lldp
from ansible_collections.community.network.tests.unit.plugins.modules.network.cloudengine.ce_module import TestCloudEngineModule, load_fixture
from ansible_collections.community.network.tests.unit.plugins.modules.utils import set_module_args
class TestCloudEngineLacpModule(TestCloudEngineModule):
    module = ce_lldp
    def setUp(self):
        super(TestCloudEngineLacpModule, self).setUp()
        self.mock_get_config = patch('ansible_collections.community.network.plugins.modules.network.cloudengine.ce_lldp.get_nc_config')
        self.get_nc_config = self.mock_get_config.start()
        self.mock_set_config = patch('ansible_collections.community.network.plugins.modules.network.cloudengine.ce_lldp.set_nc_config')
        self.set_nc_config = self.mock_set_config.start()
        self.set_nc_config.return_value = None
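        # canned get_nc_config replies: the existing LLDP config first, then the end state read back after the change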
        xml_existing_1 = load_fixture('ce_lldp', 'ce_lldp_global_00.txt')
        xml_existing_2 = load_fixture('ce_lldp', 'ce_lldp_global_01.txt')
        xml_end_state_1 = load_fixture('ce_lldp', 'ce_lldpSysParameter_00.txt')
        xml_end_state_2 = load_fixture('ce_lldp', 'ce_lldpSysParameter_01.txt')
        self.get_side_effect = (xml_existing_1, xml_existing_2, xml_end_state_1, xml_end_state_2)
        self.result_ok = load_fixture('ce_lldp', 'result_ok.txt')
    def tearDown(self):
        super(TestCloudEngineLacpModule, self).tearDown()
        self.mock_set_config.stop()
        self.mock_get_config.stop()
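    # both tests feed the module a full set of LLDP options and expect the matching CLI commands in result['updates']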
    def test_lldp_global_present(self):
        update = ['lldp enable',
                  'lldp mdn enable',
                  'lldp mdn enable',
                  'lldp transmit interval 8',
                  'lldp transmit multiplier 8',
                  'lldp restart 8',
                  'lldp transmit delay 8',
                  'lldp trap-interval 8',
                  'lldp fast-count 8',
                  'lldp mdn trap-interval 8',
                  'lldp management-address 1.1.1.1',
                  'lldp management-address bind interface bind-name']
        self.get_nc_config.side_effect = self.get_side_effect
        self.set_nc_config.side_effect = [self.result_ok] * 11
        set_module_args(dict(
            lldpenable='enabled',
            mdnstatus='rxOnly',
            interval=8,
            hold_multiplier=8,
            restart_delay=8,
            transmit_delay=8,
            notification_interval=8,
            fast_count=8,
            mdn_notification_interval=8,
            management_address='1.1.1.1',
            bind_name='bind-name')
        )
        result = self.execute_module(changed=True)
        self.assertEqual(sorted(result['updates']), sorted(update))
    def test_lacp_sys_parameter_present(self):
        update = ['lldp enable',
                  'lldp mdn enable',
                  'lldp mdn enable',
                  'lldp transmit interval 8',
                  'lldp transmit multiplier 8',
                  'lldp restart 8',
                  'lldp transmit delay 8',
                  'lldp trap-interval 8',
                  'lldp fast-count 8',
                  'lldp mdn trap-interval 8',
                  'lldp management-address 1.1.1.1',
                  'lldp management-address bind interface bind-name']
        self.get_nc_config.side_effect = self.get_side_effect
        self.set_nc_config.side_effect = [self.result_ok] * 11
        set_module_args(dict(
            lldpenable='enabled',
            mdnstatus='rxOnly',
            interval=8,
            hold_multiplier=8,
            restart_delay=8,
            transmit_delay=8,
            notification_interval=8,
            fast_count=8,
            mdn_notification_interval=8,
            management_address='1.1.1.1',
            bind_name='bind-name')
        )
        result = self.execute_module(changed=True)
        self.assertEqual(sorted(result['updates']), sorted(update))
|
[
"# (c) 2019 Red Hat Inc.\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\n# Make coding more python3-ish\nfrom __future__ import (absolute_import, division, print_function)\n\n__metaclass__ = type\n\nfrom ansible_collections.community.network.tests.unit.compat.mock import patch\nfrom ansible_collections.community.network.plugins.modules.network.cloudengine import ce_lldp\nfrom ansible_collections.community.network.tests.unit.plugins.modules.network.cloudengine.ce_module import TestCloudEngineModule, load_fixture\nfrom ansible_collections.community.network.tests.unit.plugins.modules.utils import set_module_args\n\n\nclass TestCloudEngineLacpModule(TestCloudEngineModule):\n module = ce_lldp\n\n def setUp(self):\n super(TestCloudEngineLacpModule, self).setUp()\n\n self.mock_get_config = patch('ansible_collections.community.network.plugins.modules.network.cloudengine.ce_lldp.get_nc_config')\n self.get_nc_config = self.mock_get_config.start()\n\n self.mock_set_config = patch('ansible_collections.community.network.plugins.modules.network.cloudengine.ce_lldp.set_nc_config')\n self.set_nc_config = self.mock_set_config.start()\n self.set_nc_config.return_value = None\n xml_existing_1 = load_fixture('ce_lldp', 'ce_lldp_global_00.txt')\n xml_existing_2 = load_fixture('ce_lldp', 'ce_lldp_global_01.txt')\n xml_end_state_1 = load_fixture('ce_lldp', 'ce_lldpSysParameter_00.txt')\n xml_end_state_2 = load_fixture('ce_lldp', 'ce_lldpSysParameter_01.txt')\n self.get_side_effect = (xml_existing_1, xml_existing_2, xml_end_state_1, xml_end_state_2)\n self.result_ok = load_fixture('ce_lldp', 'result_ok.txt')\n\n def tearDown(self):\n super(TestCloudEngineLacpModule, self).tearDown()\n self.mock_set_config.stop()\n self.mock_get_config.stop()\n\n def test_lldp_global_present(self):\n update = ['lldp enable',\n 'lldp mdn enable',\n 'lldp mdn enable',\n 'lldp transmit interval 8',\n 'lldp transmit multiplier 8',\n 'lldp restart 8',\n 'lldp transmit delay 8',\n 'lldp trap-interval 8',\n 'lldp fast-count 8',\n 'lldp mdn trap-interval 8',\n 'lldp management-address 1.1.1.1',\n 'lldp management-address bind interface bind-name']\n self.get_nc_config.side_effect = self.get_side_effect\n self.set_nc_config.side_effect = [self.result_ok] * 11\n set_module_args(dict(\n lldpenable='enabled',\n mdnstatus='rxOnly',\n interval=8,\n hold_multiplier=8,\n restart_delay=8,\n transmit_delay=8,\n notification_interval=8,\n fast_count=8,\n mdn_notification_interval=8,\n management_address='1.1.1.1',\n bind_name='bind-name')\n )\n result = self.execute_module(changed=True)\n self.assertEqual(sorted(result['updates']), sorted(update))\n\n def test_lacp_sys_parameter_present(self):\n update = ['lldp enable',\n 'lldp mdn enable',\n 'lldp mdn enable',\n 'lldp transmit interval 8',\n 'lldp transmit multiplier 8',\n 'lldp restart 8',\n 'lldp transmit delay 8',\n 'lldp trap-interval 8',\n 'lldp fast-count 8',\n 'lldp mdn 
trap-interval 8',\n 'lldp management-address 1.1.1.1',\n 'lldp management-address bind interface bind-name']\n self.get_nc_config.side_effect = self.get_side_effect\n self.set_nc_config.side_effect = [self.result_ok] * 11\n set_module_args(dict(\n lldpenable='enabled',\n mdnstatus='rxOnly',\n interval=8,\n hold_multiplier=8,\n restart_delay=8,\n transmit_delay=8,\n notification_interval=8,\n fast_count=8,\n mdn_notification_interval=8,\n management_address='1.1.1.1',\n bind_name='bind-name')\n )\n result = self.execute_module(changed=True)\n self.assertEqual(sorted(result['updates']), sorted(update))\n",
"from __future__ import absolute_import, division, print_function\n__metaclass__ = type\nfrom ansible_collections.community.network.tests.unit.compat.mock import patch\nfrom ansible_collections.community.network.plugins.modules.network.cloudengine import ce_lldp\nfrom ansible_collections.community.network.tests.unit.plugins.modules.network.cloudengine.ce_module import TestCloudEngineModule, load_fixture\nfrom ansible_collections.community.network.tests.unit.plugins.modules.utils import set_module_args\n\n\nclass TestCloudEngineLacpModule(TestCloudEngineModule):\n module = ce_lldp\n\n def setUp(self):\n super(TestCloudEngineLacpModule, self).setUp()\n self.mock_get_config = patch(\n 'ansible_collections.community.network.plugins.modules.network.cloudengine.ce_lldp.get_nc_config'\n )\n self.get_nc_config = self.mock_get_config.start()\n self.mock_set_config = patch(\n 'ansible_collections.community.network.plugins.modules.network.cloudengine.ce_lldp.set_nc_config'\n )\n self.set_nc_config = self.mock_set_config.start()\n self.set_nc_config.return_value = None\n xml_existing_1 = load_fixture('ce_lldp', 'ce_lldp_global_00.txt')\n xml_existing_2 = load_fixture('ce_lldp', 'ce_lldp_global_01.txt')\n xml_end_state_1 = load_fixture('ce_lldp', 'ce_lldpSysParameter_00.txt')\n xml_end_state_2 = load_fixture('ce_lldp', 'ce_lldpSysParameter_01.txt')\n self.get_side_effect = (xml_existing_1, xml_existing_2,\n xml_end_state_1, xml_end_state_2)\n self.result_ok = load_fixture('ce_lldp', 'result_ok.txt')\n\n def tearDown(self):\n super(TestCloudEngineLacpModule, self).tearDown()\n self.mock_set_config.stop()\n self.mock_get_config.stop()\n\n def test_lldp_global_present(self):\n update = ['lldp enable', 'lldp mdn enable', 'lldp mdn enable',\n 'lldp transmit interval 8', 'lldp transmit multiplier 8',\n 'lldp restart 8', 'lldp transmit delay 8',\n 'lldp trap-interval 8', 'lldp fast-count 8',\n 'lldp mdn trap-interval 8', 'lldp management-address 1.1.1.1',\n 'lldp management-address bind interface bind-name']\n self.get_nc_config.side_effect = self.get_side_effect\n self.set_nc_config.side_effect = [self.result_ok] * 11\n set_module_args(dict(lldpenable='enabled', mdnstatus='rxOnly',\n interval=8, hold_multiplier=8, restart_delay=8, transmit_delay=\n 8, notification_interval=8, fast_count=8,\n mdn_notification_interval=8, management_address='1.1.1.1',\n bind_name='bind-name'))\n result = self.execute_module(changed=True)\n self.assertEqual(sorted(result['updates']), sorted(update))\n\n def test_lacp_sys_parameter_present(self):\n update = ['lldp enable', 'lldp mdn enable', 'lldp mdn enable',\n 'lldp transmit interval 8', 'lldp transmit multiplier 8',\n 'lldp restart 8', 'lldp transmit delay 8',\n 'lldp trap-interval 8', 'lldp fast-count 8',\n 'lldp mdn trap-interval 8', 'lldp management-address 1.1.1.1',\n 'lldp management-address bind interface bind-name']\n self.get_nc_config.side_effect = self.get_side_effect\n self.set_nc_config.side_effect = [self.result_ok] * 11\n set_module_args(dict(lldpenable='enabled', mdnstatus='rxOnly',\n interval=8, hold_multiplier=8, restart_delay=8, transmit_delay=\n 8, notification_interval=8, fast_count=8,\n mdn_notification_interval=8, management_address='1.1.1.1',\n bind_name='bind-name'))\n result = self.execute_module(changed=True)\n self.assertEqual(sorted(result['updates']), sorted(update))\n",
"<import token>\n__metaclass__ = type\n<import token>\n\n\nclass TestCloudEngineLacpModule(TestCloudEngineModule):\n module = ce_lldp\n\n def setUp(self):\n super(TestCloudEngineLacpModule, self).setUp()\n self.mock_get_config = patch(\n 'ansible_collections.community.network.plugins.modules.network.cloudengine.ce_lldp.get_nc_config'\n )\n self.get_nc_config = self.mock_get_config.start()\n self.mock_set_config = patch(\n 'ansible_collections.community.network.plugins.modules.network.cloudengine.ce_lldp.set_nc_config'\n )\n self.set_nc_config = self.mock_set_config.start()\n self.set_nc_config.return_value = None\n xml_existing_1 = load_fixture('ce_lldp', 'ce_lldp_global_00.txt')\n xml_existing_2 = load_fixture('ce_lldp', 'ce_lldp_global_01.txt')\n xml_end_state_1 = load_fixture('ce_lldp', 'ce_lldpSysParameter_00.txt')\n xml_end_state_2 = load_fixture('ce_lldp', 'ce_lldpSysParameter_01.txt')\n self.get_side_effect = (xml_existing_1, xml_existing_2,\n xml_end_state_1, xml_end_state_2)\n self.result_ok = load_fixture('ce_lldp', 'result_ok.txt')\n\n def tearDown(self):\n super(TestCloudEngineLacpModule, self).tearDown()\n self.mock_set_config.stop()\n self.mock_get_config.stop()\n\n def test_lldp_global_present(self):\n update = ['lldp enable', 'lldp mdn enable', 'lldp mdn enable',\n 'lldp transmit interval 8', 'lldp transmit multiplier 8',\n 'lldp restart 8', 'lldp transmit delay 8',\n 'lldp trap-interval 8', 'lldp fast-count 8',\n 'lldp mdn trap-interval 8', 'lldp management-address 1.1.1.1',\n 'lldp management-address bind interface bind-name']\n self.get_nc_config.side_effect = self.get_side_effect\n self.set_nc_config.side_effect = [self.result_ok] * 11\n set_module_args(dict(lldpenable='enabled', mdnstatus='rxOnly',\n interval=8, hold_multiplier=8, restart_delay=8, transmit_delay=\n 8, notification_interval=8, fast_count=8,\n mdn_notification_interval=8, management_address='1.1.1.1',\n bind_name='bind-name'))\n result = self.execute_module(changed=True)\n self.assertEqual(sorted(result['updates']), sorted(update))\n\n def test_lacp_sys_parameter_present(self):\n update = ['lldp enable', 'lldp mdn enable', 'lldp mdn enable',\n 'lldp transmit interval 8', 'lldp transmit multiplier 8',\n 'lldp restart 8', 'lldp transmit delay 8',\n 'lldp trap-interval 8', 'lldp fast-count 8',\n 'lldp mdn trap-interval 8', 'lldp management-address 1.1.1.1',\n 'lldp management-address bind interface bind-name']\n self.get_nc_config.side_effect = self.get_side_effect\n self.set_nc_config.side_effect = [self.result_ok] * 11\n set_module_args(dict(lldpenable='enabled', mdnstatus='rxOnly',\n interval=8, hold_multiplier=8, restart_delay=8, transmit_delay=\n 8, notification_interval=8, fast_count=8,\n mdn_notification_interval=8, management_address='1.1.1.1',\n bind_name='bind-name'))\n result = self.execute_module(changed=True)\n self.assertEqual(sorted(result['updates']), sorted(update))\n",
"<import token>\n<assignment token>\n<import token>\n\n\nclass TestCloudEngineLacpModule(TestCloudEngineModule):\n module = ce_lldp\n\n def setUp(self):\n super(TestCloudEngineLacpModule, self).setUp()\n self.mock_get_config = patch(\n 'ansible_collections.community.network.plugins.modules.network.cloudengine.ce_lldp.get_nc_config'\n )\n self.get_nc_config = self.mock_get_config.start()\n self.mock_set_config = patch(\n 'ansible_collections.community.network.plugins.modules.network.cloudengine.ce_lldp.set_nc_config'\n )\n self.set_nc_config = self.mock_set_config.start()\n self.set_nc_config.return_value = None\n xml_existing_1 = load_fixture('ce_lldp', 'ce_lldp_global_00.txt')\n xml_existing_2 = load_fixture('ce_lldp', 'ce_lldp_global_01.txt')\n xml_end_state_1 = load_fixture('ce_lldp', 'ce_lldpSysParameter_00.txt')\n xml_end_state_2 = load_fixture('ce_lldp', 'ce_lldpSysParameter_01.txt')\n self.get_side_effect = (xml_existing_1, xml_existing_2,\n xml_end_state_1, xml_end_state_2)\n self.result_ok = load_fixture('ce_lldp', 'result_ok.txt')\n\n def tearDown(self):\n super(TestCloudEngineLacpModule, self).tearDown()\n self.mock_set_config.stop()\n self.mock_get_config.stop()\n\n def test_lldp_global_present(self):\n update = ['lldp enable', 'lldp mdn enable', 'lldp mdn enable',\n 'lldp transmit interval 8', 'lldp transmit multiplier 8',\n 'lldp restart 8', 'lldp transmit delay 8',\n 'lldp trap-interval 8', 'lldp fast-count 8',\n 'lldp mdn trap-interval 8', 'lldp management-address 1.1.1.1',\n 'lldp management-address bind interface bind-name']\n self.get_nc_config.side_effect = self.get_side_effect\n self.set_nc_config.side_effect = [self.result_ok] * 11\n set_module_args(dict(lldpenable='enabled', mdnstatus='rxOnly',\n interval=8, hold_multiplier=8, restart_delay=8, transmit_delay=\n 8, notification_interval=8, fast_count=8,\n mdn_notification_interval=8, management_address='1.1.1.1',\n bind_name='bind-name'))\n result = self.execute_module(changed=True)\n self.assertEqual(sorted(result['updates']), sorted(update))\n\n def test_lacp_sys_parameter_present(self):\n update = ['lldp enable', 'lldp mdn enable', 'lldp mdn enable',\n 'lldp transmit interval 8', 'lldp transmit multiplier 8',\n 'lldp restart 8', 'lldp transmit delay 8',\n 'lldp trap-interval 8', 'lldp fast-count 8',\n 'lldp mdn trap-interval 8', 'lldp management-address 1.1.1.1',\n 'lldp management-address bind interface bind-name']\n self.get_nc_config.side_effect = self.get_side_effect\n self.set_nc_config.side_effect = [self.result_ok] * 11\n set_module_args(dict(lldpenable='enabled', mdnstatus='rxOnly',\n interval=8, hold_multiplier=8, restart_delay=8, transmit_delay=\n 8, notification_interval=8, fast_count=8,\n mdn_notification_interval=8, management_address='1.1.1.1',\n bind_name='bind-name'))\n result = self.execute_module(changed=True)\n self.assertEqual(sorted(result['updates']), sorted(update))\n",
"<import token>\n<assignment token>\n<import token>\n\n\nclass TestCloudEngineLacpModule(TestCloudEngineModule):\n <assignment token>\n\n def setUp(self):\n super(TestCloudEngineLacpModule, self).setUp()\n self.mock_get_config = patch(\n 'ansible_collections.community.network.plugins.modules.network.cloudengine.ce_lldp.get_nc_config'\n )\n self.get_nc_config = self.mock_get_config.start()\n self.mock_set_config = patch(\n 'ansible_collections.community.network.plugins.modules.network.cloudengine.ce_lldp.set_nc_config'\n )\n self.set_nc_config = self.mock_set_config.start()\n self.set_nc_config.return_value = None\n xml_existing_1 = load_fixture('ce_lldp', 'ce_lldp_global_00.txt')\n xml_existing_2 = load_fixture('ce_lldp', 'ce_lldp_global_01.txt')\n xml_end_state_1 = load_fixture('ce_lldp', 'ce_lldpSysParameter_00.txt')\n xml_end_state_2 = load_fixture('ce_lldp', 'ce_lldpSysParameter_01.txt')\n self.get_side_effect = (xml_existing_1, xml_existing_2,\n xml_end_state_1, xml_end_state_2)\n self.result_ok = load_fixture('ce_lldp', 'result_ok.txt')\n\n def tearDown(self):\n super(TestCloudEngineLacpModule, self).tearDown()\n self.mock_set_config.stop()\n self.mock_get_config.stop()\n\n def test_lldp_global_present(self):\n update = ['lldp enable', 'lldp mdn enable', 'lldp mdn enable',\n 'lldp transmit interval 8', 'lldp transmit multiplier 8',\n 'lldp restart 8', 'lldp transmit delay 8',\n 'lldp trap-interval 8', 'lldp fast-count 8',\n 'lldp mdn trap-interval 8', 'lldp management-address 1.1.1.1',\n 'lldp management-address bind interface bind-name']\n self.get_nc_config.side_effect = self.get_side_effect\n self.set_nc_config.side_effect = [self.result_ok] * 11\n set_module_args(dict(lldpenable='enabled', mdnstatus='rxOnly',\n interval=8, hold_multiplier=8, restart_delay=8, transmit_delay=\n 8, notification_interval=8, fast_count=8,\n mdn_notification_interval=8, management_address='1.1.1.1',\n bind_name='bind-name'))\n result = self.execute_module(changed=True)\n self.assertEqual(sorted(result['updates']), sorted(update))\n\n def test_lacp_sys_parameter_present(self):\n update = ['lldp enable', 'lldp mdn enable', 'lldp mdn enable',\n 'lldp transmit interval 8', 'lldp transmit multiplier 8',\n 'lldp restart 8', 'lldp transmit delay 8',\n 'lldp trap-interval 8', 'lldp fast-count 8',\n 'lldp mdn trap-interval 8', 'lldp management-address 1.1.1.1',\n 'lldp management-address bind interface bind-name']\n self.get_nc_config.side_effect = self.get_side_effect\n self.set_nc_config.side_effect = [self.result_ok] * 11\n set_module_args(dict(lldpenable='enabled', mdnstatus='rxOnly',\n interval=8, hold_multiplier=8, restart_delay=8, transmit_delay=\n 8, notification_interval=8, fast_count=8,\n mdn_notification_interval=8, management_address='1.1.1.1',\n bind_name='bind-name'))\n result = self.execute_module(changed=True)\n self.assertEqual(sorted(result['updates']), sorted(update))\n",
"<import token>\n<assignment token>\n<import token>\n\n\nclass TestCloudEngineLacpModule(TestCloudEngineModule):\n <assignment token>\n <function token>\n\n def tearDown(self):\n super(TestCloudEngineLacpModule, self).tearDown()\n self.mock_set_config.stop()\n self.mock_get_config.stop()\n\n def test_lldp_global_present(self):\n update = ['lldp enable', 'lldp mdn enable', 'lldp mdn enable',\n 'lldp transmit interval 8', 'lldp transmit multiplier 8',\n 'lldp restart 8', 'lldp transmit delay 8',\n 'lldp trap-interval 8', 'lldp fast-count 8',\n 'lldp mdn trap-interval 8', 'lldp management-address 1.1.1.1',\n 'lldp management-address bind interface bind-name']\n self.get_nc_config.side_effect = self.get_side_effect\n self.set_nc_config.side_effect = [self.result_ok] * 11\n set_module_args(dict(lldpenable='enabled', mdnstatus='rxOnly',\n interval=8, hold_multiplier=8, restart_delay=8, transmit_delay=\n 8, notification_interval=8, fast_count=8,\n mdn_notification_interval=8, management_address='1.1.1.1',\n bind_name='bind-name'))\n result = self.execute_module(changed=True)\n self.assertEqual(sorted(result['updates']), sorted(update))\n\n def test_lacp_sys_parameter_present(self):\n update = ['lldp enable', 'lldp mdn enable', 'lldp mdn enable',\n 'lldp transmit interval 8', 'lldp transmit multiplier 8',\n 'lldp restart 8', 'lldp transmit delay 8',\n 'lldp trap-interval 8', 'lldp fast-count 8',\n 'lldp mdn trap-interval 8', 'lldp management-address 1.1.1.1',\n 'lldp management-address bind interface bind-name']\n self.get_nc_config.side_effect = self.get_side_effect\n self.set_nc_config.side_effect = [self.result_ok] * 11\n set_module_args(dict(lldpenable='enabled', mdnstatus='rxOnly',\n interval=8, hold_multiplier=8, restart_delay=8, transmit_delay=\n 8, notification_interval=8, fast_count=8,\n mdn_notification_interval=8, management_address='1.1.1.1',\n bind_name='bind-name'))\n result = self.execute_module(changed=True)\n self.assertEqual(sorted(result['updates']), sorted(update))\n",
"<import token>\n<assignment token>\n<import token>\n\n\nclass TestCloudEngineLacpModule(TestCloudEngineModule):\n <assignment token>\n <function token>\n\n def tearDown(self):\n super(TestCloudEngineLacpModule, self).tearDown()\n self.mock_set_config.stop()\n self.mock_get_config.stop()\n\n def test_lldp_global_present(self):\n update = ['lldp enable', 'lldp mdn enable', 'lldp mdn enable',\n 'lldp transmit interval 8', 'lldp transmit multiplier 8',\n 'lldp restart 8', 'lldp transmit delay 8',\n 'lldp trap-interval 8', 'lldp fast-count 8',\n 'lldp mdn trap-interval 8', 'lldp management-address 1.1.1.1',\n 'lldp management-address bind interface bind-name']\n self.get_nc_config.side_effect = self.get_side_effect\n self.set_nc_config.side_effect = [self.result_ok] * 11\n set_module_args(dict(lldpenable='enabled', mdnstatus='rxOnly',\n interval=8, hold_multiplier=8, restart_delay=8, transmit_delay=\n 8, notification_interval=8, fast_count=8,\n mdn_notification_interval=8, management_address='1.1.1.1',\n bind_name='bind-name'))\n result = self.execute_module(changed=True)\n self.assertEqual(sorted(result['updates']), sorted(update))\n <function token>\n",
"<import token>\n<assignment token>\n<import token>\n\n\nclass TestCloudEngineLacpModule(TestCloudEngineModule):\n <assignment token>\n <function token>\n\n def tearDown(self):\n super(TestCloudEngineLacpModule, self).tearDown()\n self.mock_set_config.stop()\n self.mock_get_config.stop()\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n<import token>\n\n\nclass TestCloudEngineLacpModule(TestCloudEngineModule):\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n<import token>\n<class token>\n"
] | false |
98,568 |
e6163430c333be41a001cb7c7e42922494844843
|
import deepchem as dc
import numpy as np
import tensorflow as tf
import deepchem.models.layers as layers
from tensorflow.python.framework import test_util
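# Shape and trainable-variable smoke tests for the custom layers in
# deepchem.models.layers: each test simply invokes a layer and checks its output.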
class TestLayers(test_util.TensorFlowTestCase):
def test_highway(self):
"""Test invoking Highway."""
width = 5
batch_size = 10
input = np.random.rand(batch_size, width).astype(np.float32)
layer = layers.Highway()
result = layer(input)
assert result.shape == (batch_size, width)
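    # A Highway block learns a transform plus a carry gate (two dense layers),
    # which accounts for the 4 trainable variables checked below.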
assert len(layer.trainable_variables) == 4
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.Highway()
result2 = layer2(input)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer(input)
assert np.allclose(result, result3)
def test_combine_mean_std(self):
"""Test invoking CombineMeanStd."""
mean = np.random.rand(5, 3).astype(np.float32)
std = np.random.rand(5, 3).astype(np.float32)
layer = layers.CombineMeanStd(training_only=True, noise_epsilon=0.01)
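    # With training_only=True the noise term is only added when training=True.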
result1 = layer([mean, std], training=False)
assert np.array_equal(result1, mean) # No noise in test mode
result2 = layer([mean, std], training=True)
assert not np.array_equal(result2, mean)
assert np.allclose(result2, mean, atol=0.1)
def test_stack(self):
"""Test invoking Stack."""
input1 = np.random.rand(5, 4).astype(np.float32)
input2 = np.random.rand(5, 4).astype(np.float32)
result = layers.Stack()([input1, input2])
assert result.shape == (5, 2, 4)
assert np.array_equal(input1, result[:, 0, :])
assert np.array_equal(input2, result[:, 1, :])
def test_variable(self):
"""Test invoking Variable."""
value = np.random.rand(5, 4).astype(np.float32)
layer = layers.Variable(value)
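    # build() creates the underlying tf.Variable so call() can return its value.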
layer.build([])
result = layer.call([]).numpy()
assert np.allclose(result, value)
assert len(layer.trainable_variables) == 1
def test_interatomic_l2_distances(self):
"""Test invoking InteratomicL2Distances."""
atoms = 5
neighbors = 2
coords = np.random.rand(atoms, 3)
neighbor_list = np.random.randint(0, atoms, size=(atoms, neighbors))
layer = layers.InteratomicL2Distances(atoms, neighbors, 3)
result = layer([coords, neighbor_list])
assert result.shape == (atoms, neighbors)
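    # Cross-check: every entry should equal the squared distance to the corresponding neighbor.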
for atom in range(atoms):
for neighbor in range(neighbors):
delta = coords[atom] - coords[neighbor_list[atom, neighbor]]
dist2 = np.dot(delta, delta)
assert np.allclose(dist2, result[atom, neighbor])
def test_weave_layer(self):
"""Test invoking WeaveLayer."""
out_channels = 2
n_atoms = 4 # In CCC and C, there are 4 atoms
raw_smiles = ['CCC', 'C']
import rdkit
mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]
featurizer = dc.feat.WeaveFeaturizer()
mols = featurizer.featurize(mols)
weave = layers.WeaveLayer()
atom_feat = []
pair_feat = []
atom_to_pair = []
pair_split = []
start = 0
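    # Pair-feature width this test expects from the default WeaveFeaturizer.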
n_pair_feat = 14
for im, mol in enumerate(mols):
n_atoms = mol.get_num_atoms()
# index of pair features
C0, C1 = np.meshgrid(np.arange(n_atoms), np.arange(n_atoms))
atom_to_pair.append(
np.transpose(np.array([C1.flatten() + start,
C0.flatten() + start])))
# number of pairs for each atom
pair_split.extend(C1.flatten() + start)
start = start + n_atoms
# atom features
atom_feat.append(mol.get_atom_features())
# pair features
pair_feat.append(
np.reshape(mol.get_pair_features(), (n_atoms * n_atoms, n_pair_feat)))
inputs = [
np.array(np.concatenate(atom_feat, axis=0), dtype=np.float32),
np.concatenate(pair_feat, axis=0),
np.array(pair_split),
np.concatenate(atom_to_pair, axis=0)
]
# Outputs should be [A, P]
outputs = weave(inputs)
assert len(outputs) == 2
def test_graph_conv(self):
"""Test invoking GraphConv."""
out_channels = 2
n_atoms = 4 # In CCC and C, there are 4 atoms
raw_smiles = ['CCC', 'C']
import rdkit
mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]
featurizer = dc.feat.graph_features.ConvMolFeaturizer()
mols = featurizer.featurize(mols)
multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)
atom_features = multi_mol.get_atom_features().astype(np.float32)
degree_slice = multi_mol.deg_slice
membership = multi_mol.membership
deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]
args = [atom_features, degree_slice, membership] + deg_adjs
layer = layers.GraphConv(out_channels)
result = layer(args)
assert result.shape == (n_atoms, out_channels)
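    # The 2 * num_deg count below suggests one weight matrix and one bias per degree bucket.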
num_deg = 2 * layer.max_degree + (1 - layer.min_degree)
assert len(layer.trainable_variables) == 2 * num_deg
def test_graph_pool(self):
"""Test invoking GraphPool."""
n_atoms = 4 # In CCC and C, there are 4 atoms
raw_smiles = ['CCC', 'C']
import rdkit
mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]
featurizer = dc.feat.graph_features.ConvMolFeaturizer()
mols = featurizer.featurize(mols)
multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)
atom_features = multi_mol.get_atom_features().astype(np.float32)
degree_slice = multi_mol.deg_slice
membership = multi_mol.membership
deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]
args = [atom_features, degree_slice, membership] + deg_adjs
result = layers.GraphPool()(args)
assert result.shape[0] == n_atoms
# TODO What should shape[1] be? It's not documented.
def test_graph_gather(self):
"""Test invoking GraphGather."""
batch_size = 2
n_features = 75
n_atoms = 4 # In CCC and C, there are 4 atoms
raw_smiles = ['CCC', 'C']
import rdkit
mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]
featurizer = dc.feat.graph_features.ConvMolFeaturizer()
mols = featurizer.featurize(mols)
multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)
atom_features = multi_mol.get_atom_features().astype(np.float32)
degree_slice = multi_mol.deg_slice
membership = multi_mol.membership
deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]
args = [atom_features, degree_slice, membership] + deg_adjs
result = layers.GraphGather(batch_size)(args)
# TODO(rbharath): Why is it 2*n_features instead of n_features?
assert result.shape == (batch_size, 2 * n_features)
def test_lstm_step(self):
"""Test invoking LSTMStep."""
max_depth = 5
n_test = 5
n_feat = 10
y = np.random.rand(n_test, 2 * n_feat).astype(np.float32)
state_zero = np.random.rand(n_test, n_feat).astype(np.float32)
state_one = np.random.rand(n_test, n_feat).astype(np.float32)
layer = layers.LSTMStep(n_feat, 2 * n_feat)
result = layer([y, state_zero, state_one])
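    # LSTMStep returns the new output together with the recurrent state as (h, [h, c]).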
h_out, h_copy_out, c_out = (result[0], result[1][0], result[1][1])
assert h_out.shape == (n_test, n_feat)
assert h_copy_out.shape == (n_test, n_feat)
assert c_out.shape == (n_test, n_feat)
assert len(layer.trainable_variables) == 1
def test_attn_lstm_embedding(self):
"""Test invoking AttnLSTMEmbedding."""
max_depth = 5
n_test = 5
n_support = 11
n_feat = 10
test = np.random.rand(n_test, n_feat).astype(np.float32)
support = np.random.rand(n_support, n_feat).astype(np.float32)
layer = layers.AttnLSTMEmbedding(n_test, n_support, n_feat, max_depth)
test_out, support_out = layer([test, support])
assert test_out.shape == (n_test, n_feat)
assert support_out.shape == (n_support, n_feat)
assert len(layer.trainable_variables) == 4
def test_iter_ref_lstm_embedding(self):
"""Test invoking IterRefLSTMEmbedding."""
max_depth = 5
n_test = 5
n_support = 11
n_feat = 10
test = np.random.rand(n_test, n_feat).astype(np.float32)
support = np.random.rand(n_support, n_feat).astype(np.float32)
layer = layers.IterRefLSTMEmbedding(n_test, n_support, n_feat, max_depth)
test_out, support_out = layer([test, support])
assert test_out.shape == (n_test, n_feat)
assert support_out.shape == (n_support, n_feat)
assert len(layer.trainable_variables) == 8
def test_vina_free_energy(self):
"""Test invoking VinaFreeEnergy."""
n_atoms = 5
m_nbrs = 1
ndim = 3
nbr_cutoff = 1
start = 0
stop = 4
X = np.random.rand(n_atoms, ndim).astype(np.float32)
Z = np.random.randint(0, 2, (n_atoms)).astype(np.float32)
layer = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff, start,
stop)
result = layer([X, Z])
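    # The layer returns a single free-energy estimate, i.e. a scalar (empty shape), as asserted below.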
assert len(layer.trainable_variables) == 6
assert result.shape == tuple()
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff, start,
stop)
result2 = layer2([X, Z])
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer([X, Z])
assert np.allclose(result, result3)
def test_weighted_linear_combo(self):
"""Test invoking WeightedLinearCombo."""
input1 = np.random.rand(5, 10).astype(np.float32)
input2 = np.random.rand(5, 10).astype(np.float32)
layer = layers.WeightedLinearCombo()
result = layer([input1, input2])
assert len(layer.trainable_variables) == 2
expected = input1 * layer.trainable_variables[0] + input2 * layer.trainable_variables[1]
assert np.allclose(result, expected)
def test_neighbor_list(self):
"""Test invoking NeighborList."""
N_atoms = 5
start = 0
stop = 12
nbr_cutoff = 3
ndim = 3
M_nbrs = 2
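    # Scatter N_atoms random positions inside the [start, stop) cell.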
coords = start + np.random.rand(N_atoms, ndim) * (stop - start)
coords = tf.cast(tf.stack(coords), tf.float32)
layer = layers.NeighborList(N_atoms, M_nbrs, ndim, nbr_cutoff, start, stop)
result = layer(coords)
assert result.shape == (N_atoms, M_nbrs)
def test_atomic_convolution(self):
"""Test invoking AtomicConvolution."""
batch_size = 4
max_atoms = 5
max_neighbors = 2
dimensions = 3
params = [[5.0, 2.0, 0.5], [10.0, 2.0, 0.5]]
input1 = np.random.rand(batch_size, max_atoms,
dimensions).astype(np.float32)
input2 = np.random.randint(
max_atoms, size=(batch_size, max_atoms, max_neighbors))
input3 = np.random.randint(
1, 10, size=(batch_size, max_atoms, max_neighbors))
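    # input1 holds coordinates, input2 neighbor indices, and input3 presumably
    # per-neighbor atom types (random integers 1-9 here).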
layer = layers.AtomicConvolution(radial_params=params)
result = layer([input1, input2, input3])
assert result.shape == (batch_size, max_atoms, len(params))
assert len(layer.trainable_variables) == 3
def test_alpha_share_layer(self):
"""Test invoking AlphaShareLayer."""
batch_size = 10
length = 6
input1 = np.random.rand(batch_size, length).astype(np.float32)
input2 = np.random.rand(batch_size, length).astype(np.float32)
layer = layers.AlphaShareLayer()
result = layer([input1, input2])
assert input1.shape == result[0].shape
assert input2.shape == result[1].shape
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.AlphaShareLayer()
result2 = layer2([input1, input2])
assert not np.allclose(result[0], result2[0])
assert not np.allclose(result[1], result2[1])
# But evaluating the first layer again should produce the same result as before.
result3 = layer([input1, input2])
assert np.allclose(result[0], result3[0])
assert np.allclose(result[1], result3[1])
def test_sluice_loss(self):
"""Test invoking SluiceLoss."""
input1 = np.ones((3, 4)).astype(np.float32)
input2 = np.ones((2, 2)).astype(np.float32)
result = layers.SluiceLoss()([input1, input2])
assert np.allclose(result, 40.0)
def test_beta_share(self):
"""Test invoking BetaShare."""
batch_size = 10
length = 6
input1 = np.random.rand(batch_size, length).astype(np.float32)
input2 = np.random.rand(batch_size, length).astype(np.float32)
layer = layers.BetaShare()
result = layer([input1, input2])
assert input1.shape == result.shape
assert input2.shape == result.shape
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.BetaShare()
result2 = layer2([input1, input2])
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer([input1, input2])
assert np.allclose(result, result3)
def test_ani_feat(self):
"""Test invoking ANIFeat."""
batch_size = 10
max_atoms = 5
input = np.random.rand(batch_size, max_atoms, 4).astype(np.float32)
layer = layers.ANIFeat(max_atoms=max_atoms)
result = layer(input)
# TODO What should the output shape be? It's not documented, and there
# are no other test cases for it.
def test_graph_embed_pool_layer(self):
"""Test invoking GraphEmbedPoolLayer."""
V = np.random.uniform(size=(10, 100, 50)).astype(np.float32)
adjs = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)
layer = layers.GraphEmbedPoolLayer(num_vertices=6)
result = layer([V, adjs])
assert result[0].shape == (10, 6, 50)
assert result[1].shape == (10, 6, 5, 6)
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.GraphEmbedPoolLayer(num_vertices=6)
result2 = layer2([V, adjs])
assert not np.allclose(result[0], result2[0])
assert not np.allclose(result[1], result2[1])
# But evaluating the first layer again should produce the same result as before.
result3 = layer([V, adjs])
assert np.allclose(result[0], result3[0])
assert np.allclose(result[1], result3[1])
def test_graph_cnn(self):
"""Test invoking GraphCNN."""
V = np.random.uniform(size=(10, 100, 50)).astype(np.float32)
adjs = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)
layer = layers.GraphCNN(num_filters=6)
result = layer([V, adjs])
assert result.shape == (10, 100, 6)
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.GraphCNN(num_filters=6)
result2 = layer2([V, adjs])
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer([V, adjs])
assert np.allclose(result, result3)
def test_DAG_layer(self):
"""Test invoking DAGLayer."""
batch_size = 10
n_graph_feat = 30
n_atom_feat = 75
max_atoms = 50
layer_sizes = [100]
atom_features = np.random.rand(batch_size, n_atom_feat)
parents = np.random.randint(
0, max_atoms, size=(batch_size, max_atoms, max_atoms))
calculation_orders = np.random.randint(
0, batch_size, size=(batch_size, max_atoms))
calculation_masks = np.random.randint(0, 2, size=(batch_size, max_atoms))
# Recall that the DAG layer expects a MultiConvMol as input,
# so the "batch" is a pooled set of atoms from all the
# molecules in the batch, just as it is for the graph conv.
    # This means that n_atoms is the batch size.
n_atoms = batch_size
#dropout_switch = False
layer = layers.DAGLayer(
n_graph_feat=n_graph_feat,
n_atom_feat=n_atom_feat,
max_atoms=max_atoms,
layer_sizes=layer_sizes)
outputs = layer([
atom_features,
parents,
calculation_orders,
calculation_masks,
n_atoms,
#dropout_switch
])
## TODO(rbharath): What is the shape of outputs supposed to be?
## I'm getting (7, 30) here. Where does 7 come from??
def test_DAG_gather(self):
"""Test invoking DAGGather."""
# TODO(rbharath): We need more documentation about why
# these numbers work.
batch_size = 10
n_graph_feat = 30
n_atom_feat = 30
n_outputs = 75
max_atoms = 50
layer_sizes = [100]
layer = layers.DAGGather(
n_graph_feat=n_graph_feat,
n_outputs=n_outputs,
max_atoms=max_atoms,
layer_sizes=layer_sizes)
atom_features = np.random.rand(batch_size, n_atom_feat)
membership = np.sort(np.random.randint(0, batch_size, size=(batch_size)))
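    # Sorting keeps molecule ids contiguous, which the segment-style pooling
    # inside DAGGather presumably expects.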
outputs = layer([atom_features, membership])
|
[
"import deepchem as dc\nimport numpy as np\nimport tensorflow as tf\nimport deepchem.models.layers as layers\nfrom tensorflow.python.framework import test_util\n\n\nclass TestLayers(test_util.TensorFlowTestCase):\n\n def test_highway(self):\n \"\"\"Test invoking Highway.\"\"\"\n width = 5\n batch_size = 10\n input = np.random.rand(batch_size, width).astype(np.float32)\n layer = layers.Highway()\n result = layer(input)\n assert result.shape == (batch_size, width)\n assert len(layer.trainable_variables) == 4\n\n # Creating a second layer should produce different results, since it has\n # different random weights.\n\n layer2 = layers.Highway()\n result2 = layer2(input)\n assert not np.allclose(result, result2)\n\n # But evaluating the first layer again should produce the same result as before.\n\n result3 = layer(input)\n assert np.allclose(result, result3)\n\n def test_combine_mean_std(self):\n \"\"\"Test invoking CombineMeanStd.\"\"\"\n mean = np.random.rand(5, 3).astype(np.float32)\n std = np.random.rand(5, 3).astype(np.float32)\n layer = layers.CombineMeanStd(training_only=True, noise_epsilon=0.01)\n result1 = layer([mean, std], training=False)\n assert np.array_equal(result1, mean) # No noise in test mode\n result2 = layer([mean, std], training=True)\n assert not np.array_equal(result2, mean)\n assert np.allclose(result2, mean, atol=0.1)\n\n def test_stack(self):\n \"\"\"Test invoking Stack.\"\"\"\n input1 = np.random.rand(5, 4).astype(np.float32)\n input2 = np.random.rand(5, 4).astype(np.float32)\n result = layers.Stack()([input1, input2])\n assert result.shape == (5, 2, 4)\n assert np.array_equal(input1, result[:, 0, :])\n assert np.array_equal(input2, result[:, 1, :])\n\n def test_variable(self):\n \"\"\"Test invoking Variable.\"\"\"\n value = np.random.rand(5, 4).astype(np.float32)\n layer = layers.Variable(value)\n layer.build([])\n result = layer.call([]).numpy()\n assert np.allclose(result, value)\n assert len(layer.trainable_variables) == 1\n\n def test_interatomic_l2_distances(self):\n \"\"\"Test invoking InteratomicL2Distances.\"\"\"\n atoms = 5\n neighbors = 2\n coords = np.random.rand(atoms, 3)\n neighbor_list = np.random.randint(0, atoms, size=(atoms, neighbors))\n layer = layers.InteratomicL2Distances(atoms, neighbors, 3)\n result = layer([coords, neighbor_list])\n assert result.shape == (atoms, neighbors)\n for atom in range(atoms):\n for neighbor in range(neighbors):\n delta = coords[atom] - coords[neighbor_list[atom, neighbor]]\n dist2 = np.dot(delta, delta)\n assert np.allclose(dist2, result[atom, neighbor])\n\n def test_weave_layer(self):\n \"\"\"Test invoking WeaveLayer.\"\"\"\n out_channels = 2\n n_atoms = 4 # In CCC and C, there are 4 atoms\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.WeaveFeaturizer()\n mols = featurizer.featurize(mols)\n weave = layers.WeaveLayer()\n atom_feat = []\n pair_feat = []\n atom_to_pair = []\n pair_split = []\n start = 0\n n_pair_feat = 14\n for im, mol in enumerate(mols):\n n_atoms = mol.get_num_atoms()\n # index of pair features\n C0, C1 = np.meshgrid(np.arange(n_atoms), np.arange(n_atoms))\n atom_to_pair.append(\n np.transpose(np.array([C1.flatten() + start,\n C0.flatten() + start])))\n # number of pairs for each atom\n pair_split.extend(C1.flatten() + start)\n start = start + n_atoms\n\n # atom features\n atom_feat.append(mol.get_atom_features())\n # pair features\n pair_feat.append(\n np.reshape(mol.get_pair_features(), (n_atoms * n_atoms, 
n_pair_feat)))\n inputs = [\n np.array(np.concatenate(atom_feat, axis=0), dtype=np.float32),\n np.concatenate(pair_feat, axis=0),\n np.array(pair_split),\n np.concatenate(atom_to_pair, axis=0)\n ]\n # Outputs should be [A, P]\n outputs = weave(inputs)\n assert len(outputs) == 2\n\n def test_graph_conv(self):\n \"\"\"Test invoking GraphConv.\"\"\"\n out_channels = 2\n n_atoms = 4 # In CCC and C, there are 4 atoms\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n layer = layers.GraphConv(out_channels)\n result = layer(args)\n assert result.shape == (n_atoms, out_channels)\n num_deg = 2 * layer.max_degree + (1 - layer.min_degree)\n assert len(layer.trainable_variables) == 2 * num_deg\n\n def test_graph_pool(self):\n \"\"\"Test invoking GraphPool.\"\"\"\n n_atoms = 4 # In CCC and C, there are 4 atoms\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n result = layers.GraphPool()(args)\n assert result.shape[0] == n_atoms\n # TODO What should shape[1] be? 
It's not documented.\n\n def test_graph_gather(self):\n \"\"\"Test invoking GraphGather.\"\"\"\n batch_size = 2\n n_features = 75\n n_atoms = 4 # In CCC and C, there are 4 atoms\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n result = layers.GraphGather(batch_size)(args)\n # TODO(rbharath): Why is it 2*n_features instead of n_features?\n assert result.shape == (batch_size, 2 * n_features)\n\n def test_lstm_step(self):\n \"\"\"Test invoking LSTMStep.\"\"\"\n max_depth = 5\n n_test = 5\n n_feat = 10\n y = np.random.rand(n_test, 2 * n_feat).astype(np.float32)\n state_zero = np.random.rand(n_test, n_feat).astype(np.float32)\n state_one = np.random.rand(n_test, n_feat).astype(np.float32)\n layer = layers.LSTMStep(n_feat, 2 * n_feat)\n result = layer([y, state_zero, state_one])\n h_out, h_copy_out, c_out = (result[0], result[1][0], result[1][1])\n assert h_out.shape == (n_test, n_feat)\n assert h_copy_out.shape == (n_test, n_feat)\n assert c_out.shape == (n_test, n_feat)\n assert len(layer.trainable_variables) == 1\n\n def test_attn_lstm_embedding(self):\n \"\"\"Test invoking AttnLSTMEmbedding.\"\"\"\n max_depth = 5\n n_test = 5\n n_support = 11\n n_feat = 10\n test = np.random.rand(n_test, n_feat).astype(np.float32)\n support = np.random.rand(n_support, n_feat).astype(np.float32)\n layer = layers.AttnLSTMEmbedding(n_test, n_support, n_feat, max_depth)\n test_out, support_out = layer([test, support])\n assert test_out.shape == (n_test, n_feat)\n assert support_out.shape == (n_support, n_feat)\n assert len(layer.trainable_variables) == 4\n\n def test_iter_ref_lstm_embedding(self):\n \"\"\"Test invoking IterRefLSTMEmbedding.\"\"\"\n max_depth = 5\n n_test = 5\n n_support = 11\n n_feat = 10\n test = np.random.rand(n_test, n_feat).astype(np.float32)\n support = np.random.rand(n_support, n_feat).astype(np.float32)\n layer = layers.IterRefLSTMEmbedding(n_test, n_support, n_feat, max_depth)\n test_out, support_out = layer([test, support])\n assert test_out.shape == (n_test, n_feat)\n assert support_out.shape == (n_support, n_feat)\n assert len(layer.trainable_variables) == 8\n\n def test_vina_free_energy(self):\n \"\"\"Test invoking VinaFreeEnergy.\"\"\"\n n_atoms = 5\n m_nbrs = 1\n ndim = 3\n nbr_cutoff = 1\n start = 0\n stop = 4\n X = np.random.rand(n_atoms, ndim).astype(np.float32)\n Z = np.random.randint(0, 2, (n_atoms)).astype(np.float32)\n layer = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff, start,\n stop)\n result = layer([X, Z])\n assert len(layer.trainable_variables) == 6\n assert result.shape == tuple()\n\n # Creating a second layer should produce different results, since it has\n # different random weights.\n\n layer2 = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff, start,\n stop)\n result2 = layer2([X, Z])\n assert not np.allclose(result, result2)\n\n # But evaluating the first layer again should produce the same result as before.\n\n result3 = layer([X, Z])\n assert np.allclose(result, result3)\n\n def test_weighted_linear_combo(self):\n \"\"\"Test invoking WeightedLinearCombo.\"\"\"\n input1 = 
np.random.rand(5, 10).astype(np.float32)\n input2 = np.random.rand(5, 10).astype(np.float32)\n layer = layers.WeightedLinearCombo()\n result = layer([input1, input2])\n assert len(layer.trainable_variables) == 2\n expected = input1 * layer.trainable_variables[0] + input2 * layer.trainable_variables[1]\n assert np.allclose(result, expected)\n\n def test_neighbor_list(self):\n \"\"\"Test invoking NeighborList.\"\"\"\n N_atoms = 5\n start = 0\n stop = 12\n nbr_cutoff = 3\n ndim = 3\n M_nbrs = 2\n coords = start + np.random.rand(N_atoms, ndim) * (stop - start)\n coords = tf.cast(tf.stack(coords), tf.float32)\n layer = layers.NeighborList(N_atoms, M_nbrs, ndim, nbr_cutoff, start, stop)\n result = layer(coords)\n assert result.shape == (N_atoms, M_nbrs)\n\n def test_atomic_convolution(self):\n \"\"\"Test invoking AtomicConvolution.\"\"\"\n batch_size = 4\n max_atoms = 5\n max_neighbors = 2\n dimensions = 3\n params = [[5.0, 2.0, 0.5], [10.0, 2.0, 0.5]]\n input1 = np.random.rand(batch_size, max_atoms,\n dimensions).astype(np.float32)\n input2 = np.random.randint(\n max_atoms, size=(batch_size, max_atoms, max_neighbors))\n input3 = np.random.randint(\n 1, 10, size=(batch_size, max_atoms, max_neighbors))\n layer = layers.AtomicConvolution(radial_params=params)\n result = layer([input1, input2, input3])\n assert result.shape == (batch_size, max_atoms, len(params))\n assert len(layer.trainable_variables) == 3\n\n def test_alpha_share_layer(self):\n \"\"\"Test invoking AlphaShareLayer.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.AlphaShareLayer()\n result = layer([input1, input2])\n assert input1.shape == result[0].shape\n assert input2.shape == result[1].shape\n\n # Creating a second layer should produce different results, since it has\n # different random weights.\n\n layer2 = layers.AlphaShareLayer()\n result2 = layer2([input1, input2])\n assert not np.allclose(result[0], result2[0])\n assert not np.allclose(result[1], result2[1])\n\n # But evaluating the first layer again should produce the same result as before.\n\n result3 = layer([input1, input2])\n assert np.allclose(result[0], result3[0])\n assert np.allclose(result[1], result3[1])\n\n def test_sluice_loss(self):\n \"\"\"Test invoking SluiceLoss.\"\"\"\n input1 = np.ones((3, 4)).astype(np.float32)\n input2 = np.ones((2, 2)).astype(np.float32)\n result = layers.SluiceLoss()([input1, input2])\n assert np.allclose(result, 40.0)\n\n def test_beta_share(self):\n \"\"\"Test invoking BetaShare.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.BetaShare()\n result = layer([input1, input2])\n assert input1.shape == result.shape\n assert input2.shape == result.shape\n\n # Creating a second layer should produce different results, since it has\n # different random weights.\n\n layer2 = layers.BetaShare()\n result2 = layer2([input1, input2])\n assert not np.allclose(result, result2)\n\n # But evaluating the first layer again should produce the same result as before.\n\n result3 = layer([input1, input2])\n assert np.allclose(result, result3)\n\n def test_ani_feat(self):\n \"\"\"Test invoking ANIFeat.\"\"\"\n batch_size = 10\n max_atoms = 5\n input = np.random.rand(batch_size, max_atoms, 4).astype(np.float32)\n layer = layers.ANIFeat(max_atoms=max_atoms)\n result = layer(input)\n # TODO 
What should the output shape be? It's not documented, and there\n # are no other test cases for it.\n\n def test_graph_embed_pool_layer(self):\n \"\"\"Test invoking GraphEmbedPoolLayer.\"\"\"\n V = np.random.uniform(size=(10, 100, 50)).astype(np.float32)\n adjs = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)\n layer = layers.GraphEmbedPoolLayer(num_vertices=6)\n result = layer([V, adjs])\n assert result[0].shape == (10, 6, 50)\n assert result[1].shape == (10, 6, 5, 6)\n\n # Creating a second layer should produce different results, since it has\n # different random weights.\n\n layer2 = layers.GraphEmbedPoolLayer(num_vertices=6)\n result2 = layer2([V, adjs])\n assert not np.allclose(result[0], result2[0])\n assert not np.allclose(result[1], result2[1])\n\n # But evaluating the first layer again should produce the same result as before.\n\n result3 = layer([V, adjs])\n assert np.allclose(result[0], result3[0])\n assert np.allclose(result[1], result3[1])\n\n def test_graph_cnn(self):\n \"\"\"Test invoking GraphCNN.\"\"\"\n V = np.random.uniform(size=(10, 100, 50)).astype(np.float32)\n adjs = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)\n layer = layers.GraphCNN(num_filters=6)\n result = layer([V, adjs])\n assert result.shape == (10, 100, 6)\n\n # Creating a second layer should produce different results, since it has\n # different random weights.\n\n layer2 = layers.GraphCNN(num_filters=6)\n result2 = layer2([V, adjs])\n assert not np.allclose(result, result2)\n\n # But evaluating the first layer again should produce the same result as before.\n\n result3 = layer([V, adjs])\n assert np.allclose(result, result3)\n\n def test_DAG_layer(self):\n \"\"\"Test invoking DAGLayer.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 75\n max_atoms = 50\n layer_sizes = [100]\n atom_features = np.random.rand(batch_size, n_atom_feat)\n parents = np.random.randint(\n 0, max_atoms, size=(batch_size, max_atoms, max_atoms))\n calculation_orders = np.random.randint(\n 0, batch_size, size=(batch_size, max_atoms))\n calculation_masks = np.random.randint(0, 2, size=(batch_size, max_atoms))\n # Recall that the DAG layer expects a MultiConvMol as input,\n # so the \"batch\" is a pooled set of atoms from all the\n # molecules in the batch, just as it is for the graph conv.\n # This means that n_atoms is the batch-size\n n_atoms = batch_size\n #dropout_switch = False\n layer = layers.DAGLayer(\n n_graph_feat=n_graph_feat,\n n_atom_feat=n_atom_feat,\n max_atoms=max_atoms,\n layer_sizes=layer_sizes)\n outputs = layer([\n atom_features,\n parents,\n calculation_orders,\n calculation_masks,\n n_atoms,\n #dropout_switch\n ])\n ## TODO(rbharath): What is the shape of outputs supposed to be?\n ## I'm getting (7, 30) here. Where does 7 come from??\n\n def test_DAG_gather(self):\n \"\"\"Test invoking DAGGather.\"\"\"\n # TODO(rbharath): We need more documentation about why\n # these numbers work.\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 30\n n_outputs = 75\n max_atoms = 50\n layer_sizes = [100]\n layer = layers.DAGGather(\n n_graph_feat=n_graph_feat,\n n_outputs=n_outputs,\n max_atoms=max_atoms,\n layer_sizes=layer_sizes)\n atom_features = np.random.rand(batch_size, n_atom_feat)\n membership = np.sort(np.random.randint(0, batch_size, size=(batch_size)))\n outputs = layer([atom_features, membership])\n",
"import deepchem as dc\nimport numpy as np\nimport tensorflow as tf\nimport deepchem.models.layers as layers\nfrom tensorflow.python.framework import test_util\n\n\nclass TestLayers(test_util.TensorFlowTestCase):\n\n def test_highway(self):\n \"\"\"Test invoking Highway.\"\"\"\n width = 5\n batch_size = 10\n input = np.random.rand(batch_size, width).astype(np.float32)\n layer = layers.Highway()\n result = layer(input)\n assert result.shape == (batch_size, width)\n assert len(layer.trainable_variables) == 4\n layer2 = layers.Highway()\n result2 = layer2(input)\n assert not np.allclose(result, result2)\n result3 = layer(input)\n assert np.allclose(result, result3)\n\n def test_combine_mean_std(self):\n \"\"\"Test invoking CombineMeanStd.\"\"\"\n mean = np.random.rand(5, 3).astype(np.float32)\n std = np.random.rand(5, 3).astype(np.float32)\n layer = layers.CombineMeanStd(training_only=True, noise_epsilon=0.01)\n result1 = layer([mean, std], training=False)\n assert np.array_equal(result1, mean)\n result2 = layer([mean, std], training=True)\n assert not np.array_equal(result2, mean)\n assert np.allclose(result2, mean, atol=0.1)\n\n def test_stack(self):\n \"\"\"Test invoking Stack.\"\"\"\n input1 = np.random.rand(5, 4).astype(np.float32)\n input2 = np.random.rand(5, 4).astype(np.float32)\n result = layers.Stack()([input1, input2])\n assert result.shape == (5, 2, 4)\n assert np.array_equal(input1, result[:, 0, :])\n assert np.array_equal(input2, result[:, 1, :])\n\n def test_variable(self):\n \"\"\"Test invoking Variable.\"\"\"\n value = np.random.rand(5, 4).astype(np.float32)\n layer = layers.Variable(value)\n layer.build([])\n result = layer.call([]).numpy()\n assert np.allclose(result, value)\n assert len(layer.trainable_variables) == 1\n\n def test_interatomic_l2_distances(self):\n \"\"\"Test invoking InteratomicL2Distances.\"\"\"\n atoms = 5\n neighbors = 2\n coords = np.random.rand(atoms, 3)\n neighbor_list = np.random.randint(0, atoms, size=(atoms, neighbors))\n layer = layers.InteratomicL2Distances(atoms, neighbors, 3)\n result = layer([coords, neighbor_list])\n assert result.shape == (atoms, neighbors)\n for atom in range(atoms):\n for neighbor in range(neighbors):\n delta = coords[atom] - coords[neighbor_list[atom, neighbor]]\n dist2 = np.dot(delta, delta)\n assert np.allclose(dist2, result[atom, neighbor])\n\n def test_weave_layer(self):\n \"\"\"Test invoking WeaveLayer.\"\"\"\n out_channels = 2\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.WeaveFeaturizer()\n mols = featurizer.featurize(mols)\n weave = layers.WeaveLayer()\n atom_feat = []\n pair_feat = []\n atom_to_pair = []\n pair_split = []\n start = 0\n n_pair_feat = 14\n for im, mol in enumerate(mols):\n n_atoms = mol.get_num_atoms()\n C0, C1 = np.meshgrid(np.arange(n_atoms), np.arange(n_atoms))\n atom_to_pair.append(np.transpose(np.array([C1.flatten() + start,\n C0.flatten() + start])))\n pair_split.extend(C1.flatten() + start)\n start = start + n_atoms\n atom_feat.append(mol.get_atom_features())\n pair_feat.append(np.reshape(mol.get_pair_features(), (n_atoms *\n n_atoms, n_pair_feat)))\n inputs = [np.array(np.concatenate(atom_feat, axis=0), dtype=np.\n float32), np.concatenate(pair_feat, axis=0), np.array(\n pair_split), np.concatenate(atom_to_pair, axis=0)]\n outputs = weave(inputs)\n assert len(outputs) == 2\n\n def test_graph_conv(self):\n \"\"\"Test invoking GraphConv.\"\"\"\n out_channels = 2\n n_atoms = 4\n raw_smiles = ['CCC', 
'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n layer = layers.GraphConv(out_channels)\n result = layer(args)\n assert result.shape == (n_atoms, out_channels)\n num_deg = 2 * layer.max_degree + (1 - layer.min_degree)\n assert len(layer.trainable_variables) == 2 * num_deg\n\n def test_graph_pool(self):\n \"\"\"Test invoking GraphPool.\"\"\"\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n result = layers.GraphPool()(args)\n assert result.shape[0] == n_atoms\n\n def test_graph_gather(self):\n \"\"\"Test invoking GraphGather.\"\"\"\n batch_size = 2\n n_features = 75\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n result = layers.GraphGather(batch_size)(args)\n assert result.shape == (batch_size, 2 * n_features)\n\n def test_lstm_step(self):\n \"\"\"Test invoking LSTMStep.\"\"\"\n max_depth = 5\n n_test = 5\n n_feat = 10\n y = np.random.rand(n_test, 2 * n_feat).astype(np.float32)\n state_zero = np.random.rand(n_test, n_feat).astype(np.float32)\n state_one = np.random.rand(n_test, n_feat).astype(np.float32)\n layer = layers.LSTMStep(n_feat, 2 * n_feat)\n result = layer([y, state_zero, state_one])\n h_out, h_copy_out, c_out = result[0], result[1][0], result[1][1]\n assert h_out.shape == (n_test, n_feat)\n assert h_copy_out.shape == (n_test, n_feat)\n assert c_out.shape == (n_test, n_feat)\n assert len(layer.trainable_variables) == 1\n\n def test_attn_lstm_embedding(self):\n \"\"\"Test invoking AttnLSTMEmbedding.\"\"\"\n max_depth = 5\n n_test = 5\n n_support = 11\n n_feat = 10\n test = np.random.rand(n_test, n_feat).astype(np.float32)\n support = np.random.rand(n_support, n_feat).astype(np.float32)\n layer = layers.AttnLSTMEmbedding(n_test, n_support, n_feat, max_depth)\n test_out, support_out = layer([test, support])\n assert test_out.shape == (n_test, n_feat)\n assert support_out.shape == (n_support, n_feat)\n assert len(layer.trainable_variables) == 4\n\n def test_iter_ref_lstm_embedding(self):\n \"\"\"Test invoking IterRefLSTMEmbedding.\"\"\"\n max_depth = 5\n n_test = 5\n n_support = 11\n n_feat = 10\n test = np.random.rand(n_test, n_feat).astype(np.float32)\n support = np.random.rand(n_support, 
n_feat).astype(np.float32)\n layer = layers.IterRefLSTMEmbedding(n_test, n_support, n_feat,\n max_depth)\n test_out, support_out = layer([test, support])\n assert test_out.shape == (n_test, n_feat)\n assert support_out.shape == (n_support, n_feat)\n assert len(layer.trainable_variables) == 8\n\n def test_vina_free_energy(self):\n \"\"\"Test invoking VinaFreeEnergy.\"\"\"\n n_atoms = 5\n m_nbrs = 1\n ndim = 3\n nbr_cutoff = 1\n start = 0\n stop = 4\n X = np.random.rand(n_atoms, ndim).astype(np.float32)\n Z = np.random.randint(0, 2, n_atoms).astype(np.float32)\n layer = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff,\n start, stop)\n result = layer([X, Z])\n assert len(layer.trainable_variables) == 6\n assert result.shape == tuple()\n layer2 = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff,\n start, stop)\n result2 = layer2([X, Z])\n assert not np.allclose(result, result2)\n result3 = layer([X, Z])\n assert np.allclose(result, result3)\n\n def test_weighted_linear_combo(self):\n \"\"\"Test invoking WeightedLinearCombo.\"\"\"\n input1 = np.random.rand(5, 10).astype(np.float32)\n input2 = np.random.rand(5, 10).astype(np.float32)\n layer = layers.WeightedLinearCombo()\n result = layer([input1, input2])\n assert len(layer.trainable_variables) == 2\n expected = input1 * layer.trainable_variables[0\n ] + input2 * layer.trainable_variables[1]\n assert np.allclose(result, expected)\n\n def test_neighbor_list(self):\n \"\"\"Test invoking NeighborList.\"\"\"\n N_atoms = 5\n start = 0\n stop = 12\n nbr_cutoff = 3\n ndim = 3\n M_nbrs = 2\n coords = start + np.random.rand(N_atoms, ndim) * (stop - start)\n coords = tf.cast(tf.stack(coords), tf.float32)\n layer = layers.NeighborList(N_atoms, M_nbrs, ndim, nbr_cutoff,\n start, stop)\n result = layer(coords)\n assert result.shape == (N_atoms, M_nbrs)\n\n def test_atomic_convolution(self):\n \"\"\"Test invoking AtomicConvolution.\"\"\"\n batch_size = 4\n max_atoms = 5\n max_neighbors = 2\n dimensions = 3\n params = [[5.0, 2.0, 0.5], [10.0, 2.0, 0.5]]\n input1 = np.random.rand(batch_size, max_atoms, dimensions).astype(np\n .float32)\n input2 = np.random.randint(max_atoms, size=(batch_size, max_atoms,\n max_neighbors))\n input3 = np.random.randint(1, 10, size=(batch_size, max_atoms,\n max_neighbors))\n layer = layers.AtomicConvolution(radial_params=params)\n result = layer([input1, input2, input3])\n assert result.shape == (batch_size, max_atoms, len(params))\n assert len(layer.trainable_variables) == 3\n\n def test_alpha_share_layer(self):\n \"\"\"Test invoking AlphaShareLayer.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.AlphaShareLayer()\n result = layer([input1, input2])\n assert input1.shape == result[0].shape\n assert input2.shape == result[1].shape\n layer2 = layers.AlphaShareLayer()\n result2 = layer2([input1, input2])\n assert not np.allclose(result[0], result2[0])\n assert not np.allclose(result[1], result2[1])\n result3 = layer([input1, input2])\n assert np.allclose(result[0], result3[0])\n assert np.allclose(result[1], result3[1])\n\n def test_sluice_loss(self):\n \"\"\"Test invoking SluiceLoss.\"\"\"\n input1 = np.ones((3, 4)).astype(np.float32)\n input2 = np.ones((2, 2)).astype(np.float32)\n result = layers.SluiceLoss()([input1, input2])\n assert np.allclose(result, 40.0)\n\n def test_beta_share(self):\n \"\"\"Test invoking BetaShare.\"\"\"\n batch_size = 10\n length = 6\n input1 = 
np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.BetaShare()\n result = layer([input1, input2])\n assert input1.shape == result.shape\n assert input2.shape == result.shape\n layer2 = layers.BetaShare()\n result2 = layer2([input1, input2])\n assert not np.allclose(result, result2)\n result3 = layer([input1, input2])\n assert np.allclose(result, result3)\n\n def test_ani_feat(self):\n \"\"\"Test invoking ANIFeat.\"\"\"\n batch_size = 10\n max_atoms = 5\n input = np.random.rand(batch_size, max_atoms, 4).astype(np.float32)\n layer = layers.ANIFeat(max_atoms=max_atoms)\n result = layer(input)\n\n def test_graph_embed_pool_layer(self):\n \"\"\"Test invoking GraphEmbedPoolLayer.\"\"\"\n V = np.random.uniform(size=(10, 100, 50)).astype(np.float32)\n adjs = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)\n layer = layers.GraphEmbedPoolLayer(num_vertices=6)\n result = layer([V, adjs])\n assert result[0].shape == (10, 6, 50)\n assert result[1].shape == (10, 6, 5, 6)\n layer2 = layers.GraphEmbedPoolLayer(num_vertices=6)\n result2 = layer2([V, adjs])\n assert not np.allclose(result[0], result2[0])\n assert not np.allclose(result[1], result2[1])\n result3 = layer([V, adjs])\n assert np.allclose(result[0], result3[0])\n assert np.allclose(result[1], result3[1])\n\n def test_graph_cnn(self):\n \"\"\"Test invoking GraphCNN.\"\"\"\n V = np.random.uniform(size=(10, 100, 50)).astype(np.float32)\n adjs = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)\n layer = layers.GraphCNN(num_filters=6)\n result = layer([V, adjs])\n assert result.shape == (10, 100, 6)\n layer2 = layers.GraphCNN(num_filters=6)\n result2 = layer2([V, adjs])\n assert not np.allclose(result, result2)\n result3 = layer([V, adjs])\n assert np.allclose(result, result3)\n\n def test_DAG_layer(self):\n \"\"\"Test invoking DAGLayer.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 75\n max_atoms = 50\n layer_sizes = [100]\n atom_features = np.random.rand(batch_size, n_atom_feat)\n parents = np.random.randint(0, max_atoms, size=(batch_size,\n max_atoms, max_atoms))\n calculation_orders = np.random.randint(0, batch_size, size=(\n batch_size, max_atoms))\n calculation_masks = np.random.randint(0, 2, size=(batch_size,\n max_atoms))\n n_atoms = batch_size\n layer = layers.DAGLayer(n_graph_feat=n_graph_feat, n_atom_feat=\n n_atom_feat, max_atoms=max_atoms, layer_sizes=layer_sizes)\n outputs = layer([atom_features, parents, calculation_orders,\n calculation_masks, n_atoms])\n\n def test_DAG_gather(self):\n \"\"\"Test invoking DAGGather.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 30\n n_outputs = 75\n max_atoms = 50\n layer_sizes = [100]\n layer = layers.DAGGather(n_graph_feat=n_graph_feat, n_outputs=\n n_outputs, max_atoms=max_atoms, layer_sizes=layer_sizes)\n atom_features = np.random.rand(batch_size, n_atom_feat)\n membership = np.sort(np.random.randint(0, batch_size, size=batch_size))\n outputs = layer([atom_features, membership])\n",
"<import token>\n\n\nclass TestLayers(test_util.TensorFlowTestCase):\n\n def test_highway(self):\n \"\"\"Test invoking Highway.\"\"\"\n width = 5\n batch_size = 10\n input = np.random.rand(batch_size, width).astype(np.float32)\n layer = layers.Highway()\n result = layer(input)\n assert result.shape == (batch_size, width)\n assert len(layer.trainable_variables) == 4\n layer2 = layers.Highway()\n result2 = layer2(input)\n assert not np.allclose(result, result2)\n result3 = layer(input)\n assert np.allclose(result, result3)\n\n def test_combine_mean_std(self):\n \"\"\"Test invoking CombineMeanStd.\"\"\"\n mean = np.random.rand(5, 3).astype(np.float32)\n std = np.random.rand(5, 3).astype(np.float32)\n layer = layers.CombineMeanStd(training_only=True, noise_epsilon=0.01)\n result1 = layer([mean, std], training=False)\n assert np.array_equal(result1, mean)\n result2 = layer([mean, std], training=True)\n assert not np.array_equal(result2, mean)\n assert np.allclose(result2, mean, atol=0.1)\n\n def test_stack(self):\n \"\"\"Test invoking Stack.\"\"\"\n input1 = np.random.rand(5, 4).astype(np.float32)\n input2 = np.random.rand(5, 4).astype(np.float32)\n result = layers.Stack()([input1, input2])\n assert result.shape == (5, 2, 4)\n assert np.array_equal(input1, result[:, 0, :])\n assert np.array_equal(input2, result[:, 1, :])\n\n def test_variable(self):\n \"\"\"Test invoking Variable.\"\"\"\n value = np.random.rand(5, 4).astype(np.float32)\n layer = layers.Variable(value)\n layer.build([])\n result = layer.call([]).numpy()\n assert np.allclose(result, value)\n assert len(layer.trainable_variables) == 1\n\n def test_interatomic_l2_distances(self):\n \"\"\"Test invoking InteratomicL2Distances.\"\"\"\n atoms = 5\n neighbors = 2\n coords = np.random.rand(atoms, 3)\n neighbor_list = np.random.randint(0, atoms, size=(atoms, neighbors))\n layer = layers.InteratomicL2Distances(atoms, neighbors, 3)\n result = layer([coords, neighbor_list])\n assert result.shape == (atoms, neighbors)\n for atom in range(atoms):\n for neighbor in range(neighbors):\n delta = coords[atom] - coords[neighbor_list[atom, neighbor]]\n dist2 = np.dot(delta, delta)\n assert np.allclose(dist2, result[atom, neighbor])\n\n def test_weave_layer(self):\n \"\"\"Test invoking WeaveLayer.\"\"\"\n out_channels = 2\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.WeaveFeaturizer()\n mols = featurizer.featurize(mols)\n weave = layers.WeaveLayer()\n atom_feat = []\n pair_feat = []\n atom_to_pair = []\n pair_split = []\n start = 0\n n_pair_feat = 14\n for im, mol in enumerate(mols):\n n_atoms = mol.get_num_atoms()\n C0, C1 = np.meshgrid(np.arange(n_atoms), np.arange(n_atoms))\n atom_to_pair.append(np.transpose(np.array([C1.flatten() + start,\n C0.flatten() + start])))\n pair_split.extend(C1.flatten() + start)\n start = start + n_atoms\n atom_feat.append(mol.get_atom_features())\n pair_feat.append(np.reshape(mol.get_pair_features(), (n_atoms *\n n_atoms, n_pair_feat)))\n inputs = [np.array(np.concatenate(atom_feat, axis=0), dtype=np.\n float32), np.concatenate(pair_feat, axis=0), np.array(\n pair_split), np.concatenate(atom_to_pair, axis=0)]\n outputs = weave(inputs)\n assert len(outputs) == 2\n\n def test_graph_conv(self):\n \"\"\"Test invoking GraphConv.\"\"\"\n out_channels = 2\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = 
featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n layer = layers.GraphConv(out_channels)\n result = layer(args)\n assert result.shape == (n_atoms, out_channels)\n num_deg = 2 * layer.max_degree + (1 - layer.min_degree)\n assert len(layer.trainable_variables) == 2 * num_deg\n\n def test_graph_pool(self):\n \"\"\"Test invoking GraphPool.\"\"\"\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n result = layers.GraphPool()(args)\n assert result.shape[0] == n_atoms\n\n def test_graph_gather(self):\n \"\"\"Test invoking GraphGather.\"\"\"\n batch_size = 2\n n_features = 75\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n result = layers.GraphGather(batch_size)(args)\n assert result.shape == (batch_size, 2 * n_features)\n\n def test_lstm_step(self):\n \"\"\"Test invoking LSTMStep.\"\"\"\n max_depth = 5\n n_test = 5\n n_feat = 10\n y = np.random.rand(n_test, 2 * n_feat).astype(np.float32)\n state_zero = np.random.rand(n_test, n_feat).astype(np.float32)\n state_one = np.random.rand(n_test, n_feat).astype(np.float32)\n layer = layers.LSTMStep(n_feat, 2 * n_feat)\n result = layer([y, state_zero, state_one])\n h_out, h_copy_out, c_out = result[0], result[1][0], result[1][1]\n assert h_out.shape == (n_test, n_feat)\n assert h_copy_out.shape == (n_test, n_feat)\n assert c_out.shape == (n_test, n_feat)\n assert len(layer.trainable_variables) == 1\n\n def test_attn_lstm_embedding(self):\n \"\"\"Test invoking AttnLSTMEmbedding.\"\"\"\n max_depth = 5\n n_test = 5\n n_support = 11\n n_feat = 10\n test = np.random.rand(n_test, n_feat).astype(np.float32)\n support = np.random.rand(n_support, n_feat).astype(np.float32)\n layer = layers.AttnLSTMEmbedding(n_test, n_support, n_feat, max_depth)\n test_out, support_out = layer([test, support])\n assert test_out.shape == (n_test, n_feat)\n assert support_out.shape == (n_support, n_feat)\n assert len(layer.trainable_variables) == 4\n\n def test_iter_ref_lstm_embedding(self):\n \"\"\"Test invoking IterRefLSTMEmbedding.\"\"\"\n max_depth = 5\n n_test = 5\n n_support = 11\n n_feat = 10\n test = np.random.rand(n_test, n_feat).astype(np.float32)\n support = np.random.rand(n_support, n_feat).astype(np.float32)\n layer = layers.IterRefLSTMEmbedding(n_test, n_support, n_feat,\n max_depth)\n test_out, support_out = layer([test, support])\n 
assert test_out.shape == (n_test, n_feat)\n assert support_out.shape == (n_support, n_feat)\n assert len(layer.trainable_variables) == 8\n\n def test_vina_free_energy(self):\n \"\"\"Test invoking VinaFreeEnergy.\"\"\"\n n_atoms = 5\n m_nbrs = 1\n ndim = 3\n nbr_cutoff = 1\n start = 0\n stop = 4\n X = np.random.rand(n_atoms, ndim).astype(np.float32)\n Z = np.random.randint(0, 2, n_atoms).astype(np.float32)\n layer = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff,\n start, stop)\n result = layer([X, Z])\n assert len(layer.trainable_variables) == 6\n assert result.shape == tuple()\n layer2 = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff,\n start, stop)\n result2 = layer2([X, Z])\n assert not np.allclose(result, result2)\n result3 = layer([X, Z])\n assert np.allclose(result, result3)\n\n def test_weighted_linear_combo(self):\n \"\"\"Test invoking WeightedLinearCombo.\"\"\"\n input1 = np.random.rand(5, 10).astype(np.float32)\n input2 = np.random.rand(5, 10).astype(np.float32)\n layer = layers.WeightedLinearCombo()\n result = layer([input1, input2])\n assert len(layer.trainable_variables) == 2\n expected = input1 * layer.trainable_variables[0\n ] + input2 * layer.trainable_variables[1]\n assert np.allclose(result, expected)\n\n def test_neighbor_list(self):\n \"\"\"Test invoking NeighborList.\"\"\"\n N_atoms = 5\n start = 0\n stop = 12\n nbr_cutoff = 3\n ndim = 3\n M_nbrs = 2\n coords = start + np.random.rand(N_atoms, ndim) * (stop - start)\n coords = tf.cast(tf.stack(coords), tf.float32)\n layer = layers.NeighborList(N_atoms, M_nbrs, ndim, nbr_cutoff,\n start, stop)\n result = layer(coords)\n assert result.shape == (N_atoms, M_nbrs)\n\n def test_atomic_convolution(self):\n \"\"\"Test invoking AtomicConvolution.\"\"\"\n batch_size = 4\n max_atoms = 5\n max_neighbors = 2\n dimensions = 3\n params = [[5.0, 2.0, 0.5], [10.0, 2.0, 0.5]]\n input1 = np.random.rand(batch_size, max_atoms, dimensions).astype(np\n .float32)\n input2 = np.random.randint(max_atoms, size=(batch_size, max_atoms,\n max_neighbors))\n input3 = np.random.randint(1, 10, size=(batch_size, max_atoms,\n max_neighbors))\n layer = layers.AtomicConvolution(radial_params=params)\n result = layer([input1, input2, input3])\n assert result.shape == (batch_size, max_atoms, len(params))\n assert len(layer.trainable_variables) == 3\n\n def test_alpha_share_layer(self):\n \"\"\"Test invoking AlphaShareLayer.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.AlphaShareLayer()\n result = layer([input1, input2])\n assert input1.shape == result[0].shape\n assert input2.shape == result[1].shape\n layer2 = layers.AlphaShareLayer()\n result2 = layer2([input1, input2])\n assert not np.allclose(result[0], result2[0])\n assert not np.allclose(result[1], result2[1])\n result3 = layer([input1, input2])\n assert np.allclose(result[0], result3[0])\n assert np.allclose(result[1], result3[1])\n\n def test_sluice_loss(self):\n \"\"\"Test invoking SluiceLoss.\"\"\"\n input1 = np.ones((3, 4)).astype(np.float32)\n input2 = np.ones((2, 2)).astype(np.float32)\n result = layers.SluiceLoss()([input1, input2])\n assert np.allclose(result, 40.0)\n\n def test_beta_share(self):\n \"\"\"Test invoking BetaShare.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.BetaShare()\n result = 
layer([input1, input2])\n assert input1.shape == result.shape\n assert input2.shape == result.shape\n layer2 = layers.BetaShare()\n result2 = layer2([input1, input2])\n assert not np.allclose(result, result2)\n result3 = layer([input1, input2])\n assert np.allclose(result, result3)\n\n def test_ani_feat(self):\n \"\"\"Test invoking ANIFeat.\"\"\"\n batch_size = 10\n max_atoms = 5\n input = np.random.rand(batch_size, max_atoms, 4).astype(np.float32)\n layer = layers.ANIFeat(max_atoms=max_atoms)\n result = layer(input)\n\n def test_graph_embed_pool_layer(self):\n \"\"\"Test invoking GraphEmbedPoolLayer.\"\"\"\n V = np.random.uniform(size=(10, 100, 50)).astype(np.float32)\n adjs = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)\n layer = layers.GraphEmbedPoolLayer(num_vertices=6)\n result = layer([V, adjs])\n assert result[0].shape == (10, 6, 50)\n assert result[1].shape == (10, 6, 5, 6)\n layer2 = layers.GraphEmbedPoolLayer(num_vertices=6)\n result2 = layer2([V, adjs])\n assert not np.allclose(result[0], result2[0])\n assert not np.allclose(result[1], result2[1])\n result3 = layer([V, adjs])\n assert np.allclose(result[0], result3[0])\n assert np.allclose(result[1], result3[1])\n\n def test_graph_cnn(self):\n \"\"\"Test invoking GraphCNN.\"\"\"\n V = np.random.uniform(size=(10, 100, 50)).astype(np.float32)\n adjs = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)\n layer = layers.GraphCNN(num_filters=6)\n result = layer([V, adjs])\n assert result.shape == (10, 100, 6)\n layer2 = layers.GraphCNN(num_filters=6)\n result2 = layer2([V, adjs])\n assert not np.allclose(result, result2)\n result3 = layer([V, adjs])\n assert np.allclose(result, result3)\n\n def test_DAG_layer(self):\n \"\"\"Test invoking DAGLayer.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 75\n max_atoms = 50\n layer_sizes = [100]\n atom_features = np.random.rand(batch_size, n_atom_feat)\n parents = np.random.randint(0, max_atoms, size=(batch_size,\n max_atoms, max_atoms))\n calculation_orders = np.random.randint(0, batch_size, size=(\n batch_size, max_atoms))\n calculation_masks = np.random.randint(0, 2, size=(batch_size,\n max_atoms))\n n_atoms = batch_size\n layer = layers.DAGLayer(n_graph_feat=n_graph_feat, n_atom_feat=\n n_atom_feat, max_atoms=max_atoms, layer_sizes=layer_sizes)\n outputs = layer([atom_features, parents, calculation_orders,\n calculation_masks, n_atoms])\n\n def test_DAG_gather(self):\n \"\"\"Test invoking DAGGather.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 30\n n_outputs = 75\n max_atoms = 50\n layer_sizes = [100]\n layer = layers.DAGGather(n_graph_feat=n_graph_feat, n_outputs=\n n_outputs, max_atoms=max_atoms, layer_sizes=layer_sizes)\n atom_features = np.random.rand(batch_size, n_atom_feat)\n membership = np.sort(np.random.randint(0, batch_size, size=batch_size))\n outputs = layer([atom_features, membership])\n",
"<import token>\n\n\nclass TestLayers(test_util.TensorFlowTestCase):\n\n def test_highway(self):\n \"\"\"Test invoking Highway.\"\"\"\n width = 5\n batch_size = 10\n input = np.random.rand(batch_size, width).astype(np.float32)\n layer = layers.Highway()\n result = layer(input)\n assert result.shape == (batch_size, width)\n assert len(layer.trainable_variables) == 4\n layer2 = layers.Highway()\n result2 = layer2(input)\n assert not np.allclose(result, result2)\n result3 = layer(input)\n assert np.allclose(result, result3)\n\n def test_combine_mean_std(self):\n \"\"\"Test invoking CombineMeanStd.\"\"\"\n mean = np.random.rand(5, 3).astype(np.float32)\n std = np.random.rand(5, 3).astype(np.float32)\n layer = layers.CombineMeanStd(training_only=True, noise_epsilon=0.01)\n result1 = layer([mean, std], training=False)\n assert np.array_equal(result1, mean)\n result2 = layer([mean, std], training=True)\n assert not np.array_equal(result2, mean)\n assert np.allclose(result2, mean, atol=0.1)\n\n def test_stack(self):\n \"\"\"Test invoking Stack.\"\"\"\n input1 = np.random.rand(5, 4).astype(np.float32)\n input2 = np.random.rand(5, 4).astype(np.float32)\n result = layers.Stack()([input1, input2])\n assert result.shape == (5, 2, 4)\n assert np.array_equal(input1, result[:, 0, :])\n assert np.array_equal(input2, result[:, 1, :])\n\n def test_variable(self):\n \"\"\"Test invoking Variable.\"\"\"\n value = np.random.rand(5, 4).astype(np.float32)\n layer = layers.Variable(value)\n layer.build([])\n result = layer.call([]).numpy()\n assert np.allclose(result, value)\n assert len(layer.trainable_variables) == 1\n\n def test_interatomic_l2_distances(self):\n \"\"\"Test invoking InteratomicL2Distances.\"\"\"\n atoms = 5\n neighbors = 2\n coords = np.random.rand(atoms, 3)\n neighbor_list = np.random.randint(0, atoms, size=(atoms, neighbors))\n layer = layers.InteratomicL2Distances(atoms, neighbors, 3)\n result = layer([coords, neighbor_list])\n assert result.shape == (atoms, neighbors)\n for atom in range(atoms):\n for neighbor in range(neighbors):\n delta = coords[atom] - coords[neighbor_list[atom, neighbor]]\n dist2 = np.dot(delta, delta)\n assert np.allclose(dist2, result[atom, neighbor])\n\n def test_weave_layer(self):\n \"\"\"Test invoking WeaveLayer.\"\"\"\n out_channels = 2\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.WeaveFeaturizer()\n mols = featurizer.featurize(mols)\n weave = layers.WeaveLayer()\n atom_feat = []\n pair_feat = []\n atom_to_pair = []\n pair_split = []\n start = 0\n n_pair_feat = 14\n for im, mol in enumerate(mols):\n n_atoms = mol.get_num_atoms()\n C0, C1 = np.meshgrid(np.arange(n_atoms), np.arange(n_atoms))\n atom_to_pair.append(np.transpose(np.array([C1.flatten() + start,\n C0.flatten() + start])))\n pair_split.extend(C1.flatten() + start)\n start = start + n_atoms\n atom_feat.append(mol.get_atom_features())\n pair_feat.append(np.reshape(mol.get_pair_features(), (n_atoms *\n n_atoms, n_pair_feat)))\n inputs = [np.array(np.concatenate(atom_feat, axis=0), dtype=np.\n float32), np.concatenate(pair_feat, axis=0), np.array(\n pair_split), np.concatenate(atom_to_pair, axis=0)]\n outputs = weave(inputs)\n assert len(outputs) == 2\n <function token>\n\n def test_graph_pool(self):\n \"\"\"Test invoking GraphPool.\"\"\"\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = 
featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n result = layers.GraphPool()(args)\n assert result.shape[0] == n_atoms\n\n def test_graph_gather(self):\n \"\"\"Test invoking GraphGather.\"\"\"\n batch_size = 2\n n_features = 75\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n result = layers.GraphGather(batch_size)(args)\n assert result.shape == (batch_size, 2 * n_features)\n\n def test_lstm_step(self):\n \"\"\"Test invoking LSTMStep.\"\"\"\n max_depth = 5\n n_test = 5\n n_feat = 10\n y = np.random.rand(n_test, 2 * n_feat).astype(np.float32)\n state_zero = np.random.rand(n_test, n_feat).astype(np.float32)\n state_one = np.random.rand(n_test, n_feat).astype(np.float32)\n layer = layers.LSTMStep(n_feat, 2 * n_feat)\n result = layer([y, state_zero, state_one])\n h_out, h_copy_out, c_out = result[0], result[1][0], result[1][1]\n assert h_out.shape == (n_test, n_feat)\n assert h_copy_out.shape == (n_test, n_feat)\n assert c_out.shape == (n_test, n_feat)\n assert len(layer.trainable_variables) == 1\n\n def test_attn_lstm_embedding(self):\n \"\"\"Test invoking AttnLSTMEmbedding.\"\"\"\n max_depth = 5\n n_test = 5\n n_support = 11\n n_feat = 10\n test = np.random.rand(n_test, n_feat).astype(np.float32)\n support = np.random.rand(n_support, n_feat).astype(np.float32)\n layer = layers.AttnLSTMEmbedding(n_test, n_support, n_feat, max_depth)\n test_out, support_out = layer([test, support])\n assert test_out.shape == (n_test, n_feat)\n assert support_out.shape == (n_support, n_feat)\n assert len(layer.trainable_variables) == 4\n\n def test_iter_ref_lstm_embedding(self):\n \"\"\"Test invoking IterRefLSTMEmbedding.\"\"\"\n max_depth = 5\n n_test = 5\n n_support = 11\n n_feat = 10\n test = np.random.rand(n_test, n_feat).astype(np.float32)\n support = np.random.rand(n_support, n_feat).astype(np.float32)\n layer = layers.IterRefLSTMEmbedding(n_test, n_support, n_feat,\n max_depth)\n test_out, support_out = layer([test, support])\n assert test_out.shape == (n_test, n_feat)\n assert support_out.shape == (n_support, n_feat)\n assert len(layer.trainable_variables) == 8\n\n def test_vina_free_energy(self):\n \"\"\"Test invoking VinaFreeEnergy.\"\"\"\n n_atoms = 5\n m_nbrs = 1\n ndim = 3\n nbr_cutoff = 1\n start = 0\n stop = 4\n X = np.random.rand(n_atoms, ndim).astype(np.float32)\n Z = np.random.randint(0, 2, n_atoms).astype(np.float32)\n layer = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff,\n start, stop)\n result = layer([X, Z])\n assert len(layer.trainable_variables) == 6\n assert result.shape == tuple()\n layer2 = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff,\n start, stop)\n result2 = layer2([X, Z])\n assert not np.allclose(result, result2)\n result3 = layer([X, Z])\n assert np.allclose(result, result3)\n\n def 
test_weighted_linear_combo(self):\n \"\"\"Test invoking WeightedLinearCombo.\"\"\"\n input1 = np.random.rand(5, 10).astype(np.float32)\n input2 = np.random.rand(5, 10).astype(np.float32)\n layer = layers.WeightedLinearCombo()\n result = layer([input1, input2])\n assert len(layer.trainable_variables) == 2\n expected = input1 * layer.trainable_variables[0\n ] + input2 * layer.trainable_variables[1]\n assert np.allclose(result, expected)\n\n def test_neighbor_list(self):\n \"\"\"Test invoking NeighborList.\"\"\"\n N_atoms = 5\n start = 0\n stop = 12\n nbr_cutoff = 3\n ndim = 3\n M_nbrs = 2\n coords = start + np.random.rand(N_atoms, ndim) * (stop - start)\n coords = tf.cast(tf.stack(coords), tf.float32)\n layer = layers.NeighborList(N_atoms, M_nbrs, ndim, nbr_cutoff,\n start, stop)\n result = layer(coords)\n assert result.shape == (N_atoms, M_nbrs)\n\n def test_atomic_convolution(self):\n \"\"\"Test invoking AtomicConvolution.\"\"\"\n batch_size = 4\n max_atoms = 5\n max_neighbors = 2\n dimensions = 3\n params = [[5.0, 2.0, 0.5], [10.0, 2.0, 0.5]]\n input1 = np.random.rand(batch_size, max_atoms, dimensions).astype(np\n .float32)\n input2 = np.random.randint(max_atoms, size=(batch_size, max_atoms,\n max_neighbors))\n input3 = np.random.randint(1, 10, size=(batch_size, max_atoms,\n max_neighbors))\n layer = layers.AtomicConvolution(radial_params=params)\n result = layer([input1, input2, input3])\n assert result.shape == (batch_size, max_atoms, len(params))\n assert len(layer.trainable_variables) == 3\n\n def test_alpha_share_layer(self):\n \"\"\"Test invoking AlphaShareLayer.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.AlphaShareLayer()\n result = layer([input1, input2])\n assert input1.shape == result[0].shape\n assert input2.shape == result[1].shape\n layer2 = layers.AlphaShareLayer()\n result2 = layer2([input1, input2])\n assert not np.allclose(result[0], result2[0])\n assert not np.allclose(result[1], result2[1])\n result3 = layer([input1, input2])\n assert np.allclose(result[0], result3[0])\n assert np.allclose(result[1], result3[1])\n\n def test_sluice_loss(self):\n \"\"\"Test invoking SluiceLoss.\"\"\"\n input1 = np.ones((3, 4)).astype(np.float32)\n input2 = np.ones((2, 2)).astype(np.float32)\n result = layers.SluiceLoss()([input1, input2])\n assert np.allclose(result, 40.0)\n\n def test_beta_share(self):\n \"\"\"Test invoking BetaShare.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.BetaShare()\n result = layer([input1, input2])\n assert input1.shape == result.shape\n assert input2.shape == result.shape\n layer2 = layers.BetaShare()\n result2 = layer2([input1, input2])\n assert not np.allclose(result, result2)\n result3 = layer([input1, input2])\n assert np.allclose(result, result3)\n\n def test_ani_feat(self):\n \"\"\"Test invoking ANIFeat.\"\"\"\n batch_size = 10\n max_atoms = 5\n input = np.random.rand(batch_size, max_atoms, 4).astype(np.float32)\n layer = layers.ANIFeat(max_atoms=max_atoms)\n result = layer(input)\n\n def test_graph_embed_pool_layer(self):\n \"\"\"Test invoking GraphEmbedPoolLayer.\"\"\"\n V = np.random.uniform(size=(10, 100, 50)).astype(np.float32)\n adjs = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)\n layer = layers.GraphEmbedPoolLayer(num_vertices=6)\n result = layer([V, 
adjs])\n assert result[0].shape == (10, 6, 50)\n assert result[1].shape == (10, 6, 5, 6)\n layer2 = layers.GraphEmbedPoolLayer(num_vertices=6)\n result2 = layer2([V, adjs])\n assert not np.allclose(result[0], result2[0])\n assert not np.allclose(result[1], result2[1])\n result3 = layer([V, adjs])\n assert np.allclose(result[0], result3[0])\n assert np.allclose(result[1], result3[1])\n\n def test_graph_cnn(self):\n \"\"\"Test invoking GraphCNN.\"\"\"\n V = np.random.uniform(size=(10, 100, 50)).astype(np.float32)\n adjs = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)\n layer = layers.GraphCNN(num_filters=6)\n result = layer([V, adjs])\n assert result.shape == (10, 100, 6)\n layer2 = layers.GraphCNN(num_filters=6)\n result2 = layer2([V, adjs])\n assert not np.allclose(result, result2)\n result3 = layer([V, adjs])\n assert np.allclose(result, result3)\n\n def test_DAG_layer(self):\n \"\"\"Test invoking DAGLayer.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 75\n max_atoms = 50\n layer_sizes = [100]\n atom_features = np.random.rand(batch_size, n_atom_feat)\n parents = np.random.randint(0, max_atoms, size=(batch_size,\n max_atoms, max_atoms))\n calculation_orders = np.random.randint(0, batch_size, size=(\n batch_size, max_atoms))\n calculation_masks = np.random.randint(0, 2, size=(batch_size,\n max_atoms))\n n_atoms = batch_size\n layer = layers.DAGLayer(n_graph_feat=n_graph_feat, n_atom_feat=\n n_atom_feat, max_atoms=max_atoms, layer_sizes=layer_sizes)\n outputs = layer([atom_features, parents, calculation_orders,\n calculation_masks, n_atoms])\n\n def test_DAG_gather(self):\n \"\"\"Test invoking DAGGather.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 30\n n_outputs = 75\n max_atoms = 50\n layer_sizes = [100]\n layer = layers.DAGGather(n_graph_feat=n_graph_feat, n_outputs=\n n_outputs, max_atoms=max_atoms, layer_sizes=layer_sizes)\n atom_features = np.random.rand(batch_size, n_atom_feat)\n membership = np.sort(np.random.randint(0, batch_size, size=batch_size))\n outputs = layer([atom_features, membership])\n",
"<import token>\n\n\nclass TestLayers(test_util.TensorFlowTestCase):\n\n def test_highway(self):\n \"\"\"Test invoking Highway.\"\"\"\n width = 5\n batch_size = 10\n input = np.random.rand(batch_size, width).astype(np.float32)\n layer = layers.Highway()\n result = layer(input)\n assert result.shape == (batch_size, width)\n assert len(layer.trainable_variables) == 4\n layer2 = layers.Highway()\n result2 = layer2(input)\n assert not np.allclose(result, result2)\n result3 = layer(input)\n assert np.allclose(result, result3)\n\n def test_combine_mean_std(self):\n \"\"\"Test invoking CombineMeanStd.\"\"\"\n mean = np.random.rand(5, 3).astype(np.float32)\n std = np.random.rand(5, 3).astype(np.float32)\n layer = layers.CombineMeanStd(training_only=True, noise_epsilon=0.01)\n result1 = layer([mean, std], training=False)\n assert np.array_equal(result1, mean)\n result2 = layer([mean, std], training=True)\n assert not np.array_equal(result2, mean)\n assert np.allclose(result2, mean, atol=0.1)\n\n def test_stack(self):\n \"\"\"Test invoking Stack.\"\"\"\n input1 = np.random.rand(5, 4).astype(np.float32)\n input2 = np.random.rand(5, 4).astype(np.float32)\n result = layers.Stack()([input1, input2])\n assert result.shape == (5, 2, 4)\n assert np.array_equal(input1, result[:, 0, :])\n assert np.array_equal(input2, result[:, 1, :])\n\n def test_variable(self):\n \"\"\"Test invoking Variable.\"\"\"\n value = np.random.rand(5, 4).astype(np.float32)\n layer = layers.Variable(value)\n layer.build([])\n result = layer.call([]).numpy()\n assert np.allclose(result, value)\n assert len(layer.trainable_variables) == 1\n\n def test_interatomic_l2_distances(self):\n \"\"\"Test invoking InteratomicL2Distances.\"\"\"\n atoms = 5\n neighbors = 2\n coords = np.random.rand(atoms, 3)\n neighbor_list = np.random.randint(0, atoms, size=(atoms, neighbors))\n layer = layers.InteratomicL2Distances(atoms, neighbors, 3)\n result = layer([coords, neighbor_list])\n assert result.shape == (atoms, neighbors)\n for atom in range(atoms):\n for neighbor in range(neighbors):\n delta = coords[atom] - coords[neighbor_list[atom, neighbor]]\n dist2 = np.dot(delta, delta)\n assert np.allclose(dist2, result[atom, neighbor])\n\n def test_weave_layer(self):\n \"\"\"Test invoking WeaveLayer.\"\"\"\n out_channels = 2\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.WeaveFeaturizer()\n mols = featurizer.featurize(mols)\n weave = layers.WeaveLayer()\n atom_feat = []\n pair_feat = []\n atom_to_pair = []\n pair_split = []\n start = 0\n n_pair_feat = 14\n for im, mol in enumerate(mols):\n n_atoms = mol.get_num_atoms()\n C0, C1 = np.meshgrid(np.arange(n_atoms), np.arange(n_atoms))\n atom_to_pair.append(np.transpose(np.array([C1.flatten() + start,\n C0.flatten() + start])))\n pair_split.extend(C1.flatten() + start)\n start = start + n_atoms\n atom_feat.append(mol.get_atom_features())\n pair_feat.append(np.reshape(mol.get_pair_features(), (n_atoms *\n n_atoms, n_pair_feat)))\n inputs = [np.array(np.concatenate(atom_feat, axis=0), dtype=np.\n float32), np.concatenate(pair_feat, axis=0), np.array(\n pair_split), np.concatenate(atom_to_pair, axis=0)]\n outputs = weave(inputs)\n assert len(outputs) == 2\n <function token>\n\n def test_graph_pool(self):\n \"\"\"Test invoking GraphPool.\"\"\"\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = 
featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n result = layers.GraphPool()(args)\n assert result.shape[0] == n_atoms\n\n def test_graph_gather(self):\n \"\"\"Test invoking GraphGather.\"\"\"\n batch_size = 2\n n_features = 75\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n result = layers.GraphGather(batch_size)(args)\n assert result.shape == (batch_size, 2 * n_features)\n\n def test_lstm_step(self):\n \"\"\"Test invoking LSTMStep.\"\"\"\n max_depth = 5\n n_test = 5\n n_feat = 10\n y = np.random.rand(n_test, 2 * n_feat).astype(np.float32)\n state_zero = np.random.rand(n_test, n_feat).astype(np.float32)\n state_one = np.random.rand(n_test, n_feat).astype(np.float32)\n layer = layers.LSTMStep(n_feat, 2 * n_feat)\n result = layer([y, state_zero, state_one])\n h_out, h_copy_out, c_out = result[0], result[1][0], result[1][1]\n assert h_out.shape == (n_test, n_feat)\n assert h_copy_out.shape == (n_test, n_feat)\n assert c_out.shape == (n_test, n_feat)\n assert len(layer.trainable_variables) == 1\n\n def test_attn_lstm_embedding(self):\n \"\"\"Test invoking AttnLSTMEmbedding.\"\"\"\n max_depth = 5\n n_test = 5\n n_support = 11\n n_feat = 10\n test = np.random.rand(n_test, n_feat).astype(np.float32)\n support = np.random.rand(n_support, n_feat).astype(np.float32)\n layer = layers.AttnLSTMEmbedding(n_test, n_support, n_feat, max_depth)\n test_out, support_out = layer([test, support])\n assert test_out.shape == (n_test, n_feat)\n assert support_out.shape == (n_support, n_feat)\n assert len(layer.trainable_variables) == 4\n\n def test_iter_ref_lstm_embedding(self):\n \"\"\"Test invoking IterRefLSTMEmbedding.\"\"\"\n max_depth = 5\n n_test = 5\n n_support = 11\n n_feat = 10\n test = np.random.rand(n_test, n_feat).astype(np.float32)\n support = np.random.rand(n_support, n_feat).astype(np.float32)\n layer = layers.IterRefLSTMEmbedding(n_test, n_support, n_feat,\n max_depth)\n test_out, support_out = layer([test, support])\n assert test_out.shape == (n_test, n_feat)\n assert support_out.shape == (n_support, n_feat)\n assert len(layer.trainable_variables) == 8\n\n def test_vina_free_energy(self):\n \"\"\"Test invoking VinaFreeEnergy.\"\"\"\n n_atoms = 5\n m_nbrs = 1\n ndim = 3\n nbr_cutoff = 1\n start = 0\n stop = 4\n X = np.random.rand(n_atoms, ndim).astype(np.float32)\n Z = np.random.randint(0, 2, n_atoms).astype(np.float32)\n layer = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff,\n start, stop)\n result = layer([X, Z])\n assert len(layer.trainable_variables) == 6\n assert result.shape == tuple()\n layer2 = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff,\n start, stop)\n result2 = layer2([X, Z])\n assert not np.allclose(result, result2)\n result3 = layer([X, Z])\n assert np.allclose(result, result3)\n\n def 
test_weighted_linear_combo(self):\n \"\"\"Test invoking WeightedLinearCombo.\"\"\"\n input1 = np.random.rand(5, 10).astype(np.float32)\n input2 = np.random.rand(5, 10).astype(np.float32)\n layer = layers.WeightedLinearCombo()\n result = layer([input1, input2])\n assert len(layer.trainable_variables) == 2\n expected = input1 * layer.trainable_variables[0\n ] + input2 * layer.trainable_variables[1]\n assert np.allclose(result, expected)\n\n def test_neighbor_list(self):\n \"\"\"Test invoking NeighborList.\"\"\"\n N_atoms = 5\n start = 0\n stop = 12\n nbr_cutoff = 3\n ndim = 3\n M_nbrs = 2\n coords = start + np.random.rand(N_atoms, ndim) * (stop - start)\n coords = tf.cast(tf.stack(coords), tf.float32)\n layer = layers.NeighborList(N_atoms, M_nbrs, ndim, nbr_cutoff,\n start, stop)\n result = layer(coords)\n assert result.shape == (N_atoms, M_nbrs)\n <function token>\n\n def test_alpha_share_layer(self):\n \"\"\"Test invoking AlphaShareLayer.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.AlphaShareLayer()\n result = layer([input1, input2])\n assert input1.shape == result[0].shape\n assert input2.shape == result[1].shape\n layer2 = layers.AlphaShareLayer()\n result2 = layer2([input1, input2])\n assert not np.allclose(result[0], result2[0])\n assert not np.allclose(result[1], result2[1])\n result3 = layer([input1, input2])\n assert np.allclose(result[0], result3[0])\n assert np.allclose(result[1], result3[1])\n\n def test_sluice_loss(self):\n \"\"\"Test invoking SluiceLoss.\"\"\"\n input1 = np.ones((3, 4)).astype(np.float32)\n input2 = np.ones((2, 2)).astype(np.float32)\n result = layers.SluiceLoss()([input1, input2])\n assert np.allclose(result, 40.0)\n\n def test_beta_share(self):\n \"\"\"Test invoking BetaShare.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.BetaShare()\n result = layer([input1, input2])\n assert input1.shape == result.shape\n assert input2.shape == result.shape\n layer2 = layers.BetaShare()\n result2 = layer2([input1, input2])\n assert not np.allclose(result, result2)\n result3 = layer([input1, input2])\n assert np.allclose(result, result3)\n\n def test_ani_feat(self):\n \"\"\"Test invoking ANIFeat.\"\"\"\n batch_size = 10\n max_atoms = 5\n input = np.random.rand(batch_size, max_atoms, 4).astype(np.float32)\n layer = layers.ANIFeat(max_atoms=max_atoms)\n result = layer(input)\n\n def test_graph_embed_pool_layer(self):\n \"\"\"Test invoking GraphEmbedPoolLayer.\"\"\"\n V = np.random.uniform(size=(10, 100, 50)).astype(np.float32)\n adjs = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)\n layer = layers.GraphEmbedPoolLayer(num_vertices=6)\n result = layer([V, adjs])\n assert result[0].shape == (10, 6, 50)\n assert result[1].shape == (10, 6, 5, 6)\n layer2 = layers.GraphEmbedPoolLayer(num_vertices=6)\n result2 = layer2([V, adjs])\n assert not np.allclose(result[0], result2[0])\n assert not np.allclose(result[1], result2[1])\n result3 = layer([V, adjs])\n assert np.allclose(result[0], result3[0])\n assert np.allclose(result[1], result3[1])\n\n def test_graph_cnn(self):\n \"\"\"Test invoking GraphCNN.\"\"\"\n V = np.random.uniform(size=(10, 100, 50)).astype(np.float32)\n adjs = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)\n layer = layers.GraphCNN(num_filters=6)\n result = layer([V, 
adjs])\n assert result.shape == (10, 100, 6)\n layer2 = layers.GraphCNN(num_filters=6)\n result2 = layer2([V, adjs])\n assert not np.allclose(result, result2)\n result3 = layer([V, adjs])\n assert np.allclose(result, result3)\n\n def test_DAG_layer(self):\n \"\"\"Test invoking DAGLayer.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 75\n max_atoms = 50\n layer_sizes = [100]\n atom_features = np.random.rand(batch_size, n_atom_feat)\n parents = np.random.randint(0, max_atoms, size=(batch_size,\n max_atoms, max_atoms))\n calculation_orders = np.random.randint(0, batch_size, size=(\n batch_size, max_atoms))\n calculation_masks = np.random.randint(0, 2, size=(batch_size,\n max_atoms))\n n_atoms = batch_size\n layer = layers.DAGLayer(n_graph_feat=n_graph_feat, n_atom_feat=\n n_atom_feat, max_atoms=max_atoms, layer_sizes=layer_sizes)\n outputs = layer([atom_features, parents, calculation_orders,\n calculation_masks, n_atoms])\n\n def test_DAG_gather(self):\n \"\"\"Test invoking DAGGather.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 30\n n_outputs = 75\n max_atoms = 50\n layer_sizes = [100]\n layer = layers.DAGGather(n_graph_feat=n_graph_feat, n_outputs=\n n_outputs, max_atoms=max_atoms, layer_sizes=layer_sizes)\n atom_features = np.random.rand(batch_size, n_atom_feat)\n membership = np.sort(np.random.randint(0, batch_size, size=batch_size))\n outputs = layer([atom_features, membership])\n",
"<import token>\n\n\nclass TestLayers(test_util.TensorFlowTestCase):\n <function token>\n\n def test_combine_mean_std(self):\n \"\"\"Test invoking CombineMeanStd.\"\"\"\n mean = np.random.rand(5, 3).astype(np.float32)\n std = np.random.rand(5, 3).astype(np.float32)\n layer = layers.CombineMeanStd(training_only=True, noise_epsilon=0.01)\n result1 = layer([mean, std], training=False)\n assert np.array_equal(result1, mean)\n result2 = layer([mean, std], training=True)\n assert not np.array_equal(result2, mean)\n assert np.allclose(result2, mean, atol=0.1)\n\n def test_stack(self):\n \"\"\"Test invoking Stack.\"\"\"\n input1 = np.random.rand(5, 4).astype(np.float32)\n input2 = np.random.rand(5, 4).astype(np.float32)\n result = layers.Stack()([input1, input2])\n assert result.shape == (5, 2, 4)\n assert np.array_equal(input1, result[:, 0, :])\n assert np.array_equal(input2, result[:, 1, :])\n\n def test_variable(self):\n \"\"\"Test invoking Variable.\"\"\"\n value = np.random.rand(5, 4).astype(np.float32)\n layer = layers.Variable(value)\n layer.build([])\n result = layer.call([]).numpy()\n assert np.allclose(result, value)\n assert len(layer.trainable_variables) == 1\n\n def test_interatomic_l2_distances(self):\n \"\"\"Test invoking InteratomicL2Distances.\"\"\"\n atoms = 5\n neighbors = 2\n coords = np.random.rand(atoms, 3)\n neighbor_list = np.random.randint(0, atoms, size=(atoms, neighbors))\n layer = layers.InteratomicL2Distances(atoms, neighbors, 3)\n result = layer([coords, neighbor_list])\n assert result.shape == (atoms, neighbors)\n for atom in range(atoms):\n for neighbor in range(neighbors):\n delta = coords[atom] - coords[neighbor_list[atom, neighbor]]\n dist2 = np.dot(delta, delta)\n assert np.allclose(dist2, result[atom, neighbor])\n\n def test_weave_layer(self):\n \"\"\"Test invoking WeaveLayer.\"\"\"\n out_channels = 2\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.WeaveFeaturizer()\n mols = featurizer.featurize(mols)\n weave = layers.WeaveLayer()\n atom_feat = []\n pair_feat = []\n atom_to_pair = []\n pair_split = []\n start = 0\n n_pair_feat = 14\n for im, mol in enumerate(mols):\n n_atoms = mol.get_num_atoms()\n C0, C1 = np.meshgrid(np.arange(n_atoms), np.arange(n_atoms))\n atom_to_pair.append(np.transpose(np.array([C1.flatten() + start,\n C0.flatten() + start])))\n pair_split.extend(C1.flatten() + start)\n start = start + n_atoms\n atom_feat.append(mol.get_atom_features())\n pair_feat.append(np.reshape(mol.get_pair_features(), (n_atoms *\n n_atoms, n_pair_feat)))\n inputs = [np.array(np.concatenate(atom_feat, axis=0), dtype=np.\n float32), np.concatenate(pair_feat, axis=0), np.array(\n pair_split), np.concatenate(atom_to_pair, axis=0)]\n outputs = weave(inputs)\n assert len(outputs) == 2\n <function token>\n\n def test_graph_pool(self):\n \"\"\"Test invoking GraphPool.\"\"\"\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n result = layers.GraphPool()(args)\n assert result.shape[0] == n_atoms\n\n def 
test_graph_gather(self):\n \"\"\"Test invoking GraphGather.\"\"\"\n batch_size = 2\n n_features = 75\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n result = layers.GraphGather(batch_size)(args)\n assert result.shape == (batch_size, 2 * n_features)\n\n def test_lstm_step(self):\n \"\"\"Test invoking LSTMStep.\"\"\"\n max_depth = 5\n n_test = 5\n n_feat = 10\n y = np.random.rand(n_test, 2 * n_feat).astype(np.float32)\n state_zero = np.random.rand(n_test, n_feat).astype(np.float32)\n state_one = np.random.rand(n_test, n_feat).astype(np.float32)\n layer = layers.LSTMStep(n_feat, 2 * n_feat)\n result = layer([y, state_zero, state_one])\n h_out, h_copy_out, c_out = result[0], result[1][0], result[1][1]\n assert h_out.shape == (n_test, n_feat)\n assert h_copy_out.shape == (n_test, n_feat)\n assert c_out.shape == (n_test, n_feat)\n assert len(layer.trainable_variables) == 1\n\n def test_attn_lstm_embedding(self):\n \"\"\"Test invoking AttnLSTMEmbedding.\"\"\"\n max_depth = 5\n n_test = 5\n n_support = 11\n n_feat = 10\n test = np.random.rand(n_test, n_feat).astype(np.float32)\n support = np.random.rand(n_support, n_feat).astype(np.float32)\n layer = layers.AttnLSTMEmbedding(n_test, n_support, n_feat, max_depth)\n test_out, support_out = layer([test, support])\n assert test_out.shape == (n_test, n_feat)\n assert support_out.shape == (n_support, n_feat)\n assert len(layer.trainable_variables) == 4\n\n def test_iter_ref_lstm_embedding(self):\n \"\"\"Test invoking IterRefLSTMEmbedding.\"\"\"\n max_depth = 5\n n_test = 5\n n_support = 11\n n_feat = 10\n test = np.random.rand(n_test, n_feat).astype(np.float32)\n support = np.random.rand(n_support, n_feat).astype(np.float32)\n layer = layers.IterRefLSTMEmbedding(n_test, n_support, n_feat,\n max_depth)\n test_out, support_out = layer([test, support])\n assert test_out.shape == (n_test, n_feat)\n assert support_out.shape == (n_support, n_feat)\n assert len(layer.trainable_variables) == 8\n\n def test_vina_free_energy(self):\n \"\"\"Test invoking VinaFreeEnergy.\"\"\"\n n_atoms = 5\n m_nbrs = 1\n ndim = 3\n nbr_cutoff = 1\n start = 0\n stop = 4\n X = np.random.rand(n_atoms, ndim).astype(np.float32)\n Z = np.random.randint(0, 2, n_atoms).astype(np.float32)\n layer = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff,\n start, stop)\n result = layer([X, Z])\n assert len(layer.trainable_variables) == 6\n assert result.shape == tuple()\n layer2 = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff,\n start, stop)\n result2 = layer2([X, Z])\n assert not np.allclose(result, result2)\n result3 = layer([X, Z])\n assert np.allclose(result, result3)\n\n def test_weighted_linear_combo(self):\n \"\"\"Test invoking WeightedLinearCombo.\"\"\"\n input1 = np.random.rand(5, 10).astype(np.float32)\n input2 = np.random.rand(5, 10).astype(np.float32)\n layer = layers.WeightedLinearCombo()\n result = layer([input1, input2])\n assert len(layer.trainable_variables) == 2\n expected = input1 * layer.trainable_variables[0\n ] + input2 * layer.trainable_variables[1]\n assert np.allclose(result, 
expected)\n\n def test_neighbor_list(self):\n \"\"\"Test invoking NeighborList.\"\"\"\n N_atoms = 5\n start = 0\n stop = 12\n nbr_cutoff = 3\n ndim = 3\n M_nbrs = 2\n coords = start + np.random.rand(N_atoms, ndim) * (stop - start)\n coords = tf.cast(tf.stack(coords), tf.float32)\n layer = layers.NeighborList(N_atoms, M_nbrs, ndim, nbr_cutoff,\n start, stop)\n result = layer(coords)\n assert result.shape == (N_atoms, M_nbrs)\n <function token>\n\n def test_alpha_share_layer(self):\n \"\"\"Test invoking AlphaShareLayer.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.AlphaShareLayer()\n result = layer([input1, input2])\n assert input1.shape == result[0].shape\n assert input2.shape == result[1].shape\n layer2 = layers.AlphaShareLayer()\n result2 = layer2([input1, input2])\n assert not np.allclose(result[0], result2[0])\n assert not np.allclose(result[1], result2[1])\n result3 = layer([input1, input2])\n assert np.allclose(result[0], result3[0])\n assert np.allclose(result[1], result3[1])\n\n def test_sluice_loss(self):\n \"\"\"Test invoking SluiceLoss.\"\"\"\n input1 = np.ones((3, 4)).astype(np.float32)\n input2 = np.ones((2, 2)).astype(np.float32)\n result = layers.SluiceLoss()([input1, input2])\n assert np.allclose(result, 40.0)\n\n def test_beta_share(self):\n \"\"\"Test invoking BetaShare.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.BetaShare()\n result = layer([input1, input2])\n assert input1.shape == result.shape\n assert input2.shape == result.shape\n layer2 = layers.BetaShare()\n result2 = layer2([input1, input2])\n assert not np.allclose(result, result2)\n result3 = layer([input1, input2])\n assert np.allclose(result, result3)\n\n def test_ani_feat(self):\n \"\"\"Test invoking ANIFeat.\"\"\"\n batch_size = 10\n max_atoms = 5\n input = np.random.rand(batch_size, max_atoms, 4).astype(np.float32)\n layer = layers.ANIFeat(max_atoms=max_atoms)\n result = layer(input)\n\n def test_graph_embed_pool_layer(self):\n \"\"\"Test invoking GraphEmbedPoolLayer.\"\"\"\n V = np.random.uniform(size=(10, 100, 50)).astype(np.float32)\n adjs = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)\n layer = layers.GraphEmbedPoolLayer(num_vertices=6)\n result = layer([V, adjs])\n assert result[0].shape == (10, 6, 50)\n assert result[1].shape == (10, 6, 5, 6)\n layer2 = layers.GraphEmbedPoolLayer(num_vertices=6)\n result2 = layer2([V, adjs])\n assert not np.allclose(result[0], result2[0])\n assert not np.allclose(result[1], result2[1])\n result3 = layer([V, adjs])\n assert np.allclose(result[0], result3[0])\n assert np.allclose(result[1], result3[1])\n\n def test_graph_cnn(self):\n \"\"\"Test invoking GraphCNN.\"\"\"\n V = np.random.uniform(size=(10, 100, 50)).astype(np.float32)\n adjs = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)\n layer = layers.GraphCNN(num_filters=6)\n result = layer([V, adjs])\n assert result.shape == (10, 100, 6)\n layer2 = layers.GraphCNN(num_filters=6)\n result2 = layer2([V, adjs])\n assert not np.allclose(result, result2)\n result3 = layer([V, adjs])\n assert np.allclose(result, result3)\n\n def test_DAG_layer(self):\n \"\"\"Test invoking DAGLayer.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 75\n max_atoms = 50\n layer_sizes = [100]\n atom_features = 
np.random.rand(batch_size, n_atom_feat)\n parents = np.random.randint(0, max_atoms, size=(batch_size,\n max_atoms, max_atoms))\n calculation_orders = np.random.randint(0, batch_size, size=(\n batch_size, max_atoms))\n calculation_masks = np.random.randint(0, 2, size=(batch_size,\n max_atoms))\n n_atoms = batch_size\n layer = layers.DAGLayer(n_graph_feat=n_graph_feat, n_atom_feat=\n n_atom_feat, max_atoms=max_atoms, layer_sizes=layer_sizes)\n outputs = layer([atom_features, parents, calculation_orders,\n calculation_masks, n_atoms])\n\n def test_DAG_gather(self):\n \"\"\"Test invoking DAGGather.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 30\n n_outputs = 75\n max_atoms = 50\n layer_sizes = [100]\n layer = layers.DAGGather(n_graph_feat=n_graph_feat, n_outputs=\n n_outputs, max_atoms=max_atoms, layer_sizes=layer_sizes)\n atom_features = np.random.rand(batch_size, n_atom_feat)\n membership = np.sort(np.random.randint(0, batch_size, size=batch_size))\n outputs = layer([atom_features, membership])\n",
"<import token>\n\n\nclass TestLayers(test_util.TensorFlowTestCase):\n <function token>\n\n def test_combine_mean_std(self):\n \"\"\"Test invoking CombineMeanStd.\"\"\"\n mean = np.random.rand(5, 3).astype(np.float32)\n std = np.random.rand(5, 3).astype(np.float32)\n layer = layers.CombineMeanStd(training_only=True, noise_epsilon=0.01)\n result1 = layer([mean, std], training=False)\n assert np.array_equal(result1, mean)\n result2 = layer([mean, std], training=True)\n assert not np.array_equal(result2, mean)\n assert np.allclose(result2, mean, atol=0.1)\n\n def test_stack(self):\n \"\"\"Test invoking Stack.\"\"\"\n input1 = np.random.rand(5, 4).astype(np.float32)\n input2 = np.random.rand(5, 4).astype(np.float32)\n result = layers.Stack()([input1, input2])\n assert result.shape == (5, 2, 4)\n assert np.array_equal(input1, result[:, 0, :])\n assert np.array_equal(input2, result[:, 1, :])\n\n def test_variable(self):\n \"\"\"Test invoking Variable.\"\"\"\n value = np.random.rand(5, 4).astype(np.float32)\n layer = layers.Variable(value)\n layer.build([])\n result = layer.call([]).numpy()\n assert np.allclose(result, value)\n assert len(layer.trainable_variables) == 1\n\n def test_interatomic_l2_distances(self):\n \"\"\"Test invoking InteratomicL2Distances.\"\"\"\n atoms = 5\n neighbors = 2\n coords = np.random.rand(atoms, 3)\n neighbor_list = np.random.randint(0, atoms, size=(atoms, neighbors))\n layer = layers.InteratomicL2Distances(atoms, neighbors, 3)\n result = layer([coords, neighbor_list])\n assert result.shape == (atoms, neighbors)\n for atom in range(atoms):\n for neighbor in range(neighbors):\n delta = coords[atom] - coords[neighbor_list[atom, neighbor]]\n dist2 = np.dot(delta, delta)\n assert np.allclose(dist2, result[atom, neighbor])\n\n def test_weave_layer(self):\n \"\"\"Test invoking WeaveLayer.\"\"\"\n out_channels = 2\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.WeaveFeaturizer()\n mols = featurizer.featurize(mols)\n weave = layers.WeaveLayer()\n atom_feat = []\n pair_feat = []\n atom_to_pair = []\n pair_split = []\n start = 0\n n_pair_feat = 14\n for im, mol in enumerate(mols):\n n_atoms = mol.get_num_atoms()\n C0, C1 = np.meshgrid(np.arange(n_atoms), np.arange(n_atoms))\n atom_to_pair.append(np.transpose(np.array([C1.flatten() + start,\n C0.flatten() + start])))\n pair_split.extend(C1.flatten() + start)\n start = start + n_atoms\n atom_feat.append(mol.get_atom_features())\n pair_feat.append(np.reshape(mol.get_pair_features(), (n_atoms *\n n_atoms, n_pair_feat)))\n inputs = [np.array(np.concatenate(atom_feat, axis=0), dtype=np.\n float32), np.concatenate(pair_feat, axis=0), np.array(\n pair_split), np.concatenate(atom_to_pair, axis=0)]\n outputs = weave(inputs)\n assert len(outputs) == 2\n <function token>\n\n def test_graph_pool(self):\n \"\"\"Test invoking GraphPool.\"\"\"\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n result = layers.GraphPool()(args)\n assert result.shape[0] == n_atoms\n\n def 
test_graph_gather(self):\n \"\"\"Test invoking GraphGather.\"\"\"\n batch_size = 2\n n_features = 75\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n result = layers.GraphGather(batch_size)(args)\n assert result.shape == (batch_size, 2 * n_features)\n\n def test_lstm_step(self):\n \"\"\"Test invoking LSTMStep.\"\"\"\n max_depth = 5\n n_test = 5\n n_feat = 10\n y = np.random.rand(n_test, 2 * n_feat).astype(np.float32)\n state_zero = np.random.rand(n_test, n_feat).astype(np.float32)\n state_one = np.random.rand(n_test, n_feat).astype(np.float32)\n layer = layers.LSTMStep(n_feat, 2 * n_feat)\n result = layer([y, state_zero, state_one])\n h_out, h_copy_out, c_out = result[0], result[1][0], result[1][1]\n assert h_out.shape == (n_test, n_feat)\n assert h_copy_out.shape == (n_test, n_feat)\n assert c_out.shape == (n_test, n_feat)\n assert len(layer.trainable_variables) == 1\n\n def test_attn_lstm_embedding(self):\n \"\"\"Test invoking AttnLSTMEmbedding.\"\"\"\n max_depth = 5\n n_test = 5\n n_support = 11\n n_feat = 10\n test = np.random.rand(n_test, n_feat).astype(np.float32)\n support = np.random.rand(n_support, n_feat).astype(np.float32)\n layer = layers.AttnLSTMEmbedding(n_test, n_support, n_feat, max_depth)\n test_out, support_out = layer([test, support])\n assert test_out.shape == (n_test, n_feat)\n assert support_out.shape == (n_support, n_feat)\n assert len(layer.trainable_variables) == 4\n\n def test_iter_ref_lstm_embedding(self):\n \"\"\"Test invoking IterRefLSTMEmbedding.\"\"\"\n max_depth = 5\n n_test = 5\n n_support = 11\n n_feat = 10\n test = np.random.rand(n_test, n_feat).astype(np.float32)\n support = np.random.rand(n_support, n_feat).astype(np.float32)\n layer = layers.IterRefLSTMEmbedding(n_test, n_support, n_feat,\n max_depth)\n test_out, support_out = layer([test, support])\n assert test_out.shape == (n_test, n_feat)\n assert support_out.shape == (n_support, n_feat)\n assert len(layer.trainable_variables) == 8\n\n def test_vina_free_energy(self):\n \"\"\"Test invoking VinaFreeEnergy.\"\"\"\n n_atoms = 5\n m_nbrs = 1\n ndim = 3\n nbr_cutoff = 1\n start = 0\n stop = 4\n X = np.random.rand(n_atoms, ndim).astype(np.float32)\n Z = np.random.randint(0, 2, n_atoms).astype(np.float32)\n layer = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff,\n start, stop)\n result = layer([X, Z])\n assert len(layer.trainable_variables) == 6\n assert result.shape == tuple()\n layer2 = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff,\n start, stop)\n result2 = layer2([X, Z])\n assert not np.allclose(result, result2)\n result3 = layer([X, Z])\n assert np.allclose(result, result3)\n\n def test_weighted_linear_combo(self):\n \"\"\"Test invoking WeightedLinearCombo.\"\"\"\n input1 = np.random.rand(5, 10).astype(np.float32)\n input2 = np.random.rand(5, 10).astype(np.float32)\n layer = layers.WeightedLinearCombo()\n result = layer([input1, input2])\n assert len(layer.trainable_variables) == 2\n expected = input1 * layer.trainable_variables[0\n ] + input2 * layer.trainable_variables[1]\n assert np.allclose(result, 
expected)\n\n def test_neighbor_list(self):\n \"\"\"Test invoking NeighborList.\"\"\"\n N_atoms = 5\n start = 0\n stop = 12\n nbr_cutoff = 3\n ndim = 3\n M_nbrs = 2\n coords = start + np.random.rand(N_atoms, ndim) * (stop - start)\n coords = tf.cast(tf.stack(coords), tf.float32)\n layer = layers.NeighborList(N_atoms, M_nbrs, ndim, nbr_cutoff,\n start, stop)\n result = layer(coords)\n assert result.shape == (N_atoms, M_nbrs)\n <function token>\n\n def test_alpha_share_layer(self):\n \"\"\"Test invoking AlphaShareLayer.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.AlphaShareLayer()\n result = layer([input1, input2])\n assert input1.shape == result[0].shape\n assert input2.shape == result[1].shape\n layer2 = layers.AlphaShareLayer()\n result2 = layer2([input1, input2])\n assert not np.allclose(result[0], result2[0])\n assert not np.allclose(result[1], result2[1])\n result3 = layer([input1, input2])\n assert np.allclose(result[0], result3[0])\n assert np.allclose(result[1], result3[1])\n\n def test_sluice_loss(self):\n \"\"\"Test invoking SluiceLoss.\"\"\"\n input1 = np.ones((3, 4)).astype(np.float32)\n input2 = np.ones((2, 2)).astype(np.float32)\n result = layers.SluiceLoss()([input1, input2])\n assert np.allclose(result, 40.0)\n\n def test_beta_share(self):\n \"\"\"Test invoking BetaShare.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.BetaShare()\n result = layer([input1, input2])\n assert input1.shape == result.shape\n assert input2.shape == result.shape\n layer2 = layers.BetaShare()\n result2 = layer2([input1, input2])\n assert not np.allclose(result, result2)\n result3 = layer([input1, input2])\n assert np.allclose(result, result3)\n\n def test_ani_feat(self):\n \"\"\"Test invoking ANIFeat.\"\"\"\n batch_size = 10\n max_atoms = 5\n input = np.random.rand(batch_size, max_atoms, 4).astype(np.float32)\n layer = layers.ANIFeat(max_atoms=max_atoms)\n result = layer(input)\n <function token>\n\n def test_graph_cnn(self):\n \"\"\"Test invoking GraphCNN.\"\"\"\n V = np.random.uniform(size=(10, 100, 50)).astype(np.float32)\n adjs = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)\n layer = layers.GraphCNN(num_filters=6)\n result = layer([V, adjs])\n assert result.shape == (10, 100, 6)\n layer2 = layers.GraphCNN(num_filters=6)\n result2 = layer2([V, adjs])\n assert not np.allclose(result, result2)\n result3 = layer([V, adjs])\n assert np.allclose(result, result3)\n\n def test_DAG_layer(self):\n \"\"\"Test invoking DAGLayer.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 75\n max_atoms = 50\n layer_sizes = [100]\n atom_features = np.random.rand(batch_size, n_atom_feat)\n parents = np.random.randint(0, max_atoms, size=(batch_size,\n max_atoms, max_atoms))\n calculation_orders = np.random.randint(0, batch_size, size=(\n batch_size, max_atoms))\n calculation_masks = np.random.randint(0, 2, size=(batch_size,\n max_atoms))\n n_atoms = batch_size\n layer = layers.DAGLayer(n_graph_feat=n_graph_feat, n_atom_feat=\n n_atom_feat, max_atoms=max_atoms, layer_sizes=layer_sizes)\n outputs = layer([atom_features, parents, calculation_orders,\n calculation_masks, n_atoms])\n\n def test_DAG_gather(self):\n \"\"\"Test invoking DAGGather.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 30\n n_outputs = 75\n 
max_atoms = 50\n layer_sizes = [100]\n layer = layers.DAGGather(n_graph_feat=n_graph_feat, n_outputs=\n n_outputs, max_atoms=max_atoms, layer_sizes=layer_sizes)\n atom_features = np.random.rand(batch_size, n_atom_feat)\n membership = np.sort(np.random.randint(0, batch_size, size=batch_size))\n outputs = layer([atom_features, membership])\n",
"<import token>\n\n\nclass TestLayers(test_util.TensorFlowTestCase):\n <function token>\n\n def test_combine_mean_std(self):\n \"\"\"Test invoking CombineMeanStd.\"\"\"\n mean = np.random.rand(5, 3).astype(np.float32)\n std = np.random.rand(5, 3).astype(np.float32)\n layer = layers.CombineMeanStd(training_only=True, noise_epsilon=0.01)\n result1 = layer([mean, std], training=False)\n assert np.array_equal(result1, mean)\n result2 = layer([mean, std], training=True)\n assert not np.array_equal(result2, mean)\n assert np.allclose(result2, mean, atol=0.1)\n\n def test_stack(self):\n \"\"\"Test invoking Stack.\"\"\"\n input1 = np.random.rand(5, 4).astype(np.float32)\n input2 = np.random.rand(5, 4).astype(np.float32)\n result = layers.Stack()([input1, input2])\n assert result.shape == (5, 2, 4)\n assert np.array_equal(input1, result[:, 0, :])\n assert np.array_equal(input2, result[:, 1, :])\n\n def test_variable(self):\n \"\"\"Test invoking Variable.\"\"\"\n value = np.random.rand(5, 4).astype(np.float32)\n layer = layers.Variable(value)\n layer.build([])\n result = layer.call([]).numpy()\n assert np.allclose(result, value)\n assert len(layer.trainable_variables) == 1\n\n def test_interatomic_l2_distances(self):\n \"\"\"Test invoking InteratomicL2Distances.\"\"\"\n atoms = 5\n neighbors = 2\n coords = np.random.rand(atoms, 3)\n neighbor_list = np.random.randint(0, atoms, size=(atoms, neighbors))\n layer = layers.InteratomicL2Distances(atoms, neighbors, 3)\n result = layer([coords, neighbor_list])\n assert result.shape == (atoms, neighbors)\n for atom in range(atoms):\n for neighbor in range(neighbors):\n delta = coords[atom] - coords[neighbor_list[atom, neighbor]]\n dist2 = np.dot(delta, delta)\n assert np.allclose(dist2, result[atom, neighbor])\n\n def test_weave_layer(self):\n \"\"\"Test invoking WeaveLayer.\"\"\"\n out_channels = 2\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.WeaveFeaturizer()\n mols = featurizer.featurize(mols)\n weave = layers.WeaveLayer()\n atom_feat = []\n pair_feat = []\n atom_to_pair = []\n pair_split = []\n start = 0\n n_pair_feat = 14\n for im, mol in enumerate(mols):\n n_atoms = mol.get_num_atoms()\n C0, C1 = np.meshgrid(np.arange(n_atoms), np.arange(n_atoms))\n atom_to_pair.append(np.transpose(np.array([C1.flatten() + start,\n C0.flatten() + start])))\n pair_split.extend(C1.flatten() + start)\n start = start + n_atoms\n atom_feat.append(mol.get_atom_features())\n pair_feat.append(np.reshape(mol.get_pair_features(), (n_atoms *\n n_atoms, n_pair_feat)))\n inputs = [np.array(np.concatenate(atom_feat, axis=0), dtype=np.\n float32), np.concatenate(pair_feat, axis=0), np.array(\n pair_split), np.concatenate(atom_to_pair, axis=0)]\n outputs = weave(inputs)\n assert len(outputs) == 2\n <function token>\n\n def test_graph_pool(self):\n \"\"\"Test invoking GraphPool.\"\"\"\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n result = layers.GraphPool()(args)\n assert result.shape[0] == n_atoms\n\n def 
test_graph_gather(self):\n \"\"\"Test invoking GraphGather.\"\"\"\n batch_size = 2\n n_features = 75\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n result = layers.GraphGather(batch_size)(args)\n assert result.shape == (batch_size, 2 * n_features)\n <function token>\n\n def test_attn_lstm_embedding(self):\n \"\"\"Test invoking AttnLSTMEmbedding.\"\"\"\n max_depth = 5\n n_test = 5\n n_support = 11\n n_feat = 10\n test = np.random.rand(n_test, n_feat).astype(np.float32)\n support = np.random.rand(n_support, n_feat).astype(np.float32)\n layer = layers.AttnLSTMEmbedding(n_test, n_support, n_feat, max_depth)\n test_out, support_out = layer([test, support])\n assert test_out.shape == (n_test, n_feat)\n assert support_out.shape == (n_support, n_feat)\n assert len(layer.trainable_variables) == 4\n\n def test_iter_ref_lstm_embedding(self):\n \"\"\"Test invoking IterRefLSTMEmbedding.\"\"\"\n max_depth = 5\n n_test = 5\n n_support = 11\n n_feat = 10\n test = np.random.rand(n_test, n_feat).astype(np.float32)\n support = np.random.rand(n_support, n_feat).astype(np.float32)\n layer = layers.IterRefLSTMEmbedding(n_test, n_support, n_feat,\n max_depth)\n test_out, support_out = layer([test, support])\n assert test_out.shape == (n_test, n_feat)\n assert support_out.shape == (n_support, n_feat)\n assert len(layer.trainable_variables) == 8\n\n def test_vina_free_energy(self):\n \"\"\"Test invoking VinaFreeEnergy.\"\"\"\n n_atoms = 5\n m_nbrs = 1\n ndim = 3\n nbr_cutoff = 1\n start = 0\n stop = 4\n X = np.random.rand(n_atoms, ndim).astype(np.float32)\n Z = np.random.randint(0, 2, n_atoms).astype(np.float32)\n layer = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff,\n start, stop)\n result = layer([X, Z])\n assert len(layer.trainable_variables) == 6\n assert result.shape == tuple()\n layer2 = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff,\n start, stop)\n result2 = layer2([X, Z])\n assert not np.allclose(result, result2)\n result3 = layer([X, Z])\n assert np.allclose(result, result3)\n\n def test_weighted_linear_combo(self):\n \"\"\"Test invoking WeightedLinearCombo.\"\"\"\n input1 = np.random.rand(5, 10).astype(np.float32)\n input2 = np.random.rand(5, 10).astype(np.float32)\n layer = layers.WeightedLinearCombo()\n result = layer([input1, input2])\n assert len(layer.trainable_variables) == 2\n expected = input1 * layer.trainable_variables[0\n ] + input2 * layer.trainable_variables[1]\n assert np.allclose(result, expected)\n\n def test_neighbor_list(self):\n \"\"\"Test invoking NeighborList.\"\"\"\n N_atoms = 5\n start = 0\n stop = 12\n nbr_cutoff = 3\n ndim = 3\n M_nbrs = 2\n coords = start + np.random.rand(N_atoms, ndim) * (stop - start)\n coords = tf.cast(tf.stack(coords), tf.float32)\n layer = layers.NeighborList(N_atoms, M_nbrs, ndim, nbr_cutoff,\n start, stop)\n result = layer(coords)\n assert result.shape == (N_atoms, M_nbrs)\n <function token>\n\n def test_alpha_share_layer(self):\n \"\"\"Test invoking AlphaShareLayer.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, 
length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.AlphaShareLayer()\n result = layer([input1, input2])\n assert input1.shape == result[0].shape\n assert input2.shape == result[1].shape\n layer2 = layers.AlphaShareLayer()\n result2 = layer2([input1, input2])\n assert not np.allclose(result[0], result2[0])\n assert not np.allclose(result[1], result2[1])\n result3 = layer([input1, input2])\n assert np.allclose(result[0], result3[0])\n assert np.allclose(result[1], result3[1])\n\n def test_sluice_loss(self):\n \"\"\"Test invoking SluiceLoss.\"\"\"\n input1 = np.ones((3, 4)).astype(np.float32)\n input2 = np.ones((2, 2)).astype(np.float32)\n result = layers.SluiceLoss()([input1, input2])\n assert np.allclose(result, 40.0)\n\n def test_beta_share(self):\n \"\"\"Test invoking BetaShare.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.BetaShare()\n result = layer([input1, input2])\n assert input1.shape == result.shape\n assert input2.shape == result.shape\n layer2 = layers.BetaShare()\n result2 = layer2([input1, input2])\n assert not np.allclose(result, result2)\n result3 = layer([input1, input2])\n assert np.allclose(result, result3)\n\n def test_ani_feat(self):\n \"\"\"Test invoking ANIFeat.\"\"\"\n batch_size = 10\n max_atoms = 5\n input = np.random.rand(batch_size, max_atoms, 4).astype(np.float32)\n layer = layers.ANIFeat(max_atoms=max_atoms)\n result = layer(input)\n <function token>\n\n def test_graph_cnn(self):\n \"\"\"Test invoking GraphCNN.\"\"\"\n V = np.random.uniform(size=(10, 100, 50)).astype(np.float32)\n adjs = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)\n layer = layers.GraphCNN(num_filters=6)\n result = layer([V, adjs])\n assert result.shape == (10, 100, 6)\n layer2 = layers.GraphCNN(num_filters=6)\n result2 = layer2([V, adjs])\n assert not np.allclose(result, result2)\n result3 = layer([V, adjs])\n assert np.allclose(result, result3)\n\n def test_DAG_layer(self):\n \"\"\"Test invoking DAGLayer.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 75\n max_atoms = 50\n layer_sizes = [100]\n atom_features = np.random.rand(batch_size, n_atom_feat)\n parents = np.random.randint(0, max_atoms, size=(batch_size,\n max_atoms, max_atoms))\n calculation_orders = np.random.randint(0, batch_size, size=(\n batch_size, max_atoms))\n calculation_masks = np.random.randint(0, 2, size=(batch_size,\n max_atoms))\n n_atoms = batch_size\n layer = layers.DAGLayer(n_graph_feat=n_graph_feat, n_atom_feat=\n n_atom_feat, max_atoms=max_atoms, layer_sizes=layer_sizes)\n outputs = layer([atom_features, parents, calculation_orders,\n calculation_masks, n_atoms])\n\n def test_DAG_gather(self):\n \"\"\"Test invoking DAGGather.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 30\n n_outputs = 75\n max_atoms = 50\n layer_sizes = [100]\n layer = layers.DAGGather(n_graph_feat=n_graph_feat, n_outputs=\n n_outputs, max_atoms=max_atoms, layer_sizes=layer_sizes)\n atom_features = np.random.rand(batch_size, n_atom_feat)\n membership = np.sort(np.random.randint(0, batch_size, size=batch_size))\n outputs = layer([atom_features, membership])\n",
"<import token>\n\n\nclass TestLayers(test_util.TensorFlowTestCase):\n <function token>\n\n def test_combine_mean_std(self):\n \"\"\"Test invoking CombineMeanStd.\"\"\"\n mean = np.random.rand(5, 3).astype(np.float32)\n std = np.random.rand(5, 3).astype(np.float32)\n layer = layers.CombineMeanStd(training_only=True, noise_epsilon=0.01)\n result1 = layer([mean, std], training=False)\n assert np.array_equal(result1, mean)\n result2 = layer([mean, std], training=True)\n assert not np.array_equal(result2, mean)\n assert np.allclose(result2, mean, atol=0.1)\n\n def test_stack(self):\n \"\"\"Test invoking Stack.\"\"\"\n input1 = np.random.rand(5, 4).astype(np.float32)\n input2 = np.random.rand(5, 4).astype(np.float32)\n result = layers.Stack()([input1, input2])\n assert result.shape == (5, 2, 4)\n assert np.array_equal(input1, result[:, 0, :])\n assert np.array_equal(input2, result[:, 1, :])\n\n def test_variable(self):\n \"\"\"Test invoking Variable.\"\"\"\n value = np.random.rand(5, 4).astype(np.float32)\n layer = layers.Variable(value)\n layer.build([])\n result = layer.call([]).numpy()\n assert np.allclose(result, value)\n assert len(layer.trainable_variables) == 1\n\n def test_interatomic_l2_distances(self):\n \"\"\"Test invoking InteratomicL2Distances.\"\"\"\n atoms = 5\n neighbors = 2\n coords = np.random.rand(atoms, 3)\n neighbor_list = np.random.randint(0, atoms, size=(atoms, neighbors))\n layer = layers.InteratomicL2Distances(atoms, neighbors, 3)\n result = layer([coords, neighbor_list])\n assert result.shape == (atoms, neighbors)\n for atom in range(atoms):\n for neighbor in range(neighbors):\n delta = coords[atom] - coords[neighbor_list[atom, neighbor]]\n dist2 = np.dot(delta, delta)\n assert np.allclose(dist2, result[atom, neighbor])\n\n def test_weave_layer(self):\n \"\"\"Test invoking WeaveLayer.\"\"\"\n out_channels = 2\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.WeaveFeaturizer()\n mols = featurizer.featurize(mols)\n weave = layers.WeaveLayer()\n atom_feat = []\n pair_feat = []\n atom_to_pair = []\n pair_split = []\n start = 0\n n_pair_feat = 14\n for im, mol in enumerate(mols):\n n_atoms = mol.get_num_atoms()\n C0, C1 = np.meshgrid(np.arange(n_atoms), np.arange(n_atoms))\n atom_to_pair.append(np.transpose(np.array([C1.flatten() + start,\n C0.flatten() + start])))\n pair_split.extend(C1.flatten() + start)\n start = start + n_atoms\n atom_feat.append(mol.get_atom_features())\n pair_feat.append(np.reshape(mol.get_pair_features(), (n_atoms *\n n_atoms, n_pair_feat)))\n inputs = [np.array(np.concatenate(atom_feat, axis=0), dtype=np.\n float32), np.concatenate(pair_feat, axis=0), np.array(\n pair_split), np.concatenate(atom_to_pair, axis=0)]\n outputs = weave(inputs)\n assert len(outputs) == 2\n <function token>\n\n def test_graph_pool(self):\n \"\"\"Test invoking GraphPool.\"\"\"\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n result = layers.GraphPool()(args)\n assert result.shape[0] == n_atoms\n\n def 
test_graph_gather(self):\n \"\"\"Test invoking GraphGather.\"\"\"\n batch_size = 2\n n_features = 75\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n result = layers.GraphGather(batch_size)(args)\n assert result.shape == (batch_size, 2 * n_features)\n <function token>\n\n def test_attn_lstm_embedding(self):\n \"\"\"Test invoking AttnLSTMEmbedding.\"\"\"\n max_depth = 5\n n_test = 5\n n_support = 11\n n_feat = 10\n test = np.random.rand(n_test, n_feat).astype(np.float32)\n support = np.random.rand(n_support, n_feat).astype(np.float32)\n layer = layers.AttnLSTMEmbedding(n_test, n_support, n_feat, max_depth)\n test_out, support_out = layer([test, support])\n assert test_out.shape == (n_test, n_feat)\n assert support_out.shape == (n_support, n_feat)\n assert len(layer.trainable_variables) == 4\n\n def test_iter_ref_lstm_embedding(self):\n \"\"\"Test invoking IterRefLSTMEmbedding.\"\"\"\n max_depth = 5\n n_test = 5\n n_support = 11\n n_feat = 10\n test = np.random.rand(n_test, n_feat).astype(np.float32)\n support = np.random.rand(n_support, n_feat).astype(np.float32)\n layer = layers.IterRefLSTMEmbedding(n_test, n_support, n_feat,\n max_depth)\n test_out, support_out = layer([test, support])\n assert test_out.shape == (n_test, n_feat)\n assert support_out.shape == (n_support, n_feat)\n assert len(layer.trainable_variables) == 8\n\n def test_vina_free_energy(self):\n \"\"\"Test invoking VinaFreeEnergy.\"\"\"\n n_atoms = 5\n m_nbrs = 1\n ndim = 3\n nbr_cutoff = 1\n start = 0\n stop = 4\n X = np.random.rand(n_atoms, ndim).astype(np.float32)\n Z = np.random.randint(0, 2, n_atoms).astype(np.float32)\n layer = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff,\n start, stop)\n result = layer([X, Z])\n assert len(layer.trainable_variables) == 6\n assert result.shape == tuple()\n layer2 = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff,\n start, stop)\n result2 = layer2([X, Z])\n assert not np.allclose(result, result2)\n result3 = layer([X, Z])\n assert np.allclose(result, result3)\n\n def test_weighted_linear_combo(self):\n \"\"\"Test invoking WeightedLinearCombo.\"\"\"\n input1 = np.random.rand(5, 10).astype(np.float32)\n input2 = np.random.rand(5, 10).astype(np.float32)\n layer = layers.WeightedLinearCombo()\n result = layer([input1, input2])\n assert len(layer.trainable_variables) == 2\n expected = input1 * layer.trainable_variables[0\n ] + input2 * layer.trainable_variables[1]\n assert np.allclose(result, expected)\n <function token>\n <function token>\n\n def test_alpha_share_layer(self):\n \"\"\"Test invoking AlphaShareLayer.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.AlphaShareLayer()\n result = layer([input1, input2])\n assert input1.shape == result[0].shape\n assert input2.shape == result[1].shape\n layer2 = layers.AlphaShareLayer()\n result2 = layer2([input1, input2])\n assert not np.allclose(result[0], result2[0])\n assert not np.allclose(result[1], result2[1])\n 
result3 = layer([input1, input2])\n assert np.allclose(result[0], result3[0])\n assert np.allclose(result[1], result3[1])\n\n def test_sluice_loss(self):\n \"\"\"Test invoking SluiceLoss.\"\"\"\n input1 = np.ones((3, 4)).astype(np.float32)\n input2 = np.ones((2, 2)).astype(np.float32)\n result = layers.SluiceLoss()([input1, input2])\n assert np.allclose(result, 40.0)\n\n def test_beta_share(self):\n \"\"\"Test invoking BetaShare.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.BetaShare()\n result = layer([input1, input2])\n assert input1.shape == result.shape\n assert input2.shape == result.shape\n layer2 = layers.BetaShare()\n result2 = layer2([input1, input2])\n assert not np.allclose(result, result2)\n result3 = layer([input1, input2])\n assert np.allclose(result, result3)\n\n def test_ani_feat(self):\n \"\"\"Test invoking ANIFeat.\"\"\"\n batch_size = 10\n max_atoms = 5\n input = np.random.rand(batch_size, max_atoms, 4).astype(np.float32)\n layer = layers.ANIFeat(max_atoms=max_atoms)\n result = layer(input)\n <function token>\n\n def test_graph_cnn(self):\n \"\"\"Test invoking GraphCNN.\"\"\"\n V = np.random.uniform(size=(10, 100, 50)).astype(np.float32)\n adjs = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)\n layer = layers.GraphCNN(num_filters=6)\n result = layer([V, adjs])\n assert result.shape == (10, 100, 6)\n layer2 = layers.GraphCNN(num_filters=6)\n result2 = layer2([V, adjs])\n assert not np.allclose(result, result2)\n result3 = layer([V, adjs])\n assert np.allclose(result, result3)\n\n def test_DAG_layer(self):\n \"\"\"Test invoking DAGLayer.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 75\n max_atoms = 50\n layer_sizes = [100]\n atom_features = np.random.rand(batch_size, n_atom_feat)\n parents = np.random.randint(0, max_atoms, size=(batch_size,\n max_atoms, max_atoms))\n calculation_orders = np.random.randint(0, batch_size, size=(\n batch_size, max_atoms))\n calculation_masks = np.random.randint(0, 2, size=(batch_size,\n max_atoms))\n n_atoms = batch_size\n layer = layers.DAGLayer(n_graph_feat=n_graph_feat, n_atom_feat=\n n_atom_feat, max_atoms=max_atoms, layer_sizes=layer_sizes)\n outputs = layer([atom_features, parents, calculation_orders,\n calculation_masks, n_atoms])\n\n def test_DAG_gather(self):\n \"\"\"Test invoking DAGGather.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 30\n n_outputs = 75\n max_atoms = 50\n layer_sizes = [100]\n layer = layers.DAGGather(n_graph_feat=n_graph_feat, n_outputs=\n n_outputs, max_atoms=max_atoms, layer_sizes=layer_sizes)\n atom_features = np.random.rand(batch_size, n_atom_feat)\n membership = np.sort(np.random.randint(0, batch_size, size=batch_size))\n outputs = layer([atom_features, membership])\n",
"<import token>\n\n\nclass TestLayers(test_util.TensorFlowTestCase):\n <function token>\n\n def test_combine_mean_std(self):\n \"\"\"Test invoking CombineMeanStd.\"\"\"\n mean = np.random.rand(5, 3).astype(np.float32)\n std = np.random.rand(5, 3).astype(np.float32)\n layer = layers.CombineMeanStd(training_only=True, noise_epsilon=0.01)\n result1 = layer([mean, std], training=False)\n assert np.array_equal(result1, mean)\n result2 = layer([mean, std], training=True)\n assert not np.array_equal(result2, mean)\n assert np.allclose(result2, mean, atol=0.1)\n\n def test_stack(self):\n \"\"\"Test invoking Stack.\"\"\"\n input1 = np.random.rand(5, 4).astype(np.float32)\n input2 = np.random.rand(5, 4).astype(np.float32)\n result = layers.Stack()([input1, input2])\n assert result.shape == (5, 2, 4)\n assert np.array_equal(input1, result[:, 0, :])\n assert np.array_equal(input2, result[:, 1, :])\n\n def test_variable(self):\n \"\"\"Test invoking Variable.\"\"\"\n value = np.random.rand(5, 4).astype(np.float32)\n layer = layers.Variable(value)\n layer.build([])\n result = layer.call([]).numpy()\n assert np.allclose(result, value)\n assert len(layer.trainable_variables) == 1\n\n def test_interatomic_l2_distances(self):\n \"\"\"Test invoking InteratomicL2Distances.\"\"\"\n atoms = 5\n neighbors = 2\n coords = np.random.rand(atoms, 3)\n neighbor_list = np.random.randint(0, atoms, size=(atoms, neighbors))\n layer = layers.InteratomicL2Distances(atoms, neighbors, 3)\n result = layer([coords, neighbor_list])\n assert result.shape == (atoms, neighbors)\n for atom in range(atoms):\n for neighbor in range(neighbors):\n delta = coords[atom] - coords[neighbor_list[atom, neighbor]]\n dist2 = np.dot(delta, delta)\n assert np.allclose(dist2, result[atom, neighbor])\n\n def test_weave_layer(self):\n \"\"\"Test invoking WeaveLayer.\"\"\"\n out_channels = 2\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.WeaveFeaturizer()\n mols = featurizer.featurize(mols)\n weave = layers.WeaveLayer()\n atom_feat = []\n pair_feat = []\n atom_to_pair = []\n pair_split = []\n start = 0\n n_pair_feat = 14\n for im, mol in enumerate(mols):\n n_atoms = mol.get_num_atoms()\n C0, C1 = np.meshgrid(np.arange(n_atoms), np.arange(n_atoms))\n atom_to_pair.append(np.transpose(np.array([C1.flatten() + start,\n C0.flatten() + start])))\n pair_split.extend(C1.flatten() + start)\n start = start + n_atoms\n atom_feat.append(mol.get_atom_features())\n pair_feat.append(np.reshape(mol.get_pair_features(), (n_atoms *\n n_atoms, n_pair_feat)))\n inputs = [np.array(np.concatenate(atom_feat, axis=0), dtype=np.\n float32), np.concatenate(pair_feat, axis=0), np.array(\n pair_split), np.concatenate(atom_to_pair, axis=0)]\n outputs = weave(inputs)\n assert len(outputs) == 2\n <function token>\n\n def test_graph_pool(self):\n \"\"\"Test invoking GraphPool.\"\"\"\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n result = layers.GraphPool()(args)\n assert result.shape[0] == n_atoms\n\n def 
test_graph_gather(self):\n \"\"\"Test invoking GraphGather.\"\"\"\n batch_size = 2\n n_features = 75\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n result = layers.GraphGather(batch_size)(args)\n assert result.shape == (batch_size, 2 * n_features)\n <function token>\n\n def test_attn_lstm_embedding(self):\n \"\"\"Test invoking AttnLSTMEmbedding.\"\"\"\n max_depth = 5\n n_test = 5\n n_support = 11\n n_feat = 10\n test = np.random.rand(n_test, n_feat).astype(np.float32)\n support = np.random.rand(n_support, n_feat).astype(np.float32)\n layer = layers.AttnLSTMEmbedding(n_test, n_support, n_feat, max_depth)\n test_out, support_out = layer([test, support])\n assert test_out.shape == (n_test, n_feat)\n assert support_out.shape == (n_support, n_feat)\n assert len(layer.trainable_variables) == 4\n\n def test_iter_ref_lstm_embedding(self):\n \"\"\"Test invoking IterRefLSTMEmbedding.\"\"\"\n max_depth = 5\n n_test = 5\n n_support = 11\n n_feat = 10\n test = np.random.rand(n_test, n_feat).astype(np.float32)\n support = np.random.rand(n_support, n_feat).astype(np.float32)\n layer = layers.IterRefLSTMEmbedding(n_test, n_support, n_feat,\n max_depth)\n test_out, support_out = layer([test, support])\n assert test_out.shape == (n_test, n_feat)\n assert support_out.shape == (n_support, n_feat)\n assert len(layer.trainable_variables) == 8\n\n def test_vina_free_energy(self):\n \"\"\"Test invoking VinaFreeEnergy.\"\"\"\n n_atoms = 5\n m_nbrs = 1\n ndim = 3\n nbr_cutoff = 1\n start = 0\n stop = 4\n X = np.random.rand(n_atoms, ndim).astype(np.float32)\n Z = np.random.randint(0, 2, n_atoms).astype(np.float32)\n layer = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff,\n start, stop)\n result = layer([X, Z])\n assert len(layer.trainable_variables) == 6\n assert result.shape == tuple()\n layer2 = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff,\n start, stop)\n result2 = layer2([X, Z])\n assert not np.allclose(result, result2)\n result3 = layer([X, Z])\n assert np.allclose(result, result3)\n\n def test_weighted_linear_combo(self):\n \"\"\"Test invoking WeightedLinearCombo.\"\"\"\n input1 = np.random.rand(5, 10).astype(np.float32)\n input2 = np.random.rand(5, 10).astype(np.float32)\n layer = layers.WeightedLinearCombo()\n result = layer([input1, input2])\n assert len(layer.trainable_variables) == 2\n expected = input1 * layer.trainable_variables[0\n ] + input2 * layer.trainable_variables[1]\n assert np.allclose(result, expected)\n <function token>\n <function token>\n\n def test_alpha_share_layer(self):\n \"\"\"Test invoking AlphaShareLayer.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.AlphaShareLayer()\n result = layer([input1, input2])\n assert input1.shape == result[0].shape\n assert input2.shape == result[1].shape\n layer2 = layers.AlphaShareLayer()\n result2 = layer2([input1, input2])\n assert not np.allclose(result[0], result2[0])\n assert not np.allclose(result[1], result2[1])\n 
result3 = layer([input1, input2])\n assert np.allclose(result[0], result3[0])\n assert np.allclose(result[1], result3[1])\n\n def test_sluice_loss(self):\n \"\"\"Test invoking SluiceLoss.\"\"\"\n input1 = np.ones((3, 4)).astype(np.float32)\n input2 = np.ones((2, 2)).astype(np.float32)\n result = layers.SluiceLoss()([input1, input2])\n assert np.allclose(result, 40.0)\n\n def test_beta_share(self):\n \"\"\"Test invoking BetaShare.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.BetaShare()\n result = layer([input1, input2])\n assert input1.shape == result.shape\n assert input2.shape == result.shape\n layer2 = layers.BetaShare()\n result2 = layer2([input1, input2])\n assert not np.allclose(result, result2)\n result3 = layer([input1, input2])\n assert np.allclose(result, result3)\n <function token>\n <function token>\n\n def test_graph_cnn(self):\n \"\"\"Test invoking GraphCNN.\"\"\"\n V = np.random.uniform(size=(10, 100, 50)).astype(np.float32)\n adjs = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)\n layer = layers.GraphCNN(num_filters=6)\n result = layer([V, adjs])\n assert result.shape == (10, 100, 6)\n layer2 = layers.GraphCNN(num_filters=6)\n result2 = layer2([V, adjs])\n assert not np.allclose(result, result2)\n result3 = layer([V, adjs])\n assert np.allclose(result, result3)\n\n def test_DAG_layer(self):\n \"\"\"Test invoking DAGLayer.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 75\n max_atoms = 50\n layer_sizes = [100]\n atom_features = np.random.rand(batch_size, n_atom_feat)\n parents = np.random.randint(0, max_atoms, size=(batch_size,\n max_atoms, max_atoms))\n calculation_orders = np.random.randint(0, batch_size, size=(\n batch_size, max_atoms))\n calculation_masks = np.random.randint(0, 2, size=(batch_size,\n max_atoms))\n n_atoms = batch_size\n layer = layers.DAGLayer(n_graph_feat=n_graph_feat, n_atom_feat=\n n_atom_feat, max_atoms=max_atoms, layer_sizes=layer_sizes)\n outputs = layer([atom_features, parents, calculation_orders,\n calculation_masks, n_atoms])\n\n def test_DAG_gather(self):\n \"\"\"Test invoking DAGGather.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 30\n n_outputs = 75\n max_atoms = 50\n layer_sizes = [100]\n layer = layers.DAGGather(n_graph_feat=n_graph_feat, n_outputs=\n n_outputs, max_atoms=max_atoms, layer_sizes=layer_sizes)\n atom_features = np.random.rand(batch_size, n_atom_feat)\n membership = np.sort(np.random.randint(0, batch_size, size=batch_size))\n outputs = layer([atom_features, membership])\n",
"<import token>\n\n\nclass TestLayers(test_util.TensorFlowTestCase):\n <function token>\n\n def test_combine_mean_std(self):\n \"\"\"Test invoking CombineMeanStd.\"\"\"\n mean = np.random.rand(5, 3).astype(np.float32)\n std = np.random.rand(5, 3).astype(np.float32)\n layer = layers.CombineMeanStd(training_only=True, noise_epsilon=0.01)\n result1 = layer([mean, std], training=False)\n assert np.array_equal(result1, mean)\n result2 = layer([mean, std], training=True)\n assert not np.array_equal(result2, mean)\n assert np.allclose(result2, mean, atol=0.1)\n\n def test_stack(self):\n \"\"\"Test invoking Stack.\"\"\"\n input1 = np.random.rand(5, 4).astype(np.float32)\n input2 = np.random.rand(5, 4).astype(np.float32)\n result = layers.Stack()([input1, input2])\n assert result.shape == (5, 2, 4)\n assert np.array_equal(input1, result[:, 0, :])\n assert np.array_equal(input2, result[:, 1, :])\n\n def test_variable(self):\n \"\"\"Test invoking Variable.\"\"\"\n value = np.random.rand(5, 4).astype(np.float32)\n layer = layers.Variable(value)\n layer.build([])\n result = layer.call([]).numpy()\n assert np.allclose(result, value)\n assert len(layer.trainable_variables) == 1\n\n def test_interatomic_l2_distances(self):\n \"\"\"Test invoking InteratomicL2Distances.\"\"\"\n atoms = 5\n neighbors = 2\n coords = np.random.rand(atoms, 3)\n neighbor_list = np.random.randint(0, atoms, size=(atoms, neighbors))\n layer = layers.InteratomicL2Distances(atoms, neighbors, 3)\n result = layer([coords, neighbor_list])\n assert result.shape == (atoms, neighbors)\n for atom in range(atoms):\n for neighbor in range(neighbors):\n delta = coords[atom] - coords[neighbor_list[atom, neighbor]]\n dist2 = np.dot(delta, delta)\n assert np.allclose(dist2, result[atom, neighbor])\n <function token>\n <function token>\n\n def test_graph_pool(self):\n \"\"\"Test invoking GraphPool.\"\"\"\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n result = layers.GraphPool()(args)\n assert result.shape[0] == n_atoms\n\n def test_graph_gather(self):\n \"\"\"Test invoking GraphGather.\"\"\"\n batch_size = 2\n n_features = 75\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n result = layers.GraphGather(batch_size)(args)\n assert result.shape == (batch_size, 2 * n_features)\n <function token>\n\n def test_attn_lstm_embedding(self):\n \"\"\"Test invoking AttnLSTMEmbedding.\"\"\"\n max_depth = 5\n n_test = 5\n n_support = 11\n n_feat = 10\n test = np.random.rand(n_test, n_feat).astype(np.float32)\n support = np.random.rand(n_support, n_feat).astype(np.float32)\n layer = layers.AttnLSTMEmbedding(n_test, 
n_support, n_feat, max_depth)\n test_out, support_out = layer([test, support])\n assert test_out.shape == (n_test, n_feat)\n assert support_out.shape == (n_support, n_feat)\n assert len(layer.trainable_variables) == 4\n\n def test_iter_ref_lstm_embedding(self):\n \"\"\"Test invoking IterRefLSTMEmbedding.\"\"\"\n max_depth = 5\n n_test = 5\n n_support = 11\n n_feat = 10\n test = np.random.rand(n_test, n_feat).astype(np.float32)\n support = np.random.rand(n_support, n_feat).astype(np.float32)\n layer = layers.IterRefLSTMEmbedding(n_test, n_support, n_feat,\n max_depth)\n test_out, support_out = layer([test, support])\n assert test_out.shape == (n_test, n_feat)\n assert support_out.shape == (n_support, n_feat)\n assert len(layer.trainable_variables) == 8\n\n def test_vina_free_energy(self):\n \"\"\"Test invoking VinaFreeEnergy.\"\"\"\n n_atoms = 5\n m_nbrs = 1\n ndim = 3\n nbr_cutoff = 1\n start = 0\n stop = 4\n X = np.random.rand(n_atoms, ndim).astype(np.float32)\n Z = np.random.randint(0, 2, n_atoms).astype(np.float32)\n layer = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff,\n start, stop)\n result = layer([X, Z])\n assert len(layer.trainable_variables) == 6\n assert result.shape == tuple()\n layer2 = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff,\n start, stop)\n result2 = layer2([X, Z])\n assert not np.allclose(result, result2)\n result3 = layer([X, Z])\n assert np.allclose(result, result3)\n\n def test_weighted_linear_combo(self):\n \"\"\"Test invoking WeightedLinearCombo.\"\"\"\n input1 = np.random.rand(5, 10).astype(np.float32)\n input2 = np.random.rand(5, 10).astype(np.float32)\n layer = layers.WeightedLinearCombo()\n result = layer([input1, input2])\n assert len(layer.trainable_variables) == 2\n expected = input1 * layer.trainable_variables[0\n ] + input2 * layer.trainable_variables[1]\n assert np.allclose(result, expected)\n <function token>\n <function token>\n\n def test_alpha_share_layer(self):\n \"\"\"Test invoking AlphaShareLayer.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.AlphaShareLayer()\n result = layer([input1, input2])\n assert input1.shape == result[0].shape\n assert input2.shape == result[1].shape\n layer2 = layers.AlphaShareLayer()\n result2 = layer2([input1, input2])\n assert not np.allclose(result[0], result2[0])\n assert not np.allclose(result[1], result2[1])\n result3 = layer([input1, input2])\n assert np.allclose(result[0], result3[0])\n assert np.allclose(result[1], result3[1])\n\n def test_sluice_loss(self):\n \"\"\"Test invoking SluiceLoss.\"\"\"\n input1 = np.ones((3, 4)).astype(np.float32)\n input2 = np.ones((2, 2)).astype(np.float32)\n result = layers.SluiceLoss()([input1, input2])\n assert np.allclose(result, 40.0)\n\n def test_beta_share(self):\n \"\"\"Test invoking BetaShare.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.BetaShare()\n result = layer([input1, input2])\n assert input1.shape == result.shape\n assert input2.shape == result.shape\n layer2 = layers.BetaShare()\n result2 = layer2([input1, input2])\n assert not np.allclose(result, result2)\n result3 = layer([input1, input2])\n assert np.allclose(result, result3)\n <function token>\n <function token>\n\n def test_graph_cnn(self):\n \"\"\"Test invoking GraphCNN.\"\"\"\n V = np.random.uniform(size=(10, 
100, 50)).astype(np.float32)\n adjs = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)\n layer = layers.GraphCNN(num_filters=6)\n result = layer([V, adjs])\n assert result.shape == (10, 100, 6)\n layer2 = layers.GraphCNN(num_filters=6)\n result2 = layer2([V, adjs])\n assert not np.allclose(result, result2)\n result3 = layer([V, adjs])\n assert np.allclose(result, result3)\n\n def test_DAG_layer(self):\n \"\"\"Test invoking DAGLayer.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 75\n max_atoms = 50\n layer_sizes = [100]\n atom_features = np.random.rand(batch_size, n_atom_feat)\n parents = np.random.randint(0, max_atoms, size=(batch_size,\n max_atoms, max_atoms))\n calculation_orders = np.random.randint(0, batch_size, size=(\n batch_size, max_atoms))\n calculation_masks = np.random.randint(0, 2, size=(batch_size,\n max_atoms))\n n_atoms = batch_size\n layer = layers.DAGLayer(n_graph_feat=n_graph_feat, n_atom_feat=\n n_atom_feat, max_atoms=max_atoms, layer_sizes=layer_sizes)\n outputs = layer([atom_features, parents, calculation_orders,\n calculation_masks, n_atoms])\n\n def test_DAG_gather(self):\n \"\"\"Test invoking DAGGather.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 30\n n_outputs = 75\n max_atoms = 50\n layer_sizes = [100]\n layer = layers.DAGGather(n_graph_feat=n_graph_feat, n_outputs=\n n_outputs, max_atoms=max_atoms, layer_sizes=layer_sizes)\n atom_features = np.random.rand(batch_size, n_atom_feat)\n membership = np.sort(np.random.randint(0, batch_size, size=batch_size))\n outputs = layer([atom_features, membership])\n",
"<import token>\n\n\nclass TestLayers(test_util.TensorFlowTestCase):\n <function token>\n\n def test_combine_mean_std(self):\n \"\"\"Test invoking CombineMeanStd.\"\"\"\n mean = np.random.rand(5, 3).astype(np.float32)\n std = np.random.rand(5, 3).astype(np.float32)\n layer = layers.CombineMeanStd(training_only=True, noise_epsilon=0.01)\n result1 = layer([mean, std], training=False)\n assert np.array_equal(result1, mean)\n result2 = layer([mean, std], training=True)\n assert not np.array_equal(result2, mean)\n assert np.allclose(result2, mean, atol=0.1)\n\n def test_stack(self):\n \"\"\"Test invoking Stack.\"\"\"\n input1 = np.random.rand(5, 4).astype(np.float32)\n input2 = np.random.rand(5, 4).astype(np.float32)\n result = layers.Stack()([input1, input2])\n assert result.shape == (5, 2, 4)\n assert np.array_equal(input1, result[:, 0, :])\n assert np.array_equal(input2, result[:, 1, :])\n\n def test_variable(self):\n \"\"\"Test invoking Variable.\"\"\"\n value = np.random.rand(5, 4).astype(np.float32)\n layer = layers.Variable(value)\n layer.build([])\n result = layer.call([]).numpy()\n assert np.allclose(result, value)\n assert len(layer.trainable_variables) == 1\n\n def test_interatomic_l2_distances(self):\n \"\"\"Test invoking InteratomicL2Distances.\"\"\"\n atoms = 5\n neighbors = 2\n coords = np.random.rand(atoms, 3)\n neighbor_list = np.random.randint(0, atoms, size=(atoms, neighbors))\n layer = layers.InteratomicL2Distances(atoms, neighbors, 3)\n result = layer([coords, neighbor_list])\n assert result.shape == (atoms, neighbors)\n for atom in range(atoms):\n for neighbor in range(neighbors):\n delta = coords[atom] - coords[neighbor_list[atom, neighbor]]\n dist2 = np.dot(delta, delta)\n assert np.allclose(dist2, result[atom, neighbor])\n <function token>\n <function token>\n\n def test_graph_pool(self):\n \"\"\"Test invoking GraphPool.\"\"\"\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n result = layers.GraphPool()(args)\n assert result.shape[0] == n_atoms\n\n def test_graph_gather(self):\n \"\"\"Test invoking GraphGather.\"\"\"\n batch_size = 2\n n_features = 75\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n result = layers.GraphGather(batch_size)(args)\n assert result.shape == (batch_size, 2 * n_features)\n <function token>\n\n def test_attn_lstm_embedding(self):\n \"\"\"Test invoking AttnLSTMEmbedding.\"\"\"\n max_depth = 5\n n_test = 5\n n_support = 11\n n_feat = 10\n test = np.random.rand(n_test, n_feat).astype(np.float32)\n support = np.random.rand(n_support, n_feat).astype(np.float32)\n layer = layers.AttnLSTMEmbedding(n_test, 
n_support, n_feat, max_depth)\n test_out, support_out = layer([test, support])\n assert test_out.shape == (n_test, n_feat)\n assert support_out.shape == (n_support, n_feat)\n assert len(layer.trainable_variables) == 4\n\n def test_iter_ref_lstm_embedding(self):\n \"\"\"Test invoking IterRefLSTMEmbedding.\"\"\"\n max_depth = 5\n n_test = 5\n n_support = 11\n n_feat = 10\n test = np.random.rand(n_test, n_feat).astype(np.float32)\n support = np.random.rand(n_support, n_feat).astype(np.float32)\n layer = layers.IterRefLSTMEmbedding(n_test, n_support, n_feat,\n max_depth)\n test_out, support_out = layer([test, support])\n assert test_out.shape == (n_test, n_feat)\n assert support_out.shape == (n_support, n_feat)\n assert len(layer.trainable_variables) == 8\n <function token>\n\n def test_weighted_linear_combo(self):\n \"\"\"Test invoking WeightedLinearCombo.\"\"\"\n input1 = np.random.rand(5, 10).astype(np.float32)\n input2 = np.random.rand(5, 10).astype(np.float32)\n layer = layers.WeightedLinearCombo()\n result = layer([input1, input2])\n assert len(layer.trainable_variables) == 2\n expected = input1 * layer.trainable_variables[0\n ] + input2 * layer.trainable_variables[1]\n assert np.allclose(result, expected)\n <function token>\n <function token>\n\n def test_alpha_share_layer(self):\n \"\"\"Test invoking AlphaShareLayer.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.AlphaShareLayer()\n result = layer([input1, input2])\n assert input1.shape == result[0].shape\n assert input2.shape == result[1].shape\n layer2 = layers.AlphaShareLayer()\n result2 = layer2([input1, input2])\n assert not np.allclose(result[0], result2[0])\n assert not np.allclose(result[1], result2[1])\n result3 = layer([input1, input2])\n assert np.allclose(result[0], result3[0])\n assert np.allclose(result[1], result3[1])\n\n def test_sluice_loss(self):\n \"\"\"Test invoking SluiceLoss.\"\"\"\n input1 = np.ones((3, 4)).astype(np.float32)\n input2 = np.ones((2, 2)).astype(np.float32)\n result = layers.SluiceLoss()([input1, input2])\n assert np.allclose(result, 40.0)\n\n def test_beta_share(self):\n \"\"\"Test invoking BetaShare.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.BetaShare()\n result = layer([input1, input2])\n assert input1.shape == result.shape\n assert input2.shape == result.shape\n layer2 = layers.BetaShare()\n result2 = layer2([input1, input2])\n assert not np.allclose(result, result2)\n result3 = layer([input1, input2])\n assert np.allclose(result, result3)\n <function token>\n <function token>\n\n def test_graph_cnn(self):\n \"\"\"Test invoking GraphCNN.\"\"\"\n V = np.random.uniform(size=(10, 100, 50)).astype(np.float32)\n adjs = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)\n layer = layers.GraphCNN(num_filters=6)\n result = layer([V, adjs])\n assert result.shape == (10, 100, 6)\n layer2 = layers.GraphCNN(num_filters=6)\n result2 = layer2([V, adjs])\n assert not np.allclose(result, result2)\n result3 = layer([V, adjs])\n assert np.allclose(result, result3)\n\n def test_DAG_layer(self):\n \"\"\"Test invoking DAGLayer.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 75\n max_atoms = 50\n layer_sizes = [100]\n atom_features = np.random.rand(batch_size, n_atom_feat)\n parents = np.random.randint(0, max_atoms, 
size=(batch_size,\n max_atoms, max_atoms))\n calculation_orders = np.random.randint(0, batch_size, size=(\n batch_size, max_atoms))\n calculation_masks = np.random.randint(0, 2, size=(batch_size,\n max_atoms))\n n_atoms = batch_size\n layer = layers.DAGLayer(n_graph_feat=n_graph_feat, n_atom_feat=\n n_atom_feat, max_atoms=max_atoms, layer_sizes=layer_sizes)\n outputs = layer([atom_features, parents, calculation_orders,\n calculation_masks, n_atoms])\n\n def test_DAG_gather(self):\n \"\"\"Test invoking DAGGather.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 30\n n_outputs = 75\n max_atoms = 50\n layer_sizes = [100]\n layer = layers.DAGGather(n_graph_feat=n_graph_feat, n_outputs=\n n_outputs, max_atoms=max_atoms, layer_sizes=layer_sizes)\n atom_features = np.random.rand(batch_size, n_atom_feat)\n membership = np.sort(np.random.randint(0, batch_size, size=batch_size))\n outputs = layer([atom_features, membership])\n",
"<import token>\n\n\nclass TestLayers(test_util.TensorFlowTestCase):\n <function token>\n\n def test_combine_mean_std(self):\n \"\"\"Test invoking CombineMeanStd.\"\"\"\n mean = np.random.rand(5, 3).astype(np.float32)\n std = np.random.rand(5, 3).astype(np.float32)\n layer = layers.CombineMeanStd(training_only=True, noise_epsilon=0.01)\n result1 = layer([mean, std], training=False)\n assert np.array_equal(result1, mean)\n result2 = layer([mean, std], training=True)\n assert not np.array_equal(result2, mean)\n assert np.allclose(result2, mean, atol=0.1)\n\n def test_stack(self):\n \"\"\"Test invoking Stack.\"\"\"\n input1 = np.random.rand(5, 4).astype(np.float32)\n input2 = np.random.rand(5, 4).astype(np.float32)\n result = layers.Stack()([input1, input2])\n assert result.shape == (5, 2, 4)\n assert np.array_equal(input1, result[:, 0, :])\n assert np.array_equal(input2, result[:, 1, :])\n\n def test_variable(self):\n \"\"\"Test invoking Variable.\"\"\"\n value = np.random.rand(5, 4).astype(np.float32)\n layer = layers.Variable(value)\n layer.build([])\n result = layer.call([]).numpy()\n assert np.allclose(result, value)\n assert len(layer.trainable_variables) == 1\n\n def test_interatomic_l2_distances(self):\n \"\"\"Test invoking InteratomicL2Distances.\"\"\"\n atoms = 5\n neighbors = 2\n coords = np.random.rand(atoms, 3)\n neighbor_list = np.random.randint(0, atoms, size=(atoms, neighbors))\n layer = layers.InteratomicL2Distances(atoms, neighbors, 3)\n result = layer([coords, neighbor_list])\n assert result.shape == (atoms, neighbors)\n for atom in range(atoms):\n for neighbor in range(neighbors):\n delta = coords[atom] - coords[neighbor_list[atom, neighbor]]\n dist2 = np.dot(delta, delta)\n assert np.allclose(dist2, result[atom, neighbor])\n <function token>\n <function token>\n\n def test_graph_pool(self):\n \"\"\"Test invoking GraphPool.\"\"\"\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n result = layers.GraphPool()(args)\n assert result.shape[0] == n_atoms\n\n def test_graph_gather(self):\n \"\"\"Test invoking GraphGather.\"\"\"\n batch_size = 2\n n_features = 75\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n result = layers.GraphGather(batch_size)(args)\n assert result.shape == (batch_size, 2 * n_features)\n <function token>\n\n def test_attn_lstm_embedding(self):\n \"\"\"Test invoking AttnLSTMEmbedding.\"\"\"\n max_depth = 5\n n_test = 5\n n_support = 11\n n_feat = 10\n test = np.random.rand(n_test, n_feat).astype(np.float32)\n support = np.random.rand(n_support, n_feat).astype(np.float32)\n layer = layers.AttnLSTMEmbedding(n_test, 
n_support, n_feat, max_depth)\n test_out, support_out = layer([test, support])\n assert test_out.shape == (n_test, n_feat)\n assert support_out.shape == (n_support, n_feat)\n assert len(layer.trainable_variables) == 4\n\n def test_iter_ref_lstm_embedding(self):\n \"\"\"Test invoking IterRefLSTMEmbedding.\"\"\"\n max_depth = 5\n n_test = 5\n n_support = 11\n n_feat = 10\n test = np.random.rand(n_test, n_feat).astype(np.float32)\n support = np.random.rand(n_support, n_feat).astype(np.float32)\n layer = layers.IterRefLSTMEmbedding(n_test, n_support, n_feat,\n max_depth)\n test_out, support_out = layer([test, support])\n assert test_out.shape == (n_test, n_feat)\n assert support_out.shape == (n_support, n_feat)\n assert len(layer.trainable_variables) == 8\n <function token>\n\n def test_weighted_linear_combo(self):\n \"\"\"Test invoking WeightedLinearCombo.\"\"\"\n input1 = np.random.rand(5, 10).astype(np.float32)\n input2 = np.random.rand(5, 10).astype(np.float32)\n layer = layers.WeightedLinearCombo()\n result = layer([input1, input2])\n assert len(layer.trainable_variables) == 2\n expected = input1 * layer.trainable_variables[0\n ] + input2 * layer.trainable_variables[1]\n assert np.allclose(result, expected)\n <function token>\n <function token>\n\n def test_alpha_share_layer(self):\n \"\"\"Test invoking AlphaShareLayer.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.AlphaShareLayer()\n result = layer([input1, input2])\n assert input1.shape == result[0].shape\n assert input2.shape == result[1].shape\n layer2 = layers.AlphaShareLayer()\n result2 = layer2([input1, input2])\n assert not np.allclose(result[0], result2[0])\n assert not np.allclose(result[1], result2[1])\n result3 = layer([input1, input2])\n assert np.allclose(result[0], result3[0])\n assert np.allclose(result[1], result3[1])\n\n def test_sluice_loss(self):\n \"\"\"Test invoking SluiceLoss.\"\"\"\n input1 = np.ones((3, 4)).astype(np.float32)\n input2 = np.ones((2, 2)).astype(np.float32)\n result = layers.SluiceLoss()([input1, input2])\n assert np.allclose(result, 40.0)\n <function token>\n <function token>\n <function token>\n\n def test_graph_cnn(self):\n \"\"\"Test invoking GraphCNN.\"\"\"\n V = np.random.uniform(size=(10, 100, 50)).astype(np.float32)\n adjs = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)\n layer = layers.GraphCNN(num_filters=6)\n result = layer([V, adjs])\n assert result.shape == (10, 100, 6)\n layer2 = layers.GraphCNN(num_filters=6)\n result2 = layer2([V, adjs])\n assert not np.allclose(result, result2)\n result3 = layer([V, adjs])\n assert np.allclose(result, result3)\n\n def test_DAG_layer(self):\n \"\"\"Test invoking DAGLayer.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 75\n max_atoms = 50\n layer_sizes = [100]\n atom_features = np.random.rand(batch_size, n_atom_feat)\n parents = np.random.randint(0, max_atoms, size=(batch_size,\n max_atoms, max_atoms))\n calculation_orders = np.random.randint(0, batch_size, size=(\n batch_size, max_atoms))\n calculation_masks = np.random.randint(0, 2, size=(batch_size,\n max_atoms))\n n_atoms = batch_size\n layer = layers.DAGLayer(n_graph_feat=n_graph_feat, n_atom_feat=\n n_atom_feat, max_atoms=max_atoms, layer_sizes=layer_sizes)\n outputs = layer([atom_features, parents, calculation_orders,\n calculation_masks, n_atoms])\n\n def test_DAG_gather(self):\n \"\"\"Test invoking DAGGather.\"\"\"\n batch_size = 
10\n n_graph_feat = 30\n n_atom_feat = 30\n n_outputs = 75\n max_atoms = 50\n layer_sizes = [100]\n layer = layers.DAGGather(n_graph_feat=n_graph_feat, n_outputs=\n n_outputs, max_atoms=max_atoms, layer_sizes=layer_sizes)\n atom_features = np.random.rand(batch_size, n_atom_feat)\n membership = np.sort(np.random.randint(0, batch_size, size=batch_size))\n outputs = layer([atom_features, membership])\n",
"<import token>\n\n\nclass TestLayers(test_util.TensorFlowTestCase):\n <function token>\n\n def test_combine_mean_std(self):\n \"\"\"Test invoking CombineMeanStd.\"\"\"\n mean = np.random.rand(5, 3).astype(np.float32)\n std = np.random.rand(5, 3).astype(np.float32)\n layer = layers.CombineMeanStd(training_only=True, noise_epsilon=0.01)\n result1 = layer([mean, std], training=False)\n assert np.array_equal(result1, mean)\n result2 = layer([mean, std], training=True)\n assert not np.array_equal(result2, mean)\n assert np.allclose(result2, mean, atol=0.1)\n\n def test_stack(self):\n \"\"\"Test invoking Stack.\"\"\"\n input1 = np.random.rand(5, 4).astype(np.float32)\n input2 = np.random.rand(5, 4).astype(np.float32)\n result = layers.Stack()([input1, input2])\n assert result.shape == (5, 2, 4)\n assert np.array_equal(input1, result[:, 0, :])\n assert np.array_equal(input2, result[:, 1, :])\n\n def test_variable(self):\n \"\"\"Test invoking Variable.\"\"\"\n value = np.random.rand(5, 4).astype(np.float32)\n layer = layers.Variable(value)\n layer.build([])\n result = layer.call([]).numpy()\n assert np.allclose(result, value)\n assert len(layer.trainable_variables) == 1\n\n def test_interatomic_l2_distances(self):\n \"\"\"Test invoking InteratomicL2Distances.\"\"\"\n atoms = 5\n neighbors = 2\n coords = np.random.rand(atoms, 3)\n neighbor_list = np.random.randint(0, atoms, size=(atoms, neighbors))\n layer = layers.InteratomicL2Distances(atoms, neighbors, 3)\n result = layer([coords, neighbor_list])\n assert result.shape == (atoms, neighbors)\n for atom in range(atoms):\n for neighbor in range(neighbors):\n delta = coords[atom] - coords[neighbor_list[atom, neighbor]]\n dist2 = np.dot(delta, delta)\n assert np.allclose(dist2, result[atom, neighbor])\n <function token>\n <function token>\n\n def test_graph_pool(self):\n \"\"\"Test invoking GraphPool.\"\"\"\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n result = layers.GraphPool()(args)\n assert result.shape[0] == n_atoms\n\n def test_graph_gather(self):\n \"\"\"Test invoking GraphGather.\"\"\"\n batch_size = 2\n n_features = 75\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n result = layers.GraphGather(batch_size)(args)\n assert result.shape == (batch_size, 2 * n_features)\n <function token>\n <function token>\n\n def test_iter_ref_lstm_embedding(self):\n \"\"\"Test invoking IterRefLSTMEmbedding.\"\"\"\n max_depth = 5\n n_test = 5\n n_support = 11\n n_feat = 10\n test = np.random.rand(n_test, n_feat).astype(np.float32)\n support = np.random.rand(n_support, n_feat).astype(np.float32)\n layer = 
layers.IterRefLSTMEmbedding(n_test, n_support, n_feat,\n max_depth)\n test_out, support_out = layer([test, support])\n assert test_out.shape == (n_test, n_feat)\n assert support_out.shape == (n_support, n_feat)\n assert len(layer.trainable_variables) == 8\n <function token>\n\n def test_weighted_linear_combo(self):\n \"\"\"Test invoking WeightedLinearCombo.\"\"\"\n input1 = np.random.rand(5, 10).astype(np.float32)\n input2 = np.random.rand(5, 10).astype(np.float32)\n layer = layers.WeightedLinearCombo()\n result = layer([input1, input2])\n assert len(layer.trainable_variables) == 2\n expected = input1 * layer.trainable_variables[0\n ] + input2 * layer.trainable_variables[1]\n assert np.allclose(result, expected)\n <function token>\n <function token>\n\n def test_alpha_share_layer(self):\n \"\"\"Test invoking AlphaShareLayer.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.AlphaShareLayer()\n result = layer([input1, input2])\n assert input1.shape == result[0].shape\n assert input2.shape == result[1].shape\n layer2 = layers.AlphaShareLayer()\n result2 = layer2([input1, input2])\n assert not np.allclose(result[0], result2[0])\n assert not np.allclose(result[1], result2[1])\n result3 = layer([input1, input2])\n assert np.allclose(result[0], result3[0])\n assert np.allclose(result[1], result3[1])\n\n def test_sluice_loss(self):\n \"\"\"Test invoking SluiceLoss.\"\"\"\n input1 = np.ones((3, 4)).astype(np.float32)\n input2 = np.ones((2, 2)).astype(np.float32)\n result = layers.SluiceLoss()([input1, input2])\n assert np.allclose(result, 40.0)\n <function token>\n <function token>\n <function token>\n\n def test_graph_cnn(self):\n \"\"\"Test invoking GraphCNN.\"\"\"\n V = np.random.uniform(size=(10, 100, 50)).astype(np.float32)\n adjs = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)\n layer = layers.GraphCNN(num_filters=6)\n result = layer([V, adjs])\n assert result.shape == (10, 100, 6)\n layer2 = layers.GraphCNN(num_filters=6)\n result2 = layer2([V, adjs])\n assert not np.allclose(result, result2)\n result3 = layer([V, adjs])\n assert np.allclose(result, result3)\n\n def test_DAG_layer(self):\n \"\"\"Test invoking DAGLayer.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 75\n max_atoms = 50\n layer_sizes = [100]\n atom_features = np.random.rand(batch_size, n_atom_feat)\n parents = np.random.randint(0, max_atoms, size=(batch_size,\n max_atoms, max_atoms))\n calculation_orders = np.random.randint(0, batch_size, size=(\n batch_size, max_atoms))\n calculation_masks = np.random.randint(0, 2, size=(batch_size,\n max_atoms))\n n_atoms = batch_size\n layer = layers.DAGLayer(n_graph_feat=n_graph_feat, n_atom_feat=\n n_atom_feat, max_atoms=max_atoms, layer_sizes=layer_sizes)\n outputs = layer([atom_features, parents, calculation_orders,\n calculation_masks, n_atoms])\n\n def test_DAG_gather(self):\n \"\"\"Test invoking DAGGather.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 30\n n_outputs = 75\n max_atoms = 50\n layer_sizes = [100]\n layer = layers.DAGGather(n_graph_feat=n_graph_feat, n_outputs=\n n_outputs, max_atoms=max_atoms, layer_sizes=layer_sizes)\n atom_features = np.random.rand(batch_size, n_atom_feat)\n membership = np.sort(np.random.randint(0, batch_size, size=batch_size))\n outputs = layer([atom_features, membership])\n",
"<import token>\n\n\nclass TestLayers(test_util.TensorFlowTestCase):\n <function token>\n\n def test_combine_mean_std(self):\n \"\"\"Test invoking CombineMeanStd.\"\"\"\n mean = np.random.rand(5, 3).astype(np.float32)\n std = np.random.rand(5, 3).astype(np.float32)\n layer = layers.CombineMeanStd(training_only=True, noise_epsilon=0.01)\n result1 = layer([mean, std], training=False)\n assert np.array_equal(result1, mean)\n result2 = layer([mean, std], training=True)\n assert not np.array_equal(result2, mean)\n assert np.allclose(result2, mean, atol=0.1)\n\n def test_stack(self):\n \"\"\"Test invoking Stack.\"\"\"\n input1 = np.random.rand(5, 4).astype(np.float32)\n input2 = np.random.rand(5, 4).astype(np.float32)\n result = layers.Stack()([input1, input2])\n assert result.shape == (5, 2, 4)\n assert np.array_equal(input1, result[:, 0, :])\n assert np.array_equal(input2, result[:, 1, :])\n\n def test_variable(self):\n \"\"\"Test invoking Variable.\"\"\"\n value = np.random.rand(5, 4).astype(np.float32)\n layer = layers.Variable(value)\n layer.build([])\n result = layer.call([]).numpy()\n assert np.allclose(result, value)\n assert len(layer.trainable_variables) == 1\n <function token>\n <function token>\n <function token>\n\n def test_graph_pool(self):\n \"\"\"Test invoking GraphPool.\"\"\"\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n result = layers.GraphPool()(args)\n assert result.shape[0] == n_atoms\n\n def test_graph_gather(self):\n \"\"\"Test invoking GraphGather.\"\"\"\n batch_size = 2\n n_features = 75\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n result = layers.GraphGather(batch_size)(args)\n assert result.shape == (batch_size, 2 * n_features)\n <function token>\n <function token>\n\n def test_iter_ref_lstm_embedding(self):\n \"\"\"Test invoking IterRefLSTMEmbedding.\"\"\"\n max_depth = 5\n n_test = 5\n n_support = 11\n n_feat = 10\n test = np.random.rand(n_test, n_feat).astype(np.float32)\n support = np.random.rand(n_support, n_feat).astype(np.float32)\n layer = layers.IterRefLSTMEmbedding(n_test, n_support, n_feat,\n max_depth)\n test_out, support_out = layer([test, support])\n assert test_out.shape == (n_test, n_feat)\n assert support_out.shape == (n_support, n_feat)\n assert len(layer.trainable_variables) == 8\n <function token>\n\n def test_weighted_linear_combo(self):\n \"\"\"Test invoking WeightedLinearCombo.\"\"\"\n input1 = np.random.rand(5, 10).astype(np.float32)\n input2 = np.random.rand(5, 10).astype(np.float32)\n layer = layers.WeightedLinearCombo()\n result = layer([input1, input2])\n assert 
len(layer.trainable_variables) == 2\n expected = input1 * layer.trainable_variables[0\n ] + input2 * layer.trainable_variables[1]\n assert np.allclose(result, expected)\n <function token>\n <function token>\n\n def test_alpha_share_layer(self):\n \"\"\"Test invoking AlphaShareLayer.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.AlphaShareLayer()\n result = layer([input1, input2])\n assert input1.shape == result[0].shape\n assert input2.shape == result[1].shape\n layer2 = layers.AlphaShareLayer()\n result2 = layer2([input1, input2])\n assert not np.allclose(result[0], result2[0])\n assert not np.allclose(result[1], result2[1])\n result3 = layer([input1, input2])\n assert np.allclose(result[0], result3[0])\n assert np.allclose(result[1], result3[1])\n\n def test_sluice_loss(self):\n \"\"\"Test invoking SluiceLoss.\"\"\"\n input1 = np.ones((3, 4)).astype(np.float32)\n input2 = np.ones((2, 2)).astype(np.float32)\n result = layers.SluiceLoss()([input1, input2])\n assert np.allclose(result, 40.0)\n <function token>\n <function token>\n <function token>\n\n def test_graph_cnn(self):\n \"\"\"Test invoking GraphCNN.\"\"\"\n V = np.random.uniform(size=(10, 100, 50)).astype(np.float32)\n adjs = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)\n layer = layers.GraphCNN(num_filters=6)\n result = layer([V, adjs])\n assert result.shape == (10, 100, 6)\n layer2 = layers.GraphCNN(num_filters=6)\n result2 = layer2([V, adjs])\n assert not np.allclose(result, result2)\n result3 = layer([V, adjs])\n assert np.allclose(result, result3)\n\n def test_DAG_layer(self):\n \"\"\"Test invoking DAGLayer.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 75\n max_atoms = 50\n layer_sizes = [100]\n atom_features = np.random.rand(batch_size, n_atom_feat)\n parents = np.random.randint(0, max_atoms, size=(batch_size,\n max_atoms, max_atoms))\n calculation_orders = np.random.randint(0, batch_size, size=(\n batch_size, max_atoms))\n calculation_masks = np.random.randint(0, 2, size=(batch_size,\n max_atoms))\n n_atoms = batch_size\n layer = layers.DAGLayer(n_graph_feat=n_graph_feat, n_atom_feat=\n n_atom_feat, max_atoms=max_atoms, layer_sizes=layer_sizes)\n outputs = layer([atom_features, parents, calculation_orders,\n calculation_masks, n_atoms])\n\n def test_DAG_gather(self):\n \"\"\"Test invoking DAGGather.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 30\n n_outputs = 75\n max_atoms = 50\n layer_sizes = [100]\n layer = layers.DAGGather(n_graph_feat=n_graph_feat, n_outputs=\n n_outputs, max_atoms=max_atoms, layer_sizes=layer_sizes)\n atom_features = np.random.rand(batch_size, n_atom_feat)\n membership = np.sort(np.random.randint(0, batch_size, size=batch_size))\n outputs = layer([atom_features, membership])\n",
"<import token>\n\n\nclass TestLayers(test_util.TensorFlowTestCase):\n <function token>\n\n def test_combine_mean_std(self):\n \"\"\"Test invoking CombineMeanStd.\"\"\"\n mean = np.random.rand(5, 3).astype(np.float32)\n std = np.random.rand(5, 3).astype(np.float32)\n layer = layers.CombineMeanStd(training_only=True, noise_epsilon=0.01)\n result1 = layer([mean, std], training=False)\n assert np.array_equal(result1, mean)\n result2 = layer([mean, std], training=True)\n assert not np.array_equal(result2, mean)\n assert np.allclose(result2, mean, atol=0.1)\n\n def test_stack(self):\n \"\"\"Test invoking Stack.\"\"\"\n input1 = np.random.rand(5, 4).astype(np.float32)\n input2 = np.random.rand(5, 4).astype(np.float32)\n result = layers.Stack()([input1, input2])\n assert result.shape == (5, 2, 4)\n assert np.array_equal(input1, result[:, 0, :])\n assert np.array_equal(input2, result[:, 1, :])\n\n def test_variable(self):\n \"\"\"Test invoking Variable.\"\"\"\n value = np.random.rand(5, 4).astype(np.float32)\n layer = layers.Variable(value)\n layer.build([])\n result = layer.call([]).numpy()\n assert np.allclose(result, value)\n assert len(layer.trainable_variables) == 1\n <function token>\n <function token>\n <function token>\n\n def test_graph_pool(self):\n \"\"\"Test invoking GraphPool.\"\"\"\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n result = layers.GraphPool()(args)\n assert result.shape[0] == n_atoms\n\n def test_graph_gather(self):\n \"\"\"Test invoking GraphGather.\"\"\"\n batch_size = 2\n n_features = 75\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n result = layers.GraphGather(batch_size)(args)\n assert result.shape == (batch_size, 2 * n_features)\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_weighted_linear_combo(self):\n \"\"\"Test invoking WeightedLinearCombo.\"\"\"\n input1 = np.random.rand(5, 10).astype(np.float32)\n input2 = np.random.rand(5, 10).astype(np.float32)\n layer = layers.WeightedLinearCombo()\n result = layer([input1, input2])\n assert len(layer.trainable_variables) == 2\n expected = input1 * layer.trainable_variables[0\n ] + input2 * layer.trainable_variables[1]\n assert np.allclose(result, expected)\n <function token>\n <function token>\n\n def test_alpha_share_layer(self):\n \"\"\"Test invoking AlphaShareLayer.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.AlphaShareLayer()\n result = layer([input1, input2])\n assert input1.shape == 
result[0].shape\n assert input2.shape == result[1].shape\n layer2 = layers.AlphaShareLayer()\n result2 = layer2([input1, input2])\n assert not np.allclose(result[0], result2[0])\n assert not np.allclose(result[1], result2[1])\n result3 = layer([input1, input2])\n assert np.allclose(result[0], result3[0])\n assert np.allclose(result[1], result3[1])\n\n def test_sluice_loss(self):\n \"\"\"Test invoking SluiceLoss.\"\"\"\n input1 = np.ones((3, 4)).astype(np.float32)\n input2 = np.ones((2, 2)).astype(np.float32)\n result = layers.SluiceLoss()([input1, input2])\n assert np.allclose(result, 40.0)\n <function token>\n <function token>\n <function token>\n\n def test_graph_cnn(self):\n \"\"\"Test invoking GraphCNN.\"\"\"\n V = np.random.uniform(size=(10, 100, 50)).astype(np.float32)\n adjs = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)\n layer = layers.GraphCNN(num_filters=6)\n result = layer([V, adjs])\n assert result.shape == (10, 100, 6)\n layer2 = layers.GraphCNN(num_filters=6)\n result2 = layer2([V, adjs])\n assert not np.allclose(result, result2)\n result3 = layer([V, adjs])\n assert np.allclose(result, result3)\n\n def test_DAG_layer(self):\n \"\"\"Test invoking DAGLayer.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 75\n max_atoms = 50\n layer_sizes = [100]\n atom_features = np.random.rand(batch_size, n_atom_feat)\n parents = np.random.randint(0, max_atoms, size=(batch_size,\n max_atoms, max_atoms))\n calculation_orders = np.random.randint(0, batch_size, size=(\n batch_size, max_atoms))\n calculation_masks = np.random.randint(0, 2, size=(batch_size,\n max_atoms))\n n_atoms = batch_size\n layer = layers.DAGLayer(n_graph_feat=n_graph_feat, n_atom_feat=\n n_atom_feat, max_atoms=max_atoms, layer_sizes=layer_sizes)\n outputs = layer([atom_features, parents, calculation_orders,\n calculation_masks, n_atoms])\n\n def test_DAG_gather(self):\n \"\"\"Test invoking DAGGather.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 30\n n_outputs = 75\n max_atoms = 50\n layer_sizes = [100]\n layer = layers.DAGGather(n_graph_feat=n_graph_feat, n_outputs=\n n_outputs, max_atoms=max_atoms, layer_sizes=layer_sizes)\n atom_features = np.random.rand(batch_size, n_atom_feat)\n membership = np.sort(np.random.randint(0, batch_size, size=batch_size))\n outputs = layer([atom_features, membership])\n",
"<import token>\n\n\nclass TestLayers(test_util.TensorFlowTestCase):\n <function token>\n\n def test_combine_mean_std(self):\n \"\"\"Test invoking CombineMeanStd.\"\"\"\n mean = np.random.rand(5, 3).astype(np.float32)\n std = np.random.rand(5, 3).astype(np.float32)\n layer = layers.CombineMeanStd(training_only=True, noise_epsilon=0.01)\n result1 = layer([mean, std], training=False)\n assert np.array_equal(result1, mean)\n result2 = layer([mean, std], training=True)\n assert not np.array_equal(result2, mean)\n assert np.allclose(result2, mean, atol=0.1)\n\n def test_stack(self):\n \"\"\"Test invoking Stack.\"\"\"\n input1 = np.random.rand(5, 4).astype(np.float32)\n input2 = np.random.rand(5, 4).astype(np.float32)\n result = layers.Stack()([input1, input2])\n assert result.shape == (5, 2, 4)\n assert np.array_equal(input1, result[:, 0, :])\n assert np.array_equal(input2, result[:, 1, :])\n\n def test_variable(self):\n \"\"\"Test invoking Variable.\"\"\"\n value = np.random.rand(5, 4).astype(np.float32)\n layer = layers.Variable(value)\n layer.build([])\n result = layer.call([]).numpy()\n assert np.allclose(result, value)\n assert len(layer.trainable_variables) == 1\n <function token>\n <function token>\n <function token>\n\n def test_graph_pool(self):\n \"\"\"Test invoking GraphPool.\"\"\"\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n result = layers.GraphPool()(args)\n assert result.shape[0] == n_atoms\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_weighted_linear_combo(self):\n \"\"\"Test invoking WeightedLinearCombo.\"\"\"\n input1 = np.random.rand(5, 10).astype(np.float32)\n input2 = np.random.rand(5, 10).astype(np.float32)\n layer = layers.WeightedLinearCombo()\n result = layer([input1, input2])\n assert len(layer.trainable_variables) == 2\n expected = input1 * layer.trainable_variables[0\n ] + input2 * layer.trainable_variables[1]\n assert np.allclose(result, expected)\n <function token>\n <function token>\n\n def test_alpha_share_layer(self):\n \"\"\"Test invoking AlphaShareLayer.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.AlphaShareLayer()\n result = layer([input1, input2])\n assert input1.shape == result[0].shape\n assert input2.shape == result[1].shape\n layer2 = layers.AlphaShareLayer()\n result2 = layer2([input1, input2])\n assert not np.allclose(result[0], result2[0])\n assert not np.allclose(result[1], result2[1])\n result3 = layer([input1, input2])\n assert np.allclose(result[0], result3[0])\n assert np.allclose(result[1], result3[1])\n\n def test_sluice_loss(self):\n \"\"\"Test invoking SluiceLoss.\"\"\"\n input1 = np.ones((3, 4)).astype(np.float32)\n input2 = np.ones((2, 2)).astype(np.float32)\n result = layers.SluiceLoss()([input1, input2])\n assert np.allclose(result, 40.0)\n <function token>\n <function token>\n <function token>\n\n def test_graph_cnn(self):\n \"\"\"Test invoking GraphCNN.\"\"\"\n 
V = np.random.uniform(size=(10, 100, 50)).astype(np.float32)\n adjs = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)\n layer = layers.GraphCNN(num_filters=6)\n result = layer([V, adjs])\n assert result.shape == (10, 100, 6)\n layer2 = layers.GraphCNN(num_filters=6)\n result2 = layer2([V, adjs])\n assert not np.allclose(result, result2)\n result3 = layer([V, adjs])\n assert np.allclose(result, result3)\n\n def test_DAG_layer(self):\n \"\"\"Test invoking DAGLayer.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 75\n max_atoms = 50\n layer_sizes = [100]\n atom_features = np.random.rand(batch_size, n_atom_feat)\n parents = np.random.randint(0, max_atoms, size=(batch_size,\n max_atoms, max_atoms))\n calculation_orders = np.random.randint(0, batch_size, size=(\n batch_size, max_atoms))\n calculation_masks = np.random.randint(0, 2, size=(batch_size,\n max_atoms))\n n_atoms = batch_size\n layer = layers.DAGLayer(n_graph_feat=n_graph_feat, n_atom_feat=\n n_atom_feat, max_atoms=max_atoms, layer_sizes=layer_sizes)\n outputs = layer([atom_features, parents, calculation_orders,\n calculation_masks, n_atoms])\n\n def test_DAG_gather(self):\n \"\"\"Test invoking DAGGather.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 30\n n_outputs = 75\n max_atoms = 50\n layer_sizes = [100]\n layer = layers.DAGGather(n_graph_feat=n_graph_feat, n_outputs=\n n_outputs, max_atoms=max_atoms, layer_sizes=layer_sizes)\n atom_features = np.random.rand(batch_size, n_atom_feat)\n membership = np.sort(np.random.randint(0, batch_size, size=batch_size))\n outputs = layer([atom_features, membership])\n",
"<import token>\n\n\nclass TestLayers(test_util.TensorFlowTestCase):\n <function token>\n\n def test_combine_mean_std(self):\n \"\"\"Test invoking CombineMeanStd.\"\"\"\n mean = np.random.rand(5, 3).astype(np.float32)\n std = np.random.rand(5, 3).astype(np.float32)\n layer = layers.CombineMeanStd(training_only=True, noise_epsilon=0.01)\n result1 = layer([mean, std], training=False)\n assert np.array_equal(result1, mean)\n result2 = layer([mean, std], training=True)\n assert not np.array_equal(result2, mean)\n assert np.allclose(result2, mean, atol=0.1)\n\n def test_stack(self):\n \"\"\"Test invoking Stack.\"\"\"\n input1 = np.random.rand(5, 4).astype(np.float32)\n input2 = np.random.rand(5, 4).astype(np.float32)\n result = layers.Stack()([input1, input2])\n assert result.shape == (5, 2, 4)\n assert np.array_equal(input1, result[:, 0, :])\n assert np.array_equal(input2, result[:, 1, :])\n\n def test_variable(self):\n \"\"\"Test invoking Variable.\"\"\"\n value = np.random.rand(5, 4).astype(np.float32)\n layer = layers.Variable(value)\n layer.build([])\n result = layer.call([]).numpy()\n assert np.allclose(result, value)\n assert len(layer.trainable_variables) == 1\n <function token>\n <function token>\n <function token>\n\n def test_graph_pool(self):\n \"\"\"Test invoking GraphPool.\"\"\"\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n result = layers.GraphPool()(args)\n assert result.shape[0] == n_atoms\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_weighted_linear_combo(self):\n \"\"\"Test invoking WeightedLinearCombo.\"\"\"\n input1 = np.random.rand(5, 10).astype(np.float32)\n input2 = np.random.rand(5, 10).astype(np.float32)\n layer = layers.WeightedLinearCombo()\n result = layer([input1, input2])\n assert len(layer.trainable_variables) == 2\n expected = input1 * layer.trainable_variables[0\n ] + input2 * layer.trainable_variables[1]\n assert np.allclose(result, expected)\n <function token>\n <function token>\n\n def test_alpha_share_layer(self):\n \"\"\"Test invoking AlphaShareLayer.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.AlphaShareLayer()\n result = layer([input1, input2])\n assert input1.shape == result[0].shape\n assert input2.shape == result[1].shape\n layer2 = layers.AlphaShareLayer()\n result2 = layer2([input1, input2])\n assert not np.allclose(result[0], result2[0])\n assert not np.allclose(result[1], result2[1])\n result3 = layer([input1, input2])\n assert np.allclose(result[0], result3[0])\n assert np.allclose(result[1], result3[1])\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_graph_cnn(self):\n \"\"\"Test invoking GraphCNN.\"\"\"\n V = np.random.uniform(size=(10, 100, 50)).astype(np.float32)\n adjs = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)\n layer = layers.GraphCNN(num_filters=6)\n result = layer([V, adjs])\n assert result.shape == (10, 
100, 6)\n layer2 = layers.GraphCNN(num_filters=6)\n result2 = layer2([V, adjs])\n assert not np.allclose(result, result2)\n result3 = layer([V, adjs])\n assert np.allclose(result, result3)\n\n def test_DAG_layer(self):\n \"\"\"Test invoking DAGLayer.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 75\n max_atoms = 50\n layer_sizes = [100]\n atom_features = np.random.rand(batch_size, n_atom_feat)\n parents = np.random.randint(0, max_atoms, size=(batch_size,\n max_atoms, max_atoms))\n calculation_orders = np.random.randint(0, batch_size, size=(\n batch_size, max_atoms))\n calculation_masks = np.random.randint(0, 2, size=(batch_size,\n max_atoms))\n n_atoms = batch_size\n layer = layers.DAGLayer(n_graph_feat=n_graph_feat, n_atom_feat=\n n_atom_feat, max_atoms=max_atoms, layer_sizes=layer_sizes)\n outputs = layer([atom_features, parents, calculation_orders,\n calculation_masks, n_atoms])\n\n def test_DAG_gather(self):\n \"\"\"Test invoking DAGGather.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 30\n n_outputs = 75\n max_atoms = 50\n layer_sizes = [100]\n layer = layers.DAGGather(n_graph_feat=n_graph_feat, n_outputs=\n n_outputs, max_atoms=max_atoms, layer_sizes=layer_sizes)\n atom_features = np.random.rand(batch_size, n_atom_feat)\n membership = np.sort(np.random.randint(0, batch_size, size=batch_size))\n outputs = layer([atom_features, membership])\n",
"<import token>\n\n\nclass TestLayers(test_util.TensorFlowTestCase):\n <function token>\n <function token>\n\n def test_stack(self):\n \"\"\"Test invoking Stack.\"\"\"\n input1 = np.random.rand(5, 4).astype(np.float32)\n input2 = np.random.rand(5, 4).astype(np.float32)\n result = layers.Stack()([input1, input2])\n assert result.shape == (5, 2, 4)\n assert np.array_equal(input1, result[:, 0, :])\n assert np.array_equal(input2, result[:, 1, :])\n\n def test_variable(self):\n \"\"\"Test invoking Variable.\"\"\"\n value = np.random.rand(5, 4).astype(np.float32)\n layer = layers.Variable(value)\n layer.build([])\n result = layer.call([]).numpy()\n assert np.allclose(result, value)\n assert len(layer.trainable_variables) == 1\n <function token>\n <function token>\n <function token>\n\n def test_graph_pool(self):\n \"\"\"Test invoking GraphPool.\"\"\"\n n_atoms = 4\n raw_smiles = ['CCC', 'C']\n import rdkit\n mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]\n featurizer = dc.feat.graph_features.ConvMolFeaturizer()\n mols = featurizer.featurize(mols)\n multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)\n atom_features = multi_mol.get_atom_features().astype(np.float32)\n degree_slice = multi_mol.deg_slice\n membership = multi_mol.membership\n deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]\n args = [atom_features, degree_slice, membership] + deg_adjs\n result = layers.GraphPool()(args)\n assert result.shape[0] == n_atoms\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_weighted_linear_combo(self):\n \"\"\"Test invoking WeightedLinearCombo.\"\"\"\n input1 = np.random.rand(5, 10).astype(np.float32)\n input2 = np.random.rand(5, 10).astype(np.float32)\n layer = layers.WeightedLinearCombo()\n result = layer([input1, input2])\n assert len(layer.trainable_variables) == 2\n expected = input1 * layer.trainable_variables[0\n ] + input2 * layer.trainable_variables[1]\n assert np.allclose(result, expected)\n <function token>\n <function token>\n\n def test_alpha_share_layer(self):\n \"\"\"Test invoking AlphaShareLayer.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.AlphaShareLayer()\n result = layer([input1, input2])\n assert input1.shape == result[0].shape\n assert input2.shape == result[1].shape\n layer2 = layers.AlphaShareLayer()\n result2 = layer2([input1, input2])\n assert not np.allclose(result[0], result2[0])\n assert not np.allclose(result[1], result2[1])\n result3 = layer([input1, input2])\n assert np.allclose(result[0], result3[0])\n assert np.allclose(result[1], result3[1])\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_graph_cnn(self):\n \"\"\"Test invoking GraphCNN.\"\"\"\n V = np.random.uniform(size=(10, 100, 50)).astype(np.float32)\n adjs = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)\n layer = layers.GraphCNN(num_filters=6)\n result = layer([V, adjs])\n assert result.shape == (10, 100, 6)\n layer2 = layers.GraphCNN(num_filters=6)\n result2 = layer2([V, adjs])\n assert not np.allclose(result, result2)\n result3 = layer([V, adjs])\n assert np.allclose(result, result3)\n\n def test_DAG_layer(self):\n \"\"\"Test invoking DAGLayer.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 75\n max_atoms = 50\n layer_sizes = [100]\n atom_features = np.random.rand(batch_size, n_atom_feat)\n parents = np.random.randint(0, 
max_atoms, size=(batch_size,\n max_atoms, max_atoms))\n calculation_orders = np.random.randint(0, batch_size, size=(\n batch_size, max_atoms))\n calculation_masks = np.random.randint(0, 2, size=(batch_size,\n max_atoms))\n n_atoms = batch_size\n layer = layers.DAGLayer(n_graph_feat=n_graph_feat, n_atom_feat=\n n_atom_feat, max_atoms=max_atoms, layer_sizes=layer_sizes)\n outputs = layer([atom_features, parents, calculation_orders,\n calculation_masks, n_atoms])\n\n def test_DAG_gather(self):\n \"\"\"Test invoking DAGGather.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 30\n n_outputs = 75\n max_atoms = 50\n layer_sizes = [100]\n layer = layers.DAGGather(n_graph_feat=n_graph_feat, n_outputs=\n n_outputs, max_atoms=max_atoms, layer_sizes=layer_sizes)\n atom_features = np.random.rand(batch_size, n_atom_feat)\n membership = np.sort(np.random.randint(0, batch_size, size=batch_size))\n outputs = layer([atom_features, membership])\n",
"<import token>\n\n\nclass TestLayers(test_util.TensorFlowTestCase):\n <function token>\n <function token>\n\n def test_stack(self):\n \"\"\"Test invoking Stack.\"\"\"\n input1 = np.random.rand(5, 4).astype(np.float32)\n input2 = np.random.rand(5, 4).astype(np.float32)\n result = layers.Stack()([input1, input2])\n assert result.shape == (5, 2, 4)\n assert np.array_equal(input1, result[:, 0, :])\n assert np.array_equal(input2, result[:, 1, :])\n\n def test_variable(self):\n \"\"\"Test invoking Variable.\"\"\"\n value = np.random.rand(5, 4).astype(np.float32)\n layer = layers.Variable(value)\n layer.build([])\n result = layer.call([]).numpy()\n assert np.allclose(result, value)\n assert len(layer.trainable_variables) == 1\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_weighted_linear_combo(self):\n \"\"\"Test invoking WeightedLinearCombo.\"\"\"\n input1 = np.random.rand(5, 10).astype(np.float32)\n input2 = np.random.rand(5, 10).astype(np.float32)\n layer = layers.WeightedLinearCombo()\n result = layer([input1, input2])\n assert len(layer.trainable_variables) == 2\n expected = input1 * layer.trainable_variables[0\n ] + input2 * layer.trainable_variables[1]\n assert np.allclose(result, expected)\n <function token>\n <function token>\n\n def test_alpha_share_layer(self):\n \"\"\"Test invoking AlphaShareLayer.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.AlphaShareLayer()\n result = layer([input1, input2])\n assert input1.shape == result[0].shape\n assert input2.shape == result[1].shape\n layer2 = layers.AlphaShareLayer()\n result2 = layer2([input1, input2])\n assert not np.allclose(result[0], result2[0])\n assert not np.allclose(result[1], result2[1])\n result3 = layer([input1, input2])\n assert np.allclose(result[0], result3[0])\n assert np.allclose(result[1], result3[1])\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_graph_cnn(self):\n \"\"\"Test invoking GraphCNN.\"\"\"\n V = np.random.uniform(size=(10, 100, 50)).astype(np.float32)\n adjs = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)\n layer = layers.GraphCNN(num_filters=6)\n result = layer([V, adjs])\n assert result.shape == (10, 100, 6)\n layer2 = layers.GraphCNN(num_filters=6)\n result2 = layer2([V, adjs])\n assert not np.allclose(result, result2)\n result3 = layer([V, adjs])\n assert np.allclose(result, result3)\n\n def test_DAG_layer(self):\n \"\"\"Test invoking DAGLayer.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 75\n max_atoms = 50\n layer_sizes = [100]\n atom_features = np.random.rand(batch_size, n_atom_feat)\n parents = np.random.randint(0, max_atoms, size=(batch_size,\n max_atoms, max_atoms))\n calculation_orders = np.random.randint(0, batch_size, size=(\n batch_size, max_atoms))\n calculation_masks = np.random.randint(0, 2, size=(batch_size,\n max_atoms))\n n_atoms = batch_size\n layer = layers.DAGLayer(n_graph_feat=n_graph_feat, n_atom_feat=\n n_atom_feat, max_atoms=max_atoms, layer_sizes=layer_sizes)\n outputs = layer([atom_features, parents, calculation_orders,\n calculation_masks, n_atoms])\n\n def test_DAG_gather(self):\n \"\"\"Test invoking DAGGather.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 30\n n_outputs = 75\n max_atoms = 50\n layer_sizes = [100]\n layer = 
layers.DAGGather(n_graph_feat=n_graph_feat, n_outputs=\n n_outputs, max_atoms=max_atoms, layer_sizes=layer_sizes)\n atom_features = np.random.rand(batch_size, n_atom_feat)\n membership = np.sort(np.random.randint(0, batch_size, size=batch_size))\n outputs = layer([atom_features, membership])\n",
"<import token>\n\n\nclass TestLayers(test_util.TensorFlowTestCase):\n <function token>\n <function token>\n\n def test_stack(self):\n \"\"\"Test invoking Stack.\"\"\"\n input1 = np.random.rand(5, 4).astype(np.float32)\n input2 = np.random.rand(5, 4).astype(np.float32)\n result = layers.Stack()([input1, input2])\n assert result.shape == (5, 2, 4)\n assert np.array_equal(input1, result[:, 0, :])\n assert np.array_equal(input2, result[:, 1, :])\n\n def test_variable(self):\n \"\"\"Test invoking Variable.\"\"\"\n value = np.random.rand(5, 4).astype(np.float32)\n layer = layers.Variable(value)\n layer.build([])\n result = layer.call([]).numpy()\n assert np.allclose(result, value)\n assert len(layer.trainable_variables) == 1\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_weighted_linear_combo(self):\n \"\"\"Test invoking WeightedLinearCombo.\"\"\"\n input1 = np.random.rand(5, 10).astype(np.float32)\n input2 = np.random.rand(5, 10).astype(np.float32)\n layer = layers.WeightedLinearCombo()\n result = layer([input1, input2])\n assert len(layer.trainable_variables) == 2\n expected = input1 * layer.trainable_variables[0\n ] + input2 * layer.trainable_variables[1]\n assert np.allclose(result, expected)\n <function token>\n <function token>\n\n def test_alpha_share_layer(self):\n \"\"\"Test invoking AlphaShareLayer.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.AlphaShareLayer()\n result = layer([input1, input2])\n assert input1.shape == result[0].shape\n assert input2.shape == result[1].shape\n layer2 = layers.AlphaShareLayer()\n result2 = layer2([input1, input2])\n assert not np.allclose(result[0], result2[0])\n assert not np.allclose(result[1], result2[1])\n result3 = layer([input1, input2])\n assert np.allclose(result[0], result3[0])\n assert np.allclose(result[1], result3[1])\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_DAG_layer(self):\n \"\"\"Test invoking DAGLayer.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 75\n max_atoms = 50\n layer_sizes = [100]\n atom_features = np.random.rand(batch_size, n_atom_feat)\n parents = np.random.randint(0, max_atoms, size=(batch_size,\n max_atoms, max_atoms))\n calculation_orders = np.random.randint(0, batch_size, size=(\n batch_size, max_atoms))\n calculation_masks = np.random.randint(0, 2, size=(batch_size,\n max_atoms))\n n_atoms = batch_size\n layer = layers.DAGLayer(n_graph_feat=n_graph_feat, n_atom_feat=\n n_atom_feat, max_atoms=max_atoms, layer_sizes=layer_sizes)\n outputs = layer([atom_features, parents, calculation_orders,\n calculation_masks, n_atoms])\n\n def test_DAG_gather(self):\n \"\"\"Test invoking DAGGather.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 30\n n_outputs = 75\n max_atoms = 50\n layer_sizes = [100]\n layer = layers.DAGGather(n_graph_feat=n_graph_feat, n_outputs=\n n_outputs, max_atoms=max_atoms, layer_sizes=layer_sizes)\n atom_features = np.random.rand(batch_size, n_atom_feat)\n membership = np.sort(np.random.randint(0, batch_size, size=batch_size))\n outputs = layer([atom_features, membership])\n",
"<import token>\n\n\nclass TestLayers(test_util.TensorFlowTestCase):\n <function token>\n <function token>\n\n def test_stack(self):\n \"\"\"Test invoking Stack.\"\"\"\n input1 = np.random.rand(5, 4).astype(np.float32)\n input2 = np.random.rand(5, 4).astype(np.float32)\n result = layers.Stack()([input1, input2])\n assert result.shape == (5, 2, 4)\n assert np.array_equal(input1, result[:, 0, :])\n assert np.array_equal(input2, result[:, 1, :])\n\n def test_variable(self):\n \"\"\"Test invoking Variable.\"\"\"\n value = np.random.rand(5, 4).astype(np.float32)\n layer = layers.Variable(value)\n layer.build([])\n result = layer.call([]).numpy()\n assert np.allclose(result, value)\n assert len(layer.trainable_variables) == 1\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_alpha_share_layer(self):\n \"\"\"Test invoking AlphaShareLayer.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.AlphaShareLayer()\n result = layer([input1, input2])\n assert input1.shape == result[0].shape\n assert input2.shape == result[1].shape\n layer2 = layers.AlphaShareLayer()\n result2 = layer2([input1, input2])\n assert not np.allclose(result[0], result2[0])\n assert not np.allclose(result[1], result2[1])\n result3 = layer([input1, input2])\n assert np.allclose(result[0], result3[0])\n assert np.allclose(result[1], result3[1])\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_DAG_layer(self):\n \"\"\"Test invoking DAGLayer.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 75\n max_atoms = 50\n layer_sizes = [100]\n atom_features = np.random.rand(batch_size, n_atom_feat)\n parents = np.random.randint(0, max_atoms, size=(batch_size,\n max_atoms, max_atoms))\n calculation_orders = np.random.randint(0, batch_size, size=(\n batch_size, max_atoms))\n calculation_masks = np.random.randint(0, 2, size=(batch_size,\n max_atoms))\n n_atoms = batch_size\n layer = layers.DAGLayer(n_graph_feat=n_graph_feat, n_atom_feat=\n n_atom_feat, max_atoms=max_atoms, layer_sizes=layer_sizes)\n outputs = layer([atom_features, parents, calculation_orders,\n calculation_masks, n_atoms])\n\n def test_DAG_gather(self):\n \"\"\"Test invoking DAGGather.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 30\n n_outputs = 75\n max_atoms = 50\n layer_sizes = [100]\n layer = layers.DAGGather(n_graph_feat=n_graph_feat, n_outputs=\n n_outputs, max_atoms=max_atoms, layer_sizes=layer_sizes)\n atom_features = np.random.rand(batch_size, n_atom_feat)\n membership = np.sort(np.random.randint(0, batch_size, size=batch_size))\n outputs = layer([atom_features, membership])\n",
"<import token>\n\n\nclass TestLayers(test_util.TensorFlowTestCase):\n <function token>\n <function token>\n\n def test_stack(self):\n \"\"\"Test invoking Stack.\"\"\"\n input1 = np.random.rand(5, 4).astype(np.float32)\n input2 = np.random.rand(5, 4).astype(np.float32)\n result = layers.Stack()([input1, input2])\n assert result.shape == (5, 2, 4)\n assert np.array_equal(input1, result[:, 0, :])\n assert np.array_equal(input2, result[:, 1, :])\n\n def test_variable(self):\n \"\"\"Test invoking Variable.\"\"\"\n value = np.random.rand(5, 4).astype(np.float32)\n layer = layers.Variable(value)\n layer.build([])\n result = layer.call([]).numpy()\n assert np.allclose(result, value)\n assert len(layer.trainable_variables) == 1\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_alpha_share_layer(self):\n \"\"\"Test invoking AlphaShareLayer.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.AlphaShareLayer()\n result = layer([input1, input2])\n assert input1.shape == result[0].shape\n assert input2.shape == result[1].shape\n layer2 = layers.AlphaShareLayer()\n result2 = layer2([input1, input2])\n assert not np.allclose(result[0], result2[0])\n assert not np.allclose(result[1], result2[1])\n result3 = layer([input1, input2])\n assert np.allclose(result[0], result3[0])\n assert np.allclose(result[1], result3[1])\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_DAG_layer(self):\n \"\"\"Test invoking DAGLayer.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 75\n max_atoms = 50\n layer_sizes = [100]\n atom_features = np.random.rand(batch_size, n_atom_feat)\n parents = np.random.randint(0, max_atoms, size=(batch_size,\n max_atoms, max_atoms))\n calculation_orders = np.random.randint(0, batch_size, size=(\n batch_size, max_atoms))\n calculation_masks = np.random.randint(0, 2, size=(batch_size,\n max_atoms))\n n_atoms = batch_size\n layer = layers.DAGLayer(n_graph_feat=n_graph_feat, n_atom_feat=\n n_atom_feat, max_atoms=max_atoms, layer_sizes=layer_sizes)\n outputs = layer([atom_features, parents, calculation_orders,\n calculation_masks, n_atoms])\n <function token>\n",
"<import token>\n\n\nclass TestLayers(test_util.TensorFlowTestCase):\n <function token>\n <function token>\n\n def test_stack(self):\n \"\"\"Test invoking Stack.\"\"\"\n input1 = np.random.rand(5, 4).astype(np.float32)\n input2 = np.random.rand(5, 4).astype(np.float32)\n result = layers.Stack()([input1, input2])\n assert result.shape == (5, 2, 4)\n assert np.array_equal(input1, result[:, 0, :])\n assert np.array_equal(input2, result[:, 1, :])\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_alpha_share_layer(self):\n \"\"\"Test invoking AlphaShareLayer.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.AlphaShareLayer()\n result = layer([input1, input2])\n assert input1.shape == result[0].shape\n assert input2.shape == result[1].shape\n layer2 = layers.AlphaShareLayer()\n result2 = layer2([input1, input2])\n assert not np.allclose(result[0], result2[0])\n assert not np.allclose(result[1], result2[1])\n result3 = layer([input1, input2])\n assert np.allclose(result[0], result3[0])\n assert np.allclose(result[1], result3[1])\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_DAG_layer(self):\n \"\"\"Test invoking DAGLayer.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 75\n max_atoms = 50\n layer_sizes = [100]\n atom_features = np.random.rand(batch_size, n_atom_feat)\n parents = np.random.randint(0, max_atoms, size=(batch_size,\n max_atoms, max_atoms))\n calculation_orders = np.random.randint(0, batch_size, size=(\n batch_size, max_atoms))\n calculation_masks = np.random.randint(0, 2, size=(batch_size,\n max_atoms))\n n_atoms = batch_size\n layer = layers.DAGLayer(n_graph_feat=n_graph_feat, n_atom_feat=\n n_atom_feat, max_atoms=max_atoms, layer_sizes=layer_sizes)\n outputs = layer([atom_features, parents, calculation_orders,\n calculation_masks, n_atoms])\n <function token>\n",
"<import token>\n\n\nclass TestLayers(test_util.TensorFlowTestCase):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_alpha_share_layer(self):\n \"\"\"Test invoking AlphaShareLayer.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.AlphaShareLayer()\n result = layer([input1, input2])\n assert input1.shape == result[0].shape\n assert input2.shape == result[1].shape\n layer2 = layers.AlphaShareLayer()\n result2 = layer2([input1, input2])\n assert not np.allclose(result[0], result2[0])\n assert not np.allclose(result[1], result2[1])\n result3 = layer([input1, input2])\n assert np.allclose(result[0], result3[0])\n assert np.allclose(result[1], result3[1])\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_DAG_layer(self):\n \"\"\"Test invoking DAGLayer.\"\"\"\n batch_size = 10\n n_graph_feat = 30\n n_atom_feat = 75\n max_atoms = 50\n layer_sizes = [100]\n atom_features = np.random.rand(batch_size, n_atom_feat)\n parents = np.random.randint(0, max_atoms, size=(batch_size,\n max_atoms, max_atoms))\n calculation_orders = np.random.randint(0, batch_size, size=(\n batch_size, max_atoms))\n calculation_masks = np.random.randint(0, 2, size=(batch_size,\n max_atoms))\n n_atoms = batch_size\n layer = layers.DAGLayer(n_graph_feat=n_graph_feat, n_atom_feat=\n n_atom_feat, max_atoms=max_atoms, layer_sizes=layer_sizes)\n outputs = layer([atom_features, parents, calculation_orders,\n calculation_masks, n_atoms])\n <function token>\n",
"<import token>\n\n\nclass TestLayers(test_util.TensorFlowTestCase):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_alpha_share_layer(self):\n \"\"\"Test invoking AlphaShareLayer.\"\"\"\n batch_size = 10\n length = 6\n input1 = np.random.rand(batch_size, length).astype(np.float32)\n input2 = np.random.rand(batch_size, length).astype(np.float32)\n layer = layers.AlphaShareLayer()\n result = layer([input1, input2])\n assert input1.shape == result[0].shape\n assert input2.shape == result[1].shape\n layer2 = layers.AlphaShareLayer()\n result2 = layer2([input1, input2])\n assert not np.allclose(result[0], result2[0])\n assert not np.allclose(result[1], result2[1])\n result3 = layer([input1, input2])\n assert np.allclose(result[0], result3[0])\n assert np.allclose(result[1], result3[1])\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n\n\nclass TestLayers(test_util.TensorFlowTestCase):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
98,569 |
56e363b0b374ff2cfbaacbd35fa55027b3e145c1
|
# Generated by Django 3.0 on 2019-12-10 09:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('magazines', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='magazine',
name='ebook',
field=models.FileField(null=True, upload_to=''),
),
]
|
[
"# Generated by Django 3.0 on 2019-12-10 09:16\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('magazines', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='magazine',\n name='ebook',\n field=models.FileField(null=True, upload_to=''),\n ),\n ]\n",
"from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('magazines', '0001_initial')]\n operations = [migrations.AddField(model_name='magazine', name='ebook',\n field=models.FileField(null=True, upload_to=''))]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('magazines', '0001_initial')]\n operations = [migrations.AddField(model_name='magazine', name='ebook',\n field=models.FileField(null=True, upload_to=''))]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
98,570 |
32efae76663d5dd4981a37679c4432bbb8e4572d
|
#!/usr/bin/env python
#-*- coding:utf-8 _*-
'''
@author: Jiawei Adrian Li
@contact: [email protected] / [email protected]
Better to run than curse the road !
'''
# import necessary packages
import tensorflow as tf
import argparse
import sys
import io
import time
import numpy as np
from models import Model # import main model
from models.model_configs import params_model # import model parameters configuration
from data_loading.batch_iterator import batch_iterator # import batch iterator for training and evaluating
from data_loading.file_reader import process_file, vocab_builder, read_vocab_file # tools for data loading
from GPUmanager import GPUManager # gpu manager (optional)
from omnibox import * # toolkits (optional)
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8') # some printing settings
np.set_printoptions(linewidth=236, precision=4, threshold=10000)
class Config:
'''
define paths, hyper params and so forth
'''
    data_dir = '\\DATA\\WebQA.v1.0\\WebQA.v1.0'  # Windows-style path; backslashes doubled so '\v' is not read as an escape sequence
train = jpath(data_dir, 'train_dataset_PNratio_0.5.pkl')
dev = jpath(data_dir, 'dev.pkl')
test = jpath(data_dir, 'test.pkl')
vocab = jpath(data_dir, 'vocab.pkl')
#experiment settings
is_train = True
model_ckpt = 'ckpt/model_ckpt'
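    # sim_mode selects which similarity head is used: 'sim1' drives model.sim1_loss / model.opt1,
    # while 'sim2' drives model.sim2_loss / model.opt2 (see the training branch in main()).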
sim_mode = 'sim2'
max_seq_len = 512
train_epoch = 1000
train_batch_size = 512
eval_epoch = 5
def evaluate(model, sess, k,v,y, bz, mode):
'''
some evaluation function
'''
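    # k, v, y: arrays as produced by process_file(); bz: evaluation batch size;
    # mode: 'sim1' or 'sim2', selecting which of the model's loss tensors is averaged over the eval set.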
batcher = batch_iterator(k,v,y, batch_size=bz) # here we use batch_iterator to traversing whole eval data
total_loss = []
for kb, vb, yb in batcher.yield_batch(): # batch size batch
feed_dict = {model.input_k:kb,
model.input_v:vb,
model.input_y:yb}
if mode=='sim1':
loss = sess.run(model.sim1_loss, feed_dict=feed_dict)
total_loss.append(loss)
if mode == 'sim2':
loss = sess.run(model.sim2_loss, feed_dict=feed_dict)
total_loss.append(loss)
return np.mean(total_loss) # return some metrics you want
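
# Illustrative call (mirrors the epoch-end evaluation in main() below):
#   dev_mse = evaluate(model, sess, k_dev, v_dev, y_dev, 64, Config.sim_mode)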
def main(config=Config):
'''
main training pipeline
:param config: config for data paths and so forth
'''
model_path = config.model_ckpt # model save path
gm = GPUManager()
with gm.auto_choice():
configProto = tf.ConfigProto(allow_soft_placement=True)
configProto.gpu_options.allow_growth = True
sess = tf.Session(config=configProto)
# construct computational Graph
# data loading
word2id = read_vocab_file(config.vocab)
k_train, v_train, y_train = process_file(config.train, word2id, config.max_seq_len)
k_dev, v_dev, y_dev = process_file(config.dev, word2id, config.max_seq_len)
k_test, v_test, y_test = process_file(config.test, word2id, config.max_seq_len)
# init model
model = Model(params_model)
# init saver
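        # (only trainable variables are included, so optimizer slot variables are not checkpointed)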
mysaver = tf.train.Saver(tf.trainable_variables())
# do training
if config.is_train:
# init all variables
sess.run(tf.global_variables_initializer())
# load old model if finetune
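            # NOTE: tf.train.latest_checkpoint expects a checkpoint *directory*; model_ckpt is a
            # save prefix ('ckpt/model_ckpt'), so this lookup may return None unless the directory is passed.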
ckpt = tf.train.latest_checkpoint(model_path)
if ckpt is not None:
mysaver.restore(sess, ckpt)
# base loss for recording
best_loss = 100000.
train_loss = 0.0
# begin epochs iteration
for epoch in range(config.train_epoch):
epoch_total_loss = [] # record epoch average loss
count = 0
# define the batch iterators
batcher = batch_iterator(k_train, v_train, y_train, batch_size=config.train_batch_size)
for k_batch, v_batch, y_batch in batcher.yield_batch():
feed_dict = {model.input_k: k_batch,
model.input_v: v_batch,
model.input_y: y_batch}
# loss and opt
if config.sim_mode == 'sim1':
fetches = {'opt': model.opt1,
'loss': model.sim1_loss}
result = sess.run(fetches=fetches, feed_dict=feed_dict)
train_loss = result['loss']
epoch_total_loss.append(train_loss)
elif config.sim_mode == 'sim2':
fetches = {'opt': model.opt2,
'loss': model.sim2_loss}
result = sess.run(fetches=fetches, feed_dict=feed_dict)
train_loss = result['loss']
epoch_total_loss.append(train_loss)
# (optional) can also eval during batch iterations if the data is so big
print('\r[Train]:Epoch-{},batch-{}/{},current avg mse-{}'.format(epoch, count, batcher.num_batch, np.mean(epoch_total_loss)), end='')
sys.stdout.flush()
time.sleep(0.01)
if count % 100 == 0 and count != 0: # every 100 batches and exclude the first batch
                        dev_loss = evaluate(model, sess, k_dev, v_dev, y_dev, 512, config.sim_mode)
if dev_loss < best_loss:
best_loss = dev_loss
mysaver.save(sess=sess, save_path=config.model_ckpt)
print('\nUpdated model!')
print("[Eval]:Epoch-{},batch-{}/{},eval average mse loss:{}\n".format(epoch, count, batcher.num_batch, dev_loss), end='')
print("\r[Eval]:Epoch-{},batch-{}/{},eval average mse loss:{}\n".format(epoch, count, batcher.num_batch, dev_loss), end='')
sys.stdout.flush()
time.sleep(0.01)
count += 1 # record batch idx
epoch_avg_loss = np.mean(epoch_total_loss)
# eval during an epoch and at the end of an epoch
if epoch % config.eval_epoch == 0:
dev_loss = evaluate(model, sess, k_dev, v_dev, y_dev, 64, config.sim_mode)
if dev_loss < best_loss:
best_loss = dev_loss
mysaver.save(sess=sess, save_path=config.model_ckpt)
print('Updated model !')
print("[Eval]: Epoch - {} , eval average mse loss: {}".format(epoch, dev_loss))
print("[train]: Epoch - {} , train average mse loss: {}".format(epoch, epoch_avg_loss))
# do testing / predicting
elif not config.is_train:
ckpt = tf.train.latest_checkpoint(model_path)
if ckpt is not None:
mysaver.restore(sess, ckpt)
else:
raise FileNotFoundError('Cannot load model ckpt, plz check model path')
test_loss = evaluate(model, sess, k_test, v_test, y_test, 64, config.sim_mode)
print("[Test]: test mse: %.4f" % (test_loss))
|
[
"#!/usr/bin/env python \n#-*- coding:utf-8 _*- \n'''\n@author: Jiawei Adrian Li\n@contact: [email protected] / [email protected]\nBetter to run than curse the road !\n'''\n\n# import necessary packages\nimport tensorflow as tf\nimport argparse\nimport sys\nimport io\nimport time\nimport numpy as np\nfrom models import Model # import main model\nfrom models.model_configs import params_model # import model parameters configuration\nfrom data_loading.batch_iterator import batch_iterator # import batch iterator for training and evaluating\nfrom data_loading.file_reader import process_file, vocab_builder, read_vocab_file # tools for data loading\nfrom GPUmanager import GPUManager # gpu manager (optional)\nfrom omnibox import * # toolkits (optional)\n\nsys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8') # some printing settings\nnp.set_printoptions(linewidth=236, precision=4, threshold=10000)\n\nclass Config:\n '''\n define paths, hyper params and so forth\n '''\n data_dir = '\\DATA\\WebQA.v1.0\\WebQA.v1.0'\n train = jpath(data_dir, 'train_dataset_PNratio_0.5.pkl')\n dev = jpath(data_dir, 'dev.pkl')\n test = jpath(data_dir, 'test.pkl')\n vocab = jpath(data_dir, 'vocab.pkl')\n #experiment settings\n is_train = True\n model_ckpt = 'ckpt/model_ckpt'\n sim_mode = 'sim2'\n max_seq_len = 512\n train_epoch = 1000\n train_batch_size = 512\n eval_epoch = 5\n\n\ndef evaluate(model, sess, k,v,y, bz, mode):\n '''\n some evaluation function\n '''\n batcher = batch_iterator(k,v,y, batch_size=bz) # here we use batch_iterator to traversing whole eval data\n total_loss = []\n for kb, vb, yb in batcher.yield_batch(): # batch size batch\n feed_dict = {model.input_k:kb,\n model.input_v:vb,\n model.input_y:yb}\n if mode=='sim1':\n loss = sess.run(model.sim1_loss, feed_dict=feed_dict)\n total_loss.append(loss)\n if mode == 'sim2':\n loss = sess.run(model.sim2_loss, feed_dict=feed_dict)\n total_loss.append(loss)\n return np.mean(total_loss) # return some metrics you want\n\n\ndef main(config=Config):\n '''\n main training pipeline\n :param config: config for data paths and so forth\n '''\n model_path = config.model_ckpt # model save path\n gm = GPUManager()\n with gm.auto_choice():\n configProto = tf.ConfigProto(allow_soft_placement=True)\n configProto.gpu_options.allow_growth = True\n sess = tf.Session(config=configProto)\n # construct computational Graph\n # data loading\n word2id = read_vocab_file(config.vocab)\n k_train, v_train, y_train = process_file(config.train, word2id, config.max_seq_len)\n k_dev, v_dev, y_dev = process_file(config.dev, word2id, config.max_seq_len)\n k_test, v_test, y_test = process_file(config.test, word2id, config.max_seq_len)\n\n # init model\n model = Model(params_model)\n\n # init saver\n mysaver = tf.train.Saver(tf.trainable_variables())\n\n # do training\n if config.is_train:\n # init all variables\n sess.run(tf.global_variables_initializer())\n # load old model if finetune\n ckpt = tf.train.latest_checkpoint(model_path)\n if ckpt is not None:\n mysaver.restore(sess, ckpt)\n # base loss for recording\n best_loss = 100000.\n train_loss = 0.0\n\n # begin epochs iteration\n for epoch in range(config.train_epoch):\n epoch_total_loss = [] # record epoch average loss\n count = 0\n # define the batch iterators\n batcher = batch_iterator(k_train, v_train, y_train, batch_size=config.train_batch_size)\n for k_batch, v_batch, y_batch in batcher.yield_batch():\n feed_dict = {model.input_k: k_batch,\n model.input_v: v_batch,\n model.input_y: y_batch}\n\n # loss and opt\n if 
config.sim_mode == 'sim1':\n fetches = {'opt': model.opt1,\n 'loss': model.sim1_loss}\n result = sess.run(fetches=fetches, feed_dict=feed_dict)\n train_loss = result['loss']\n epoch_total_loss.append(train_loss)\n elif config.sim_mode == 'sim2':\n fetches = {'opt': model.opt2,\n 'loss': model.sim2_loss}\n result = sess.run(fetches=fetches, feed_dict=feed_dict)\n train_loss = result['loss']\n epoch_total_loss.append(train_loss)\n\n # (optional) can also eval during batch iterations if the data is so big\n print('\\r[Train]:Epoch-{},batch-{}/{},current avg mse-{}'.format(epoch, count, batcher.num_batch, np.mean(epoch_total_loss)), end='')\n sys.stdout.flush()\n time.sleep(0.01)\n if count % 100 == 0 and count != 0: # every 100 batches and exclude the first batch\n dev_loss = evaluate(model,sess,k_dev, v_dev, y_dev, 512, mode)\n if dev_loss < best_loss:\n best_loss = dev_loss\n mysaver.save(sess=sess, save_path=config.model_ckpt)\n print('\\nUpdated model!')\n print(\"[Eval]:Epoch-{},batch-{}/{},eval average mse loss:{}\\n\".format(epoch, count, batcher.num_batch, dev_loss), end='')\n print(\"\\r[Eval]:Epoch-{},batch-{}/{},eval average mse loss:{}\\n\".format(epoch, count, batcher.num_batch, dev_loss), end='')\n sys.stdout.flush()\n time.sleep(0.01)\n count += 1 # record batch idx\n\n epoch_avg_loss = np.mean(epoch_total_loss)\n # eval during an epoch and at the end of an epoch\n if epoch % config.eval_epoch == 0:\n dev_loss = evaluate(model, sess, k_dev, v_dev, y_dev, 64, config.sim_mode)\n if dev_loss < best_loss:\n best_loss = dev_loss\n mysaver.save(sess=sess, save_path=config.model_ckpt)\n print('Updated model !')\n print(\"[Eval]: Epoch - {} , eval average mse loss: {}\".format(epoch, dev_loss))\n print(\"[train]: Epoch - {} , train average mse loss: {}\".format(epoch, epoch_avg_loss))\n\n\n # do testing / predicting\n elif not config.is_train:\n ckpt = tf.train.latest_checkpoint(model_path)\n if ckpt is not None:\n mysaver.restore(sess, ckpt)\n else:\n raise FileNotFoundError('Cannot load model ckpt, plz check model path')\n\n test_loss = evaluate(model, sess, k_test, v_test, y_test, 64, config.sim_mode)\n print(\"[Test]: test mse: %.4f\" % (test_loss))",
"<docstring token>\nimport tensorflow as tf\nimport argparse\nimport sys\nimport io\nimport time\nimport numpy as np\nfrom models import Model\nfrom models.model_configs import params_model\nfrom data_loading.batch_iterator import batch_iterator\nfrom data_loading.file_reader import process_file, vocab_builder, read_vocab_file\nfrom GPUmanager import GPUManager\nfrom omnibox import *\nsys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')\nnp.set_printoptions(linewidth=236, precision=4, threshold=10000)\n\n\nclass Config:\n \"\"\"\n define paths, hyper params and so forth\n \"\"\"\n data_dir = '\\\\DATA\\\\WebQA.v1.0\\\\WebQA.v1.0'\n train = jpath(data_dir, 'train_dataset_PNratio_0.5.pkl')\n dev = jpath(data_dir, 'dev.pkl')\n test = jpath(data_dir, 'test.pkl')\n vocab = jpath(data_dir, 'vocab.pkl')\n is_train = True\n model_ckpt = 'ckpt/model_ckpt'\n sim_mode = 'sim2'\n max_seq_len = 512\n train_epoch = 1000\n train_batch_size = 512\n eval_epoch = 5\n\n\ndef evaluate(model, sess, k, v, y, bz, mode):\n \"\"\"\n some evaluation function\n \"\"\"\n batcher = batch_iterator(k, v, y, batch_size=bz)\n total_loss = []\n for kb, vb, yb in batcher.yield_batch():\n feed_dict = {model.input_k: kb, model.input_v: vb, model.input_y: yb}\n if mode == 'sim1':\n loss = sess.run(model.sim1_loss, feed_dict=feed_dict)\n total_loss.append(loss)\n if mode == 'sim2':\n loss = sess.run(model.sim2_loss, feed_dict=feed_dict)\n total_loss.append(loss)\n return np.mean(total_loss)\n\n\ndef main(config=Config):\n \"\"\"\n main training pipeline\n :param config: config for data paths and so forth\n \"\"\"\n model_path = config.model_ckpt\n gm = GPUManager()\n with gm.auto_choice():\n configProto = tf.ConfigProto(allow_soft_placement=True)\n configProto.gpu_options.allow_growth = True\n sess = tf.Session(config=configProto)\n word2id = read_vocab_file(config.vocab)\n k_train, v_train, y_train = process_file(config.train, word2id,\n config.max_seq_len)\n k_dev, v_dev, y_dev = process_file(config.dev, word2id, config.\n max_seq_len)\n k_test, v_test, y_test = process_file(config.test, word2id, config.\n max_seq_len)\n model = Model(params_model)\n mysaver = tf.train.Saver(tf.trainable_variables())\n if config.is_train:\n sess.run(tf.global_variables_initializer())\n ckpt = tf.train.latest_checkpoint(model_path)\n if ckpt is not None:\n mysaver.restore(sess, ckpt)\n best_loss = 100000.0\n train_loss = 0.0\n for epoch in range(config.train_epoch):\n epoch_total_loss = []\n count = 0\n batcher = batch_iterator(k_train, v_train, y_train,\n batch_size=config.train_batch_size)\n for k_batch, v_batch, y_batch in batcher.yield_batch():\n feed_dict = {model.input_k: k_batch, model.input_v:\n v_batch, model.input_y: y_batch}\n if config.sim_mode == 'sim1':\n fetches = {'opt': model.opt1, 'loss': model.sim1_loss}\n result = sess.run(fetches=fetches, feed_dict=feed_dict)\n train_loss = result['loss']\n epoch_total_loss.append(train_loss)\n elif config.sim_mode == 'sim2':\n fetches = {'opt': model.opt2, 'loss': model.sim2_loss}\n result = sess.run(fetches=fetches, feed_dict=feed_dict)\n train_loss = result['loss']\n epoch_total_loss.append(train_loss)\n print('\\r[Train]:Epoch-{},batch-{}/{},current avg mse-{}'\n .format(epoch, count, batcher.num_batch, np.mean(\n epoch_total_loss)), end='')\n sys.stdout.flush()\n time.sleep(0.01)\n if count % 100 == 0 and count != 0:\n dev_loss = evaluate(model, sess, k_dev, v_dev,\n y_dev, 512, mode)\n if dev_loss < best_loss:\n best_loss = dev_loss\n mysaver.save(sess=sess, 
save_path=config.model_ckpt\n )\n print('\\nUpdated model!')\n print(\n '[Eval]:Epoch-{},batch-{}/{},eval average mse loss:{}\\n'\n .format(epoch, count, batcher.num_batch,\n dev_loss), end='')\n print(\n '\\r[Eval]:Epoch-{},batch-{}/{},eval average mse loss:{}\\n'\n .format(epoch, count, batcher.num_batch,\n dev_loss), end='')\n sys.stdout.flush()\n time.sleep(0.01)\n count += 1\n epoch_avg_loss = np.mean(epoch_total_loss)\n if epoch % config.eval_epoch == 0:\n dev_loss = evaluate(model, sess, k_dev, v_dev, y_dev, \n 64, config.sim_mode)\n if dev_loss < best_loss:\n best_loss = dev_loss\n mysaver.save(sess=sess, save_path=config.model_ckpt)\n print('Updated model !')\n print('[Eval]: Epoch - {} , eval average mse loss: {}'.\n format(epoch, dev_loss))\n print('[train]: Epoch - {} , train average mse loss: {}'.\n format(epoch, epoch_avg_loss))\n elif not config.is_train:\n ckpt = tf.train.latest_checkpoint(model_path)\n if ckpt is not None:\n mysaver.restore(sess, ckpt)\n else:\n raise FileNotFoundError(\n 'Cannot load model ckpt, plz check model path')\n test_loss = evaluate(model, sess, k_test, v_test, y_test, 64,\n config.sim_mode)\n print('[Test]: test mse: %.4f' % test_loss)\n",
"<docstring token>\n<import token>\nsys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')\nnp.set_printoptions(linewidth=236, precision=4, threshold=10000)\n\n\nclass Config:\n \"\"\"\n define paths, hyper params and so forth\n \"\"\"\n data_dir = '\\\\DATA\\\\WebQA.v1.0\\\\WebQA.v1.0'\n train = jpath(data_dir, 'train_dataset_PNratio_0.5.pkl')\n dev = jpath(data_dir, 'dev.pkl')\n test = jpath(data_dir, 'test.pkl')\n vocab = jpath(data_dir, 'vocab.pkl')\n is_train = True\n model_ckpt = 'ckpt/model_ckpt'\n sim_mode = 'sim2'\n max_seq_len = 512\n train_epoch = 1000\n train_batch_size = 512\n eval_epoch = 5\n\n\ndef evaluate(model, sess, k, v, y, bz, mode):\n \"\"\"\n some evaluation function\n \"\"\"\n batcher = batch_iterator(k, v, y, batch_size=bz)\n total_loss = []\n for kb, vb, yb in batcher.yield_batch():\n feed_dict = {model.input_k: kb, model.input_v: vb, model.input_y: yb}\n if mode == 'sim1':\n loss = sess.run(model.sim1_loss, feed_dict=feed_dict)\n total_loss.append(loss)\n if mode == 'sim2':\n loss = sess.run(model.sim2_loss, feed_dict=feed_dict)\n total_loss.append(loss)\n return np.mean(total_loss)\n\n\ndef main(config=Config):\n \"\"\"\n main training pipeline\n :param config: config for data paths and so forth\n \"\"\"\n model_path = config.model_ckpt\n gm = GPUManager()\n with gm.auto_choice():\n configProto = tf.ConfigProto(allow_soft_placement=True)\n configProto.gpu_options.allow_growth = True\n sess = tf.Session(config=configProto)\n word2id = read_vocab_file(config.vocab)\n k_train, v_train, y_train = process_file(config.train, word2id,\n config.max_seq_len)\n k_dev, v_dev, y_dev = process_file(config.dev, word2id, config.\n max_seq_len)\n k_test, v_test, y_test = process_file(config.test, word2id, config.\n max_seq_len)\n model = Model(params_model)\n mysaver = tf.train.Saver(tf.trainable_variables())\n if config.is_train:\n sess.run(tf.global_variables_initializer())\n ckpt = tf.train.latest_checkpoint(model_path)\n if ckpt is not None:\n mysaver.restore(sess, ckpt)\n best_loss = 100000.0\n train_loss = 0.0\n for epoch in range(config.train_epoch):\n epoch_total_loss = []\n count = 0\n batcher = batch_iterator(k_train, v_train, y_train,\n batch_size=config.train_batch_size)\n for k_batch, v_batch, y_batch in batcher.yield_batch():\n feed_dict = {model.input_k: k_batch, model.input_v:\n v_batch, model.input_y: y_batch}\n if config.sim_mode == 'sim1':\n fetches = {'opt': model.opt1, 'loss': model.sim1_loss}\n result = sess.run(fetches=fetches, feed_dict=feed_dict)\n train_loss = result['loss']\n epoch_total_loss.append(train_loss)\n elif config.sim_mode == 'sim2':\n fetches = {'opt': model.opt2, 'loss': model.sim2_loss}\n result = sess.run(fetches=fetches, feed_dict=feed_dict)\n train_loss = result['loss']\n epoch_total_loss.append(train_loss)\n print('\\r[Train]:Epoch-{},batch-{}/{},current avg mse-{}'\n .format(epoch, count, batcher.num_batch, np.mean(\n epoch_total_loss)), end='')\n sys.stdout.flush()\n time.sleep(0.01)\n if count % 100 == 0 and count != 0:\n dev_loss = evaluate(model, sess, k_dev, v_dev,\n y_dev, 512, mode)\n if dev_loss < best_loss:\n best_loss = dev_loss\n mysaver.save(sess=sess, save_path=config.model_ckpt\n )\n print('\\nUpdated model!')\n print(\n '[Eval]:Epoch-{},batch-{}/{},eval average mse loss:{}\\n'\n .format(epoch, count, batcher.num_batch,\n dev_loss), end='')\n print(\n '\\r[Eval]:Epoch-{},batch-{}/{},eval average mse loss:{}\\n'\n .format(epoch, count, batcher.num_batch,\n dev_loss), end='')\n sys.stdout.flush()\n 
time.sleep(0.01)\n count += 1\n epoch_avg_loss = np.mean(epoch_total_loss)\n if epoch % config.eval_epoch == 0:\n dev_loss = evaluate(model, sess, k_dev, v_dev, y_dev, \n 64, config.sim_mode)\n if dev_loss < best_loss:\n best_loss = dev_loss\n mysaver.save(sess=sess, save_path=config.model_ckpt)\n print('Updated model !')\n print('[Eval]: Epoch - {} , eval average mse loss: {}'.\n format(epoch, dev_loss))\n print('[train]: Epoch - {} , train average mse loss: {}'.\n format(epoch, epoch_avg_loss))\n elif not config.is_train:\n ckpt = tf.train.latest_checkpoint(model_path)\n if ckpt is not None:\n mysaver.restore(sess, ckpt)\n else:\n raise FileNotFoundError(\n 'Cannot load model ckpt, plz check model path')\n test_loss = evaluate(model, sess, k_test, v_test, y_test, 64,\n config.sim_mode)\n print('[Test]: test mse: %.4f' % test_loss)\n",
"<docstring token>\n<import token>\n<assignment token>\nnp.set_printoptions(linewidth=236, precision=4, threshold=10000)\n\n\nclass Config:\n \"\"\"\n define paths, hyper params and so forth\n \"\"\"\n data_dir = '\\\\DATA\\\\WebQA.v1.0\\\\WebQA.v1.0'\n train = jpath(data_dir, 'train_dataset_PNratio_0.5.pkl')\n dev = jpath(data_dir, 'dev.pkl')\n test = jpath(data_dir, 'test.pkl')\n vocab = jpath(data_dir, 'vocab.pkl')\n is_train = True\n model_ckpt = 'ckpt/model_ckpt'\n sim_mode = 'sim2'\n max_seq_len = 512\n train_epoch = 1000\n train_batch_size = 512\n eval_epoch = 5\n\n\ndef evaluate(model, sess, k, v, y, bz, mode):\n \"\"\"\n some evaluation function\n \"\"\"\n batcher = batch_iterator(k, v, y, batch_size=bz)\n total_loss = []\n for kb, vb, yb in batcher.yield_batch():\n feed_dict = {model.input_k: kb, model.input_v: vb, model.input_y: yb}\n if mode == 'sim1':\n loss = sess.run(model.sim1_loss, feed_dict=feed_dict)\n total_loss.append(loss)\n if mode == 'sim2':\n loss = sess.run(model.sim2_loss, feed_dict=feed_dict)\n total_loss.append(loss)\n return np.mean(total_loss)\n\n\ndef main(config=Config):\n \"\"\"\n main training pipeline\n :param config: config for data paths and so forth\n \"\"\"\n model_path = config.model_ckpt\n gm = GPUManager()\n with gm.auto_choice():\n configProto = tf.ConfigProto(allow_soft_placement=True)\n configProto.gpu_options.allow_growth = True\n sess = tf.Session(config=configProto)\n word2id = read_vocab_file(config.vocab)\n k_train, v_train, y_train = process_file(config.train, word2id,\n config.max_seq_len)\n k_dev, v_dev, y_dev = process_file(config.dev, word2id, config.\n max_seq_len)\n k_test, v_test, y_test = process_file(config.test, word2id, config.\n max_seq_len)\n model = Model(params_model)\n mysaver = tf.train.Saver(tf.trainable_variables())\n if config.is_train:\n sess.run(tf.global_variables_initializer())\n ckpt = tf.train.latest_checkpoint(model_path)\n if ckpt is not None:\n mysaver.restore(sess, ckpt)\n best_loss = 100000.0\n train_loss = 0.0\n for epoch in range(config.train_epoch):\n epoch_total_loss = []\n count = 0\n batcher = batch_iterator(k_train, v_train, y_train,\n batch_size=config.train_batch_size)\n for k_batch, v_batch, y_batch in batcher.yield_batch():\n feed_dict = {model.input_k: k_batch, model.input_v:\n v_batch, model.input_y: y_batch}\n if config.sim_mode == 'sim1':\n fetches = {'opt': model.opt1, 'loss': model.sim1_loss}\n result = sess.run(fetches=fetches, feed_dict=feed_dict)\n train_loss = result['loss']\n epoch_total_loss.append(train_loss)\n elif config.sim_mode == 'sim2':\n fetches = {'opt': model.opt2, 'loss': model.sim2_loss}\n result = sess.run(fetches=fetches, feed_dict=feed_dict)\n train_loss = result['loss']\n epoch_total_loss.append(train_loss)\n print('\\r[Train]:Epoch-{},batch-{}/{},current avg mse-{}'\n .format(epoch, count, batcher.num_batch, np.mean(\n epoch_total_loss)), end='')\n sys.stdout.flush()\n time.sleep(0.01)\n if count % 100 == 0 and count != 0:\n dev_loss = evaluate(model, sess, k_dev, v_dev,\n y_dev, 512, mode)\n if dev_loss < best_loss:\n best_loss = dev_loss\n mysaver.save(sess=sess, save_path=config.model_ckpt\n )\n print('\\nUpdated model!')\n print(\n '[Eval]:Epoch-{},batch-{}/{},eval average mse loss:{}\\n'\n .format(epoch, count, batcher.num_batch,\n dev_loss), end='')\n print(\n '\\r[Eval]:Epoch-{},batch-{}/{},eval average mse loss:{}\\n'\n .format(epoch, count, batcher.num_batch,\n dev_loss), end='')\n sys.stdout.flush()\n time.sleep(0.01)\n count += 1\n epoch_avg_loss = 
np.mean(epoch_total_loss)\n if epoch % config.eval_epoch == 0:\n dev_loss = evaluate(model, sess, k_dev, v_dev, y_dev, \n 64, config.sim_mode)\n if dev_loss < best_loss:\n best_loss = dev_loss\n mysaver.save(sess=sess, save_path=config.model_ckpt)\n print('Updated model !')\n print('[Eval]: Epoch - {} , eval average mse loss: {}'.\n format(epoch, dev_loss))\n print('[train]: Epoch - {} , train average mse loss: {}'.\n format(epoch, epoch_avg_loss))\n elif not config.is_train:\n ckpt = tf.train.latest_checkpoint(model_path)\n if ckpt is not None:\n mysaver.restore(sess, ckpt)\n else:\n raise FileNotFoundError(\n 'Cannot load model ckpt, plz check model path')\n test_loss = evaluate(model, sess, k_test, v_test, y_test, 64,\n config.sim_mode)\n print('[Test]: test mse: %.4f' % test_loss)\n",
"<docstring token>\n<import token>\n<assignment token>\n<code token>\n\n\nclass Config:\n \"\"\"\n define paths, hyper params and so forth\n \"\"\"\n data_dir = '\\\\DATA\\\\WebQA.v1.0\\\\WebQA.v1.0'\n train = jpath(data_dir, 'train_dataset_PNratio_0.5.pkl')\n dev = jpath(data_dir, 'dev.pkl')\n test = jpath(data_dir, 'test.pkl')\n vocab = jpath(data_dir, 'vocab.pkl')\n is_train = True\n model_ckpt = 'ckpt/model_ckpt'\n sim_mode = 'sim2'\n max_seq_len = 512\n train_epoch = 1000\n train_batch_size = 512\n eval_epoch = 5\n\n\ndef evaluate(model, sess, k, v, y, bz, mode):\n \"\"\"\n some evaluation function\n \"\"\"\n batcher = batch_iterator(k, v, y, batch_size=bz)\n total_loss = []\n for kb, vb, yb in batcher.yield_batch():\n feed_dict = {model.input_k: kb, model.input_v: vb, model.input_y: yb}\n if mode == 'sim1':\n loss = sess.run(model.sim1_loss, feed_dict=feed_dict)\n total_loss.append(loss)\n if mode == 'sim2':\n loss = sess.run(model.sim2_loss, feed_dict=feed_dict)\n total_loss.append(loss)\n return np.mean(total_loss)\n\n\ndef main(config=Config):\n \"\"\"\n main training pipeline\n :param config: config for data paths and so forth\n \"\"\"\n model_path = config.model_ckpt\n gm = GPUManager()\n with gm.auto_choice():\n configProto = tf.ConfigProto(allow_soft_placement=True)\n configProto.gpu_options.allow_growth = True\n sess = tf.Session(config=configProto)\n word2id = read_vocab_file(config.vocab)\n k_train, v_train, y_train = process_file(config.train, word2id,\n config.max_seq_len)\n k_dev, v_dev, y_dev = process_file(config.dev, word2id, config.\n max_seq_len)\n k_test, v_test, y_test = process_file(config.test, word2id, config.\n max_seq_len)\n model = Model(params_model)\n mysaver = tf.train.Saver(tf.trainable_variables())\n if config.is_train:\n sess.run(tf.global_variables_initializer())\n ckpt = tf.train.latest_checkpoint(model_path)\n if ckpt is not None:\n mysaver.restore(sess, ckpt)\n best_loss = 100000.0\n train_loss = 0.0\n for epoch in range(config.train_epoch):\n epoch_total_loss = []\n count = 0\n batcher = batch_iterator(k_train, v_train, y_train,\n batch_size=config.train_batch_size)\n for k_batch, v_batch, y_batch in batcher.yield_batch():\n feed_dict = {model.input_k: k_batch, model.input_v:\n v_batch, model.input_y: y_batch}\n if config.sim_mode == 'sim1':\n fetches = {'opt': model.opt1, 'loss': model.sim1_loss}\n result = sess.run(fetches=fetches, feed_dict=feed_dict)\n train_loss = result['loss']\n epoch_total_loss.append(train_loss)\n elif config.sim_mode == 'sim2':\n fetches = {'opt': model.opt2, 'loss': model.sim2_loss}\n result = sess.run(fetches=fetches, feed_dict=feed_dict)\n train_loss = result['loss']\n epoch_total_loss.append(train_loss)\n print('\\r[Train]:Epoch-{},batch-{}/{},current avg mse-{}'\n .format(epoch, count, batcher.num_batch, np.mean(\n epoch_total_loss)), end='')\n sys.stdout.flush()\n time.sleep(0.01)\n if count % 100 == 0 and count != 0:\n dev_loss = evaluate(model, sess, k_dev, v_dev,\n y_dev, 512, mode)\n if dev_loss < best_loss:\n best_loss = dev_loss\n mysaver.save(sess=sess, save_path=config.model_ckpt\n )\n print('\\nUpdated model!')\n print(\n '[Eval]:Epoch-{},batch-{}/{},eval average mse loss:{}\\n'\n .format(epoch, count, batcher.num_batch,\n dev_loss), end='')\n print(\n '\\r[Eval]:Epoch-{},batch-{}/{},eval average mse loss:{}\\n'\n .format(epoch, count, batcher.num_batch,\n dev_loss), end='')\n sys.stdout.flush()\n time.sleep(0.01)\n count += 1\n epoch_avg_loss = np.mean(epoch_total_loss)\n if epoch % config.eval_epoch == 
0:\n dev_loss = evaluate(model, sess, k_dev, v_dev, y_dev, \n 64, config.sim_mode)\n if dev_loss < best_loss:\n best_loss = dev_loss\n mysaver.save(sess=sess, save_path=config.model_ckpt)\n print('Updated model !')\n print('[Eval]: Epoch - {} , eval average mse loss: {}'.\n format(epoch, dev_loss))\n print('[train]: Epoch - {} , train average mse loss: {}'.\n format(epoch, epoch_avg_loss))\n elif not config.is_train:\n ckpt = tf.train.latest_checkpoint(model_path)\n if ckpt is not None:\n mysaver.restore(sess, ckpt)\n else:\n raise FileNotFoundError(\n 'Cannot load model ckpt, plz check model path')\n test_loss = evaluate(model, sess, k_test, v_test, y_test, 64,\n config.sim_mode)\n print('[Test]: test mse: %.4f' % test_loss)\n",
"<docstring token>\n<import token>\n<assignment token>\n<code token>\n\n\nclass Config:\n \"\"\"\n define paths, hyper params and so forth\n \"\"\"\n data_dir = '\\\\DATA\\\\WebQA.v1.0\\\\WebQA.v1.0'\n train = jpath(data_dir, 'train_dataset_PNratio_0.5.pkl')\n dev = jpath(data_dir, 'dev.pkl')\n test = jpath(data_dir, 'test.pkl')\n vocab = jpath(data_dir, 'vocab.pkl')\n is_train = True\n model_ckpt = 'ckpt/model_ckpt'\n sim_mode = 'sim2'\n max_seq_len = 512\n train_epoch = 1000\n train_batch_size = 512\n eval_epoch = 5\n\n\n<function token>\n\n\ndef main(config=Config):\n \"\"\"\n main training pipeline\n :param config: config for data paths and so forth\n \"\"\"\n model_path = config.model_ckpt\n gm = GPUManager()\n with gm.auto_choice():\n configProto = tf.ConfigProto(allow_soft_placement=True)\n configProto.gpu_options.allow_growth = True\n sess = tf.Session(config=configProto)\n word2id = read_vocab_file(config.vocab)\n k_train, v_train, y_train = process_file(config.train, word2id,\n config.max_seq_len)\n k_dev, v_dev, y_dev = process_file(config.dev, word2id, config.\n max_seq_len)\n k_test, v_test, y_test = process_file(config.test, word2id, config.\n max_seq_len)\n model = Model(params_model)\n mysaver = tf.train.Saver(tf.trainable_variables())\n if config.is_train:\n sess.run(tf.global_variables_initializer())\n ckpt = tf.train.latest_checkpoint(model_path)\n if ckpt is not None:\n mysaver.restore(sess, ckpt)\n best_loss = 100000.0\n train_loss = 0.0\n for epoch in range(config.train_epoch):\n epoch_total_loss = []\n count = 0\n batcher = batch_iterator(k_train, v_train, y_train,\n batch_size=config.train_batch_size)\n for k_batch, v_batch, y_batch in batcher.yield_batch():\n feed_dict = {model.input_k: k_batch, model.input_v:\n v_batch, model.input_y: y_batch}\n if config.sim_mode == 'sim1':\n fetches = {'opt': model.opt1, 'loss': model.sim1_loss}\n result = sess.run(fetches=fetches, feed_dict=feed_dict)\n train_loss = result['loss']\n epoch_total_loss.append(train_loss)\n elif config.sim_mode == 'sim2':\n fetches = {'opt': model.opt2, 'loss': model.sim2_loss}\n result = sess.run(fetches=fetches, feed_dict=feed_dict)\n train_loss = result['loss']\n epoch_total_loss.append(train_loss)\n print('\\r[Train]:Epoch-{},batch-{}/{},current avg mse-{}'\n .format(epoch, count, batcher.num_batch, np.mean(\n epoch_total_loss)), end='')\n sys.stdout.flush()\n time.sleep(0.01)\n if count % 100 == 0 and count != 0:\n dev_loss = evaluate(model, sess, k_dev, v_dev,\n y_dev, 512, mode)\n if dev_loss < best_loss:\n best_loss = dev_loss\n mysaver.save(sess=sess, save_path=config.model_ckpt\n )\n print('\\nUpdated model!')\n print(\n '[Eval]:Epoch-{},batch-{}/{},eval average mse loss:{}\\n'\n .format(epoch, count, batcher.num_batch,\n dev_loss), end='')\n print(\n '\\r[Eval]:Epoch-{},batch-{}/{},eval average mse loss:{}\\n'\n .format(epoch, count, batcher.num_batch,\n dev_loss), end='')\n sys.stdout.flush()\n time.sleep(0.01)\n count += 1\n epoch_avg_loss = np.mean(epoch_total_loss)\n if epoch % config.eval_epoch == 0:\n dev_loss = evaluate(model, sess, k_dev, v_dev, y_dev, \n 64, config.sim_mode)\n if dev_loss < best_loss:\n best_loss = dev_loss\n mysaver.save(sess=sess, save_path=config.model_ckpt)\n print('Updated model !')\n print('[Eval]: Epoch - {} , eval average mse loss: {}'.\n format(epoch, dev_loss))\n print('[train]: Epoch - {} , train average mse loss: {}'.\n format(epoch, epoch_avg_loss))\n elif not config.is_train:\n ckpt = tf.train.latest_checkpoint(model_path)\n if ckpt is not None:\n 
mysaver.restore(sess, ckpt)\n else:\n raise FileNotFoundError(\n 'Cannot load model ckpt, plz check model path')\n test_loss = evaluate(model, sess, k_test, v_test, y_test, 64,\n config.sim_mode)\n print('[Test]: test mse: %.4f' % test_loss)\n",
"<docstring token>\n<import token>\n<assignment token>\n<code token>\n\n\nclass Config:\n \"\"\"\n define paths, hyper params and so forth\n \"\"\"\n data_dir = '\\\\DATA\\\\WebQA.v1.0\\\\WebQA.v1.0'\n train = jpath(data_dir, 'train_dataset_PNratio_0.5.pkl')\n dev = jpath(data_dir, 'dev.pkl')\n test = jpath(data_dir, 'test.pkl')\n vocab = jpath(data_dir, 'vocab.pkl')\n is_train = True\n model_ckpt = 'ckpt/model_ckpt'\n sim_mode = 'sim2'\n max_seq_len = 512\n train_epoch = 1000\n train_batch_size = 512\n eval_epoch = 5\n\n\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<code token>\n\n\nclass Config:\n <docstring token>\n data_dir = '\\\\DATA\\\\WebQA.v1.0\\\\WebQA.v1.0'\n train = jpath(data_dir, 'train_dataset_PNratio_0.5.pkl')\n dev = jpath(data_dir, 'dev.pkl')\n test = jpath(data_dir, 'test.pkl')\n vocab = jpath(data_dir, 'vocab.pkl')\n is_train = True\n model_ckpt = 'ckpt/model_ckpt'\n sim_mode = 'sim2'\n max_seq_len = 512\n train_epoch = 1000\n train_batch_size = 512\n eval_epoch = 5\n\n\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<code token>\n\n\nclass Config:\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<code token>\n<class token>\n<function token>\n<function token>\n"
] | false |
98,571 |
ccdc84c34f18c1f68df59d1c8cc6618d2e3b61db
|
#Parsing json file and creating many to many sqlite database
#Import libs
import sqlite3
import json
#Create database
conn = sqlite3.connect("rosterdb.sqlite")
cur = conn.cursor()
#Create new tables
cur.executescript('''
DROP TABLE IF EXISTS User;
DROP TABLE IF EXISTS Member;
DROP TABLE IF EXISTS Course;
CREATE TABLE User(
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
name TEXT UNIQUE);
CREATE TABLE Course(
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
title TEXT UNIQUE);
CREATE TABLE Member(
user_id INTEGER,
course_id INTEGER,
role INTEGER,
PRIMARY KEY(user_id,course_id));
''')
#Read json file
fname=input("Enter file name:")
fhand=open(fname).read()
js = json.loads(fhand)
#Insert each data into database
for each in js:
name = each[0]
title = each[1]
role = each[2]
print(name,title,role)
cur.execute('''INSERT OR IGNORE INTO User(name)
VALUES (?)''',(name,))
cur.execute('''SELECT id FROM User WHERE name = ?''',(name,))
user_id = cur.fetchone()[0]
cur.execute('''INSERT OR IGNORE INTO Course(title)
VALUES (?)''',(title,))
cur.execute('''SELECT id FROM Course WHERE title = ?''',(title,))
course_id = cur.fetchone()[0]
cur.execute('''INSERT OR IGNORE INTO
Member(user_id,course_id,role)
VALUES (?,?,?)''',(user_id,course_id,role))
conn.commit()
#Select JOIN results and print one example
cur.execute('''SELECT User.name,Member.role,Course.title FROM
User JOIN Member JOIN Course ON Member.user_id = User.id AND
Member.course_id = Course.id''')
print(cur.fetchone())
|
[
"#Parsing json file and creating many to many sqlite database\n\n#Import libs\nimport sqlite3\nimport json\n\n#Create database\nconn = sqlite3.connect(\"rosterdb.sqlite\")\ncur = conn.cursor()\n\n#Create new tables\ncur.executescript('''\nDROP TABLE IF EXISTS User;\nDROP TABLE IF EXISTS Member;\nDROP TABLE IF EXISTS Course;\n\nCREATE TABLE User(\nid INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,\nname TEXT UNIQUE);\n\nCREATE TABLE Course(\nid INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,\ntitle TEXT UNIQUE);\n\nCREATE TABLE Member(\nuser_id INTEGER,\ncourse_id INTEGER,\nrole INTEGER,\nPRIMARY KEY(user_id,course_id));\n''')\n\n#Read json file\nfname=input(\"Enter file name:\")\nfhand=open(fname).read()\njs = json.loads(fhand)\n\n#Insert each data into database\nfor each in js:\n name = each[0]\n title = each[1]\n role = each[2]\n\n print(name,title,role)\n\n cur.execute('''INSERT OR IGNORE INTO User(name)\n VALUES (?)''',(name,))\n cur.execute('''SELECT id FROM User WHERE name = ?''',(name,))\n user_id = cur.fetchone()[0]\n\n cur.execute('''INSERT OR IGNORE INTO Course(title)\n VALUES (?)''',(title,))\n cur.execute('''SELECT id FROM Course WHERE title = ?''',(title,))\n course_id = cur.fetchone()[0]\n\n cur.execute('''INSERT OR IGNORE INTO\n Member(user_id,course_id,role)\n VALUES (?,?,?)''',(user_id,course_id,role))\n\n conn.commit()\n\n#Select JOIN results and print one example\ncur.execute('''SELECT User.name,Member.role,Course.title FROM\nUser JOIN Member JOIN Course ON Member.user_id = User.id AND\nMember.course_id = Course.id''')\nprint(cur.fetchone())\n",
"import sqlite3\nimport json\nconn = sqlite3.connect('rosterdb.sqlite')\ncur = conn.cursor()\ncur.executescript(\n \"\"\"\nDROP TABLE IF EXISTS User;\nDROP TABLE IF EXISTS Member;\nDROP TABLE IF EXISTS Course;\n\nCREATE TABLE User(\nid INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,\nname TEXT UNIQUE);\n\nCREATE TABLE Course(\nid INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,\ntitle TEXT UNIQUE);\n\nCREATE TABLE Member(\nuser_id INTEGER,\ncourse_id INTEGER,\nrole INTEGER,\nPRIMARY KEY(user_id,course_id));\n\"\"\"\n )\nfname = input('Enter file name:')\nfhand = open(fname).read()\njs = json.loads(fhand)\nfor each in js:\n name = each[0]\n title = each[1]\n role = each[2]\n print(name, title, role)\n cur.execute(\"\"\"INSERT OR IGNORE INTO User(name)\n VALUES (?)\"\"\", (name,))\n cur.execute('SELECT id FROM User WHERE name = ?', (name,))\n user_id = cur.fetchone()[0]\n cur.execute(\"\"\"INSERT OR IGNORE INTO Course(title)\n VALUES (?)\"\"\", (\n title,))\n cur.execute('SELECT id FROM Course WHERE title = ?', (title,))\n course_id = cur.fetchone()[0]\n cur.execute(\n \"\"\"INSERT OR IGNORE INTO\n Member(user_id,course_id,role)\n VALUES (?,?,?)\"\"\"\n , (user_id, course_id, role))\n conn.commit()\ncur.execute(\n \"\"\"SELECT User.name,Member.role,Course.title FROM\nUser JOIN Member JOIN Course ON Member.user_id = User.id AND\nMember.course_id = Course.id\"\"\"\n )\nprint(cur.fetchone())\n",
"<import token>\nconn = sqlite3.connect('rosterdb.sqlite')\ncur = conn.cursor()\ncur.executescript(\n \"\"\"\nDROP TABLE IF EXISTS User;\nDROP TABLE IF EXISTS Member;\nDROP TABLE IF EXISTS Course;\n\nCREATE TABLE User(\nid INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,\nname TEXT UNIQUE);\n\nCREATE TABLE Course(\nid INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,\ntitle TEXT UNIQUE);\n\nCREATE TABLE Member(\nuser_id INTEGER,\ncourse_id INTEGER,\nrole INTEGER,\nPRIMARY KEY(user_id,course_id));\n\"\"\"\n )\nfname = input('Enter file name:')\nfhand = open(fname).read()\njs = json.loads(fhand)\nfor each in js:\n name = each[0]\n title = each[1]\n role = each[2]\n print(name, title, role)\n cur.execute(\"\"\"INSERT OR IGNORE INTO User(name)\n VALUES (?)\"\"\", (name,))\n cur.execute('SELECT id FROM User WHERE name = ?', (name,))\n user_id = cur.fetchone()[0]\n cur.execute(\"\"\"INSERT OR IGNORE INTO Course(title)\n VALUES (?)\"\"\", (\n title,))\n cur.execute('SELECT id FROM Course WHERE title = ?', (title,))\n course_id = cur.fetchone()[0]\n cur.execute(\n \"\"\"INSERT OR IGNORE INTO\n Member(user_id,course_id,role)\n VALUES (?,?,?)\"\"\"\n , (user_id, course_id, role))\n conn.commit()\ncur.execute(\n \"\"\"SELECT User.name,Member.role,Course.title FROM\nUser JOIN Member JOIN Course ON Member.user_id = User.id AND\nMember.course_id = Course.id\"\"\"\n )\nprint(cur.fetchone())\n",
"<import token>\n<assignment token>\ncur.executescript(\n \"\"\"\nDROP TABLE IF EXISTS User;\nDROP TABLE IF EXISTS Member;\nDROP TABLE IF EXISTS Course;\n\nCREATE TABLE User(\nid INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,\nname TEXT UNIQUE);\n\nCREATE TABLE Course(\nid INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,\ntitle TEXT UNIQUE);\n\nCREATE TABLE Member(\nuser_id INTEGER,\ncourse_id INTEGER,\nrole INTEGER,\nPRIMARY KEY(user_id,course_id));\n\"\"\"\n )\n<assignment token>\nfor each in js:\n name = each[0]\n title = each[1]\n role = each[2]\n print(name, title, role)\n cur.execute(\"\"\"INSERT OR IGNORE INTO User(name)\n VALUES (?)\"\"\", (name,))\n cur.execute('SELECT id FROM User WHERE name = ?', (name,))\n user_id = cur.fetchone()[0]\n cur.execute(\"\"\"INSERT OR IGNORE INTO Course(title)\n VALUES (?)\"\"\", (\n title,))\n cur.execute('SELECT id FROM Course WHERE title = ?', (title,))\n course_id = cur.fetchone()[0]\n cur.execute(\n \"\"\"INSERT OR IGNORE INTO\n Member(user_id,course_id,role)\n VALUES (?,?,?)\"\"\"\n , (user_id, course_id, role))\n conn.commit()\ncur.execute(\n \"\"\"SELECT User.name,Member.role,Course.title FROM\nUser JOIN Member JOIN Course ON Member.user_id = User.id AND\nMember.course_id = Course.id\"\"\"\n )\nprint(cur.fetchone())\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,572 |
88b6900a8174f26f57ed1fe1b78fa3aecd1d3afc
|
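# Reads a Python list literal from stdin and prints the index of its largest element.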
arr = eval(input())
print(arr.index(max(arr)))
|
[
"arr = eval(input())\nprint(arr.index(max(arr)))\n",
"<assignment token>\nprint(arr.index(max(arr)))\n",
"<assignment token>\n<code token>\n"
] | false |
98,573 |
306337137e4ac23146781ffa2f92f16c5aecf65c
|
from django.apps import AppConfig
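# Minimal Django application configuration for the 'soet' app.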
class SoetConfig(AppConfig):
name = 'soet'
|
[
"from django.apps import AppConfig\n\n\nclass SoetConfig(AppConfig):\n name = 'soet'\n",
"<import token>\n\n\nclass SoetConfig(AppConfig):\n name = 'soet'\n",
"<import token>\n\n\nclass SoetConfig(AppConfig):\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
98,574 |
83a60a35403753713537855a7c0906e15d769b7e
|
#!/usr/bin/env python
##########################################################################
## ##
## MadWeight ##
## --------- ##
##########################################################################
## ##
## author: Mattelaer Olivier (CP3) ##
## email: [email protected] ##
## author: Artoisenet Pierre (CP3) ##
## email: [email protected] ##
## ##
##########################################################################
## ##
## license: GNU ##
## last-modif:10/06/08 ##
## ##
##########################################################################
## ##
## Content ##
## ------- ##
## +go_to_main_dir ##
## +read_card ##
## +check_for_help ##
## +Class MW_param ##
## | + init ##
## | | + init_run_opt ##
## | + check_info
## | + check_condor ##
## | + detect_SubProcess ##
## | + update_nb_card ##
## | + number_of_P_run ##
## | + set_run_opt ##
## | | + control_opt ##
## ##
##########################################################################
##
## BEGIN INCLUDE
##
import sys
import os
import re
import stat
import Cards
##
## END INCLUDE
## GLOBAL DEFINITION
num_to_tag={1:'param',2:'analyzer',3:'compilation',4:'event',5:'dir',6:'launch',7:'control',8:'collect',9:'plot',-1:'relaunch',-2:'clean',-3:'refine'}
tag_to_num={'param':1,'analyzer':2,'compilation':3,'event':4,'dir':5,'launch':6,'control':7,'collect':8,'plot':9,'relaunch':-1,'clean':-2,'refine':-3}
last_step=9
#1#########################################################################
## START CODE
#1#########################################################################
def go_to_main_dir():
""" move to main position """
pos=os.getcwd()
last=pos.split(os.sep)[-1]
if last=='bin':
os.chdir(os.pardir)
return
if last=='Python':
os.chdir(os.pardir+os.sep+os.pardir+os.sep+os.pardir)
return
list_dir=os.listdir('./')
if 'bin' in list_dir:
return
else:
print 'Error: script must be executed from the main, bin or Python directory'
sys.exit()
#1#########################################################################
def read_card(name_card):
"""put all card information in a dictionary"""
card=Cards.Card(name_card)
return card.info
#1#########################################################################
def check_for_help(opt):
""" check if the user use the -h or -help option or simply invalid option """
opt_pat=re.compile(r'''-?(?P<opt>\w*)[+-]?''',re.I)
help=0
authorized_opt=tag_to_num.keys()+['version']
for i in range(0,len(num_to_tag)):
authorized_opt+=[str(i)]
for i in range(1,len(opt)):
if opt_pat.search(opt[i]):
if opt_pat.search(opt[i]).group('opt').lower() not in authorized_opt:
try:
int(opt_pat.search(opt[i]).group('opt').lower())
except:
os.system('cat ./Source/MadWeight_File/MWP_template/Readme.txt')
sys.exit()
if opt_pat.search(opt[i]).group('opt').lower()=='version':
print 'MadWeight Version'
os.system('cat ./Source/MadWeight_File/MW_TemplateVersion.txt')
sys.exit()
#1#########################################################################
class MW_info(dict):
""" class containing all the option/information from the run """
#2#########################################################################
def __init__(self,card_name):
""" init all the param for the run """
self.mw_card=Cards.Card(card_name)
self.info=self.mw_card.info
for key,value in self.info.items():
self[key]=value
dict.__init__(self.info)
self.check_info()
#assign special value
self.nb_event=int(self.info['mw_run']['2'])
self.nb_card=self.number_of_P_run()
self.check_condor()
self.name=self.take_run_name()
self.P_listdir,self.MW_listdir=self.detect_SubProcess()
self.init_run_opt()
self.def_actif_param()
#3#########################################################################
def init_run_opt(self,value=1):
""" init all the run scheduling paramater to value """
self.run_opt={}
self.run_opt['param']=value
self.run_opt['analyzer']=value
self.run_opt['compilation']=value
self.run_opt['event']=value
self.run_opt['dir']=value
self.run_opt['launch']=value
self.run_opt['control']=value
self.run_opt['collect']=value
self.run_opt['plot']=value
self.run_opt['madweight_main']=value
self.run_opt['relaunch']=0 #only for bugging case... -> desactivate
self.run_opt['refine']=0 #only for bugging case... -> desactivate
self.run_opt['clean']=0 #dangerous... -> desactivate
self.control_opt()
#2#########################################################################
def check_info(self):
""" assign default value if not defined already and check the input type
those default value and the type are defined in MW_param_default.inc
structure of this file:
block tag type value #comment
"""
#define convertissor
def pass_in_integer(value):
return int(value)
def pass_in_logical(value):
if value in ['1','t','T','.true.']:
return 1
else:
return 0
def pass_in_float(value):
return float(value)
for line in open('./Source/MadWeight_File/Python/MW_param_default.inc'):
line=line.split('#')[0] #remove comment
splitline=line.split() #split the data
if len(splitline)!=4:
continue
#assign element
block=splitline[0].lower()
tag=splitline[1].lower()
type=splitline[2].lower()
value=splitline[3]
#check if exist -> default
try:
self[block][tag]
except:
try:
self[block][tag]=value
except:
self[block]={tag:value}
#change type
if type in ['integer','logical','float']:
self[block][tag]=eval('pass_in_'+type+'(self[block][tag])')
#2#########################################################################
def check_condor(self):
""" assign variable cluster and normalisation """
self.cluster=self.info['mw_run']['1']
self.norm_with_cross=self.info['mw_run']['4']
self.condor_req=self.info['mw_run']['11']
#type is automaticaly updated now
#self.cluster=int(condor)
#if norm_with_cross=="F":
# self.norm_with_cross=0
#else:
# self.norm_with_cross=1
#2#########################################################################
def take_run_name(self):
"""take the run name in run_card"""
name="run"
Pattern=re.compile(r'''\'(\S*)\'\s*=\s*run_tag''',re.I)
card=open("./Cards/run_card.dat")
while 1:
line=card.readline()
if line=='':
break
if Pattern.search(line):
name=Pattern.search(line).groups()[0]
break
return name
#2#########################################################################
def detect_SubProcess(self):
MW_SubProcess_list=[]
P_SubProcess_list=[]
list_dir=os.listdir("./SubProcesses/")
for name in list_dir:
try:
st = os.lstat(os.path.join("./SubProcesses/", name))
except os.error:
continue
if stat.S_ISDIR(st.st_mode):
if name[:2]=="MW":
MW_SubProcess_list.append(name)
elif self.norm_with_cross and name[0]=='P':
P_SubProcess_list.append(name)
return P_SubProcess_list,MW_SubProcess_list
#2##########################################################################
def update_nb_card(self):
"take the info from MW_runcard.dat"
self.nb_card=self.number_of_P_run()
self.def_actif_param()
#2##########################################################################
def number_of_P_run(self):
"take the info from MW_runcard.dat"
#check if we use different param_card.dat
# if self.info["mw_parameter"]["1"]=="1":
j=1
while 1:
if os.path.isfile('Cards/param_card_'+str(j)+'.dat'): j+=1
elif(j==1): return j
else: return j-1
#2##########################################################################
def load_events(self):
"detect the number of events for P and MW run"
self.P_nevents=self.info['mw_run']['5']
self.MW_nevents=self.info['mw_run']['6']
#2##########################################################################
def give_block_param_info(self):
""" return the number of modified parameter and the number of different value for each"""
nb_block=0
nb_values=[]
k=0
while 1:
k+=1
try:
self.info['mw_parameter'][str(10*k+1)]
except:
break
nb_block+=1
if type(self.info['mw_parameter'][str(10*k+3)])==list:
nb_values.append(len(self.info['mw_parameter'][str(10*k+3)]))
else:
nb_values.append(1)
return nb_block,nb_values
#3########################################################################
def CardNb_to_ParameterTag(self,num_card):
""" find from th card number, to which value for each block this card belong
num_card is the number of the card in the last generation.
card in previous generation are not accessible by this routine
(and are not related to this MadWeight card anyway)
"""
nb_block,nb_data_by_block=self.give_block_param_info()
if self['mw_parameter']['1']==2:
return [num_card-1]*len(nb_data_by_block)
tag=[]
for k in range(-1,-nb_block-1,-1):
tag.append((num_card-1)%nb_data_by_block[k])
num_card=1+(num_card-(num_card-1)%nb_data_by_block[k])/nb_data_by_block[k]
tag.reverse()
return tag
#2##########################################################################
def set_run_opt(self,option):
"""analyze option for the run"""
if len(option)>1:
self.init_run_opt(0)#put everything to false
else:
return
for i in range(1,len(option)):
if option[i][0]=='-' and option[i][-1]=='+':
num=int(option[i][1:-1])
for j in range(num,last_step+1):
self.run_opt[num_to_tag[j]]=1
elif option[i][0]=='-' and option[i][-1]=='-':
num=int(option[i][1:-1])+1
for j in range(1,num):
self.run_opt[num_to_tag[j]]=1
elif option[i][0]=='-':
num=int(option[i][1:])
for i in option[i][1:]:
self.run_opt[num_to_tag[int(i)]]=1
elif option[i][-1]=='+':
num=tag_to_num[option[i][:-1]]
for j in range(num,last_step+1):
self.run_opt[num_to_tag[j]]=1
elif option[i][-1]=='-':
num=tag_to_num[option[i][:-1]]+1
for j in range(1,num):
self.run_opt[num_to_tag[j]]=1
elif '=' in option[i]:
obj=option[i].split('=')
tag=obj[0]
value=obj[1]
self.run_opt[tag]=value
else:
self.run_opt[option[i]]=1
self.control_opt()
#3##########################################################################
def control_opt(self):
"""analyze option for the run to have coherent input"""
if self.run_opt['refine']:
self.run_opt['relaunch']=1
#check value for 'madweight_main'
for i in range(3,9)+[-1,-3]:
if self.run_opt[num_to_tag[i]]==1:
self.run_opt['madweight_main']=1
break
if self.run_opt['relaunch']==1:
self.run_opt['control']=1
#3##########################################################################
def def_actif_param(self):
""" find which card are still actif """
self.param_is_actif={}
try:
ff=open('Cards/mapping_card.dat')
except:
for i in range(1,self.nb_card+1):
self.param_is_actif[i]=1 #if no file defined all card are supose to be used
self.actif_param=range(1,self.nb_card+1)
return
self.actif_param=[]
for line in ff:
split=line.split()
nb=int(split[0])
actif=int(split[-1])
self.param_is_actif[nb]=actif
if actif:
self.actif_param.append(nb)
if len(self.param_is_actif)!=self.nb_card:
print 'WARNING: wrong mapping file'
|
[
"#!/usr/bin/env python\n##########################################################################\n## ##\n## MadWeight ##\n## --------- ##\n##########################################################################\n## ##\n## author: Mattelaer Olivier (CP3) ##\n## email: [email protected] ##\n## author: Artoisenet Pierre (CP3) ##\n## email: [email protected] ##\n## ##\n##########################################################################\n## ##\n## license: GNU ##\n## last-modif:10/06/08 ##\n## ##\n##########################################################################\n## ##\n## Content ##\n## ------- ##\n## +go_to_main_dir ##\n## +read_card ##\n## +check_for_help ##\n## +Class MW_param ##\n## | + init ##\n## | | + init_run_opt ##\n## | + check_info\n## | + check_condor ##\n## | + detect_SubProcess ##\n## | + update_nb_card ##\n## | + number_of_P_run ##\n## | + set_run_opt ##\n## | | + control_opt ##\n## ##\n##########################################################################\n##\n## BEGIN INCLUDE\n##\nimport sys\nimport os\nimport re\nimport stat\nimport Cards\n##\n## END INCLUDE\n## GLOBAL DEFINITION\n\nnum_to_tag={1:'param',2:'analyzer',3:'compilation',4:'event',5:'dir',6:'launch',7:'control',8:'collect',9:'plot',-1:'relaunch',-2:'clean',-3:'refine'}\ntag_to_num={'param':1,'analyzer':2,'compilation':3,'event':4,'dir':5,'launch':6,'control':7,'collect':8,'plot':9,'relaunch':-1,'clean':-2,'refine':-3}\nlast_step=9\n\n#1#########################################################################\n## START CODE\n#1#########################################################################\ndef go_to_main_dir():\n \"\"\" move to main position \"\"\"\n pos=os.getcwd()\n last=pos.split(os.sep)[-1]\n if last=='bin':\n os.chdir(os.pardir)\n return\n if last=='Python':\n os.chdir(os.pardir+os.sep+os.pardir+os.sep+os.pardir)\n return\n \n list_dir=os.listdir('./')\n if 'bin' in list_dir:\n return\n else:\n print 'Error: script must be executed from the main, bin or Python directory'\n \n sys.exit()\n\n\n\n#1#########################################################################\ndef read_card(name_card):\n \"\"\"put all card information in a dictionary\"\"\"\n\n card=Cards.Card(name_card)\n return card.info\n\n#1#########################################################################\ndef check_for_help(opt):\n \"\"\" check if the user use the -h or -help option or simply invalid option \"\"\"\n\n opt_pat=re.compile(r'''-?(?P<opt>\\w*)[+-]?''',re.I)\n help=0\n authorized_opt=tag_to_num.keys()+['version']\n for i in range(0,len(num_to_tag)):\n authorized_opt+=[str(i)]\n for i in range(1,len(opt)):\n if opt_pat.search(opt[i]):\n if opt_pat.search(opt[i]).group('opt').lower() not in authorized_opt:\n try:\n int(opt_pat.search(opt[i]).group('opt').lower())\n except:\n os.system('cat ./Source/MadWeight_File/MWP_template/Readme.txt')\n sys.exit()\n if opt_pat.search(opt[i]).group('opt').lower()=='version':\n print 'MadWeight Version'\n os.system('cat ./Source/MadWeight_File/MW_TemplateVersion.txt')\n sys.exit()\n\n#1#########################################################################\nclass MW_info(dict):\n \"\"\" class containing all the option/information from the run \"\"\"\n\n #2#########################################################################\n def __init__(self,card_name):\n \"\"\" init all the param for the run \"\"\"\n self.mw_card=Cards.Card(card_name)\n self.info=self.mw_card.info\n for key,value in self.info.items():\n self[key]=value\n\n dict.__init__(self.info)\n 
self.check_info()\n #assign special value\n self.nb_event=int(self.info['mw_run']['2'])\n self.nb_card=self.number_of_P_run()\n self.check_condor()\n self.name=self.take_run_name()\n self.P_listdir,self.MW_listdir=self.detect_SubProcess()\n self.init_run_opt()\n self.def_actif_param()\n\n\n #3#########################################################################\n def init_run_opt(self,value=1):\n \"\"\" init all the run scheduling paramater to value \"\"\"\n self.run_opt={}\n self.run_opt['param']=value\n self.run_opt['analyzer']=value\n self.run_opt['compilation']=value\n self.run_opt['event']=value\n self.run_opt['dir']=value\n self.run_opt['launch']=value\n self.run_opt['control']=value\n self.run_opt['collect']=value\n self.run_opt['plot']=value \n self.run_opt['madweight_main']=value\n self.run_opt['relaunch']=0 #only for bugging case... -> desactivate\n self.run_opt['refine']=0 #only for bugging case... -> desactivate\n self.run_opt['clean']=0 #dangerous... -> desactivate\n self.control_opt()\n\n #2#########################################################################\n def check_info(self):\n \"\"\" assign default value if not defined already and check the input type\n those default value and the type are defined in MW_param_default.inc\n structure of this file:\n block tag type value #comment\n \"\"\"\n #define convertissor\n def pass_in_integer(value):\n return int(value)\n def pass_in_logical(value):\n if value in ['1','t','T','.true.']:\n return 1\n else:\n return 0\n def pass_in_float(value):\n return float(value)\n\n for line in open('./Source/MadWeight_File/Python/MW_param_default.inc'):\n line=line.split('#')[0] #remove comment\n splitline=line.split() #split the data\n if len(splitline)!=4:\n continue\n #assign element\n block=splitline[0].lower()\n tag=splitline[1].lower()\n type=splitline[2].lower()\n value=splitline[3]\n #check if exist -> default\n try:\n self[block][tag]\n except:\n try:\n self[block][tag]=value\n except:\n self[block]={tag:value}\n #change type\n if type in ['integer','logical','float']:\n self[block][tag]=eval('pass_in_'+type+'(self[block][tag])')\n \n \n \n \n #2#########################################################################\n def check_condor(self):\n \"\"\" assign variable cluster and normalisation \"\"\"\n\n self.cluster=self.info['mw_run']['1']\n self.norm_with_cross=self.info['mw_run']['4']\n self.condor_req=self.info['mw_run']['11']\n\n #type is automaticaly updated now\n #self.cluster=int(condor)\n #if norm_with_cross==\"F\":\n # self.norm_with_cross=0\n #else:\n # self.norm_with_cross=1\n\n #2#########################################################################\n def take_run_name(self):\n \"\"\"take the run name in run_card\"\"\"\n name=\"run\"\n Pattern=re.compile(r'''\\'(\\S*)\\'\\s*=\\s*run_tag''',re.I)\n card=open(\"./Cards/run_card.dat\")\n\n while 1:\n line=card.readline()\n if line=='':\n break\n \n if Pattern.search(line):\n name=Pattern.search(line).groups()[0]\n break\n return name\n\n\n #2#########################################################################\n def detect_SubProcess(self):\n\n MW_SubProcess_list=[]\n P_SubProcess_list=[]\n\n list_dir=os.listdir(\"./SubProcesses/\")\n for name in list_dir:\n try: \n st = os.lstat(os.path.join(\"./SubProcesses/\", name))\n except os.error:\n continue\n if stat.S_ISDIR(st.st_mode):\n if name[:2]==\"MW\":\n MW_SubProcess_list.append(name)\n elif self.norm_with_cross and name[0]=='P':\n P_SubProcess_list.append(name) \n\n return 
P_SubProcess_list,MW_SubProcess_list\n\n #2##########################################################################\n def update_nb_card(self):\n \"take the info from MW_runcard.dat\"\n self.nb_card=self.number_of_P_run()\n self.def_actif_param()\n \n #2##########################################################################\n def number_of_P_run(self):\n \"take the info from MW_runcard.dat\"\n\n #check if we use different param_card.dat\n# if self.info[\"mw_parameter\"][\"1\"]==\"1\":\n j=1\n while 1:\n if os.path.isfile('Cards/param_card_'+str(j)+'.dat'): j+=1\n elif(j==1): return j\n else: return j-1\n \n\n\n #2##########################################################################\n def load_events(self):\n \"detect the number of events for P and MW run\"\n\n self.P_nevents=self.info['mw_run']['5']\n self.MW_nevents=self.info['mw_run']['6']\n\n \n #2##########################################################################\n def give_block_param_info(self):\n \"\"\" return the number of modified parameter and the number of different value for each\"\"\"\n\n nb_block=0\n nb_values=[]\n k=0\n while 1:\n k+=1\n try:\n self.info['mw_parameter'][str(10*k+1)]\n except:\n break\n nb_block+=1\n if type(self.info['mw_parameter'][str(10*k+3)])==list:\n nb_values.append(len(self.info['mw_parameter'][str(10*k+3)]))\n else:\n nb_values.append(1)\n\n return nb_block,nb_values\n\n #3########################################################################\n def CardNb_to_ParameterTag(self,num_card):\n \"\"\" find from th card number, to which value for each block this card belong\n num_card is the number of the card in the last generation. \n card in previous generation are not accessible by this routine\n (and are not related to this MadWeight card anyway)\n \"\"\"\n\n nb_block,nb_data_by_block=self.give_block_param_info()\n\n if self['mw_parameter']['1']==2:\n return [num_card-1]*len(nb_data_by_block)\n\n tag=[]\n for k in range(-1,-nb_block-1,-1):\n tag.append((num_card-1)%nb_data_by_block[k])\n num_card=1+(num_card-(num_card-1)%nb_data_by_block[k])/nb_data_by_block[k]\n tag.reverse()\n return tag\n\n #2##########################################################################\n def set_run_opt(self,option):\n \"\"\"analyze option for the run\"\"\"\n\n if len(option)>1:\n self.init_run_opt(0)#put everything to false\n else:\n return\n for i in range(1,len(option)):\n if option[i][0]=='-' and option[i][-1]=='+':\n num=int(option[i][1:-1])\n for j in range(num,last_step+1):\n self.run_opt[num_to_tag[j]]=1\n elif option[i][0]=='-' and option[i][-1]=='-':\n num=int(option[i][1:-1])+1\n for j in range(1,num):\n self.run_opt[num_to_tag[j]]=1\n elif option[i][0]=='-':\n num=int(option[i][1:])\n for i in option[i][1:]:\n self.run_opt[num_to_tag[int(i)]]=1\n elif option[i][-1]=='+':\n num=tag_to_num[option[i][:-1]]\n for j in range(num,last_step+1):\n self.run_opt[num_to_tag[j]]=1\n elif option[i][-1]=='-':\n num=tag_to_num[option[i][:-1]]+1\n for j in range(1,num):\n self.run_opt[num_to_tag[j]]=1\n elif '=' in option[i]:\n obj=option[i].split('=')\n tag=obj[0]\n value=obj[1]\n self.run_opt[tag]=value\n else:\n self.run_opt[option[i]]=1\n \n self.control_opt()\n\n #3##########################################################################\n def control_opt(self):\n \"\"\"analyze option for the run to have coherent input\"\"\"\n\n\n if self.run_opt['refine']:\n self.run_opt['relaunch']=1\n \n #check value for 'madweight_main'\n for i in range(3,9)+[-1,-3]:\n if self.run_opt[num_to_tag[i]]==1:\n 
self.run_opt['madweight_main']=1\n break\n\n if self.run_opt['relaunch']==1:\n self.run_opt['control']=1\n\n #3##########################################################################\n def def_actif_param(self):\n \"\"\" find which card are still actif \"\"\"\n\n self.param_is_actif={}\n try:\n ff=open('Cards/mapping_card.dat')\n except:\n for i in range(1,self.nb_card+1):\n self.param_is_actif[i]=1 #if no file defined all card are supose to be used\n self.actif_param=range(1,self.nb_card+1)\n return\n\n self.actif_param=[]\n for line in ff:\n split=line.split()\n nb=int(split[0])\n actif=int(split[-1])\n self.param_is_actif[nb]=actif\n if actif:\n self.actif_param.append(nb)\n\n if len(self.param_is_actif)!=self.nb_card:\n print 'WARNING: wrong mapping file'\n"
] | true |
98,575 |
4bc6a8ee8c9f8f76dc70ae05f107f31ad16f1fa7
|
import sys
import heapq
# import copy
input = sys.stdin.readline
T = int(input())
def dijkstra(start, dist): # + visited
dist[start] = 0
q = []
heapq.heappush(q, (dist[start], start))
while q:
d, now = heapq.heappop(q)
if dist[now] < d:
continue
for v, c in graph[now]:
cost = dist[now] + c
if cost < dist[v]:
dist[v] = cost
# visited[v] = copy.deepcopy(visited[now])
# visited[v].append(now)
heapq.heappush(q, (dist[v], v))
for _ in range(T):
n, m, t = map(int, input().split())
s, g, h = map(int, input().split())
res = []
graph = [[] for _ in range(n+1)]
distance = [1e9] * (n+1)
# visited = [[] for _ in range(n+1)]
    # I wanted to store the path taken to each vertex and compare it, but that failed at 33% :(
    # Instead, multiply every road length by 2 and subtract 1 only from the g-h edge,
    # so the parity (odd/even) of the final shortest distance tells whether that road was used.
for _ in range(m):
a, b, d = map(int, input().split())
if (a == g and b == h) or (a == h and b == g):
d = 2*d -1
else:
d *= 2
graph[a].append((b, d))
graph[b].append((a, d))
for _ in range(t):
x = int(input())
res.append(x)
dijkstra(s, distance) # + visited
res.sort()
for i in res:
if distance[i] % 2 == 1:
# if g in visited[i] and h in visited[i]:
print(i, end=' ')
print()
|
[
"import sys\nimport heapq\n# import copy\ninput = sys.stdin.readline\n\nT = int(input())\n\ndef dijkstra(start, dist): # + visited\n\tdist[start] = 0\n\t\n\tq = []\n\theapq.heappush(q, (dist[start], start))\n\t\n\twhile q:\n\t\td, now = heapq.heappop(q)\n\t\t\n\t\tif dist[now] < d:\n\t\t\tcontinue\n\t\t\n\t\tfor v, c in graph[now]:\n\t\t\tcost = dist[now] + c\n\t\t\tif cost < dist[v]:\n\t\t\t\tdist[v] = cost\n\t\t\t\t# visited[v] = copy.deepcopy(visited[now])\n\t\t\t\t# visited[v].append(now)\n\t\t\t\theapq.heappush(q, (dist[v], v))\n\nfor _ in range(T):\n n, m, t = map(int, input().split())\n s, g, h = map(int, input().split())\n res = []\n \n graph = [[] for _ in range(n+1)]\n distance = [1e9] * (n+1)\n\t# visited = [[] for _ in range(n+1)]\n # 각 정점에 도달하기까지의 경로 저장해서 비교하고싶었으나 33퍼에서 실패 :(\n # 모든 도로의 길이에 2씩 곱해주고, g-h 경로만 -1해줘서\n # 최종 최단 경로가 2의 배수인지 아닌지로 해당 경로 지나왔는지 판별\n for _ in range(m):\n a, b, d = map(int, input().split())\n \n if (a == g and b == h) or (a == h and b == g):\n d = 2*d -1 \n else:\n d *= 2\n \n graph[a].append((b, d))\n graph[b].append((a, d))\n \n for _ in range(t):\n x = int(input())\n res.append(x)\n\t\n dijkstra(s, distance) # + visited\n\t\n res.sort()\n for i in res:\n if distance[i] % 2 == 1:\n\t\t# if g in visited[i] and h in visited[i]:\n print(i, end=' ')\t\n print()",
"import sys\nimport heapq\ninput = sys.stdin.readline\nT = int(input())\n\n\ndef dijkstra(start, dist):\n dist[start] = 0\n q = []\n heapq.heappush(q, (dist[start], start))\n while q:\n d, now = heapq.heappop(q)\n if dist[now] < d:\n continue\n for v, c in graph[now]:\n cost = dist[now] + c\n if cost < dist[v]:\n dist[v] = cost\n heapq.heappush(q, (dist[v], v))\n\n\nfor _ in range(T):\n n, m, t = map(int, input().split())\n s, g, h = map(int, input().split())\n res = []\n graph = [[] for _ in range(n + 1)]\n distance = [1000000000.0] * (n + 1)\n for _ in range(m):\n a, b, d = map(int, input().split())\n if a == g and b == h or a == h and b == g:\n d = 2 * d - 1\n else:\n d *= 2\n graph[a].append((b, d))\n graph[b].append((a, d))\n for _ in range(t):\n x = int(input())\n res.append(x)\n dijkstra(s, distance)\n res.sort()\n for i in res:\n if distance[i] % 2 == 1:\n print(i, end=' ')\n print()\n",
"<import token>\ninput = sys.stdin.readline\nT = int(input())\n\n\ndef dijkstra(start, dist):\n dist[start] = 0\n q = []\n heapq.heappush(q, (dist[start], start))\n while q:\n d, now = heapq.heappop(q)\n if dist[now] < d:\n continue\n for v, c in graph[now]:\n cost = dist[now] + c\n if cost < dist[v]:\n dist[v] = cost\n heapq.heappush(q, (dist[v], v))\n\n\nfor _ in range(T):\n n, m, t = map(int, input().split())\n s, g, h = map(int, input().split())\n res = []\n graph = [[] for _ in range(n + 1)]\n distance = [1000000000.0] * (n + 1)\n for _ in range(m):\n a, b, d = map(int, input().split())\n if a == g and b == h or a == h and b == g:\n d = 2 * d - 1\n else:\n d *= 2\n graph[a].append((b, d))\n graph[b].append((a, d))\n for _ in range(t):\n x = int(input())\n res.append(x)\n dijkstra(s, distance)\n res.sort()\n for i in res:\n if distance[i] % 2 == 1:\n print(i, end=' ')\n print()\n",
"<import token>\n<assignment token>\n\n\ndef dijkstra(start, dist):\n dist[start] = 0\n q = []\n heapq.heappush(q, (dist[start], start))\n while q:\n d, now = heapq.heappop(q)\n if dist[now] < d:\n continue\n for v, c in graph[now]:\n cost = dist[now] + c\n if cost < dist[v]:\n dist[v] = cost\n heapq.heappush(q, (dist[v], v))\n\n\nfor _ in range(T):\n n, m, t = map(int, input().split())\n s, g, h = map(int, input().split())\n res = []\n graph = [[] for _ in range(n + 1)]\n distance = [1000000000.0] * (n + 1)\n for _ in range(m):\n a, b, d = map(int, input().split())\n if a == g and b == h or a == h and b == g:\n d = 2 * d - 1\n else:\n d *= 2\n graph[a].append((b, d))\n graph[b].append((a, d))\n for _ in range(t):\n x = int(input())\n res.append(x)\n dijkstra(s, distance)\n res.sort()\n for i in res:\n if distance[i] % 2 == 1:\n print(i, end=' ')\n print()\n",
"<import token>\n<assignment token>\n\n\ndef dijkstra(start, dist):\n dist[start] = 0\n q = []\n heapq.heappush(q, (dist[start], start))\n while q:\n d, now = heapq.heappop(q)\n if dist[now] < d:\n continue\n for v, c in graph[now]:\n cost = dist[now] + c\n if cost < dist[v]:\n dist[v] = cost\n heapq.heappush(q, (dist[v], v))\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<code token>\n"
] | false |
98,576 |
203990634e13551f6c5d9f440af62d712125fa9c
|
# coding: utf-8
from __future__ import unicode_literals
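# Sanity tests for the CLDF dataset: validation plus expected row and cognate-set counts.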
def test_valid(cldf_dataset, cldf_logger):
assert cldf_dataset.validate(log=cldf_logger)
def test_forms(cldf_dataset, cldf_logger):
assert len(list(cldf_dataset["FormTable"])) == 171
def test_languages(cldf_dataset, cldf_logger):
assert len(list(cldf_dataset["LanguageTable"])) == 10
def test_sources(cldf_dataset, cldf_logger):
assert len(cldf_dataset.sources) == 1
def test_parameters(cldf_dataset, cldf_logger):
assert len(list(cldf_dataset["ParameterTable"])) == 18
def test_cognates(cldf_dataset, cldf_logger):
cogsets = {c["Cognateset_ID"] for c in cldf_dataset["CognateTable"]}
assert len(cogsets) == 39
|
[
"# coding: utf-8\nfrom __future__ import unicode_literals\n\n\ndef test_valid(cldf_dataset, cldf_logger):\n assert cldf_dataset.validate(log=cldf_logger)\n\n\ndef test_forms(cldf_dataset, cldf_logger):\n assert len(list(cldf_dataset[\"FormTable\"])) == 171\n\n\ndef test_languages(cldf_dataset, cldf_logger):\n assert len(list(cldf_dataset[\"LanguageTable\"])) == 10\n\n\ndef test_sources(cldf_dataset, cldf_logger):\n assert len(cldf_dataset.sources) == 1\n\n\ndef test_parameters(cldf_dataset, cldf_logger):\n assert len(list(cldf_dataset[\"ParameterTable\"])) == 18\n\n\ndef test_cognates(cldf_dataset, cldf_logger):\n cogsets = {c[\"Cognateset_ID\"] for c in cldf_dataset[\"CognateTable\"]}\n assert len(cogsets) == 39\n",
"from __future__ import unicode_literals\n\n\ndef test_valid(cldf_dataset, cldf_logger):\n assert cldf_dataset.validate(log=cldf_logger)\n\n\ndef test_forms(cldf_dataset, cldf_logger):\n assert len(list(cldf_dataset['FormTable'])) == 171\n\n\ndef test_languages(cldf_dataset, cldf_logger):\n assert len(list(cldf_dataset['LanguageTable'])) == 10\n\n\ndef test_sources(cldf_dataset, cldf_logger):\n assert len(cldf_dataset.sources) == 1\n\n\ndef test_parameters(cldf_dataset, cldf_logger):\n assert len(list(cldf_dataset['ParameterTable'])) == 18\n\n\ndef test_cognates(cldf_dataset, cldf_logger):\n cogsets = {c['Cognateset_ID'] for c in cldf_dataset['CognateTable']}\n assert len(cogsets) == 39\n",
"<import token>\n\n\ndef test_valid(cldf_dataset, cldf_logger):\n assert cldf_dataset.validate(log=cldf_logger)\n\n\ndef test_forms(cldf_dataset, cldf_logger):\n assert len(list(cldf_dataset['FormTable'])) == 171\n\n\ndef test_languages(cldf_dataset, cldf_logger):\n assert len(list(cldf_dataset['LanguageTable'])) == 10\n\n\ndef test_sources(cldf_dataset, cldf_logger):\n assert len(cldf_dataset.sources) == 1\n\n\ndef test_parameters(cldf_dataset, cldf_logger):\n assert len(list(cldf_dataset['ParameterTable'])) == 18\n\n\ndef test_cognates(cldf_dataset, cldf_logger):\n cogsets = {c['Cognateset_ID'] for c in cldf_dataset['CognateTable']}\n assert len(cogsets) == 39\n",
"<import token>\n\n\ndef test_valid(cldf_dataset, cldf_logger):\n assert cldf_dataset.validate(log=cldf_logger)\n\n\n<function token>\n\n\ndef test_languages(cldf_dataset, cldf_logger):\n assert len(list(cldf_dataset['LanguageTable'])) == 10\n\n\ndef test_sources(cldf_dataset, cldf_logger):\n assert len(cldf_dataset.sources) == 1\n\n\ndef test_parameters(cldf_dataset, cldf_logger):\n assert len(list(cldf_dataset['ParameterTable'])) == 18\n\n\ndef test_cognates(cldf_dataset, cldf_logger):\n cogsets = {c['Cognateset_ID'] for c in cldf_dataset['CognateTable']}\n assert len(cogsets) == 39\n",
"<import token>\n\n\ndef test_valid(cldf_dataset, cldf_logger):\n assert cldf_dataset.validate(log=cldf_logger)\n\n\n<function token>\n<function token>\n\n\ndef test_sources(cldf_dataset, cldf_logger):\n assert len(cldf_dataset.sources) == 1\n\n\ndef test_parameters(cldf_dataset, cldf_logger):\n assert len(list(cldf_dataset['ParameterTable'])) == 18\n\n\ndef test_cognates(cldf_dataset, cldf_logger):\n cogsets = {c['Cognateset_ID'] for c in cldf_dataset['CognateTable']}\n assert len(cogsets) == 39\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_sources(cldf_dataset, cldf_logger):\n assert len(cldf_dataset.sources) == 1\n\n\ndef test_parameters(cldf_dataset, cldf_logger):\n assert len(list(cldf_dataset['ParameterTable'])) == 18\n\n\ndef test_cognates(cldf_dataset, cldf_logger):\n cogsets = {c['Cognateset_ID'] for c in cldf_dataset['CognateTable']}\n assert len(cogsets) == 39\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_sources(cldf_dataset, cldf_logger):\n assert len(cldf_dataset.sources) == 1\n\n\n<function token>\n\n\ndef test_cognates(cldf_dataset, cldf_logger):\n cogsets = {c['Cognateset_ID'] for c in cldf_dataset['CognateTable']}\n assert len(cogsets) == 39\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_cognates(cldf_dataset, cldf_logger):\n cogsets = {c['Cognateset_ID'] for c in cldf_dataset['CognateTable']}\n assert len(cogsets) == 39\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
98,577 |
6fdc6d972f4c9cd93f7f6edffd3cc5c76988c3d8
|
import os
import re
import requests
import xlrd
import xlutils.copy
import xlwt
from bs4 import BeautifulSoup
from requests import RequestException
def get_html(url):
try:
        # Add a User-Agent to the headers to masquerade as a regular browser
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'
}
response = requests.get(url, headers=headers)
if response.status_code == 200:
response.encoding = 'utf-8'
return response.text
return None
except RequestException as e:
print(e)
return None
def parse_html(html, url):
    # Parse the page with BeautifulSoup
soup = BeautifulSoup(html, 'lxml')
    # Title
title = soup.select('[style="font-weight:bold;"]')[0].text
    # Abstract
abstract = soup.select('.abstract')[0].textarea
if abstract:
abstract = abstract.text.strip()
else:
abstract = ''
    # Keywords
keyword = soup.select(
        '[title="知识脉络分析"][href="#"][onclick^="wfAnalysis"]')  # returns a list; ^ means "starts with"; matches nodes with the given title, href and onclick
keywords = ''
for word in keyword:
keywords = keywords + word.text + ';'
    # Author
author = soup.select('[onclick^="authorHome"]')
if author:
author = author[0].text
    # Author affiliation
unit = soup.select('[class^="unit_nameType"]')
if unit:
unit = unit[0].text
    # Source publication (proceedings)
pattern = re.compile('母体文献.*?<div class="info_right author">(.*?)</div>', re.S)
literature = re.findall(pattern, html)
if literature:
literature = literature[0]
print(literature)
    # Conference name
# TODO
pattern = re.compile('会议名称.*?<div class="info_right">(.*?)</div>', re.S)
conference = pattern.findall(html)
# if len(soup.select('[href="#"][onclick^="searchResult"]')) > 0:
# conference = soup.select('[href="#"][onclick^="searchResult"]')[0].text
# else:
# conference = ''
print(conference)
    # Conference date
pattern = re.compile('会议时间.*?<div class="info_right">(.*?)</div>', re.S)
date = pattern.findall(html)
if date:
date = date[0].strip()
    # Conference location
pattern = re.compile('会议地点.*?<div class="info_right author">(.*?)</div>', re.S)
address = re.findall(pattern, html)
if address:
address = address[0].strip()
print(address)
    # Organizer
# TODO
organizer = ''#soup.select('[href="javascript:void(0)"][onclick^="searchResult"]')
# if organizer:
# organizer = organizer[0].text
print(organizer)
    # Online publication date
pattern = re.compile('在线出版日期.*?<div class="info_right author">(.*?)</div>', re.S)
online_date = pattern.findall(html)
if online_date:
online_date = online_date[0].strip()
paper = [title, abstract, keywords, author, unit, literature, conference, date, address, organizer, online_date,
url]
print(paper)
return paper
def save_p(paper):
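    # Append one paper record to 会议论文.xls, creating the workbook with a header row on first use.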
if not os.path.exists('会议论文.xls'):
wb = xlwt.Workbook()
sheet = wb.add_sheet('sheet1')
title = ['题目', '摘要', '关键词', '作者', '作者单位', '母体文献', '会议名称', '会议时间', '会议地点', '主办单位', '在线发表时间', '链接']
for i in range(len(title)):
            sheet.write(0, i, title[i])  # write the header in row 0
wb.save('会议论文.xls')
wb = xlrd.open_workbook('会议论文.xls')
sheet = wb.sheet_by_index(0)
    rows = sheet.nrows  # current number of rows
print(rows)
ws = xlutils.copy.copy(wb)
sheet = ws.get_sheet(0)
for i in range(len(paper)):
sheet.write(rows, i, paper[i])
ws.save('会议论文.xls')
def main(url):
    # Send the request and fetch the response
    html = get_html(url)
    # Parse the response
    paper = parse_html(html, url)
    # Store the data
save_p(paper)
|
[
"import os\nimport re\n\nimport requests\nimport xlrd\nimport xlutils.copy\nimport xlwt\nfrom bs4 import BeautifulSoup\nfrom requests import RequestException\n\n\ndef get_html(url):\n try:\n # 添加User-Agent,放在headers中,伪装成浏览器\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'\n }\n response = requests.get(url, headers=headers)\n if response.status_code == 200:\n response.encoding = 'utf-8'\n return response.text\n return None\n except RequestException as e:\n print(e)\n return None\n\ndef parse_html(html, url):\n # 使用beautifulSoup进行解析\n soup = BeautifulSoup(html, 'lxml')\n # 题目\n title = soup.select('[style=\"font-weight:bold;\"]')[0].text\n # 摘要\n abstract = soup.select('.abstract')[0].textarea\n if abstract:\n abstract = abstract.text.strip()\n else:\n abstract = ''\n\n # 关键词\n keyword = soup.select(\n '[title=\"知识脉络分析\"][href=\"#\"][onclick^=\"wfAnalysis\"]') # 返回列表 ^表示以什么开头 找到title=x,href=x,οnclick=x的节点\n keywords = ''\n for word in keyword:\n keywords = keywords + word.text + ';'\n\n # 作者\n author = soup.select('[onclick^=\"authorHome\"]')\n if author:\n author = author[0].text\n\n # 作者单位\n unit = soup.select('[class^=\"unit_nameType\"]')\n if unit:\n unit = unit[0].text\n\n # 母体文献\n pattern = re.compile('母体文献.*?<div class=\"info_right author\">(.*?)</div>', re.S)\n literature = re.findall(pattern, html)\n if literature:\n literature = literature[0]\n print(literature)\n\n # 会议名\n # TODO\n pattern = re.compile('会议名称.*?<div class=\"info_right\">(.*?)</div>', re.S)\n conference = pattern.findall(html)\n # if len(soup.select('[href=\"#\"][onclick^=\"searchResult\"]')) > 0:\n # conference = soup.select('[href=\"#\"][onclick^=\"searchResult\"]')[0].text\n # else:\n # conference = ''\n print(conference)\n\n # 会议时间\n pattern = re.compile('会议时间.*?<div class=\"info_right\">(.*?)</div>', re.S)\n date = pattern.findall(html)\n if date:\n date = date[0].strip()\n\n # 会议地点\n pattern = re.compile('会议地点.*?<div class=\"info_right author\">(.*?)</div>', re.S)\n address = re.findall(pattern, html)\n if address:\n address = address[0].strip()\n print(address)\n\n # 主办单位\n # TODO\n organizer = ''#soup.select('[href=\"javascript:void(0)\"][onclick^=\"searchResult\"]')\n # if organizer:\n # organizer = organizer[0].text\n print(organizer)\n\n # 在线发表时间\n pattern = re.compile('在线出版日期.*?<div class=\"info_right author\">(.*?)</div>', re.S)\n online_date = pattern.findall(html)\n if online_date:\n online_date = online_date[0].strip()\n\n paper = [title, abstract, keywords, author, unit, literature, conference, date, address, organizer, online_date,\n url]\n print(paper)\n return paper\n\n\ndef save_p(paper):\n if not os.path.exists('会议论文.xls'):\n wb = xlwt.Workbook()\n sheet = wb.add_sheet('sheet1')\n title = ['题目', '摘要', '关键词', '作者', '作者单位', '母体文献', '会议名称', '会议时间', '会议地点', '主办单位', '在线发表时间', '链接']\n for i in range(len(title)):\n sheet.write(0, i, title[i]) #在第0行写入标题\n wb.save('会议论文.xls')\n wb = xlrd.open_workbook('会议论文.xls')\n sheet = wb.sheet_by_index(0)\n rows = sheet.nrows #当前行数\n print(rows)\n ws = xlutils.copy.copy(wb)\n sheet = ws.get_sheet(0)\n for i in range(len(paper)):\n sheet.write(rows, i, paper[i])\n ws.save('会议论文.xls')\n\ndef main(url):\n # 发送请求、获取响应\n html = get_html(url)\n # 解析响应\n paper = parse_html(html, url)\n # 数据存储\n save_p(paper)",
"import os\nimport re\nimport requests\nimport xlrd\nimport xlutils.copy\nimport xlwt\nfrom bs4 import BeautifulSoup\nfrom requests import RequestException\n\n\ndef get_html(url):\n try:\n headers = {'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'\n }\n response = requests.get(url, headers=headers)\n if response.status_code == 200:\n response.encoding = 'utf-8'\n return response.text\n return None\n except RequestException as e:\n print(e)\n return None\n\n\ndef parse_html(html, url):\n soup = BeautifulSoup(html, 'lxml')\n title = soup.select('[style=\"font-weight:bold;\"]')[0].text\n abstract = soup.select('.abstract')[0].textarea\n if abstract:\n abstract = abstract.text.strip()\n else:\n abstract = ''\n keyword = soup.select('[title=\"知识脉络分析\"][href=\"#\"][onclick^=\"wfAnalysis\"]')\n keywords = ''\n for word in keyword:\n keywords = keywords + word.text + ';'\n author = soup.select('[onclick^=\"authorHome\"]')\n if author:\n author = author[0].text\n unit = soup.select('[class^=\"unit_nameType\"]')\n if unit:\n unit = unit[0].text\n pattern = re.compile('母体文献.*?<div class=\"info_right author\">(.*?)</div>',\n re.S)\n literature = re.findall(pattern, html)\n if literature:\n literature = literature[0]\n print(literature)\n pattern = re.compile('会议名称.*?<div class=\"info_right\">(.*?)</div>', re.S)\n conference = pattern.findall(html)\n print(conference)\n pattern = re.compile('会议时间.*?<div class=\"info_right\">(.*?)</div>', re.S)\n date = pattern.findall(html)\n if date:\n date = date[0].strip()\n pattern = re.compile('会议地点.*?<div class=\"info_right author\">(.*?)</div>',\n re.S)\n address = re.findall(pattern, html)\n if address:\n address = address[0].strip()\n print(address)\n organizer = ''\n print(organizer)\n pattern = re.compile('在线出版日期.*?<div class=\"info_right author\">(.*?)</div>',\n re.S)\n online_date = pattern.findall(html)\n if online_date:\n online_date = online_date[0].strip()\n paper = [title, abstract, keywords, author, unit, literature,\n conference, date, address, organizer, online_date, url]\n print(paper)\n return paper\n\n\ndef save_p(paper):\n if not os.path.exists('会议论文.xls'):\n wb = xlwt.Workbook()\n sheet = wb.add_sheet('sheet1')\n title = ['题目', '摘要', '关键词', '作者', '作者单位', '母体文献', '会议名称', '会议时间',\n '会议地点', '主办单位', '在线发表时间', '链接']\n for i in range(len(title)):\n sheet.write(0, i, title[i])\n wb.save('会议论文.xls')\n wb = xlrd.open_workbook('会议论文.xls')\n sheet = wb.sheet_by_index(0)\n rows = sheet.nrows\n print(rows)\n ws = xlutils.copy.copy(wb)\n sheet = ws.get_sheet(0)\n for i in range(len(paper)):\n sheet.write(rows, i, paper[i])\n ws.save('会议论文.xls')\n\n\ndef main(url):\n html = get_html(url)\n paper = parse_html(html, url)\n save_p(paper)\n",
"<import token>\n\n\ndef get_html(url):\n try:\n headers = {'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'\n }\n response = requests.get(url, headers=headers)\n if response.status_code == 200:\n response.encoding = 'utf-8'\n return response.text\n return None\n except RequestException as e:\n print(e)\n return None\n\n\ndef parse_html(html, url):\n soup = BeautifulSoup(html, 'lxml')\n title = soup.select('[style=\"font-weight:bold;\"]')[0].text\n abstract = soup.select('.abstract')[0].textarea\n if abstract:\n abstract = abstract.text.strip()\n else:\n abstract = ''\n keyword = soup.select('[title=\"知识脉络分析\"][href=\"#\"][onclick^=\"wfAnalysis\"]')\n keywords = ''\n for word in keyword:\n keywords = keywords + word.text + ';'\n author = soup.select('[onclick^=\"authorHome\"]')\n if author:\n author = author[0].text\n unit = soup.select('[class^=\"unit_nameType\"]')\n if unit:\n unit = unit[0].text\n pattern = re.compile('母体文献.*?<div class=\"info_right author\">(.*?)</div>',\n re.S)\n literature = re.findall(pattern, html)\n if literature:\n literature = literature[0]\n print(literature)\n pattern = re.compile('会议名称.*?<div class=\"info_right\">(.*?)</div>', re.S)\n conference = pattern.findall(html)\n print(conference)\n pattern = re.compile('会议时间.*?<div class=\"info_right\">(.*?)</div>', re.S)\n date = pattern.findall(html)\n if date:\n date = date[0].strip()\n pattern = re.compile('会议地点.*?<div class=\"info_right author\">(.*?)</div>',\n re.S)\n address = re.findall(pattern, html)\n if address:\n address = address[0].strip()\n print(address)\n organizer = ''\n print(organizer)\n pattern = re.compile('在线出版日期.*?<div class=\"info_right author\">(.*?)</div>',\n re.S)\n online_date = pattern.findall(html)\n if online_date:\n online_date = online_date[0].strip()\n paper = [title, abstract, keywords, author, unit, literature,\n conference, date, address, organizer, online_date, url]\n print(paper)\n return paper\n\n\ndef save_p(paper):\n if not os.path.exists('会议论文.xls'):\n wb = xlwt.Workbook()\n sheet = wb.add_sheet('sheet1')\n title = ['题目', '摘要', '关键词', '作者', '作者单位', '母体文献', '会议名称', '会议时间',\n '会议地点', '主办单位', '在线发表时间', '链接']\n for i in range(len(title)):\n sheet.write(0, i, title[i])\n wb.save('会议论文.xls')\n wb = xlrd.open_workbook('会议论文.xls')\n sheet = wb.sheet_by_index(0)\n rows = sheet.nrows\n print(rows)\n ws = xlutils.copy.copy(wb)\n sheet = ws.get_sheet(0)\n for i in range(len(paper)):\n sheet.write(rows, i, paper[i])\n ws.save('会议论文.xls')\n\n\ndef main(url):\n html = get_html(url)\n paper = parse_html(html, url)\n save_p(paper)\n",
"<import token>\n\n\ndef get_html(url):\n try:\n headers = {'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'\n }\n response = requests.get(url, headers=headers)\n if response.status_code == 200:\n response.encoding = 'utf-8'\n return response.text\n return None\n except RequestException as e:\n print(e)\n return None\n\n\ndef parse_html(html, url):\n soup = BeautifulSoup(html, 'lxml')\n title = soup.select('[style=\"font-weight:bold;\"]')[0].text\n abstract = soup.select('.abstract')[0].textarea\n if abstract:\n abstract = abstract.text.strip()\n else:\n abstract = ''\n keyword = soup.select('[title=\"知识脉络分析\"][href=\"#\"][onclick^=\"wfAnalysis\"]')\n keywords = ''\n for word in keyword:\n keywords = keywords + word.text + ';'\n author = soup.select('[onclick^=\"authorHome\"]')\n if author:\n author = author[0].text\n unit = soup.select('[class^=\"unit_nameType\"]')\n if unit:\n unit = unit[0].text\n pattern = re.compile('母体文献.*?<div class=\"info_right author\">(.*?)</div>',\n re.S)\n literature = re.findall(pattern, html)\n if literature:\n literature = literature[0]\n print(literature)\n pattern = re.compile('会议名称.*?<div class=\"info_right\">(.*?)</div>', re.S)\n conference = pattern.findall(html)\n print(conference)\n pattern = re.compile('会议时间.*?<div class=\"info_right\">(.*?)</div>', re.S)\n date = pattern.findall(html)\n if date:\n date = date[0].strip()\n pattern = re.compile('会议地点.*?<div class=\"info_right author\">(.*?)</div>',\n re.S)\n address = re.findall(pattern, html)\n if address:\n address = address[0].strip()\n print(address)\n organizer = ''\n print(organizer)\n pattern = re.compile('在线出版日期.*?<div class=\"info_right author\">(.*?)</div>',\n re.S)\n online_date = pattern.findall(html)\n if online_date:\n online_date = online_date[0].strip()\n paper = [title, abstract, keywords, author, unit, literature,\n conference, date, address, organizer, online_date, url]\n print(paper)\n return paper\n\n\n<function token>\n\n\ndef main(url):\n html = get_html(url)\n paper = parse_html(html, url)\n save_p(paper)\n",
"<import token>\n<function token>\n\n\ndef parse_html(html, url):\n soup = BeautifulSoup(html, 'lxml')\n title = soup.select('[style=\"font-weight:bold;\"]')[0].text\n abstract = soup.select('.abstract')[0].textarea\n if abstract:\n abstract = abstract.text.strip()\n else:\n abstract = ''\n keyword = soup.select('[title=\"知识脉络分析\"][href=\"#\"][onclick^=\"wfAnalysis\"]')\n keywords = ''\n for word in keyword:\n keywords = keywords + word.text + ';'\n author = soup.select('[onclick^=\"authorHome\"]')\n if author:\n author = author[0].text\n unit = soup.select('[class^=\"unit_nameType\"]')\n if unit:\n unit = unit[0].text\n pattern = re.compile('母体文献.*?<div class=\"info_right author\">(.*?)</div>',\n re.S)\n literature = re.findall(pattern, html)\n if literature:\n literature = literature[0]\n print(literature)\n pattern = re.compile('会议名称.*?<div class=\"info_right\">(.*?)</div>', re.S)\n conference = pattern.findall(html)\n print(conference)\n pattern = re.compile('会议时间.*?<div class=\"info_right\">(.*?)</div>', re.S)\n date = pattern.findall(html)\n if date:\n date = date[0].strip()\n pattern = re.compile('会议地点.*?<div class=\"info_right author\">(.*?)</div>',\n re.S)\n address = re.findall(pattern, html)\n if address:\n address = address[0].strip()\n print(address)\n organizer = ''\n print(organizer)\n pattern = re.compile('在线出版日期.*?<div class=\"info_right author\">(.*?)</div>',\n re.S)\n online_date = pattern.findall(html)\n if online_date:\n online_date = online_date[0].strip()\n paper = [title, abstract, keywords, author, unit, literature,\n conference, date, address, organizer, online_date, url]\n print(paper)\n return paper\n\n\n<function token>\n\n\ndef main(url):\n html = get_html(url)\n paper = parse_html(html, url)\n save_p(paper)\n",
"<import token>\n<function token>\n\n\ndef parse_html(html, url):\n soup = BeautifulSoup(html, 'lxml')\n title = soup.select('[style=\"font-weight:bold;\"]')[0].text\n abstract = soup.select('.abstract')[0].textarea\n if abstract:\n abstract = abstract.text.strip()\n else:\n abstract = ''\n keyword = soup.select('[title=\"知识脉络分析\"][href=\"#\"][onclick^=\"wfAnalysis\"]')\n keywords = ''\n for word in keyword:\n keywords = keywords + word.text + ';'\n author = soup.select('[onclick^=\"authorHome\"]')\n if author:\n author = author[0].text\n unit = soup.select('[class^=\"unit_nameType\"]')\n if unit:\n unit = unit[0].text\n pattern = re.compile('母体文献.*?<div class=\"info_right author\">(.*?)</div>',\n re.S)\n literature = re.findall(pattern, html)\n if literature:\n literature = literature[0]\n print(literature)\n pattern = re.compile('会议名称.*?<div class=\"info_right\">(.*?)</div>', re.S)\n conference = pattern.findall(html)\n print(conference)\n pattern = re.compile('会议时间.*?<div class=\"info_right\">(.*?)</div>', re.S)\n date = pattern.findall(html)\n if date:\n date = date[0].strip()\n pattern = re.compile('会议地点.*?<div class=\"info_right author\">(.*?)</div>',\n re.S)\n address = re.findall(pattern, html)\n if address:\n address = address[0].strip()\n print(address)\n organizer = ''\n print(organizer)\n pattern = re.compile('在线出版日期.*?<div class=\"info_right author\">(.*?)</div>',\n re.S)\n online_date = pattern.findall(html)\n if online_date:\n online_date = online_date[0].strip()\n paper = [title, abstract, keywords, author, unit, literature,\n conference, date, address, organizer, online_date, url]\n print(paper)\n return paper\n\n\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
98,578 |
881589da9f237499d6d44d3a57003830e0931a82
|
from twisted.internet import reactor
from scrapy.crawler import Crawler
from scrapy import log, signals
# from testspiders.spiders.followall import FollowAllSpider
from spiders.zackSpider import zackSpider
from scrapy.utils.project import get_project_settings
def setup_crawler():
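    # Build a Crawler from the project settings, stop the reactor when the spider closes, and run zackSpider.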
# spider = FollowAllSpider(domain=domain)
spider = zackSpider()
settings = get_project_settings()
crawler = Crawler(settings)
crawler.signals.connect(reactor.stop, signal=signals.spider_closed)
crawler.configure()
crawler.crawl(spider)
crawler.start()
log.start()
reactor.run()
setup_crawler()
# for domain in ['scrapinghub.com', 'insophia.com']:
# setup_crawler(domain)
|
[
"from twisted.internet import reactor\nfrom scrapy.crawler import Crawler\nfrom scrapy import log, signals\n\n# from testspiders.spiders.followall import FollowAllSpider\n\nfrom spiders.zackSpider import zackSpider\nfrom scrapy.utils.project import get_project_settings\n\ndef setup_crawler():\n # spider = FollowAllSpider(domain=domain)\n spider = zackSpider()\n settings = get_project_settings()\n crawler = Crawler(settings)\n crawler.signals.connect(reactor.stop, signal=signals.spider_closed)\n crawler.configure()\n crawler.crawl(spider)\n crawler.start()\n\n log.start()\n reactor.run()\n\nsetup_crawler()\n# for domain in ['scrapinghub.com', 'insophia.com']:\n# setup_crawler(domain)\n",
"from twisted.internet import reactor\nfrom scrapy.crawler import Crawler\nfrom scrapy import log, signals\nfrom spiders.zackSpider import zackSpider\nfrom scrapy.utils.project import get_project_settings\n\n\ndef setup_crawler():\n spider = zackSpider()\n settings = get_project_settings()\n crawler = Crawler(settings)\n crawler.signals.connect(reactor.stop, signal=signals.spider_closed)\n crawler.configure()\n crawler.crawl(spider)\n crawler.start()\n log.start()\n reactor.run()\n\n\nsetup_crawler()\n",
"<import token>\n\n\ndef setup_crawler():\n spider = zackSpider()\n settings = get_project_settings()\n crawler = Crawler(settings)\n crawler.signals.connect(reactor.stop, signal=signals.spider_closed)\n crawler.configure()\n crawler.crawl(spider)\n crawler.start()\n log.start()\n reactor.run()\n\n\nsetup_crawler()\n",
"<import token>\n\n\ndef setup_crawler():\n spider = zackSpider()\n settings = get_project_settings()\n crawler = Crawler(settings)\n crawler.signals.connect(reactor.stop, signal=signals.spider_closed)\n crawler.configure()\n crawler.crawl(spider)\n crawler.start()\n log.start()\n reactor.run()\n\n\n<code token>\n",
"<import token>\n<function token>\n<code token>\n"
] | false |
98,579 |
3b3a4987576264b81b78df3ac0e74d62b2a9403b
|
print("Hello world!")
if "a" == "a":
print("\"a\" is definitely the letter \'a\'")
print("Bye!")
|
[
"print(\"Hello world!\")\n\nif \"a\" == \"a\":\n print(\"\\\"a\\\" is definitely the letter \\'a\\'\")\n\nprint(\"Bye!\")\n",
"print('Hello world!')\nif 'a' == 'a':\n print('\"a\" is definitely the letter \\'a\\'')\nprint('Bye!')\n",
"<code token>\n"
] | false |
98,580 |
2b5f62fa08f547ee121af2c75cf89103f51a4bf9
|
# Write a program that accepts a sentence and
# calculates the number of letters and digits in it.
class A:
def tu(self):
count=0
count1=0
p=raw_input("Enter the number and digits\n")
for i in p.split(' '):
#print i.isalpha()
for j in i:
#print j
if j.isalpha():
#print len(j)
count+=len(j)
#print count
elif (j).isdigit():
#print j
#print len(j)
count1+=len(j)
#print count1
print count
print count1
def main():
l=A()
l.tu()
if __name__=="__main__":
main()
|
[
"#Write a program that accepts a sentence and\n# calculate the number of letters and digits.\n\n\nclass A:\n def tu(self):\n count=0\n count1=0\n p=raw_input(\"Enter the number and digits\\n\")\n for i in p.split(' '):\n #print i.isalpha()\n for j in i:\n #print j\n if j.isalpha():\n #print len(j)\n count+=len(j)\n #print count\n elif (j).isdigit():\n #print j\n #print len(j)\n count1+=len(j)\n #print count1\n print count\n print count1\n\n\ndef main():\n l=A()\n l.tu()\n\n\nif __name__==\"__main__\":\n main()"
] | true |
98,581 |
366121d723b1f585b5db1d066adf812c4c67ff18
|
# Problem 677 A - Vanya and Fence
# input
n, h = map(int, input().split())
a_nums = list(map(int, input().split()))
# initialization
ans = 0
# count
for a in a_nums:
if a>h:
ans += 2
else:
ans += 1
# output
print(ans)
|
[
"# Problem 677 A - Vanya and Fence\n\n# input\nn, h = map(int, input().split())\na_nums = list(map(int, input().split()))\n\n# initialization\nans = 0\n\n# count\nfor a in a_nums:\n if a>h:\n ans += 2\n else:\n ans += 1\n\n# output\nprint(ans)\n",
"n, h = map(int, input().split())\na_nums = list(map(int, input().split()))\nans = 0\nfor a in a_nums:\n if a > h:\n ans += 2\n else:\n ans += 1\nprint(ans)\n",
"<assignment token>\nfor a in a_nums:\n if a > h:\n ans += 2\n else:\n ans += 1\nprint(ans)\n",
"<assignment token>\n<code token>\n"
] | false |
98,582 |
ee99f03f8204458beb23b6bed40f5329861b00af
|
# -*- coding: utf-8 -*-
"""Tests for `chi_pet` package."""
import pytest
import random
from chi_pet import chi_pet
@pytest.fixture
def generate_numbers():
"""Sample pytest fixture. Generates list of random integers.
See more at: http://doc.pytest.org/en/latest/fixture.html
"""
return random.sample(range(100), 10)
|
[
"# -*- coding: utf-8 -*-\n\"\"\"Tests for `chi_pet` package.\"\"\"\n\nimport pytest\nimport random\n\nfrom chi_pet import chi_pet\n\n\[email protected]\ndef generate_numbers():\n \"\"\"Sample pytest fixture. Generates list of random integers.\n\n See more at: http://doc.pytest.org/en/latest/fixture.html\n \"\"\"\n\n return random.sample(range(100), 10)\n",
"<docstring token>\nimport pytest\nimport random\nfrom chi_pet import chi_pet\n\n\[email protected]\ndef generate_numbers():\n \"\"\"Sample pytest fixture. Generates list of random integers.\n\n See more at: http://doc.pytest.org/en/latest/fixture.html\n \"\"\"\n return random.sample(range(100), 10)\n",
"<docstring token>\n<import token>\n\n\[email protected]\ndef generate_numbers():\n \"\"\"Sample pytest fixture. Generates list of random integers.\n\n See more at: http://doc.pytest.org/en/latest/fixture.html\n \"\"\"\n return random.sample(range(100), 10)\n",
"<docstring token>\n<import token>\n<function token>\n"
] | false |
98,583 |
11041299f66100693cd22deb91c03bfa5360a729
|
""" wicd - wireless connection daemon implementation.
This module implements the wicd daemon that provides network
connection management, for both wireless and wired networks. The daemon
must be run as root to control the networks, however the user interface
components should be run as a normal user.
class LogWriter() -- Class to redirect stdout and stderr to a log file.
class ConnectionWizard() -- DBUS interface to manage the network.
class ConnectionStatus() -- Updates the current connection state
def usage() -- Print usage information.
def daemonize() -- Daemonize the current process with a double fork.
def main() -- The wicd daemon main loop.
"""
import os
import sys
import time
import getopt
import ConfigParser
import signal
import gobject
import dbus
import dbus.service
if getattr(dbus, 'version', (0, 0, 0)) < (0, 80, 0):
import dbus.glib
else:
from dbus.mainloop.glib import DBusGMainLoop
DBusGMainLoop(set_as_default=True)
import wicd.wpath as wpath
import wicd.networking as networking
import wicd.misc as misc
from wicd.misc import noneToBlankString
if __name__ == '__main__':
wpath.chdir(__file__)
misc.RenameProcess("wicd")
logging_enabled = True
class LogWriter:
""" A class to provide timestamped logging. """
def __init__(self):
if not os.path.exists(wpath.log + 'wicd.log'):
os.system("mkdir -p %s" % wpath.log)
self.file = open(wpath.log + 'wicd.log', 'w')
try:
os.chmod(wpath.log + 'wicd.log', 0600)
except:
print 'unable to chmod the log 0600'
self.eol = True
def __del__(self):
self.file.close()
def flush(self):
self.file.flush()
def write(self, data):
""" Writes the data to the log with a timestamp.
This function handles writing of data to a log file. In order to
handle output redirection, we need to be careful with how we
handle the addition of timestamps. In any set of data that is
written, we replace the newlines with a timestamp + new line,
except for newlines that are the final character in data.
When a newline is the last character in data, we set a flag to
indicate that the next write should have a timestamp prepended
as well, which ensures that the timestamps match the time at
which the data is written, rather than the previous write.
Keyword arguments:
data -- The string to write to the log.
"""
global logging_enabled
data = misc.to_unicode(data)
if len(data) <= 0: return
if logging_enabled:
if self.eol:
self.file.write(self.get_time() + ' :: ')
self.eol = False
if data[-1] == '\n':
self.eol = True
data = data[:-1]
self.file.write(
data.replace('\n', '\n' + self.get_time() + ' :: '))
if self.eol: self.file.write('\n')
self.file.flush()
def get_time(self):
""" Return a string with the current time nicely formatted.
The format of the returned string is yyyy/mm/dd HH:MM:SS
"""
x = time.localtime()
return ''.join([
str(x[0]).rjust(4, '0'), '/', str(x[1]).rjust(2, '0'), '/',
str(x[2]).rjust(2, '0'), ' ', str(x[3]).rjust(2, '0'), ':',
str(x[4]).rjust(2, '0'), ':', str(x[5]).rjust(2, '0')])
class ConnectionWizard(dbus.service.Object):
def __init__(self, bus_name, object_path='/org/wicd/daemon',
auto_connect=True):
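        # Load saved settings, set up the wired and wireless backends, and optionally start autoconnecting.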
dbus.service.Object.__init__(self, bus_name, object_path)
self.app_conf = wpath.etc + 'manager-settings.conf'
self.wireless_conf = wpath.etc + 'wireless-settings.conf'
self.wired_conf = wpath.etc + 'wired-settings.conf'
self.hidden_essid = None
self.wifi = networking.Wireless()
self.wired = networking.Wired()
self.forced_disconnect = False
self.need_profile_chooser = False
self.current_interface = None
self.vpn_session = None
self.gui_open = False
self.suspended = False
self.connection_state = misc.NOT_CONNECTED
self.connection_info = [""]
self.auto_connecting = False
self.dhcp_client = 0
self.link_detect_tool = 0
self.flush_tool = 0
self.ReadConfig()
self.LastScan = ''
self.WiredNetwork = {}
self.wifi.wireless_interface = self.GetWirelessInterface()
self.wired.wired_interface = self.GetWiredInterface()
self.wifi.LoadInterfaces()
self.wired.LoadInterfaces()
self.wifi.liface = self.wired.liface
self.wired.wiface = self.wifi.wiface
if auto_connect:
print "autoconnecting...", str(self.GetWirelessInterface())
self.AutoConnect(True)
else:
self.Scan()
self.SetForcedDisconnect(True)
print "--no-autoconnect detected, not autoconnecting..."
@dbus.service.method('org.wicd.daemon')
def Hello(self):
""" Returns the version number.
This number is major-minor-micro. Major is only incremented if minor
        reaches > 9. Minor is incremented if changes that break core structure
are implemented. Micro is for everything else, and micro may be
anything >= 0. This number is effective starting wicd v1.2.0.
"""
version = '1.5.3'
print 'returned version number', version
return version
@dbus.service.method('org.wicd.daemon')
def SetWiredInterface(self, interface):
""" Sets the wired interface for the daemon to use. """
print "setting wired interface %s" % (str(interface))
self.wired.wired_interface = noneToBlankString(interface)
self.wifi.wired_interface = noneToBlankString(interface)
config = ConfigParser.ConfigParser()
config.read(self.app_conf)
config.set("Settings","wired_interface", interface)
config.write(open(self.app_conf, "w"))
@dbus.service.method('org.wicd.daemon')
def SetWirelessInterface(self, interface):
""" Sets the wireless interface the daemon will use. """
print "setting wireless interface %s" % (str(interface))
self.wifi.wireless_interface = noneToBlankString(interface)
self.wired.wireless_interface = noneToBlankString(interface)
config = ConfigParser.ConfigParser()
config.read(self.app_conf)
config.set("Settings","wireless_interface", interface)
configfile = open(self.app_conf, "w")
config.write(configfile)
@dbus.service.method('org.wicd.daemon')
def SetWPADriver(self, driver):
""" Sets the wpa driver the wpa_supplicant will use. """
print "setting wpa driver", str(driver)
self.wifi.wpa_driver = driver
config = ConfigParser.ConfigParser()
config.read(self.app_conf)
config.set("Settings","wpa_driver",driver)
configfile = open(self.app_conf, "w")
config.write(configfile)
@dbus.service.method('org.wicd.daemon')
def SetUseGlobalDNS(self, use):
""" Sets a boolean which determines if global DNS is enabled. """
print 'setting use global dns to', use
use = misc.to_bool(use)
print 'setting use global dns to boolean', use
config = ConfigParser.ConfigParser()
config.read(self.app_conf)
config.set("Settings", "use_global_dns", use)
self.use_global_dns = use
self.wifi.use_global_dns = use
self.wired.use_global_dns = use
configfile = open(self.app_conf, "w")
config.write(configfile)
@dbus.service.method('org.wicd.daemon')
def SetGlobalDNS(self, dns1=None, dns2=None, dns3=None):
""" Sets the global dns addresses. """
print "setting global dns"
config = ConfigParser.ConfigParser()
config.read(self.app_conf)
config.set("Settings", "global_dns_1", misc.noneToString(dns1))
self.dns1 = dns1
self.wifi.global_dns_1 = dns1
self.wired.global_dns_1 = dns1
config.set("Settings", "global_dns_2", misc.noneToString(dns2))
self.dns2 = dns2
self.wifi.global_dns_2 = dns2
self.wired.global_dns_2 = dns2
config.set("Settings", "global_dns_3", misc.noneToString(dns3))
self.dns3 = dns3
self.wifi.global_dns_3 = dns3
self.wired.global_dns_3 = dns3
print 'global dns servers are', dns1, dns2, dns3
configfile = open(self.app_conf, "w")
config.write(configfile)
@dbus.service.method('org.wicd.daemon')
def GetUseGlobalDNS(self):
""" Returns a boolean that determines if global dns is enabled. """
return bool(self.use_global_dns)
@dbus.service.method('org.wicd.daemon')
def GetWPADriver(self):
""" Returns the wpa driver the daemon is using. """
return str(self.wifi.wpa_driver)
@dbus.service.method('org.wicd.daemon')
def GetWiredInterface(self):
""" Returns the wired interface. """
return str(self.wired.wired_interface)
@dbus.service.method('org.wicd.daemon')
def GetWirelessInterface(self):
""" Returns the wireless interface the daemon is using. """
return str(self.wifi.wireless_interface)
@dbus.service.method('org.wicd.daemon')
def SetDebugMode(self, debug):
""" Sets if debugging mode is on or off. """
config = ConfigParser.ConfigParser()
config.read(self.app_conf)
config.set("Settings", "debug_mode", debug)
configfile = open(self.app_conf, "w")
config.write(configfile)
self.debug_mode = misc.to_bool(debug)
self.wifi.debug = self.debug_mode
self.wired.debug = self.debug_mode
@dbus.service.method('org.wicd.daemon')
def GetDebugMode(self):
""" Returns whether debugging is enabled. """
return bool(self.debug_mode)
@dbus.service.method('org.wicd.daemon')
def Disconnect(self):
""" Disconnects all networks. """
self.SetForcedDisconnect(True)
self.wifi.Disconnect()
self.wired.Disconnect()
@dbus.service.method('org.wicd.daemon')
def GetSignalDisplayType(self):
""" Returns the signal display type.
Returns either 0 or 1.
0 for signal strength as a percentage
1 for signal strength measured in dBm
"""
return int(self.signal_display_type)
@dbus.service.method('org.wicd.daemon')
def SetSignalDisplayType(self, value):
""" Sets the signal display type and writes it the wicd config file. """
config = ConfigParser.ConfigParser()
config.read(self.app_conf)
config.set("Settings", "signal_display_type", value)
configfile = open(self.app_conf, "w")
config.write(configfile)
self.signal_display_type = int(value)
@dbus.service.method('org.wicd.daemon')
def FormatSignalForPrinting(self, signal):
""" Returns the suffix to display after the signal strength number. """
if self.GetSignalDisplayType() == 1:
return (signal + " dBm")
else:
return (signal + "%")
@dbus.service.method('org.wicd.daemon')
def SetSuspend(self, val):
""" Toggles whether or not monitoring connection status is suspended """
self.suspended = val
if self.suspended:
self.Disconnect()
@dbus.service.method('org.wicd.daemon')
def GetSuspend(self):
""" Returns True if the computer is in the suspend state. """
return self.suspended
@dbus.service.method('org.wicd.daemon')
def AutoConnect(self, fresh):
""" Attempts to autoconnect to a wired or wireless network.
Autoconnect will first try to connect to a wired network, if that
fails it tries a wireless connection.
"""
if fresh:
self.Scan()
if self.CheckPluggedIn():
self._wired_autoconnect()
else:
self._wireless_autoconnect()
def _wired_autoconnect(self):
""" Attempts to autoconnect to a wired network. """
if self.GetWiredAutoConnectMethod() == 2 and \
not self.GetNeedWiredProfileChooser():
self.LaunchChooser()
return
elif self.GetWiredAutoConnectMethod() == 1:
network = self.GetDefaultWiredNetwork()
if not network:
print "Couldn't find a default wired connection," + \
" wired autoconnect failed."
self._wireless_autoconnect()
return
else:
network = self.GetLastUsedWiredNetwork()
if not network:
print "no previous wired profile available, wired " + \
"autoconnect failed."
self._wireless_autoconnect()
return
self.ReadWiredNetworkProfile(network)
self.ConnectWired()
print "Attempting to autoconnect with wired interface..."
self.auto_connecting = True
time.sleep(1.5)
try:
gobject.timeout_add_seconds(3, self._monitor_wired_autoconnect)
except:
gobject.timeout_add(3000, self._monitor_wired_autoconnect)
def _wireless_autoconnect(self):
""" Attempts to autoconnect to a wireless network. """
print "No wired connection present, attempting to autoconnect" + \
"to wireless network"
if self.GetWirelessInterface() is None:
print 'Autoconnect failed because wireless interface returned None'
return
for x, network in enumerate(self.LastScan):
if bool(network["has_profile"]):
if self.debug_mode:
print network["essid"] + ' has profile'
if bool(network.get('automatic')):
print 'trying to automatically connect to...' + \
network["essid"]
self.ConnectWireless(x)
time.sleep(1)
return
print "Unable to autoconnect, you'll have to manually connect"
def _monitor_wired_autoconnect(self):
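        """ Monitors a wired autoconnect attempt, falling back to wireless if it fails. """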
if self.CheckIfWiredConnecting():
return True
elif self.GetWiredIP():
self.auto_connecting = False
return False
elif not self.CheckIfWirelessConnecting():
self._wireless_autoconnect()
self.auto_connecting = False
return False
@dbus.service.method('org.wicd.daemon')
def GetAutoReconnect(self):
""" Returns the value of self.auto_reconnect. See SetAutoReconnect. """
do = bool(self.auto_reconnect)
return self.__printReturn('returning automatically reconnect when ' \
+ 'connection drops', do)
@dbus.service.method('org.wicd.daemon')
def SetAutoReconnect(self, value):
""" Sets the value of self.auto_reconnect.
If True, wicd will try to reconnect as soon as it detects that
an internet connection is lost. If False, it will do nothing,
and wait for the user to initiate reconnection.
"""
print 'setting automatically reconnect when connection drops'
config = ConfigParser.ConfigParser()
config.read(self.app_conf)
config.set("Settings", "auto_reconnect", misc.to_bool(value))
config.write(open(self.app_conf, "w"))
self.auto_reconnect = misc.to_bool(value)
@dbus.service.method('org.wicd.daemon')
def GetGlobalDNSAddresses(self):
""" Returns the global dns addresses. """
return (misc.noneToString(self.dns1), misc.noneToString(self.dns2),
misc.noneToString(self.dns3))
@dbus.service.method('org.wicd.daemon')
def CheckIfConnecting(self):
""" Returns if a network connection is being made. """
if self.CheckIfWiredConnecting() or self.CheckIfWirelessConnecting():
return True
else:
return False
@dbus.service.method('org.wicd.daemon')
def CancelConnect(self):
""" Cancels the wireless connection attempt """
print 'canceling connection attempt'
if self.wifi.connecting_thread:
self.wifi.connecting_thread.should_die = True
if self.wired.connecting_thread:
self.wired.connecting_thread.should_die = True
misc.Run("killall dhclient dhclient3 wpa_supplicant")
@dbus.service.method('org.wicd.daemon')
def GetCurrentInterface(self):
""" Returns the active interface """
return self.current_interface
@dbus.service.method('org.wicd.daemon')
def SetCurrentInterface(self, iface):
""" Sets the current active interface """
self.current_interface = str(iface)
@dbus.service.method('org.wicd.daemon')
def SetNeedWiredProfileChooser(self, val):
""" Sets the need_wired_profile_chooser variable.
If set to True, that alerts the wicd frontend to display the chooser,
if False the frontend will do nothing. This function is only needed
when the frontend starts up, to determine if the chooser was requested
before the frontend was launched.
"""
self.need_profile_chooser = misc.to_bool(val)
@dbus.service.method('org.wicd.daemon')
def ShouldAutoReconnect(self):
""" Returns True if it's the right time to try autoreconnecting. """
if self.GetAutoReconnect() and not self.CheckIfConnecting() and \
not self.GetForcedDisconnect() and not self.auto_connecting:
return True
else:
return False
@dbus.service.method('org.wicd.daemon')
def GetForcedDisconnect(self):
""" Returns the forced_disconnect status. See SetForcedDisconnect. """
return bool(self.forced_disconnect)
@dbus.service.method('org.wicd.daemon')
def SetForcedDisconnect(self, value):
""" Sets the forced_disconnect status.
Set to True when a user manually disconnects or cancels a connection.
It gets set to False as soon as the connection process is manually
started.
"""
self.forced_disconnect = bool(value)
@dbus.service.method('org.wicd.daemon')
def GetGUIOpen(self):
""" Returns the value of gui_open.
        Returns the value of gui_open, which is a boolean that keeps track
of the state of the wicd GUI. If the GUI is open, wicd will not
try to automatically reconnect to networks, as this behavior can
be annoying for the user while trying to use the GUI.
NOTE: It's possible for this to become out of sync, particularly if
the wicd.py is not exited properly while the GUI is open. We should
probably implement some kind of pid system to do it properly.
ANOTHER NOTE: This isn't implemented yet!
"""
return bool(self.gui_open)
@dbus.service.method('org.wicd.daemon')
def SetGUIOpen(self, val):
""" Sets the value of gui_open. """
self.gui_open = bool(val)
@dbus.service.method('org.wicd.daemon')
def SetConnectionStatus(self, state, info):
""" Sets the connection status.
Keyword arguments:
state - An int representing the state of the connection as defined
in misc.py.
info - a list of strings containing data about the connection state.
The contents of this list are dependent on the connection state.
state - info contents:
NOT_CONNECTED - info[0] = ""
CONNECTING - info[0] = "wired" or "wireless"
info[1] = None if wired, an essid if wireless
        WIRED - info[0] = IP Address
WIRELESS - info[0] = IP Address
info[1] = essid
info[2] = signal strength
info[3] = internal networkid
SUSPENDED - info[0] = ""
"""
self.connection_state = state
self.connection_info = info
@dbus.service.method('org.wicd.daemon', out_signature='(uas)')
def GetConnectionStatus(self):
return [self.connection_state, self.connection_info]
@dbus.service.method('org.wicd.daemon')
def GetNeedWiredProfileChooser(self):
""" Returns need_profile_chooser.
Returns a boolean specifying if the wired profile chooser needs to
be launched.
"""
return bool(self.need_profile_chooser)
@dbus.service.method('org.wicd.daemon')
def GetDHCPClient(self):
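        """ Returns the DHCP client the daemon is set to use. """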
return self.dhcp_client
@dbus.service.method('org.wicd.daemon')
def SetDHCPClient(self, client):
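        """ Sets the DHCP client to use and saves the choice to the config file. """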
print "Setting dhcp client to %i" % (int(client))
self.dhcp_client = int(client)
self.wifi.dhcp_client = int(client)
self.wired.dhcp_client = int(client)
config = ConfigParser.ConfigParser()
config.read(self.app_conf)
config.set("Settings", "dhcp_client", client)
config.write(open(self.app_conf, "w"))
@dbus.service.method('org.wicd.daemon')
def GetLinkDetectionTool(self):
return self.link_detect_tool
@dbus.service.method('org.wicd.daemon')
def SetLinkDetectionTool(self, link_tool):
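        """ Sets the link detection tool and saves the choice to the config file. """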
self.link_detect_tool = int(link_tool)
self.wired.link_tool = int(link_tool)
config = ConfigParser.ConfigParser()
config.read(self.app_conf)
config.set("Settings", "link_detect_tool", link_tool)
config.write(open(self.app_conf, "w"))
@dbus.service.method('org.wicd.daemon')
def GetFlushTool(self):
return self.flush_tool
@dbus.service.method('org.wicd.daemon')
def SetFlushTool(self, flush_tool):
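        """ Sets the flush tool and saves the choice to the config file. """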
self.flush_tool = int(flush_tool)
self.wired.flush_tool = int(flush_tool)
self.wifi.flush_tool = int(flush_tool)
config = ConfigParser.ConfigParser()
config.read(self.app_conf)
config.set("Settings", "flush_tool", flush_tool)
config.write(open(self.app_conf, "w"))
@dbus.service.signal(dbus_interface='org.wicd.daemon', signature='')
def LaunchChooser(self):
""" Emits the wired profile chooser dbus signal. """
print 'calling wired profile chooser'
self.SetNeedWiredProfileChooser(True)
@dbus.service.method('org.wicd.daemon', in_signature='uav')
def EmitStatusChanged(self, state, info):
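        """ Emits the StatusChanged D-Bus signal with the given state and info. """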
self.StatusChanged(state, info)
@dbus.service.signal(dbus_interface='org.wicd.daemon', signature='uav')
def StatusChanged(self, state, info):
""" Emits a "status changed" dbus signal.
This D-Bus signal is emitted when the connection status changes.
"""
pass
@dbus.service.method('org.wicd.daemon')
@dbus.service.signal(dbus_interface='org.wicd.daemon', signature='')
def SendStartScanSignal(self):
""" Emits a signal announcing a scan has started. """
pass
@dbus.service.method('org.wicd.daemon')
@dbus.service.signal(dbus_interface='org.wicd.daemon', signature='')
def SendEndScanSignal(self):
""" Emits a signal announcing a scan has finished. """
pass
@dbus.service.method('org.wicd.daemon.wireless')
def SetHiddenNetworkESSID(self, essid):
""" Sets the ESSID of a hidden network for use with Scan(). """
self.hidden_essid = str(misc.Noneify(essid))
@dbus.service.method('org.wicd.daemon.wireless')
def Scan(self):
""" Scan for wireless networks.
        Scans for wireless networks, optionally using a (hidden) essid
set with SetHiddenNetworkESSID.
"""
if self.debug_mode:
print 'scanning start'
self.SendStartScanSignal()
time.sleep(.2)
scan = self.wifi.Scan(str(self.hidden_essid))
self.LastScan = scan
if self.debug_mode:
print 'scanning done'
print 'found ' + str(len(scan)) + ' networks:'
for i, network in enumerate(scan):
self.ReadWirelessNetworkProfile(i)
self.SendEndScanSignal()
@dbus.service.method('org.wicd.daemon.wireless')
def GetIwconfig(self):
""" Calls and returns the output of iwconfig"""
return self.wifi.GetIwconfig()
@dbus.service.method('org.wicd.daemon.wireless')
def GetNumberOfNetworks(self):
""" Returns number of networks. """
return len(self.LastScan)
@dbus.service.method('org.wicd.daemon.wireless')
def GetApBssid(self):
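        """ Returns the BSSID of the currently associated access point. """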
return self.wifi.GetBSSID()
@dbus.service.method('org.wicd.daemon.wireless')
def CreateAdHocNetwork(self, essid, channel, ip, enctype, key, encused,
ics):
""" Creates an ad-hoc network using user inputted settings. """
self.wifi.CreateAdHocNetwork(essid, channel, ip, enctype, key, encused,
ics)
@dbus.service.method('org.wicd.daemon.wireless')
def GetKillSwitchEnabled(self):
""" Returns true if kill switch is pressed. """
status = self.wifi.GetKillSwitchStatus()
return status
@dbus.service.method('org.wicd.daemon.wireless')
def GetWirelessProperty(self, networkid, prop):
""" Retrieves wireless property from the network specified """
try:
value = self.LastScan[networkid].get(prop)
except IndexError:
if self.debug_mode:
print "GetWirelessProperty: Index error occured trying to " + \
"retrieve property %s" % prop
value = ""
try:
value = misc.to_unicode(value)
except:
pass
return value
@dbus.service.method('org.wicd.daemon.wireless')
def SetWirelessProperty(self, networkid, prop, value):
""" Sets property to value in network specified. """
if (prop.strip()).endswith("script"):
print "Setting script properties through the daemon is not" \
+ " permitted."
return False
self.LastScan[networkid][prop] = misc.Noneify(value)
@dbus.service.method('org.wicd.daemon.wireless')
def DetectWirelessInterface(self):
""" Returns an automatically detected wireless interface. """
iface = self.wifi.DetectWirelessInterface()
if iface:
print 'Automatically detected wireless interface ' + iface
else:
print "Couldn't detect a wireless interface."
return str(iface)
@dbus.service.method('org.wicd.daemon.wireless')
def DisconnectWireless(self):
""" Disconnects the wireless network. """
self.SetForcedDisconnect(True)
self.wifi.Disconnect()
@dbus.service.method('org.wicd.daemon.wireless')
def IsWirelessUp(self):
""" Returns a boolean specifying if wireless is up or down. """
return self.wifi.IsUp()
@dbus.service.method('org.wicd.daemon.wireless')
def GetPrintableSignalStrength(self, iwconfig=None):
""" Assigns a signal strength appropriate for display
This is used separately from the raw signal strength retrieving
        functions as a way to simplify the strength polling process for
the GUI and tray icon, by returning the strength that the user
has requested to be displayed in wicd preferences.
"""
if self.GetSignalDisplayType() == 0:
return self.GetCurrentSignalStrength(iwconfig)
else:
return self.GetCurrentDBMStrength(iwconfig)
@dbus.service.method('org.wicd.daemon.wireless')
def GetCurrentSignalStrength(self, iwconfig=None):
""" Returns the current signal strength. """
try:
strength = int(self.wifi.GetSignalStrength(iwconfig))
except:
strength = 0
return strength
@dbus.service.method('org.wicd.daemon.wireless')
def GetCurrentDBMStrength(self, iwconfig=None):
""" Returns the current dbm signal strength. """
try:
dbm_strength = int(self.wifi.GetDBMStrength(iwconfig))
except:
dbm_strength = 0
return dbm_strength
@dbus.service.method('org.wicd.daemon.wireless')
def GetCurrentNetwork(self, iwconfig=None):
""" Returns the current network. """
current_network = str(self.wifi.GetCurrentNetwork(iwconfig))
return current_network
@dbus.service.method('org.wicd.daemon.wireless')
def GetCurrentNetworkID(self, iwconfig=None):
""" Returns the id of the current network, or -1 if its not found. """
currentESSID = self.GetCurrentNetwork(iwconfig)
for x in xrange(0, len(self.LastScan)):
if self.LastScan[x]['essid'] == currentESSID:
return x
if self.debug_mode:
print 'GetCurrentNetworkID: Returning -1, current network not found'
return -1
@dbus.service.method('org.wicd.daemon.wireless')
def ConnectWireless(self, id):
""" Connects the the wireless network specified by i"""
self.SetForcedDisconnect(False)
self.wifi.before_script = self.GetWirelessProperty(id, 'beforescript')
self.wifi.after_script = self.GetWirelessProperty(id, 'afterscript')
self.wifi.disconnect_script = self.GetWirelessProperty(id,
'disconnectscript')
print 'Connecting to wireless network ' + self.LastScan[id]['essid']
return self.wifi.Connect(self.LastScan[id], debug=self.debug_mode)
@dbus.service.method('org.wicd.daemon.wireless')
def CheckIfWirelessConnecting(self):
"""Returns True if wireless interface is connecting, otherwise False."""
if self.wifi.connecting_thread:
return self.wifi.connecting_thread.is_connecting
else:
return False
@dbus.service.method('org.wicd.daemon.wireless')
def GetWirelessIP(self):
""" Returns the IP associated with the wireless interface. """
ip = self.wifi.GetIP()
return ip
@dbus.service.method('org.wicd.daemon.wireless')
def CheckWirelessConnectingMessage(self):
""" Returns the wireless interface's status message. """
if not self.wifi.connecting_thread == None:
stat = self.wifi.connecting_thread.GetStatus()
return stat
else:
return False
@dbus.service.method('org.wicd.daemon.wired')
def GetWiredIP(self):
""" Returns the wired interface's ip address. """
ip = self.wired.GetIP()
return ip
@dbus.service.method('org.wicd.daemon.wired')
def CheckIfWiredConnecting(self):
""" Returns True if wired interface is connecting, otherwise False. """
if self.wired.connecting_thread:
return self.wired.connecting_thread.is_connecting
else:
return False
@dbus.service.method('org.wicd.daemon.wired')
def SetWiredAutoConnectMethod(self, method):
""" Sets which method to use to autoconnect to wired networks. """
config = ConfigParser.ConfigParser()
config.read(self.app_conf)
config.set("Settings","wired_connect_mode", int(method))
config.write(open(self.app_conf, "w"))
self.wired_connect_mode = int(method)
@dbus.service.method('org.wicd.daemon.wired')
def GetWiredAutoConnectMethod(self):
""" Returns the wired autoconnect method. """
return int(self.wired_connect_mode)
@dbus.service.method('org.wicd.daemon.wired')
def CheckWiredConnectingMessage(self):
""" Returns the wired interface's status message. """
if self.wired.connecting_thread:
return self.wired.connecting_thread.GetStatus()
else:
return False
@dbus.service.method('org.wicd.daemon.wired')
def SetWiredProperty(self, prop, value):
""" Sets the given property to the given value. """
if self.WiredNetwork:
if (prop.strip()).endswith("script"):
print "Setting script properties through the daemon" \
+ " is not permitted."
return False
self.WiredNetwork[prop] = misc.Noneify(value)
return True
else:
print 'SetWiredProperty: WiredNetwork does not exist'
return False
@dbus.service.method('org.wicd.daemon.wired')
def GetWiredProperty(self, prop):
""" Returns the requested wired property. """
if self.WiredNetwork:
value = self.WiredNetwork.get(prop)
return value
else:
print 'GetWiredProperty: WiredNetwork does not exist'
return False
@dbus.service.method('org.wicd.daemon.wired')
def HasWiredDriver(self):
""" Returns True if a driver is associated with this interface. """
if self.wired.driver:
return True
else:
return False
@dbus.service.method('org.wicd.daemon.wired')
def DisconnectWired(self):
""" Disconnects the wired network. """
self.SetForcedDisconnect(True)
self.wired.Disconnect()
@dbus.service.method('org.wicd.daemon.wired')
def SetAlwaysShowWiredInterface(self, value):
""" Sets always_show_wired_interface to the given value. """
config = ConfigParser.ConfigParser()
config.read(self.app_conf)
config.set("Settings", "always_show_wired_interface",
misc.to_bool(value))
config.write(open(self.app_conf, "w"))
self.always_show_wired_interface = misc.to_bool(value)
@dbus.service.method('org.wicd.daemon.wired')
def GetAlwaysShowWiredInterface(self):
""" Returns always_show_wired_interface """
return bool(self.always_show_wired_interface)
@dbus.service.method('org.wicd.daemon.wired')
def CheckPluggedIn(self):
""" Returns True if a ethernet cable is present, False otherwise. """
if self.wired.wired_interface and self.wired.wired_interface != "None":
return self.wired.CheckPluggedIn()
else:
return None
@dbus.service.method('org.wicd.daemon.wired')
def DetectWiredInterface(self):
""" Returns an automatically detected wired interface. """
iface = self.wired.DetectWiredInterface()
if iface:
print 'automatically detected wired interface ' + iface
else:
print "Couldn't detect a wired interface."
return str(iface)
@dbus.service.method('org.wicd.daemon.wired')
def IsWiredUp(self):
""" Returns a boolean specifying if wired iface is up or down. """
return self.wired.IsUp()
@dbus.service.method('org.wicd.daemon.wired')
def ConnectWired(self):
""" Connects to a wired network. """
self.SetForcedDisconnect(False)
self.wired.before_script = self.GetWiredProperty("beforescript")
self.wired.after_script = self.GetWiredProperty("afterscript")
self.wired.disconnect_script = self.GetWiredProperty("disconnectscript")
self.wired.Connect(self.WiredNetwork, debug=self.debug_mode)
@dbus.service.method('org.wicd.daemon.config')
def DisableLogging(self):
global logging_enabled
logging_enabled = False
@dbus.service.method('org.wicd.daemon.config')
def EnableLogging(self):
global logging_enabled
logging_enabled = True
@dbus.service.method('org.wicd.daemon.config')
def CreateWiredNetworkProfile(self, profilename, default=False):
""" Creates a wired network profile. """
profilename = misc.to_unicode(profilename)
print "Creating wired profile for " + profilename
config = ConfigParser.ConfigParser()
config.read(self.wired_conf)
if config.has_section(profilename):
return False
config.add_section(profilename)
config.set(profilename, "ip", None)
config.set(profilename, "broadcast", None)
config.set(profilename, "netmask", None)
config.set(profilename, "gateway", None)
config.set(profilename, "dns1", None)
config.set(profilename, "dns2", None)
config.set(profilename, "dns3", None)
config.set(profilename, "beforescript", None)
config.set(profilename, "afterscript", None)
config.set(profilename, "disconnectscript", None)
config.set(profilename, "default", default)
config.write(open(self.wired_conf, "w"))
return True
@dbus.service.method('org.wicd.daemon.config')
def UnsetWiredLastUsed(self):
""" Finds the previous lastused network, and sets lastused to False. """
config = ConfigParser.ConfigParser()
config.read(self.wired_conf)
profileList = config.sections()
for profile in profileList:
if config.has_option(profile, "lastused"):
if misc.to_bool(config.get(profile, "lastused")):
config.set(profile, "lastused", False)
config.write(open(self.wired_conf, "w"))
self.SaveWiredNetworkProfile(profile)
@dbus.service.method('org.wicd.daemon.config')
def UnsetWiredDefault(self):
""" Unsets the default option in the current default wired profile. """
config = ConfigParser.ConfigParser()
config.read(self.wired_conf)
profileList = config.sections()
for profile in profileList:
if config.has_option(profile, "default"):
if misc.to_bool(config.get(profile, "default")):
config.set(profile, "default", False)
config.write(open(self.wired_conf, "w"))
self.SaveWiredNetworkProfile(profile)
@dbus.service.method('org.wicd.daemon.config')
def GetDefaultWiredNetwork(self):
""" Returns the current default wired network. """
config = ConfigParser.ConfigParser()
config.read(self.wired_conf)
profileList = config.sections()
for profile in profileList:
if config.has_option(profile, "default"):
if misc.to_bool(config.get(profile, "default")):
return profile
return None
@dbus.service.method('org.wicd.daemon.config')
def GetLastUsedWiredNetwork(self):
""" Returns the profile of the last used wired network. """
config = ConfigParser.ConfigParser()
config.read(self.wired_conf)
profileList = config.sections()
for profile in profileList:
if config.has_option(profile,"lastused"):
if misc.to_bool(config.get(profile,"lastused")):
return profile
return None
@dbus.service.method('org.wicd.daemon.config')
def DeleteWiredNetworkProfile(self, profilename):
""" Deletes a wired network profile. """
profilename = misc.to_unicode(profilename)
print "Deleting wired profile for " + str(profilename)
config = ConfigParser.ConfigParser()
config.read(self.wired_conf)
if config.has_section(profilename):
config.remove_section(profilename)
else:
return "500: Profile does not exist"
config.write(open(self.wired_conf, "w"))
return "100: Profile Deleted"
@dbus.service.method('org.wicd.daemon.config')
def SaveWiredNetworkProfile(self, profilename):
""" Writes a wired network profile to disk. """
def write_script_ent(prof, conf, script):
if not conf.has_option(prof, script):
conf.set(prof, script, None)
if profilename == "":
return "500: Bad Profile name"
profilename = misc.to_unicode(profilename)
config = ConfigParser.ConfigParser()
config.read(self.wired_conf)
if config.has_section(profilename):
config.remove_section(profilename)
config.add_section(profilename)
for x in self.WiredNetwork:
config.set(profilename, x, self.WiredNetwork[x])
write_script_ent(profilename, config, "beforescript")
write_script_ent(profilename, config, "afterscript")
write_script_ent(profilename, config, "disconnectscript")
config.write(open(self.wired_conf, "w"))
return "100: Profile Written"
@dbus.service.method('org.wicd.daemon.config')
def ReadWiredNetworkProfile(self, profilename):
""" Reads a wired network profile in as the currently active profile """
profile = {}
profilename = misc.to_unicode(profilename)
config = ConfigParser.ConfigParser()
config.read(self.wired_conf)
if config.has_section(profilename):
for x in config.options(profilename):
profile[x] = misc.Noneify(config.get(profilename, x))
profile['use_global_dns'] = bool(profile.get('use_global_dns'))
profile['use_static_dns'] = bool(profile.get('use_static_dns'))
self.WiredNetwork = profile
return "100: Loaded Profile"
else:
self.WiredNetwork = None
return "500: Profile Not Found"
@dbus.service.method('org.wicd.daemon.config')
def GetWiredProfileList(self):
""" Returns a list of all wired profiles in wired-settings.conf. """
config = ConfigParser.ConfigParser()
config.read(self.wired_conf)
if config.sections():
return config.sections()
else:
return None
@dbus.service.method('org.wicd.daemon.config')
def SaveWirelessNetworkProfile(self, id):
""" Writes a wireless profile to disk. """
def write_script_ent(prof, conf, script):
if not conf.has_option(prof, script):
conf.set(prof, script, None)
config = ConfigParser.ConfigParser()
config.read(self.wireless_conf)
cur_network = self.LastScan[id]
bssid_key = cur_network["bssid"]
essid_key = "essid:" + cur_network["essid"]
if config.has_section(bssid_key):
config.remove_section(bssid_key)
config.add_section(bssid_key)
if cur_network["use_settings_globally"]:
if config.has_section(essid_key):
config.remove_section(essid_key)
config.add_section(essid_key)
for x in cur_network:
config.set(bssid_key, x, cur_network[x])
if cur_network["use_settings_globally"]:
config.set(essid_key, x, cur_network[x])
write_script_ent(bssid_key, config, "beforescript")
write_script_ent(bssid_key, config, "afterscript")
write_script_ent(bssid_key, config, "disconnect")
if cur_network["use_settings_globally"]:
write_script_ent(essid_key, config, "beforescript")
write_script_ent(essid_key, config, "afterscript")
write_script_ent(essid_key, config, "disconnect")
config.write(open(self.wireless_conf, "w"))
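    # Resulting wireless-settings.conf layout (sketch; the BSSID and ESSID
    # shown are hypothetical):
    #   [00:11:22:33:44:55]     <- one section per access point (BSSID)
    #   essid = ExampleNet
    #   ...
    #   [essid:ExampleNet]      <- written only when the network has
    #                              use_settings_globally set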
@dbus.service.method('org.wicd.daemon.config')
def SaveWirelessNetworkProperty(self, id, option):
""" Writes a particular wireless property to disk. """
if (option.strip()).endswith("script"):
print 'You cannot save script information to disk through ' + \
'the daemon.'
return
cur_network = self.LastScan[id]
essid_key = "essid:" + cur_network["essid"]
config = ConfigParser.ConfigParser()
config.read(self.wireless_conf)
if not config.has_section(cur_network["bssid"]):
config.add_section(cur_network["bssid"])
config.set(cur_network["bssid"], option,
str(cur_network[option]))
        if config.has_section(essid_key):
            # config.get() returns a string, so coerce it before testing,
            # matching how this flag is checked elsewhere in the module.
            if misc.to_bool(config.get(essid_key, 'use_settings_globally')):
                config.set(essid_key, option, str(cur_network[option]))
config.write(open(self.wireless_conf, "w"))
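    # Illustrative client-side flow (ids index the daemon's most recent
    # Scan() results; assumes a running daemon and the D-Bus proxy sketched
    # above):
    #   wireless = dbus.Interface(proxy, 'org.wicd.daemon.wireless')
    #   config = dbus.Interface(proxy, 'org.wicd.daemon.config')
    #   wireless.SetWirelessProperty(0, 'automatic', True)
    #   config.SaveWirelessNetworkProperty(0, 'automatic')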
@dbus.service.method('org.wicd.daemon.config')
def RemoveGlobalEssidEntry(self, networkid):
""" Removes the global entry for the networkid provided. """
config = ConfigParser.ConfigParser()
config.read(self.wireless_conf)
cur_network = self.LastScan[networkid]
essid_key = "essid:" + cur_network["essid"]
if config.has_section(essid_key):
config.remove_section(essid_key)
config.write(open(self.wireless_conf, "w"))
@dbus.service.method('org.wicd.daemon.config')
def ReadWirelessNetworkProfile(self, id):
""" Reads in wireless profile as the active network """
config = ConfigParser.ConfigParser()
config.read(self.wireless_conf)
cur_network = self.LastScan[id]
essid_key = "essid:" + cur_network["essid"]
bssid_key = cur_network["bssid"]
if self.debug_mode:
print bssid_key
        if config.has_section(essid_key) and \
           misc.stringToNone(config.get(essid_key, 'use_settings_globally')):
return self._read_wireless_profile(config, cur_network,
essid_key)
elif config.has_section(bssid_key):
return self._read_wireless_profile(config, cur_network, bssid_key)
else:
cur_network["has_profile"] = False
return "500: Profile Not Found"
def _read_wireless_profile(self, config, cur_network, section):
cur_network["has_profile"] = True
if cur_network["hidden"]:
if config.has_option(section, "essid"):
cur_network["essid"] = config.get(section, "essid")
if cur_network["essid"] in ["", "Hidden", "<hidden>"]:
cur_network["essid"] = "<hidden>"
        for x in config.options(section):
            if x not in cur_network or x.endswith("script"):
                cur_network[x] = misc.Noneify(config.get(section, x))
for option in ['use_static_dns', 'use_global_dns', 'encryption',
'use_settings_globally']:
cur_network[option] = bool(cur_network.get(option))
for key in cur_network:
cur_network[key] = misc.to_unicode(cur_network[key])
return "100: Loaded Profile"
@dbus.service.method('org.wicd.daemon.config')
def WriteWindowSize(self, width, height, win_name):
"""Write the desired default window size"""
if win_name == "main":
height_str = "window_height"
width_str = "window_width"
else:
height_str = "pref_height"
width_str = "pref_width"
config = ConfigParser.ConfigParser()
config.read(self.app_conf)
if config.has_section("Settings"):
config.set("Settings", width_str, width)
config.set("Settings", height_str, height)
config.write(open(self.app_conf, "w"))
@dbus.service.method('org.wicd.daemon.config')
def ReadWindowSize(self, win_name):
"""Returns a list containing the desired default window size
Attempts to read the default size from the config file,
and if that fails, returns a default of 605 x 400.
"""
default_width = default_height = width_str = height_str = -1
if win_name == "main":
default_width = 605
default_height = 400
width_str = "window_width"
height_str = "window_height"
else:
default_width = 125
default_height = 590
width_str = "pref_width"
height_str = "pref_height"
width = height = -1
config = ConfigParser.ConfigParser()
config.read(self.app_conf)
if config.has_section("Settings"):
if config.has_option("Settings", width_str):
width = config.get("Settings", width_str)
else:
width = default_width
if config.has_option("Settings", height_str):
height = config.get("Settings", height_str)
else:
height = default_height
size = []
size.append(int(width))
size.append(int(height))
return size
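    # The corresponding [Settings] keys (sketch):
    #   window_width / window_height  -> main window       (defaults 605 x 400)
    #   pref_width   / pref_height    -> preferences pane   (defaults 125 x 590)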
def __printReturn(self, text, value):
"""prints the specified text and value, then returns the value"""
if self.debug_mode:
print ''.join([text, " ", str(value)])
return value
def get_option(self, section, option, default=None):
""" Method for returning an option from manager-settings.conf.
This method will return a given option from a given section
"""
config = ConfigParser.ConfigParser()
config.read(self.app_conf)
if not config.has_section(section):
config.add_section(section)
if config.has_option(section, option):
ret = config.get(section, option)
print ''.join(['found ', option, ' in configuration ', ret])
else:
config.set(section, option, default)
ret = default
config.write(open(self.app_conf, "w"))
return ret
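    # Example of the read-or-initialize behaviour (mirrors ReadConfig below):
    #   self.get_option("Settings", "dhcp_client", default=0)
    # returns the stored value if present, otherwise writes the default back
    # to manager-settings.conf and returns it.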
def ReadConfig(self):
""" Reads the manager-settings.conf file.
Reads the manager-settings.conf file and loads the stored
values into memory.
"""
if os.path.isfile(self.app_conf):
iface = self.DetectWirelessInterface()
if not iface:
if self.debug_mode:
print "Failed to detect wireless interface, defaulting " + \
"to wlan0, unless a config entry already exists."
iface = "wlan0"
self.SetWirelessInterface(self.get_option("Settings",
"wireless_interface",
default=iface))
iface = self.DetectWiredInterface()
if not iface:
if self.debug_mode:
print "Failed to detect wired interface, defaulting " + \
"to eth0, unless a config entry already exists."
iface = "eth0"
self.SetWiredInterface(self.get_option("Settings",
"wired_interface",
default=iface))
self.SetWPADriver(self.get_option("Settings", "wpa_driver",
default="wext"))
self.SetAlwaysShowWiredInterface(self.get_option("Settings",
"always_show_wired_interface",
default=False))
self.SetUseGlobalDNS(self.get_option("Settings", "use_global_dns",
default=False))
dns1 = self.get_option("Settings", "global_dns_1", default='None')
dns2 = self.get_option("Settings", "global_dns_2", default='None')
dns3 = self.get_option("Settings", "global_dns_3", default='None')
self.SetGlobalDNS(dns1, dns2, dns3)
self.SetAutoReconnect(self.get_option("Settings", "auto_reconnect",
default=True))
self.SetDebugMode(self.get_option("Settings", "debug_mode",
default=False))
self.SetWiredAutoConnectMethod(self.get_option("Settings",
"wired_connect_mode",
default=1))
self.SetSignalDisplayType(self.get_option("Settings",
"signal_display_type",
default=0))
self.SetDHCPClient(self.get_option("Settings", "dhcp_client",
default=0))
self.SetLinkDetectionTool(self.get_option("Settings",
"link_detect_tool",
default=0))
self.SetFlushTool(self.get_option("Settings", "flush_tool",
default=0))
else:
print "Configuration file not found, creating, adding defaults..."
config = ConfigParser.ConfigParser()
config.read(self.app_conf)
config.add_section("Settings")
config.set("Settings", "wireless_interface", "wlan0")
config.set("Settings", "wired_interface", "eth0")
config.set("Settings", "always_show_wired_interface", "False")
config.set("Settings", "auto_reconnect", "True")
config.set("Settings", "debug_mode", "False")
config.set("Settings", "wired_connect_mode", "1")
config.set("Settings", "signal_display_type", "0")
config.set("Settings", "dhcp_client", "0")
config.set("Settings", "link_detect_tool", "0")
config.set("Settings", "flush_tool", "0")
config.set("Settings", "dns1", "None")
config.set("Settings", "dns2", "None")
config.set("Settings", "dns3", "None")
iface = self.DetectWirelessInterface()
if iface is not None:
config.set("Settings", "wireless_interface", iface)
else:
print "Couldn't detect a wireless interface, using wlan0..."
config.set("Settings", "wireless_interface", "wlan0")
iface = self.DetectWiredInterface()
if iface is not None:
config.set("Settings", "wired_interface", iface)
else:
print "Couldn't detect a wired interface, using eth0..."
config.set("Settings", "wired_interface", "eth0")
config.set("Settings", "wpa_driver", "wext")
config.write(open(self.app_conf, "w"))
self.SetWirelessInterface(config.get("Settings",
"wireless_interface"))
self.SetWiredInterface(config.get("Settings",
"wired_interface"))
self.SetWPADriver(config.get("Settings",
"wpa_driver"))
self.SetDHCPClient(config.get("Settings", "dhcp_client"))
self.SetLinkDetectionTool(config.get("Settings",
"link_detect_tool"))
self.SetFlushTool(config.get("Settings", "flush_tool"))
self.SetAlwaysShowWiredInterface(False)
self.SetAutoReconnect(True)
self.SetDebugMode(False)
self.SetWiredAutoConnectMethod(1)
self.SetSignalDisplayType(0)
self.SetUseGlobalDNS(False)
self.SetGlobalDNS(None, None, None)
if os.path.isfile(self.wireless_conf):
print "Wireless configuration file found..."
pass
else:
print "Wireless configuration file not found, creating..."
open(self.wireless_conf, "w").close()
if os.path.isfile(self.wired_conf):
print "Wired configuration file found..."
pass
else:
print "Wired configuration file not found, creating a default..."
open(self.wired_conf, "w").close()
self.CreateWiredNetworkProfile("wired-default", default=True)
print "chmoding configuration files 0600..."
os.chmod(self.app_conf, 0600)
os.chmod(self.wireless_conf, 0600)
os.chmod(self.wired_conf, 0600)
print "chowning configuration files root:root..."
os.chown(self.app_conf, 0, 0)
os.chown(self.wireless_conf, 0, 0)
os.chown(self.wired_conf, 0, 0)
print "Using wired interface..." + self.GetWiredInterface()
print "Using wireless interface..." + self.GetWirelessInterface()
def usage():
print """
wicd 1.5.3
wireless (and wired) connection daemon.
Arguments:
\t-a\t--no-autoconnect\tDon't auto-scan/auto-connect.
\t-f\t--no-daemon\tDon't daemonize (run in foreground).
\t-e\t--no-stderr\tDon't redirect stderr.
\t-n\t--no-poll\tDon't monitor network status.
\t-o\t--no-stdout\tDon't redirect stdout.
\t-h\t--help\t\tPrint this help.
"""
def daemonize():
""" Disconnect from the controlling terminal.
Fork twice, once to disconnect ourselves from the parent terminal and a
second time to prevent any files we open from becoming our controlling
terminal.
For more info see:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
"""
try:
pid = os.fork()
if pid > 0:
sys.exit(0)
except OSError, e:
print >> sys.stderr, "Fork #1 failed: %d (%s)" % (e.errno, e.strerror)
sys.exit(1)
os.setsid()
os.umask(0)
try:
pid = os.fork()
if pid > 0:
print wpath.pidfile
dirname = os.path.dirname(wpath.pidfile)
if not os.path.exists(dirname):
os.makedirs(dirname)
pidfile = open(wpath.pidfile, 'w')
pidfile.write(str(pid) + '\n')
pidfile.close()
sys.exit(0)
except OSError, e:
print >> sys.stderr, "Fork #2 failed: %d (%s)" % (e.errno, e.strerror)
sys.exit(1)
sys.stdout.flush()
sys.stderr.flush()
os.close(sys.__stdin__.fileno())
os.close(sys.__stdout__.fileno())
os.close(sys.__stderr__.fileno())
sys.stdin = open('/dev/null', 'r')
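# Quick liveness check against the pidfile written above (sketch; the exact
# path comes from wpath.pidfile):
#   pid = int(open(wpath.pidfile).read().strip())
#   os.kill(pid, 0)   # raises OSError if no process with that pid exists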
def main(argv):
""" The main daemon program.
Keyword arguments:
argv -- The arguments passed to the script.
"""
global child_pid
do_daemonize = True
redirect_stderr = True
redirect_stdout = True
auto_connect = True
try:
opts, args = getopt.getopt(sys.argv[1:], 'fenoah',
['help', 'no-daemon', 'no-poll', 'no-stderr', 'no-stdout',
'no-autoconnect'])
except getopt.GetoptError:
usage()
sys.exit(2)
no_poll = False
for o, a in opts:
if o in ('-h', '--help'):
usage()
sys.exit()
if o in ('-e', '--no-stderr'):
redirect_stderr = False
if o in ('-o', '--no-stdout'):
redirect_stdout = False
if o in ('-f', '--no-daemon'):
do_daemonize = False
if o in ('-a', '--no-autoconnect'):
auto_connect = False
if o in ('-n', '--no-poll'):
no_poll = True
if do_daemonize: daemonize()
if redirect_stderr or redirect_stdout: output = LogWriter()
if redirect_stdout: sys.stdout = output
if redirect_stderr: sys.stderr = output
print '---------------------------'
print 'wicd initializing...'
print '---------------------------'
d_bus_name = dbus.service.BusName('org.wicd.daemon', bus=dbus.SystemBus())
obj = ConnectionWizard(d_bus_name, auto_connect=auto_connect)
gobject.threads_init()
if not no_poll:
(child_pid, x, x, x) = gobject.spawn_async([wpath.lib + "monitor.py"],
flags=gobject.SPAWN_CHILD_INHERITS_STDIN)
signal.signal(signal.SIGTERM, sigterm_caught)
mainloop = gobject.MainLoop()
mainloop.run()
def sigterm_caught(sig, frame):
""" Called when a SIGTERM is caught, kills monitor.py before exiting. """
global child_pid
print 'SIGTERM caught, killing wicd-monitor...'
os.kill(child_pid, signal.SIGTERM)
print 'Removing PID file...'
if os.path.exists(wpath.pidfile):
os.remove(wpath.pidfile)
print 'Shutting down...'
sys.exit(0)
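# Typical invocations (illustrative; must be run as root, and the script
# name may differ depending on how wicd is installed):
#   python daemon.py                   # daemonize, autoconnect, spawn monitor
#   python daemon.py -f -o -e          # stay in the foreground with live output
#   python daemon.py --no-autoconnect  # start without auto-scanning/connecting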
if __name__ == '__main__':
if os.getuid() != 0:
print ("Root priviledges are required for the daemon to run properly." +
" Exiting.")
sys.exit(1)
main(sys.argv)
] | true |
98,584 |
9ba5268500be70b7962a7bf5811906c6d5429d48
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from subjects.models import Subject
from django.contrib.auth.models import User
from levels.models import Level
from utility.helpers import generate_str
class Chapter(models.Model):
subject = models.ForeignKey(Subject, default='', on_delete=models.CASCADE)
level = models.ForeignKey(Level, default='', on_delete=models.CASCADE)
name = models.CharField(max_length=100, default='')
created_by = models.ForeignKey(User, default='', on_delete=models.CASCADE)
date_created = models.DateTimeField(auto_now_add=True)
modified_date = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.subject.name + ' | ' + self.level.name + ' | ' + self.name
def __str__(self):
return generate_str([self.subject.name, self.level.name, self.name])
|
[
"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.db import models\nfrom subjects.models import Subject\nfrom django.contrib.auth.models import User\nfrom levels.models import Level\nfrom utility.helpers import generate_str\n\n\nclass Chapter(models.Model):\n subject = models.ForeignKey(Subject, default='', on_delete=models.CASCADE)\n level = models.ForeignKey(Level, default='', on_delete=models.CASCADE)\n name = models.CharField(max_length=100, default='')\n created_by = models.ForeignKey(User, default='', on_delete=models.CASCADE)\n date_created = models.DateTimeField(auto_now_add=True)\n modified_date = models.DateTimeField(auto_now=True)\n\n def __unicode__(self):\n return self.subject.name + ' | ' + self.level.name + ' | ' + self.name\n\n def __str__(self):\n return generate_str([self.subject.name, self.level.name, self.name])\n",
"from __future__ import unicode_literals\nfrom django.db import models\nfrom subjects.models import Subject\nfrom django.contrib.auth.models import User\nfrom levels.models import Level\nfrom utility.helpers import generate_str\n\n\nclass Chapter(models.Model):\n subject = models.ForeignKey(Subject, default='', on_delete=models.CASCADE)\n level = models.ForeignKey(Level, default='', on_delete=models.CASCADE)\n name = models.CharField(max_length=100, default='')\n created_by = models.ForeignKey(User, default='', on_delete=models.CASCADE)\n date_created = models.DateTimeField(auto_now_add=True)\n modified_date = models.DateTimeField(auto_now=True)\n\n def __unicode__(self):\n return self.subject.name + ' | ' + self.level.name + ' | ' + self.name\n\n def __str__(self):\n return generate_str([self.subject.name, self.level.name, self.name])\n",
"<import token>\n\n\nclass Chapter(models.Model):\n subject = models.ForeignKey(Subject, default='', on_delete=models.CASCADE)\n level = models.ForeignKey(Level, default='', on_delete=models.CASCADE)\n name = models.CharField(max_length=100, default='')\n created_by = models.ForeignKey(User, default='', on_delete=models.CASCADE)\n date_created = models.DateTimeField(auto_now_add=True)\n modified_date = models.DateTimeField(auto_now=True)\n\n def __unicode__(self):\n return self.subject.name + ' | ' + self.level.name + ' | ' + self.name\n\n def __str__(self):\n return generate_str([self.subject.name, self.level.name, self.name])\n",
"<import token>\n\n\nclass Chapter(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __unicode__(self):\n return self.subject.name + ' | ' + self.level.name + ' | ' + self.name\n\n def __str__(self):\n return generate_str([self.subject.name, self.level.name, self.name])\n",
"<import token>\n\n\nclass Chapter(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __unicode__(self):\n return self.subject.name + ' | ' + self.level.name + ' | ' + self.name\n <function token>\n",
"<import token>\n\n\nclass Chapter(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
98,585 |
4e0c0cf342f871137e4819b49f8ec2997e837a19
|
from django.contrib import admin
from .models import Categoria, Proveedor
# Register your models here.
admin.site.register(Categoria)
admin.site.register(Proveedor)
|
[
"from django.contrib import admin\nfrom .models import Categoria,Proveedor\n# Register your models here.\n\n\nadmin.site.register(Categoria)\nadmin.site.register(Proveedor)\n",
"from django.contrib import admin\nfrom .models import Categoria, Proveedor\nadmin.site.register(Categoria)\nadmin.site.register(Proveedor)\n",
"<import token>\nadmin.site.register(Categoria)\nadmin.site.register(Proveedor)\n",
"<import token>\n<code token>\n"
] | false |
98,586 |
95cf3a03537353d0c72c4c8d777c3da27991f594
|
#Importing libraries
import ccxt
import numpy as np
import pandas as pd
from datetime import datetime
#Fetching historical data from Gemini
gemini = ccxt.gemini()
since = '01.01.2021'  # start date of the OHLCV history, in MM.DD.YYYY format
def get_ror(k):
    dt_obj = datetime.strptime(since, '%m.%d.%Y')
startdate = dt_obj.timestamp() * 1000
stock_ohlcv = gemini.fetch_ohlcv("BTC/USD", timeframe = '1d', since = startdate)
df = pd.DataFrame(stock_ohlcv, columns=['date','open','high','low','close','volume'])
df['date'] = pd.to_datetime(df['date'], unit='ms')
df.set_index('date',inplace=True)
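    # Volatility-breakout rule: today's entry target is today's open plus
    # k times the previous day's high-low range.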
df['range'] = (df['high'] - df['low']) * k
df['target'] = df['open'] + df['range'].shift(1)
fee = 0.0035
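    # If the day's high reaches the target, assume entry at the target and
    # exit at the close, net of the fee; otherwise no trade (factor of 1).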
df['ror'] = np.where(df['high'] > df['target'],
df['close'] / df['target'] - fee,
1)
    ror = df['ror'].cumprod().iloc[-2]
return ror
#Increasing k by 0.01 to see which value returns the highest %
for k in np.arange(0.00,1.00,0.01):
ror = get_ror(k)
if ror > 1:
print((k, ror))
|
[
"#Importing libraries\nimport ccxt\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime\n\n#Fetching historical data from Gemini\ngemini = ccxt.gemini()\nsince = '01.01.2021' #MM.DD.YYYY\n\n\ndef get_ror(k):\n dt_obj = datetime.strptime(since ,'%m.%d.%Y')\n startdate = dt_obj.timestamp() * 1000\n stock_ohlcv = gemini.fetch_ohlcv(\"BTC/USD\", timeframe = '1d', since = startdate)\n \n df = pd.DataFrame(stock_ohlcv, columns=['date','open','high','low','close','volume'])\n df['date'] = pd.to_datetime(df['date'], unit='ms') \n df.set_index('date',inplace=True)\n \n df['range'] = (df['high'] - df['low']) * k\n df['target'] = df['open'] + df['range'].shift(1)\n \n fee = 0.0035\n\n df['ror'] = np.where(df['high'] > df['target'],\n df['close'] / df['target'] - fee,\n 1)\n \n ror = df['ror'].cumprod()[-2]\n return ror\n\n#Increasing k by 0.01 to see which value returns the highest %\nfor k in np.arange(0.00,1.00,0.01):\n ror = get_ror(k)\n if ror > 1:\n print((k, ror))\n",
"import ccxt\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime\ngemini = ccxt.gemini()\nsince = '01.01.2021'\n\n\ndef get_ror(k):\n dt_obj = datetime.strptime(since, '%m.%d.%Y')\n startdate = dt_obj.timestamp() * 1000\n stock_ohlcv = gemini.fetch_ohlcv('BTC/USD', timeframe='1d', since=startdate\n )\n df = pd.DataFrame(stock_ohlcv, columns=['date', 'open', 'high', 'low',\n 'close', 'volume'])\n df['date'] = pd.to_datetime(df['date'], unit='ms')\n df.set_index('date', inplace=True)\n df['range'] = (df['high'] - df['low']) * k\n df['target'] = df['open'] + df['range'].shift(1)\n fee = 0.0035\n df['ror'] = np.where(df['high'] > df['target'], df['close'] / df[\n 'target'] - fee, 1)\n ror = df['ror'].cumprod()[-2]\n return ror\n\n\nfor k in np.arange(0.0, 1.0, 0.01):\n ror = get_ror(k)\n if ror > 1:\n print((k, ror))\n",
"<import token>\ngemini = ccxt.gemini()\nsince = '01.01.2021'\n\n\ndef get_ror(k):\n dt_obj = datetime.strptime(since, '%m.%d.%Y')\n startdate = dt_obj.timestamp() * 1000\n stock_ohlcv = gemini.fetch_ohlcv('BTC/USD', timeframe='1d', since=startdate\n )\n df = pd.DataFrame(stock_ohlcv, columns=['date', 'open', 'high', 'low',\n 'close', 'volume'])\n df['date'] = pd.to_datetime(df['date'], unit='ms')\n df.set_index('date', inplace=True)\n df['range'] = (df['high'] - df['low']) * k\n df['target'] = df['open'] + df['range'].shift(1)\n fee = 0.0035\n df['ror'] = np.where(df['high'] > df['target'], df['close'] / df[\n 'target'] - fee, 1)\n ror = df['ror'].cumprod()[-2]\n return ror\n\n\nfor k in np.arange(0.0, 1.0, 0.01):\n ror = get_ror(k)\n if ror > 1:\n print((k, ror))\n",
"<import token>\n<assignment token>\n\n\ndef get_ror(k):\n dt_obj = datetime.strptime(since, '%m.%d.%Y')\n startdate = dt_obj.timestamp() * 1000\n stock_ohlcv = gemini.fetch_ohlcv('BTC/USD', timeframe='1d', since=startdate\n )\n df = pd.DataFrame(stock_ohlcv, columns=['date', 'open', 'high', 'low',\n 'close', 'volume'])\n df['date'] = pd.to_datetime(df['date'], unit='ms')\n df.set_index('date', inplace=True)\n df['range'] = (df['high'] - df['low']) * k\n df['target'] = df['open'] + df['range'].shift(1)\n fee = 0.0035\n df['ror'] = np.where(df['high'] > df['target'], df['close'] / df[\n 'target'] - fee, 1)\n ror = df['ror'].cumprod()[-2]\n return ror\n\n\nfor k in np.arange(0.0, 1.0, 0.01):\n ror = get_ror(k)\n if ror > 1:\n print((k, ror))\n",
"<import token>\n<assignment token>\n\n\ndef get_ror(k):\n dt_obj = datetime.strptime(since, '%m.%d.%Y')\n startdate = dt_obj.timestamp() * 1000\n stock_ohlcv = gemini.fetch_ohlcv('BTC/USD', timeframe='1d', since=startdate\n )\n df = pd.DataFrame(stock_ohlcv, columns=['date', 'open', 'high', 'low',\n 'close', 'volume'])\n df['date'] = pd.to_datetime(df['date'], unit='ms')\n df.set_index('date', inplace=True)\n df['range'] = (df['high'] - df['low']) * k\n df['target'] = df['open'] + df['range'].shift(1)\n fee = 0.0035\n df['ror'] = np.where(df['high'] > df['target'], df['close'] / df[\n 'target'] - fee, 1)\n ror = df['ror'].cumprod()[-2]\n return ror\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<code token>\n"
] | false |
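A minimal offline sketch of the k-sweep idea from the record above, run on a tiny hand-made OHLCV frame instead of a live Gemini download (the numbers and variable names below are illustrative only, not part of the original record):

import numpy as np
import pandas as pd

# Made-up daily candles; column names mirror the record's code.
ohlcv = pd.DataFrame(
    {'open': [100.0, 102.0, 101.0, 104.0],
     'high': [103.0, 105.0, 106.0, 107.0],
     'low': [99.0, 100.0, 100.0, 103.0],
     'close': [102.0, 104.0, 105.0, 106.0]},
    index=pd.date_range('2021-01-01', periods=4, freq='D'))

k, fee = 0.5, 0.0035
rng = (ohlcv['high'] - ohlcv['low']) * k             # k * daily range
target = ohlcv['open'] + rng.shift(1)                # breakout entry level
ror = np.where(ohlcv['high'] > target,               # target hit -> trade
               ohlcv['close'] / target - fee,        # entry at target, exit at close
               1)                                    # otherwise stay flat
print(pd.Series(ror, index=ohlcv.index).cumprod())   # cumulative return factors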
98,587 |
0ccc345854890a1f65e688ec36529e1b13169511
|
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_excel('GitHub_commits.xlsx')
df['DateTime'] = pd.to_datetime(df['DateTime'])
df['Date'] = df['DateTime'].dt.date
df['Time'] = df['DateTime'].dt.time
fig = plt.figure()
ax = fig.add_subplot(111)
ax.grid()
plt.style.use('seaborn')
plt.plot(df['Date'], df['Time'], 'bo', alpha=0.5)
plt.title('GitHub Commits')
# format x-axis ticks as dates
import matplotlib.dates as mdates
years = mdates.YearLocator() # every year
months = mdates.MonthLocator() # every month
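# The leading '\n' in the year format drops the year labels onto a second
# line, below the month numbers drawn by the minor formatter.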
yearsFmt = mdates.DateFormatter('\n%Y')
moFmt = mdates.DateFormatter('%m') # (%b for Jan, Feb Mar; %m for 01 02 03)
ax.xaxis.set_major_locator(years)
ax.xaxis.set_minor_locator(months)
ax.xaxis.set_major_formatter(yearsFmt)
ax.xaxis.set_minor_formatter(moFmt)
plt.savefig('commit_history.png')
|
[
"import pandas as pd\nimport matplotlib.pyplot as plt\n\ndf = pd.read_excel('GitHub_commits.xlsx')\n\ndf['DateTime'] = pd.to_datetime(df['DateTime'])\ndf['Date'] = df['DateTime'].dt.date\ndf['Time'] = df['DateTime'].dt.time\n\n\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nax.grid()\nplt.style.use('seaborn')\n\nplt.plot(df['Date'],df['Time'],'bo', alpha=.5)\nplt.title('GitHub Commits')\n\n# format x-axis ticks as dates\nimport matplotlib.dates as mdates\nyears = mdates.YearLocator() # every year\nmonths = mdates.MonthLocator() # every month\nyearsFmt = mdates.DateFormatter('\\n%Y')\nmoFmt = mdates.DateFormatter('%m') # (%b for Jan, Feb Mar; %m for 01 02 03)\nax.xaxis.set_major_locator(years)\nax.xaxis.set_minor_locator(months)\nax.xaxis.set_major_formatter(yearsFmt)\nax.xaxis.set_minor_formatter(moFmt)\n\n\nplt.savefig('commit_history.png')\n",
"import pandas as pd\nimport matplotlib.pyplot as plt\ndf = pd.read_excel('GitHub_commits.xlsx')\ndf['DateTime'] = pd.to_datetime(df['DateTime'])\ndf['Date'] = df['DateTime'].dt.date\ndf['Time'] = df['DateTime'].dt.time\nfig = plt.figure()\nax = fig.add_subplot(111)\nax.grid()\nplt.style.use('seaborn')\nplt.plot(df['Date'], df['Time'], 'bo', alpha=0.5)\nplt.title('GitHub Commits')\nimport matplotlib.dates as mdates\nyears = mdates.YearLocator()\nmonths = mdates.MonthLocator()\nyearsFmt = mdates.DateFormatter('\\n%Y')\nmoFmt = mdates.DateFormatter('%m')\nax.xaxis.set_major_locator(years)\nax.xaxis.set_minor_locator(months)\nax.xaxis.set_major_formatter(yearsFmt)\nax.xaxis.set_minor_formatter(moFmt)\nplt.savefig('commit_history.png')\n",
"<import token>\ndf = pd.read_excel('GitHub_commits.xlsx')\ndf['DateTime'] = pd.to_datetime(df['DateTime'])\ndf['Date'] = df['DateTime'].dt.date\ndf['Time'] = df['DateTime'].dt.time\nfig = plt.figure()\nax = fig.add_subplot(111)\nax.grid()\nplt.style.use('seaborn')\nplt.plot(df['Date'], df['Time'], 'bo', alpha=0.5)\nplt.title('GitHub Commits')\n<import token>\nyears = mdates.YearLocator()\nmonths = mdates.MonthLocator()\nyearsFmt = mdates.DateFormatter('\\n%Y')\nmoFmt = mdates.DateFormatter('%m')\nax.xaxis.set_major_locator(years)\nax.xaxis.set_minor_locator(months)\nax.xaxis.set_major_formatter(yearsFmt)\nax.xaxis.set_minor_formatter(moFmt)\nplt.savefig('commit_history.png')\n",
"<import token>\n<assignment token>\nax.grid()\nplt.style.use('seaborn')\nplt.plot(df['Date'], df['Time'], 'bo', alpha=0.5)\nplt.title('GitHub Commits')\n<import token>\n<assignment token>\nax.xaxis.set_major_locator(years)\nax.xaxis.set_minor_locator(months)\nax.xaxis.set_major_formatter(yearsFmt)\nax.xaxis.set_minor_formatter(moFmt)\nplt.savefig('commit_history.png')\n",
"<import token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n<code token>\n"
] | false |
98,588 |
342b0a87f7623d1b421327887b1717daa2f27c21
|
from flask import redirect, request, make_response, abort, jsonify
from flask_login import LoginManager, login_user, logout_user, current_user, login_required
from flask import Flask, render_template
from data import db_session, objects, users
from flask_restful import abort, Api
import os
from resources import objects_resorce, users_resource
from algorithms.password_algorithms import chek_password_combination
from algorithms.phone_number_algorithms import check_phone
import logging
from forms import RegisterForm, LoginForm, ObjectsForm, SortAscending
from forms import SortDescending, EditProfileForm, FindObjectForm, ConfirmPasswordForm
db_session.global_init("db/blogs.sqlite")
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
app.config['SECRET_KEY'] = 'DinoTradeTheBest123_secret_key'
api = Api(app)
api.add_resource(objects_resorce.ObjectsListResource, '/api/v0.1/objects')
api.add_resource(objects_resorce.ObjResource, '/api/v0.1/objects/<int:obj_id>')
# ------------------------------------------------
api.add_resource(users_resource.UsersListResource, '/api/v0.1/users')
api.add_resource(users_resource.UsersResource, '/api/v0.1/users/<int:user_id>')
ALLOWED_TYPES = ['jpg', 'png', 'jpeg', 'gif']
login_manager = LoginManager()
login_manager.init_app(app)
UPLOAD_FOLDER = os.getcwd() + '/static/img'
files = []
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def return_date(user):
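    # Format the user's registration date as a Russian
    # "<day> <month name> <year> года" string.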
months = {
1: 'января',
2: 'февраля',
3: 'марта',
4: 'апреля',
5: 'мая',
6: 'июня',
7: 'июля',
8: 'августа',
9: 'сентября',
10: 'октября',
11: 'ноября',
12: 'декабря'
}
return f'{user.created_date.date().day} {months[user.created_date.date().month]}' \
f' {user.created_date.date().year} года'
def log():
logging.info('Info')
logging.warning('Warning')
logging.error('Error')
logging.critical('Critical or Fatal')
@login_manager.user_loader
def load_user(user_id):
sessions = db_session.create_session()
return sessions.query(users.User).get(user_id)
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify({'error': 'Not found'}), 404)
def open_file(id, type):
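    # Save the uploaded file under static/img/avatar_<id>/ or
    # static/img/object_<id>/. Returns the saved file's path for 'avatar',
    # a (file, relative_path) pair for 'object', and (False, False)
    # when the file extension is not in ALLOWED_TYPES.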
file = request.files['file']
if file.filename.split('.')[-1] not in ALLOWED_TYPES:
return False, False
if type == 'avatar':
path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\')) + '/avatar_' + str(id) + '/'
try:
os.mkdir(path_of_folder)
except FileExistsError:
pass
filename = path_of_folder + file.filename
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
return filename
if type == 'object':
path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\')) + '/object_' + str(id) + '/'
try:
os.mkdir(path_of_folder)
except FileExistsError:
pass
filename = path_of_folder + file.filename
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
return file, '../' + '/'.join(filename.split('/')[-4:])
@app.route('/object_delete/<int:id>', methods=['GET', 'POST'])
@login_required
def object_delete(id):
sessions = db_session.create_session()
obj = sessions.query(objects.Object).filter(
objects.Object.id == id).first()
if obj:
sessions.delete(obj)
sessions.commit()
else:
abort(404)
return redirect('/')
@app.route('/edit_object/<int:id>', methods=['GET', 'POST'])
@login_required
def edit_obj(id):
form = ObjectsForm()
if request.method == "GET":
sessions = db_session.create_session()
obj = sessions.query(objects.Object).filter(objects.Object.id == id).first()
if obj:
form.name.data = obj.name
form.price.data = obj.price
form.description.data = obj.description
form.category.data = obj.category
form.sold.data = obj.sold
else:
abort(404)
if form.validate_on_submit():
sessions = db_session.create_session()
obj = sessions.query(objects.Object).filter(objects.Object.id == id).first()
if not form.rules.data:
return render_template('add_objects.html',
title='Новое объявление',
form=form,
files=files,
id=None, rules_message='Вы должны согласиться с '
'правилами сайта!')
if obj:
obj.name = form.name.data
if form.price.data > 10000000000:
return render_template('add_objects.html',
title='Новое объявление',
form=form,
files=files,
id=None, incor_ln='Мы не можем брать ответственность'
' за столь серьёзную сделку')
elif form.price.data < 0:
return render_template('add_objects.html',
title='Новое объявление',
form=form,
files=files,
id=None, incor_ln='Укажите корректную цену')
else:
obj.price = form.price.data
obj.name_for_find = form.name.data.lower()
obj.description = form.description.data
obj.category = form.category.data
obj.sold = form.sold.data
sessions.commit()
return redirect("/")
else:
abort(404)
return render_template('add_objects.html', title='Редактирование объекта', form=form, id=id)
@app.route('/change_avatar', methods=['GET', 'POST'])
@login_required
def change_avatar():
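    # On POST, save the uploaded image (if its extension is allowed) and
    # store its relative path in current_user.avatar; on GET just show
    # the current avatar.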
if request.method == 'POST':
session = db_session.create_session()
filename = open_file(current_user.id, 'avatar')
if not filename[0]:
files = current_user.avatar
return render_template('change_avatar.html', title='Смена аватарки', files=files)
current_user.avatar = '../' + '/'.join(filename.split('/')[-4:])
session.merge(current_user)
session.commit()
files = current_user.avatar
return render_template('change_avatar.html', title='Смена аватарки', files=files)
@app.route('/users_list')
@login_required
def users_list():
sessions = db_session.create_session()
users_list = sessions.query(users.User).all()
return render_template('users_list.html',
users_list=users_list,
title='Список всех пользователей')
@app.route('/profile/<int:id>')
def profile(id):
sessions = db_session.create_session()
user = sessions.query(users.User).filter(users.User.id == id).first()
not_sold_objs = sessions.query(objects.Object).filter(objects.Object.sold == 0,
objects.Object.user_id == user.id)
sold_objs = sessions.query(objects.Object).filter(objects.Object.sold == 1,
objects.Object.user_id == user.id)
if request.method == 'POST':
session = db_session.create_session()
filename = open_file(current_user.id, 'avatar')
user.avatar = '../' + '/'.join(filename.split('/')[-4:])
session.merge(user)
session.commit()
files = user.avatar
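    # kolvo: the user's listing count, obtained by splitting the string
    # representation of user.objects on '|, '.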
if str(user.objects) == '[]':
kolvo = 0
else:
kolvo = len(str(user.objects).split('|, '))
return render_template('profile_page.html', kolvo=kolvo, title=user.name,
files=files, id=id, user=user,
not_sold_objs=not_sold_objs,
sold_objs=sold_objs,
date=return_date(user))
@app.route('/confirm_password/<int:id>', methods=['GET', 'POST'])
@login_required
def confirm_password(id):
form = ConfirmPasswordForm()
sessions = db_session.create_session()
new = sessions.query(users.User).filter(users.User.id == id).first()
if form.validate_on_submit():
if form.password.data == new.password:
return redirect(f'/edit_profile/{new.id}')
else:
return render_template('confirm_password.html',
message='Неправильный пароль',
title='Подтверждение пароля',
form=form)
return render_template('confirm_password.html',
title='Подтверждение пароля',
form=form)
@app.route('/obj/<int:id>', methods=['GET', 'POST'])
def show_obj(id):
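    # Single-listing page; a POST with a file appends the uploaded photo's
    # path to the space-separated obj.pictures string.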
session = db_session.create_session()
obj = session.query(objects.Object).filter(objects.Object.id == id).first()
if request.method == 'POST':
file, filename = open_file(id, 'object')
if not file:
files = obj.pictures.split()
return render_template('object_page.html', files=files,
author=obj.user,
object=obj,
title=f'Объявление {obj.name}',
date=return_date(obj.user))
if obj:
if filename not in obj.pictures:
obj.pictures = str(obj.pictures) + ' ' + filename + ' '
session.merge(obj)
session.commit()
files = obj.pictures.split()
return render_template('object_page.html', files=files,
author=obj.user,
object=obj,
title=f'Объявление {obj.name}',
date=return_date(obj.user))
@app.route('/object_delete_photos/<int:id>', methods=['GET', 'POST'])
@login_required
def delete_photos(id):
session = db_session.create_session()
obj = session.query(objects.Object).filter(objects.Object.id == id).first()
obj.pictures = ' '
session.commit()
return redirect(f'/obj/{id}')
@app.route('/edit_profile/<int:id>', methods=['GET', 'POST'])
@login_required
def edit_profile(id):
if str(current_user.objects) == '[]':
kolvo = 0
else:
kolvo = len(str(current_user.objects).split('|, '))
form = EditProfileForm()
if request.method == 'GET':
sessions = db_session.create_session()
new = sessions.query(users.User).filter(users.User.id == id).first()
if new:
form.new_name.data = new.name
form.new_email.data = new.email
form.new_password.data = new.password
form.new_town.data = new.town
form.new_phone.data = new.phone
if form.validate_on_submit():
sessions = db_session.create_session()
new = sessions.query(users.User).filter(users.User.id == id).first()
if new:
if not chek_password_combination(form.new_password.data):
return render_template('edit_profile.html',
form=form, title='Регистрация',
pass_message="Слишком слабый пароль")
if form.new_password.data != form.new_password_again.data:
return render_template('edit_profile.html', name=current_user.name,
email=current_user.email,
password=current_user.password, town=current_user.town,
phone=current_user.phone, message='Пароли не совпадают',
title='Редактирование профиля', form=form)
if not check_phone(form.new_phone.data)[0]:
return render_template('edit_profile.html',
form=form, title='Регистрация',
phone_message=check_phone(form.new_phone.data)[1])
if sessions.query(users.User).filter(users.User.email == form.new_email.data,
form.new_email.data != current_user.email
).first():
return render_template('edit_profile.html',
form=form, title='Регистрация',
email_message="Пользователь с такой почтой уже существует")
else:
new.name = form.new_name.data
new.email = form.new_email.data
new.password = form.new_password.data
new.town = form.new_town.data
new.phone = form.new_phone.data
sessions.commit()
return redirect(f'/profile/{current_user.id}')
else:
abort(404)
return render_template('edit_profile.html', name=current_user.name, email=current_user.email,
password=current_user.password, town=current_user.town,
phone=current_user.phone,
kolvo=kolvo, title='Редактирование профиля', form=form)
@app.route('/add_obj', methods=['GET', 'POST'])
@login_required
def add_obj():
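    # Create a new listing for the current user; the site rules must be
    # accepted and the price must be positive and no greater than 10 billion.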
form = ObjectsForm()
if form.validate_on_submit():
sessions = db_session.create_session()
if not form.rules.data:
return render_template('add_objects.html',
title='Новое объявление',
form=form,
files=files,
id=None, rules_message='Вы должны согласиться с '
'правилами сайта!')
obj = objects.Object()
obj.name = form.name.data
obj.name_for_find = form.name.data.lower()
if form.price.data > 10000000000:
return render_template('add_objects.html',
title='Новое объявление',
form=form,
files=files,
id=None, incor_ln='Мы не можем брать ответственность'
' за столь серьёзную сделку')
elif form.price.data <= 0:
print(form.price.data)
return render_template('add_objects.html',
title='Новое объявление',
form=form,
files=files,
id=None, incor_ln='Укажите корректную цену')
else:
obj.price = form.price.data
obj.description = form.description.data
obj.category = form.category.data
obj.sold = form.sold.data
current_user.objects.append(obj)
sessions.merge(current_user)
sessions.commit()
return redirect('/')
return render_template('add_objects.html',
title='Новое объявление',
form=form,
files=files,
id=None)
@app.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
sessions = db_session.create_session()
user = sessions.query(users.User).filter(users.User.email == form.email.data).first()
if user and user.password == form.password.data:
if user.block:
return render_template('login.html',
message='Ваша страница заблокирована'
' за нарушение правил сайта.',
title='Вход', form=form)
login_user(user, remember=form.remember_me.data)
return redirect('/')
return render_template('login.html', message='Неправильный логин или пароль',
title='Вход', form=form)
return render_template('login.html', title='Вход', form=form)
@app.route('/logout')
def logout():
logout_user()
return redirect('/')
@app.route('/block/<int:id>')
def block(id):
session = db_session.create_session()
user = session.query(users.User).filter(users.User.id == id).first()
user.block = True
session.merge(current_user)
session.commit()
return redirect('/')
@app.route('/unblock/<int:id>')
def unblock(id):
session = db_session.create_session()
user = session.query(users.User).filter(users.User.id == id).first()
user.block = False
session.merge(current_user)
session.commit()
return redirect('/')
@app.route('/promote/<int:id>')
def promote(id):
session = db_session.create_session()
user = session.query(users.User).filter(users.User.id == id).first()
user.admin = 1
session.merge(current_user)
session.commit()
return redirect('/')
@app.route('/drop/<int:id>')
def drop(id):
session = db_session.create_session()
user = session.query(users.User).filter(users.User.id == id).first()
user.admin = 0
session.merge(current_user)
session.commit()
return redirect('/')
@app.route('/register', methods=['GET', 'POST'])
def reqister():
form = RegisterForm()
if form.validate_on_submit():
if not chek_password_combination(form.password.data):
return render_template('register.html',
form=form, title='Регистрация',
pass_message="Слишком слабый пароль")
if form.password.data != form.password_again.data:
return render_template('register.html',
form=form, title='Регистрация',
pass_message="Пароли не совпадают")
if not check_phone(form.phone.data)[0]:
return render_template('register.html',
form=form, title='Регистрация',
phone_message=check_phone(form.phone.data)[1])
sessions = db_session.create_session()
if sessions.query(users.User).filter(users.User.email == form.email.data).first():
return render_template('register.html',
form=form, title='Регистрация',
email_message="Пользователь с такой почтой уже существует")
user = users.User(
name=form.name.data,
email=form.email.data,
password=form.password.data,
town=form.town.data,
phone=form.phone.data
)
user.set_password(form.password.data)
sessions.add(user)
sessions.commit()
return redirect('/login')
return render_template('register.html', title='Регистрация', form=form)
@app.route('/rules', methods=['GET', 'POST'])
def rules():
return render_template('rules.html', title='Правила')
@app.route('/', methods=['GET', 'POST'])
@app.route('/index/<category>', methods=['GET', 'POST'])
def main_page(category='Всекатегории'):
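    # Main listing page: shows unsold objects, optionally filtered by
    # category, with ascending/descending price sorting and a substring
    # search by name.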
form = FindObjectForm()
sort_asc_form = SortAscending()
sort_desc_form = SortDescending()
sessions = db_session.create_session()
what_we_want_to_find = ''
if sort_desc_form.sort_descending.data:
if category != 'Всекатегории':
objs = sessions.query(objects.Object).filter(objects.Object.sold == 0,
objects.Object.category == category) \
.order_by(
objects.Object.price.desc())
else:
objs = sessions.query(objects.Object).filter(objects.Object.sold == 0).order_by(
objects.Object.price.desc())
return render_template('main_page.html', category=category, current_user=current_user,
title='DinoTrade', objects=objs, form=form,
sort_asc_form=sort_asc_form, sort_desc_form=sort_desc_form,
name='', find=False)
if sort_asc_form.sort_ascending.data:
if category != 'Всекатегории':
objs = sessions.query(objects.Object).filter(objects.Object.sold == 0,
objects.Object.category == category) \
.order_by(objects.Object.price)
else:
objs = sessions.query(objects.Object).filter(objects.Object.sold == 0).order_by(
objects.Object.price)
return render_template('main_page.html', category=category, current_user=current_user,
title='DinoTrade', objects=objs, form=form,
sort_asc_form=sort_asc_form, sort_desc_form=sort_desc_form,
name='', find=False)
if form.find_line.data:
what_we_want_to_find = form.find_line.data
if request.method == 'GET':
if category != 'Всекатегории':
objs = sessions.query(objects.Object).filter(objects.Object.sold == 0,
objects.Object.category == category)
else:
objs = sessions.query(objects.Object).filter(objects.Object.sold == 0)
return render_template('main_page.html', category=category, current_user=current_user,
title='DinoTrade', objects=objs, form=form,
sort_asc_form=sort_asc_form, sort_desc_form=sort_desc_form,
name='', find=False)
if form.validate_on_submit():
objs = sessions.query(objects.Object).filter(
objects.Object.name_for_find.like(
f'%{what_we_want_to_find.lower()}%'), objects.Object.sold == 0)
return render_template('main_page.html',
category=category,
current_user=current_user,
title='DinoTrade', sort_asc_form=sort_asc_form,
sort_desc_form=sort_desc_form,
objects=objs, form=form)
if __name__ == '__main__':
app.run(port=8080, host='127.0.0.1')
# log()
|
[
"from flask import redirect, request, make_response, abort, jsonify\nfrom flask_login import LoginManager, login_user, logout_user, current_user, login_required\nfrom flask import Flask, render_template\nfrom data import db_session, objects, users\nfrom flask_restful import abort, Api\nimport os\nfrom resources import objects_resorce, users_resource\nfrom algorithms.password_algorithms import chek_password_combination\nfrom algorithms.phone_number_algorithms import check_phone\nimport logging\nfrom forms import RegisterForm, LoginForm, ObjectsForm, SortAscending\nfrom forms import SortDescending, EditProfileForm, FindObjectForm, ConfirmPasswordForm\n\ndb_session.global_init(\"db/blogs.sqlite\")\napp = Flask(__name__)\napp.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024\napp.config['SECRET_KEY'] = 'DinoTradeTheBest123_secret_key'\napi = Api(app)\napi.add_resource(objects_resorce.ObjectsListResource, '/api/v0.1/objects')\napi.add_resource(objects_resorce.ObjResource, '/api/v0.1/objects/<int:obj_id>')\n# ------------------------------------------------\napi.add_resource(users_resource.UsersListResource, '/api/v0.1/users')\napi.add_resource(users_resource.UsersResource, '/api/v0.1/users/<int:user_id>')\nALLOWED_TYPES = ['jpg', 'png', 'jpeg', 'gif']\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\nUPLOAD_FOLDER = os.getcwd() + '/static/img'\nfiles = []\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n\ndef return_date(user):\n months = {\n 1: 'января',\n 2: 'февраля',\n 3: 'марта',\n 4: 'апреля',\n 5: 'мая',\n 6: 'июня',\n 7: 'июля',\n 8: 'августа',\n 9: 'сентября',\n 10: 'октября',\n 11: 'ноября',\n 12: 'декабря'\n }\n return f'{user.created_date.date().day} {months[user.created_date.date().month]}' \\\n f' {user.created_date.date().year} года'\n\n\ndef log():\n logging.info('Info')\n logging.warning('Warning')\n logging.error('Error')\n logging.critical('Critical or Fatal')\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n sessions = db_session.create_session()\n return sessions.query(users.User).get(user_id)\n\n\[email protected](404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)\n\n\ndef open_file(id, type):\n file = request.files['file']\n if file.filename.split('.')[-1] not in ALLOWED_TYPES:\n return False, False\n if type == 'avatar':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')) + '/avatar_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return filename\n if type == 'object':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')) + '/object_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return file, '../' + '/'.join(filename.split('/')[-4:])\n\n\[email protected]('/object_delete/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef object_delete(id):\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(\n objects.Object.id == id).first()\n if obj:\n sessions.delete(obj)\n sessions.commit()\n else:\n abort(404)\n return redirect('/')\n\n\[email protected]('/edit_object/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_obj(id):\n form = ObjectsForm()\n if request.method == \"GET\":\n sessions = db_session.create_session()\n obj = 
sessions.query(objects.Object).filter(objects.Object.id == id).first()\n if obj:\n form.name.data = obj.name\n form.price.data = obj.price\n form.description.data = obj.description\n form.category.data = obj.category\n form.sold.data = obj.sold\n else:\n abort(404)\n if form.validate_on_submit():\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id).first()\n if not form.rules.data:\n return render_template('add_objects.html',\n title='Новое объявление',\n form=form,\n files=files,\n id=None, rules_message='Вы должны согласиться с '\n 'правилами сайта!')\n if obj:\n obj.name = form.name.data\n if form.price.data > 10000000000:\n return render_template('add_objects.html',\n title='Новое объявление',\n form=form,\n files=files,\n id=None, incor_ln='Мы не можем брать ответственность'\n ' за столь серьёзную сделку')\n elif form.price.data < 0:\n return render_template('add_objects.html',\n title='Новое объявление',\n form=form,\n files=files,\n id=None, incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.name_for_find = form.name.data.lower()\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n sessions.commit()\n return redirect(\"/\")\n else:\n abort(404)\n return render_template('add_objects.html', title='Редактирование объекта', form=form, id=id)\n\n\[email protected]('/change_avatar', methods=['GET', 'POST'])\n@login_required\ndef change_avatar():\n if request.method == 'POST':\n session = db_session.create_session()\n filename = open_file(current_user.id, 'avatar')\n if not filename[0]:\n files = current_user.avatar\n return render_template('change_avatar.html', title='Смена аватарки', files=files)\n current_user.avatar = '../' + '/'.join(filename.split('/')[-4:])\n session.merge(current_user)\n session.commit()\n files = current_user.avatar\n return render_template('change_avatar.html', title='Смена аватарки', files=files)\n\n\[email protected]('/users_list')\n@login_required\ndef users_list():\n sessions = db_session.create_session()\n users_list = sessions.query(users.User).all()\n return render_template('users_list.html',\n users_list=users_list,\n title='Список всех пользователей')\n\n\[email protected]('/profile/<int:id>')\ndef profile(id):\n sessions = db_session.create_session()\n user = sessions.query(users.User).filter(users.User.id == id).first()\n not_sold_objs = sessions.query(objects.Object).filter(objects.Object.sold == 0,\n objects.Object.user_id == user.id)\n sold_objs = sessions.query(objects.Object).filter(objects.Object.sold == 1,\n objects.Object.user_id == user.id)\n if request.method == 'POST':\n session = db_session.create_session()\n filename = open_file(current_user.id, 'avatar')\n user.avatar = '../' + '/'.join(filename.split('/')[-4:])\n session.merge(user)\n session.commit()\n files = user.avatar\n if str(user.objects) == '[]':\n kolvo = 0\n else:\n kolvo = len(str(user.objects).split('|, '))\n return render_template('profile_page.html', kolvo=kolvo, title=user.name,\n files=files, id=id, user=user,\n not_sold_objs=not_sold_objs,\n sold_objs=sold_objs,\n date=return_date(user))\n\n\[email protected]('/confirm_password/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef confirm_password(id):\n form = ConfirmPasswordForm()\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if form.validate_on_submit():\n if form.password.data == new.password:\n return 
redirect(f'/edit_profile/{new.id}')\n else:\n return render_template('confirm_password.html',\n message='Неправильный пароль',\n title='Подтверждение пароля',\n form=form)\n return render_template('confirm_password.html',\n title='Подтверждение пароля',\n form=form)\n\n\[email protected]('/obj/<int:id>', methods=['GET', 'POST'])\ndef show_obj(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n if request.method == 'POST':\n file, filename = open_file(id, 'object')\n if not file:\n files = obj.pictures.split()\n return render_template('object_page.html', files=files,\n author=obj.user,\n object=obj,\n title=f'Объявление {obj.name}',\n date=return_date(obj.user))\n if obj:\n if filename not in obj.pictures:\n obj.pictures = str(obj.pictures) + ' ' + filename + ' '\n session.merge(obj)\n session.commit()\n files = obj.pictures.split()\n return render_template('object_page.html', files=files,\n author=obj.user,\n object=obj,\n title=f'Объявление {obj.name}',\n date=return_date(obj.user))\n\n\[email protected]('/object_delete_photos/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef delete_photos(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n obj.pictures = ' '\n session.commit()\n return redirect(f'/obj/{id}')\n\n\[email protected]('/edit_profile/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_profile(id):\n if str(current_user.objects) == '[]':\n kolvo = 0\n else:\n kolvo = len(str(current_user.objects).split('|, '))\n form = EditProfileForm()\n if request.method == 'GET':\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if new:\n form.new_name.data = new.name\n form.new_email.data = new.email\n form.new_password.data = new.password\n form.new_town.data = new.town\n form.new_phone.data = new.phone\n if form.validate_on_submit():\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if new:\n if not chek_password_combination(form.new_password.data):\n return render_template('edit_profile.html',\n form=form, title='Регистрация',\n pass_message=\"Слишком слабый пароль\")\n if form.new_password.data != form.new_password_again.data:\n return render_template('edit_profile.html', name=current_user.name,\n email=current_user.email,\n password=current_user.password, town=current_user.town,\n phone=current_user.phone, message='Пароли не совпадают',\n title='Редактирование профиля', form=form)\n if not check_phone(form.new_phone.data)[0]:\n return render_template('edit_profile.html',\n form=form, title='Регистрация',\n phone_message=check_phone(form.new_phone.data)[1])\n if sessions.query(users.User).filter(users.User.email == form.new_email.data,\n form.new_email.data != current_user.email\n ).first():\n return render_template('edit_profile.html',\n form=form, title='Регистрация',\n email_message=\"Пользователь с такой почтой уже существует\")\n else:\n new.name = form.new_name.data\n new.email = form.new_email.data\n new.password = form.new_password.data\n new.town = form.new_town.data\n new.phone = form.new_phone.data\n sessions.commit()\n return redirect(f'/profile/{current_user.id}')\n else:\n abort(404)\n return render_template('edit_profile.html', name=current_user.name, email=current_user.email,\n password=current_user.password, town=current_user.town,\n phone=current_user.phone,\n kolvo=kolvo, 
title='Редактирование профиля', form=form)\n\n\[email protected]('/add_obj', methods=['GET', 'POST'])\n@login_required\ndef add_obj():\n form = ObjectsForm()\n if form.validate_on_submit():\n sessions = db_session.create_session()\n if not form.rules.data:\n return render_template('add_objects.html',\n title='Новое объявление',\n form=form,\n files=files,\n id=None, rules_message='Вы должны согласиться с '\n 'правилами сайта!')\n obj = objects.Object()\n obj.name = form.name.data\n obj.name_for_find = form.name.data.lower()\n if form.price.data > 10000000000:\n return render_template('add_objects.html',\n title='Новое объявление',\n form=form,\n files=files,\n id=None, incor_ln='Мы не можем брать ответственность'\n ' за столь серьёзную сделку')\n elif form.price.data <= 0:\n print(form.price.data)\n return render_template('add_objects.html',\n title='Новое объявление',\n form=form,\n files=files,\n id=None, incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n current_user.objects.append(obj)\n sessions.merge(current_user)\n sessions.commit()\n return redirect('/')\n return render_template('add_objects.html',\n title='Новое объявление',\n form=form,\n files=files,\n id=None)\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n sessions = db_session.create_session()\n user = sessions.query(users.User).filter(users.User.email == form.email.data).first()\n if user and user.password == form.password.data:\n if user.block:\n return render_template('login.html',\n message='Ваша страница заблокирована'\n ' за нарушение правил сайта.',\n title='Вход', form=form)\n login_user(user, remember=form.remember_me.data)\n return redirect('/')\n return render_template('login.html', message='Неправильный логин или пароль',\n title='Вход', form=form)\n return render_template('login.html', title='Вход', form=form)\n\n\[email protected]('/logout')\ndef logout():\n logout_user()\n return redirect('/')\n\n\[email protected]('/block/<int:id>')\ndef block(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.block = True\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/unblock/<int:id>')\ndef unblock(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.block = False\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/promote/<int:id>')\ndef promote(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 1\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/drop/<int:id>')\ndef drop(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 0\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/register', methods=['GET', 'POST'])\ndef reqister():\n form = RegisterForm()\n if form.validate_on_submit():\n if not chek_password_combination(form.password.data):\n return render_template('register.html',\n form=form, title='Регистрация',\n pass_message=\"Слишком слабый пароль\")\n if form.password.data != form.password_again.data:\n return 
render_template('register.html',\n form=form, title='Регистрация',\n pass_message=\"Пароли не совпадают\")\n if not check_phone(form.phone.data)[0]:\n return render_template('register.html',\n form=form, title='Регистрация',\n phone_message=check_phone(form.phone.data)[1])\n sessions = db_session.create_session()\n if sessions.query(users.User).filter(users.User.email == form.email.data).first():\n return render_template('register.html',\n form=form, title='Регистрация',\n email_message=\"Пользователь с такой почтой уже существует\")\n user = users.User(\n name=form.name.data,\n email=form.email.data,\n password=form.password.data,\n town=form.town.data,\n phone=form.phone.data\n )\n user.set_password(form.password.data)\n sessions.add(user)\n sessions.commit()\n return redirect('/login')\n return render_template('register.html', title='Регистрация', form=form)\n\n\[email protected]('/rules', methods=['GET', 'POST'])\ndef rules():\n return render_template('rules.html', title='Правила')\n\n\[email protected]('/', methods=['GET', 'POST'])\[email protected]('/index/<category>', methods=['GET', 'POST'])\ndef main_page(category='Всекатегории'):\n form = FindObjectForm()\n sort_asc_form = SortAscending()\n sort_desc_form = SortDescending()\n sessions = db_session.create_session()\n what_we_want_to_find = ''\n if sort_desc_form.sort_descending.data:\n if category != 'Всекатегории':\n objs = sessions.query(objects.Object).filter(objects.Object.sold == 0,\n objects.Object.category == category) \\\n .order_by(\n objects.Object.price.desc())\n else:\n objs = sessions.query(objects.Object).filter(objects.Object.sold == 0).order_by(\n objects.Object.price.desc())\n return render_template('main_page.html', category=category, current_user=current_user,\n title='DinoTrade', objects=objs, form=form,\n sort_asc_form=sort_asc_form, sort_desc_form=sort_desc_form,\n name='', find=False)\n if sort_asc_form.sort_ascending.data:\n if category != 'Всекатегории':\n objs = sessions.query(objects.Object).filter(objects.Object.sold == 0,\n objects.Object.category == category) \\\n .order_by(objects.Object.price)\n else:\n objs = sessions.query(objects.Object).filter(objects.Object.sold == 0).order_by(\n objects.Object.price)\n return render_template('main_page.html', category=category, current_user=current_user,\n title='DinoTrade', objects=objs, form=form,\n sort_asc_form=sort_asc_form, sort_desc_form=sort_desc_form,\n name='', find=False)\n if form.find_line.data:\n what_we_want_to_find = form.find_line.data\n if request.method == 'GET':\n if category != 'Всекатегории':\n objs = sessions.query(objects.Object).filter(objects.Object.sold == 0,\n objects.Object.category == category)\n else:\n objs = sessions.query(objects.Object).filter(objects.Object.sold == 0)\n return render_template('main_page.html', category=category, current_user=current_user,\n title='DinoTrade', objects=objs, form=form,\n sort_asc_form=sort_asc_form, sort_desc_form=sort_desc_form,\n name='', find=False)\n if form.validate_on_submit():\n objs = sessions.query(objects.Object).filter(\n objects.Object.name_for_find.like(\n f'%{what_we_want_to_find.lower()}%'), objects.Object.sold == 0)\n return render_template('main_page.html',\n category=category,\n current_user=current_user,\n title='DinoTrade', sort_asc_form=sort_asc_form,\n sort_desc_form=sort_desc_form,\n objects=objs, form=form)\n\n\nif __name__ == '__main__':\n app.run(port=8080, host='127.0.0.1')\n # log()\n",
"from flask import redirect, request, make_response, abort, jsonify\nfrom flask_login import LoginManager, login_user, logout_user, current_user, login_required\nfrom flask import Flask, render_template\nfrom data import db_session, objects, users\nfrom flask_restful import abort, Api\nimport os\nfrom resources import objects_resorce, users_resource\nfrom algorithms.password_algorithms import chek_password_combination\nfrom algorithms.phone_number_algorithms import check_phone\nimport logging\nfrom forms import RegisterForm, LoginForm, ObjectsForm, SortAscending\nfrom forms import SortDescending, EditProfileForm, FindObjectForm, ConfirmPasswordForm\ndb_session.global_init('db/blogs.sqlite')\napp = Flask(__name__)\napp.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024\napp.config['SECRET_KEY'] = 'DinoTradeTheBest123_secret_key'\napi = Api(app)\napi.add_resource(objects_resorce.ObjectsListResource, '/api/v0.1/objects')\napi.add_resource(objects_resorce.ObjResource, '/api/v0.1/objects/<int:obj_id>')\napi.add_resource(users_resource.UsersListResource, '/api/v0.1/users')\napi.add_resource(users_resource.UsersResource, '/api/v0.1/users/<int:user_id>')\nALLOWED_TYPES = ['jpg', 'png', 'jpeg', 'gif']\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\nUPLOAD_FOLDER = os.getcwd() + '/static/img'\nfiles = []\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n\ndef return_date(user):\n months = {(1): 'января', (2): 'февраля', (3): 'марта', (4): 'апреля', (\n 5): 'мая', (6): 'июня', (7): 'июля', (8): 'августа', (9):\n 'сентября', (10): 'октября', (11): 'ноября', (12): 'декабря'}\n return (\n f'{user.created_date.date().day} {months[user.created_date.date().month]} {user.created_date.date().year} года'\n )\n\n\ndef log():\n logging.info('Info')\n logging.warning('Warning')\n logging.error('Error')\n logging.critical('Critical or Fatal')\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n sessions = db_session.create_session()\n return sessions.query(users.User).get(user_id)\n\n\[email protected](404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)\n\n\ndef open_file(id, type):\n file = request.files['file']\n if file.filename.split('.')[-1] not in ALLOWED_TYPES:\n return False, False\n if type == 'avatar':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/avatar_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return filename\n if type == 'object':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/object_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return file, '../' + '/'.join(filename.split('/')[-4:])\n\n\[email protected]('/object_delete/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef object_delete(id):\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id).first(\n )\n if obj:\n sessions.delete(obj)\n sessions.commit()\n else:\n abort(404)\n return redirect('/')\n\n\[email protected]('/edit_object/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_obj(id):\n form = ObjectsForm()\n if request.method == 'GET':\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if obj:\n form.name.data 
= obj.name\n form.price.data = obj.price\n form.description.data = obj.description\n form.category.data = obj.category\n form.sold.data = obj.sold\n else:\n abort(404)\n if form.validate_on_submit():\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n if obj:\n obj.name = form.name.data\n if form.price.data > 10000000000:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку'\n )\n elif form.price.data < 0:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.name_for_find = form.name.data.lower()\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n sessions.commit()\n return redirect('/')\n else:\n abort(404)\n return render_template('add_objects.html', title=\n 'Редактирование объекта', form=form, id=id)\n\n\[email protected]('/change_avatar', methods=['GET', 'POST'])\n@login_required\ndef change_avatar():\n if request.method == 'POST':\n session = db_session.create_session()\n filename = open_file(current_user.id, 'avatar')\n if not filename[0]:\n files = current_user.avatar\n return render_template('change_avatar.html', title=\n 'Смена аватарки', files=files)\n current_user.avatar = '../' + '/'.join(filename.split('/')[-4:])\n session.merge(current_user)\n session.commit()\n files = current_user.avatar\n return render_template('change_avatar.html', title='Смена аватарки',\n files=files)\n\n\[email protected]('/users_list')\n@login_required\ndef users_list():\n sessions = db_session.create_session()\n users_list = sessions.query(users.User).all()\n return render_template('users_list.html', users_list=users_list, title=\n 'Список всех пользователей')\n\n\[email protected]('/profile/<int:id>')\ndef profile(id):\n sessions = db_session.create_session()\n user = sessions.query(users.User).filter(users.User.id == id).first()\n not_sold_objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0, objects.Object.user_id == user.id)\n sold_objs = sessions.query(objects.Object).filter(objects.Object.sold ==\n 1, objects.Object.user_id == user.id)\n if request.method == 'POST':\n session = db_session.create_session()\n filename = open_file(current_user.id, 'avatar')\n user.avatar = '../' + '/'.join(filename.split('/')[-4:])\n session.merge(user)\n session.commit()\n files = user.avatar\n if str(user.objects) == '[]':\n kolvo = 0\n else:\n kolvo = len(str(user.objects).split('|, '))\n return render_template('profile_page.html', kolvo=kolvo, title=user.\n name, files=files, id=id, user=user, not_sold_objs=not_sold_objs,\n sold_objs=sold_objs, date=return_date(user))\n\n\[email protected]('/confirm_password/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef confirm_password(id):\n form = ConfirmPasswordForm()\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if form.validate_on_submit():\n if form.password.data == new.password:\n return redirect(f'/edit_profile/{new.id}')\n else:\n return render_template('confirm_password.html', message=\n 
'Неправильный пароль', title='Подтверждение пароля', form=form)\n return render_template('confirm_password.html', title=\n 'Подтверждение пароля', form=form)\n\n\[email protected]('/obj/<int:id>', methods=['GET', 'POST'])\ndef show_obj(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n if request.method == 'POST':\n file, filename = open_file(id, 'object')\n if not file:\n files = obj.pictures.split()\n return render_template('object_page.html', files=files, author=\n obj.user, object=obj, title=f'Объявление {obj.name}', date=\n return_date(obj.user))\n if obj:\n if filename not in obj.pictures:\n obj.pictures = str(obj.pictures) + ' ' + filename + ' '\n session.merge(obj)\n session.commit()\n files = obj.pictures.split()\n return render_template('object_page.html', files=files, author=obj.user,\n object=obj, title=f'Объявление {obj.name}', date=return_date(obj.user))\n\n\[email protected]('/object_delete_photos/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef delete_photos(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n obj.pictures = ' '\n session.commit()\n return redirect(f'/obj/{id}')\n\n\[email protected]('/edit_profile/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_profile(id):\n if str(current_user.objects) == '[]':\n kolvo = 0\n else:\n kolvo = len(str(current_user.objects).split('|, '))\n form = EditProfileForm()\n if request.method == 'GET':\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if new:\n form.new_name.data = new.name\n form.new_email.data = new.email\n form.new_password.data = new.password\n form.new_town.data = new.town\n form.new_phone.data = new.phone\n if form.validate_on_submit():\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if new:\n if not chek_password_combination(form.new_password.data):\n return render_template('edit_profile.html', form=form,\n title='Регистрация', pass_message='Слишком слабый пароль')\n if form.new_password.data != form.new_password_again.data:\n return render_template('edit_profile.html', name=\n current_user.name, email=current_user.email, password=\n current_user.password, town=current_user.town, phone=\n current_user.phone, message='Пароли не совпадают',\n title='Редактирование профиля', form=form)\n if not check_phone(form.new_phone.data)[0]:\n return render_template('edit_profile.html', form=form,\n title='Регистрация', phone_message=check_phone(form.\n new_phone.data)[1])\n if sessions.query(users.User).filter(users.User.email == form.\n new_email.data, form.new_email.data != current_user.email\n ).first():\n return render_template('edit_profile.html', form=form,\n title='Регистрация', email_message=\n 'Пользователь с такой почтой уже существует')\n else:\n new.name = form.new_name.data\n new.email = form.new_email.data\n new.password = form.new_password.data\n new.town = form.new_town.data\n new.phone = form.new_phone.data\n sessions.commit()\n return redirect(f'/profile/{current_user.id}')\n else:\n abort(404)\n return render_template('edit_profile.html', name=current_user.name,\n email=current_user.email, password=current_user.password, town=\n current_user.town, phone=current_user.phone, kolvo=kolvo, title=\n 'Редактирование профиля', form=form)\n\n\[email protected]('/add_obj', methods=['GET', 'POST'])\n@login_required\ndef 
add_obj():\n form = ObjectsForm()\n if form.validate_on_submit():\n sessions = db_session.create_session()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n obj = objects.Object()\n obj.name = form.name.data\n obj.name_for_find = form.name.data.lower()\n if form.price.data > 10000000000:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку')\n elif form.price.data <= 0:\n print(form.price.data)\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n current_user.objects.append(obj)\n sessions.merge(current_user)\n sessions.commit()\n return redirect('/')\n return render_template('add_objects.html', title='Новое объявление',\n form=form, files=files, id=None)\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n sessions = db_session.create_session()\n user = sessions.query(users.User).filter(users.User.email == form.\n email.data).first()\n if user and user.password == form.password.data:\n if user.block:\n return render_template('login.html', message=\n 'Ваша страница заблокирована за нарушение правил сайта.',\n title='Вход', form=form)\n login_user(user, remember=form.remember_me.data)\n return redirect('/')\n return render_template('login.html', message=\n 'Неправильный логин или пароль', title='Вход', form=form)\n return render_template('login.html', title='Вход', form=form)\n\n\[email protected]('/logout')\ndef logout():\n logout_user()\n return redirect('/')\n\n\[email protected]('/block/<int:id>')\ndef block(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.block = True\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/unblock/<int:id>')\ndef unblock(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.block = False\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/promote/<int:id>')\ndef promote(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 1\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/drop/<int:id>')\ndef drop(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 0\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/register', methods=['GET', 'POST'])\ndef reqister():\n form = RegisterForm()\n if form.validate_on_submit():\n if not chek_password_combination(form.password.data):\n return render_template('register.html', form=form, title=\n 'Регистрация', pass_message='Слишком слабый пароль')\n if form.password.data != form.password_again.data:\n return render_template('register.html', form=form, title=\n 'Регистрация', pass_message='Пароли не совпадают')\n if not check_phone(form.phone.data)[0]:\n return 
render_template('register.html', form=form, title=\n 'Регистрация', phone_message=check_phone(form.phone.data)[1])\n sessions = db_session.create_session()\n if sessions.query(users.User).filter(users.User.email == form.email\n .data).first():\n return render_template('register.html', form=form, title=\n 'Регистрация', email_message=\n 'Пользователь с такой почтой уже существует')\n user = users.User(name=form.name.data, email=form.email.data,\n password=form.password.data, town=form.town.data, phone=form.\n phone.data)\n user.set_password(form.password.data)\n sessions.add(user)\n sessions.commit()\n return redirect('/login')\n return render_template('register.html', title='Регистрация', form=form)\n\n\[email protected]('/rules', methods=['GET', 'POST'])\ndef rules():\n return render_template('rules.html', title='Правила')\n\n\[email protected]('/', methods=['GET', 'POST'])\[email protected]('/index/<category>', methods=['GET', 'POST'])\ndef main_page(category='Всекатегории'):\n form = FindObjectForm()\n sort_asc_form = SortAscending()\n sort_desc_form = SortDescending()\n sessions = db_session.create_session()\n what_we_want_to_find = ''\n if sort_desc_form.sort_descending.data:\n if category != 'Всекатегории':\n objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0, objects.Object.category == category).order_by(\n objects.Object.price.desc())\n else:\n objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0).order_by(objects.Object.price.desc())\n return render_template('main_page.html', category=category,\n current_user=current_user, title='DinoTrade', objects=objs,\n form=form, sort_asc_form=sort_asc_form, sort_desc_form=\n sort_desc_form, name='', find=False)\n if sort_asc_form.sort_ascending.data:\n if category != 'Всекатегории':\n objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0, objects.Object.category == category).order_by(\n objects.Object.price)\n else:\n objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0).order_by(objects.Object.price)\n return render_template('main_page.html', category=category,\n current_user=current_user, title='DinoTrade', objects=objs,\n form=form, sort_asc_form=sort_asc_form, sort_desc_form=\n sort_desc_form, name='', find=False)\n if form.find_line.data:\n what_we_want_to_find = form.find_line.data\n if request.method == 'GET':\n if category != 'Всекатегории':\n objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0, objects.Object.category == category)\n else:\n objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0)\n return render_template('main_page.html', category=category,\n current_user=current_user, title='DinoTrade', objects=objs,\n form=form, sort_asc_form=sort_asc_form, sort_desc_form=\n sort_desc_form, name='', find=False)\n if form.validate_on_submit():\n objs = sessions.query(objects.Object).filter(objects.Object.\n name_for_find.like(f'%{what_we_want_to_find.lower()}%'), \n objects.Object.sold == 0)\n return render_template('main_page.html', category=category,\n current_user=current_user, title='DinoTrade', sort_asc_form=\n sort_asc_form, sort_desc_form=sort_desc_form, objects=objs,\n form=form)\n\n\nif __name__ == '__main__':\n app.run(port=8080, host='127.0.0.1')\n",
"<import token>\ndb_session.global_init('db/blogs.sqlite')\napp = Flask(__name__)\napp.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024\napp.config['SECRET_KEY'] = 'DinoTradeTheBest123_secret_key'\napi = Api(app)\napi.add_resource(objects_resorce.ObjectsListResource, '/api/v0.1/objects')\napi.add_resource(objects_resorce.ObjResource, '/api/v0.1/objects/<int:obj_id>')\napi.add_resource(users_resource.UsersListResource, '/api/v0.1/users')\napi.add_resource(users_resource.UsersResource, '/api/v0.1/users/<int:user_id>')\nALLOWED_TYPES = ['jpg', 'png', 'jpeg', 'gif']\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\nUPLOAD_FOLDER = os.getcwd() + '/static/img'\nfiles = []\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n\ndef return_date(user):\n months = {(1): 'января', (2): 'февраля', (3): 'марта', (4): 'апреля', (\n 5): 'мая', (6): 'июня', (7): 'июля', (8): 'августа', (9):\n 'сентября', (10): 'октября', (11): 'ноября', (12): 'декабря'}\n return (\n f'{user.created_date.date().day} {months[user.created_date.date().month]} {user.created_date.date().year} года'\n )\n\n\ndef log():\n logging.info('Info')\n logging.warning('Warning')\n logging.error('Error')\n logging.critical('Critical or Fatal')\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n sessions = db_session.create_session()\n return sessions.query(users.User).get(user_id)\n\n\[email protected](404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)\n\n\ndef open_file(id, type):\n file = request.files['file']\n if file.filename.split('.')[-1] not in ALLOWED_TYPES:\n return False, False\n if type == 'avatar':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/avatar_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return filename\n if type == 'object':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/object_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return file, '../' + '/'.join(filename.split('/')[-4:])\n\n\[email protected]('/object_delete/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef object_delete(id):\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id).first(\n )\n if obj:\n sessions.delete(obj)\n sessions.commit()\n else:\n abort(404)\n return redirect('/')\n\n\[email protected]('/edit_object/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_obj(id):\n form = ObjectsForm()\n if request.method == 'GET':\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if obj:\n form.name.data = obj.name\n form.price.data = obj.price\n form.description.data = obj.description\n form.category.data = obj.category\n form.sold.data = obj.sold\n else:\n abort(404)\n if form.validate_on_submit():\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n if obj:\n obj.name = form.name.data\n if form.price.data > 10000000000:\n return render_template('add_objects.html', 
title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку'\n )\n elif form.price.data < 0:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.name_for_find = form.name.data.lower()\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n sessions.commit()\n return redirect('/')\n else:\n abort(404)\n return render_template('add_objects.html', title=\n 'Редактирование объекта', form=form, id=id)\n\n\[email protected]('/change_avatar', methods=['GET', 'POST'])\n@login_required\ndef change_avatar():\n if request.method == 'POST':\n session = db_session.create_session()\n filename = open_file(current_user.id, 'avatar')\n if not filename[0]:\n files = current_user.avatar\n return render_template('change_avatar.html', title=\n 'Смена аватарки', files=files)\n current_user.avatar = '../' + '/'.join(filename.split('/')[-4:])\n session.merge(current_user)\n session.commit()\n files = current_user.avatar\n return render_template('change_avatar.html', title='Смена аватарки',\n files=files)\n\n\[email protected]('/users_list')\n@login_required\ndef users_list():\n sessions = db_session.create_session()\n users_list = sessions.query(users.User).all()\n return render_template('users_list.html', users_list=users_list, title=\n 'Список всех пользователей')\n\n\[email protected]('/profile/<int:id>')\ndef profile(id):\n sessions = db_session.create_session()\n user = sessions.query(users.User).filter(users.User.id == id).first()\n not_sold_objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0, objects.Object.user_id == user.id)\n sold_objs = sessions.query(objects.Object).filter(objects.Object.sold ==\n 1, objects.Object.user_id == user.id)\n if request.method == 'POST':\n session = db_session.create_session()\n filename = open_file(current_user.id, 'avatar')\n user.avatar = '../' + '/'.join(filename.split('/')[-4:])\n session.merge(user)\n session.commit()\n files = user.avatar\n if str(user.objects) == '[]':\n kolvo = 0\n else:\n kolvo = len(str(user.objects).split('|, '))\n return render_template('profile_page.html', kolvo=kolvo, title=user.\n name, files=files, id=id, user=user, not_sold_objs=not_sold_objs,\n sold_objs=sold_objs, date=return_date(user))\n\n\[email protected]('/confirm_password/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef confirm_password(id):\n form = ConfirmPasswordForm()\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if form.validate_on_submit():\n if form.password.data == new.password:\n return redirect(f'/edit_profile/{new.id}')\n else:\n return render_template('confirm_password.html', message=\n 'Неправильный пароль', title='Подтверждение пароля', form=form)\n return render_template('confirm_password.html', title=\n 'Подтверждение пароля', form=form)\n\n\[email protected]('/obj/<int:id>', methods=['GET', 'POST'])\ndef show_obj(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n if request.method == 'POST':\n file, filename = open_file(id, 'object')\n if not file:\n files = obj.pictures.split()\n return render_template('object_page.html', files=files, author=\n obj.user, object=obj, title=f'Объявление {obj.name}', date=\n return_date(obj.user))\n if 
obj:\n if filename not in obj.pictures:\n obj.pictures = str(obj.pictures) + ' ' + filename + ' '\n session.merge(obj)\n session.commit()\n files = obj.pictures.split()\n return render_template('object_page.html', files=files, author=obj.user,\n object=obj, title=f'Объявление {obj.name}', date=return_date(obj.user))\n\n\[email protected]('/object_delete_photos/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef delete_photos(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n obj.pictures = ' '\n session.commit()\n return redirect(f'/obj/{id}')\n\n\[email protected]('/edit_profile/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_profile(id):\n if str(current_user.objects) == '[]':\n kolvo = 0\n else:\n kolvo = len(str(current_user.objects).split('|, '))\n form = EditProfileForm()\n if request.method == 'GET':\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if new:\n form.new_name.data = new.name\n form.new_email.data = new.email\n form.new_password.data = new.password\n form.new_town.data = new.town\n form.new_phone.data = new.phone\n if form.validate_on_submit():\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if new:\n if not chek_password_combination(form.new_password.data):\n return render_template('edit_profile.html', form=form,\n title='Регистрация', pass_message='Слишком слабый пароль')\n if form.new_password.data != form.new_password_again.data:\n return render_template('edit_profile.html', name=\n current_user.name, email=current_user.email, password=\n current_user.password, town=current_user.town, phone=\n current_user.phone, message='Пароли не совпадают',\n title='Редактирование профиля', form=form)\n if not check_phone(form.new_phone.data)[0]:\n return render_template('edit_profile.html', form=form,\n title='Регистрация', phone_message=check_phone(form.\n new_phone.data)[1])\n if sessions.query(users.User).filter(users.User.email == form.\n new_email.data, form.new_email.data != current_user.email\n ).first():\n return render_template('edit_profile.html', form=form,\n title='Регистрация', email_message=\n 'Пользователь с такой почтой уже существует')\n else:\n new.name = form.new_name.data\n new.email = form.new_email.data\n new.password = form.new_password.data\n new.town = form.new_town.data\n new.phone = form.new_phone.data\n sessions.commit()\n return redirect(f'/profile/{current_user.id}')\n else:\n abort(404)\n return render_template('edit_profile.html', name=current_user.name,\n email=current_user.email, password=current_user.password, town=\n current_user.town, phone=current_user.phone, kolvo=kolvo, title=\n 'Редактирование профиля', form=form)\n\n\[email protected]('/add_obj', methods=['GET', 'POST'])\n@login_required\ndef add_obj():\n form = ObjectsForm()\n if form.validate_on_submit():\n sessions = db_session.create_session()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n obj = objects.Object()\n obj.name = form.name.data\n obj.name_for_find = form.name.data.lower()\n if form.price.data > 10000000000:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку')\n elif 
form.price.data <= 0:\n print(form.price.data)\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n current_user.objects.append(obj)\n sessions.merge(current_user)\n sessions.commit()\n return redirect('/')\n return render_template('add_objects.html', title='Новое объявление',\n form=form, files=files, id=None)\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n sessions = db_session.create_session()\n user = sessions.query(users.User).filter(users.User.email == form.\n email.data).first()\n if user and user.password == form.password.data:\n if user.block:\n return render_template('login.html', message=\n 'Ваша страница заблокирована за нарушение правил сайта.',\n title='Вход', form=form)\n login_user(user, remember=form.remember_me.data)\n return redirect('/')\n return render_template('login.html', message=\n 'Неправильный логин или пароль', title='Вход', form=form)\n return render_template('login.html', title='Вход', form=form)\n\n\[email protected]('/logout')\ndef logout():\n logout_user()\n return redirect('/')\n\n\[email protected]('/block/<int:id>')\ndef block(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.block = True\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/unblock/<int:id>')\ndef unblock(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.block = False\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/promote/<int:id>')\ndef promote(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 1\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/drop/<int:id>')\ndef drop(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 0\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/register', methods=['GET', 'POST'])\ndef reqister():\n form = RegisterForm()\n if form.validate_on_submit():\n if not chek_password_combination(form.password.data):\n return render_template('register.html', form=form, title=\n 'Регистрация', pass_message='Слишком слабый пароль')\n if form.password.data != form.password_again.data:\n return render_template('register.html', form=form, title=\n 'Регистрация', pass_message='Пароли не совпадают')\n if not check_phone(form.phone.data)[0]:\n return render_template('register.html', form=form, title=\n 'Регистрация', phone_message=check_phone(form.phone.data)[1])\n sessions = db_session.create_session()\n if sessions.query(users.User).filter(users.User.email == form.email\n .data).first():\n return render_template('register.html', form=form, title=\n 'Регистрация', email_message=\n 'Пользователь с такой почтой уже существует')\n user = users.User(name=form.name.data, email=form.email.data,\n password=form.password.data, town=form.town.data, phone=form.\n phone.data)\n user.set_password(form.password.data)\n sessions.add(user)\n sessions.commit()\n return redirect('/login')\n return 
render_template('register.html', title='Регистрация', form=form)\n\n\[email protected]('/rules', methods=['GET', 'POST'])\ndef rules():\n return render_template('rules.html', title='Правила')\n\n\[email protected]('/', methods=['GET', 'POST'])\[email protected]('/index/<category>', methods=['GET', 'POST'])\ndef main_page(category='Всекатегории'):\n form = FindObjectForm()\n sort_asc_form = SortAscending()\n sort_desc_form = SortDescending()\n sessions = db_session.create_session()\n what_we_want_to_find = ''\n if sort_desc_form.sort_descending.data:\n if category != 'Всекатегории':\n objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0, objects.Object.category == category).order_by(\n objects.Object.price.desc())\n else:\n objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0).order_by(objects.Object.price.desc())\n return render_template('main_page.html', category=category,\n current_user=current_user, title='DinoTrade', objects=objs,\n form=form, sort_asc_form=sort_asc_form, sort_desc_form=\n sort_desc_form, name='', find=False)\n if sort_asc_form.sort_ascending.data:\n if category != 'Всекатегории':\n objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0, objects.Object.category == category).order_by(\n objects.Object.price)\n else:\n objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0).order_by(objects.Object.price)\n return render_template('main_page.html', category=category,\n current_user=current_user, title='DinoTrade', objects=objs,\n form=form, sort_asc_form=sort_asc_form, sort_desc_form=\n sort_desc_form, name='', find=False)\n if form.find_line.data:\n what_we_want_to_find = form.find_line.data\n if request.method == 'GET':\n if category != 'Всекатегории':\n objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0, objects.Object.category == category)\n else:\n objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0)\n return render_template('main_page.html', category=category,\n current_user=current_user, title='DinoTrade', objects=objs,\n form=form, sort_asc_form=sort_asc_form, sort_desc_form=\n sort_desc_form, name='', find=False)\n if form.validate_on_submit():\n objs = sessions.query(objects.Object).filter(objects.Object.\n name_for_find.like(f'%{what_we_want_to_find.lower()}%'), \n objects.Object.sold == 0)\n return render_template('main_page.html', category=category,\n current_user=current_user, title='DinoTrade', sort_asc_form=\n sort_asc_form, sort_desc_form=sort_desc_form, objects=objs,\n form=form)\n\n\nif __name__ == '__main__':\n app.run(port=8080, host='127.0.0.1')\n",
"<import token>\ndb_session.global_init('db/blogs.sqlite')\n<assignment token>\napi.add_resource(objects_resorce.ObjectsListResource, '/api/v0.1/objects')\napi.add_resource(objects_resorce.ObjResource, '/api/v0.1/objects/<int:obj_id>')\napi.add_resource(users_resource.UsersListResource, '/api/v0.1/users')\napi.add_resource(users_resource.UsersResource, '/api/v0.1/users/<int:user_id>')\n<assignment token>\nlogin_manager.init_app(app)\n<assignment token>\n\n\ndef return_date(user):\n months = {(1): 'января', (2): 'февраля', (3): 'марта', (4): 'апреля', (\n 5): 'мая', (6): 'июня', (7): 'июля', (8): 'августа', (9):\n 'сентября', (10): 'октября', (11): 'ноября', (12): 'декабря'}\n return (\n f'{user.created_date.date().day} {months[user.created_date.date().month]} {user.created_date.date().year} года'\n )\n\n\ndef log():\n logging.info('Info')\n logging.warning('Warning')\n logging.error('Error')\n logging.critical('Critical or Fatal')\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n sessions = db_session.create_session()\n return sessions.query(users.User).get(user_id)\n\n\[email protected](404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)\n\n\ndef open_file(id, type):\n file = request.files['file']\n if file.filename.split('.')[-1] not in ALLOWED_TYPES:\n return False, False\n if type == 'avatar':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/avatar_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return filename\n if type == 'object':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/object_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return file, '../' + '/'.join(filename.split('/')[-4:])\n\n\[email protected]('/object_delete/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef object_delete(id):\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id).first(\n )\n if obj:\n sessions.delete(obj)\n sessions.commit()\n else:\n abort(404)\n return redirect('/')\n\n\[email protected]('/edit_object/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_obj(id):\n form = ObjectsForm()\n if request.method == 'GET':\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if obj:\n form.name.data = obj.name\n form.price.data = obj.price\n form.description.data = obj.description\n form.category.data = obj.category\n form.sold.data = obj.sold\n else:\n abort(404)\n if form.validate_on_submit():\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n if obj:\n obj.name = form.name.data\n if form.price.data > 10000000000:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку'\n )\n elif form.price.data < 0:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n 
incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.name_for_find = form.name.data.lower()\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n sessions.commit()\n return redirect('/')\n else:\n abort(404)\n return render_template('add_objects.html', title=\n 'Редактирование объекта', form=form, id=id)\n\n\[email protected]('/change_avatar', methods=['GET', 'POST'])\n@login_required\ndef change_avatar():\n if request.method == 'POST':\n session = db_session.create_session()\n filename = open_file(current_user.id, 'avatar')\n if not filename[0]:\n files = current_user.avatar\n return render_template('change_avatar.html', title=\n 'Смена аватарки', files=files)\n current_user.avatar = '../' + '/'.join(filename.split('/')[-4:])\n session.merge(current_user)\n session.commit()\n files = current_user.avatar\n return render_template('change_avatar.html', title='Смена аватарки',\n files=files)\n\n\[email protected]('/users_list')\n@login_required\ndef users_list():\n sessions = db_session.create_session()\n users_list = sessions.query(users.User).all()\n return render_template('users_list.html', users_list=users_list, title=\n 'Список всех пользователей')\n\n\[email protected]('/profile/<int:id>')\ndef profile(id):\n sessions = db_session.create_session()\n user = sessions.query(users.User).filter(users.User.id == id).first()\n not_sold_objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0, objects.Object.user_id == user.id)\n sold_objs = sessions.query(objects.Object).filter(objects.Object.sold ==\n 1, objects.Object.user_id == user.id)\n if request.method == 'POST':\n session = db_session.create_session()\n filename = open_file(current_user.id, 'avatar')\n user.avatar = '../' + '/'.join(filename.split('/')[-4:])\n session.merge(user)\n session.commit()\n files = user.avatar\n if str(user.objects) == '[]':\n kolvo = 0\n else:\n kolvo = len(str(user.objects).split('|, '))\n return render_template('profile_page.html', kolvo=kolvo, title=user.\n name, files=files, id=id, user=user, not_sold_objs=not_sold_objs,\n sold_objs=sold_objs, date=return_date(user))\n\n\[email protected]('/confirm_password/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef confirm_password(id):\n form = ConfirmPasswordForm()\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if form.validate_on_submit():\n if form.password.data == new.password:\n return redirect(f'/edit_profile/{new.id}')\n else:\n return render_template('confirm_password.html', message=\n 'Неправильный пароль', title='Подтверждение пароля', form=form)\n return render_template('confirm_password.html', title=\n 'Подтверждение пароля', form=form)\n\n\[email protected]('/obj/<int:id>', methods=['GET', 'POST'])\ndef show_obj(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n if request.method == 'POST':\n file, filename = open_file(id, 'object')\n if not file:\n files = obj.pictures.split()\n return render_template('object_page.html', files=files, author=\n obj.user, object=obj, title=f'Объявление {obj.name}', date=\n return_date(obj.user))\n if obj:\n if filename not in obj.pictures:\n obj.pictures = str(obj.pictures) + ' ' + filename + ' '\n session.merge(obj)\n session.commit()\n files = obj.pictures.split()\n return render_template('object_page.html', files=files, author=obj.user,\n object=obj, title=f'Объявление 
{obj.name}', date=return_date(obj.user))\n\n\[email protected]('/object_delete_photos/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef delete_photos(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n obj.pictures = ' '\n session.commit()\n return redirect(f'/obj/{id}')\n\n\[email protected]('/edit_profile/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_profile(id):\n if str(current_user.objects) == '[]':\n kolvo = 0\n else:\n kolvo = len(str(current_user.objects).split('|, '))\n form = EditProfileForm()\n if request.method == 'GET':\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if new:\n form.new_name.data = new.name\n form.new_email.data = new.email\n form.new_password.data = new.password\n form.new_town.data = new.town\n form.new_phone.data = new.phone\n if form.validate_on_submit():\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if new:\n if not chek_password_combination(form.new_password.data):\n return render_template('edit_profile.html', form=form,\n title='Регистрация', pass_message='Слишком слабый пароль')\n if form.new_password.data != form.new_password_again.data:\n return render_template('edit_profile.html', name=\n current_user.name, email=current_user.email, password=\n current_user.password, town=current_user.town, phone=\n current_user.phone, message='Пароли не совпадают',\n title='Редактирование профиля', form=form)\n if not check_phone(form.new_phone.data)[0]:\n return render_template('edit_profile.html', form=form,\n title='Регистрация', phone_message=check_phone(form.\n new_phone.data)[1])\n if sessions.query(users.User).filter(users.User.email == form.\n new_email.data, form.new_email.data != current_user.email\n ).first():\n return render_template('edit_profile.html', form=form,\n title='Регистрация', email_message=\n 'Пользователь с такой почтой уже существует')\n else:\n new.name = form.new_name.data\n new.email = form.new_email.data\n new.password = form.new_password.data\n new.town = form.new_town.data\n new.phone = form.new_phone.data\n sessions.commit()\n return redirect(f'/profile/{current_user.id}')\n else:\n abort(404)\n return render_template('edit_profile.html', name=current_user.name,\n email=current_user.email, password=current_user.password, town=\n current_user.town, phone=current_user.phone, kolvo=kolvo, title=\n 'Редактирование профиля', form=form)\n\n\[email protected]('/add_obj', methods=['GET', 'POST'])\n@login_required\ndef add_obj():\n form = ObjectsForm()\n if form.validate_on_submit():\n sessions = db_session.create_session()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n obj = objects.Object()\n obj.name = form.name.data\n obj.name_for_find = form.name.data.lower()\n if form.price.data > 10000000000:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку')\n elif form.price.data <= 0:\n print(form.price.data)\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.description = form.description.data\n obj.category = 
form.category.data\n obj.sold = form.sold.data\n current_user.objects.append(obj)\n sessions.merge(current_user)\n sessions.commit()\n return redirect('/')\n return render_template('add_objects.html', title='Новое объявление',\n form=form, files=files, id=None)\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n sessions = db_session.create_session()\n user = sessions.query(users.User).filter(users.User.email == form.\n email.data).first()\n if user and user.password == form.password.data:\n if user.block:\n return render_template('login.html', message=\n 'Ваша страница заблокирована за нарушение правил сайта.',\n title='Вход', form=form)\n login_user(user, remember=form.remember_me.data)\n return redirect('/')\n return render_template('login.html', message=\n 'Неправильный логин или пароль', title='Вход', form=form)\n return render_template('login.html', title='Вход', form=form)\n\n\[email protected]('/logout')\ndef logout():\n logout_user()\n return redirect('/')\n\n\[email protected]('/block/<int:id>')\ndef block(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.block = True\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/unblock/<int:id>')\ndef unblock(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.block = False\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/promote/<int:id>')\ndef promote(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 1\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/drop/<int:id>')\ndef drop(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 0\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/register', methods=['GET', 'POST'])\ndef reqister():\n form = RegisterForm()\n if form.validate_on_submit():\n if not chek_password_combination(form.password.data):\n return render_template('register.html', form=form, title=\n 'Регистрация', pass_message='Слишком слабый пароль')\n if form.password.data != form.password_again.data:\n return render_template('register.html', form=form, title=\n 'Регистрация', pass_message='Пароли не совпадают')\n if not check_phone(form.phone.data)[0]:\n return render_template('register.html', form=form, title=\n 'Регистрация', phone_message=check_phone(form.phone.data)[1])\n sessions = db_session.create_session()\n if sessions.query(users.User).filter(users.User.email == form.email\n .data).first():\n return render_template('register.html', form=form, title=\n 'Регистрация', email_message=\n 'Пользователь с такой почтой уже существует')\n user = users.User(name=form.name.data, email=form.email.data,\n password=form.password.data, town=form.town.data, phone=form.\n phone.data)\n user.set_password(form.password.data)\n sessions.add(user)\n sessions.commit()\n return redirect('/login')\n return render_template('register.html', title='Регистрация', form=form)\n\n\[email protected]('/rules', methods=['GET', 'POST'])\ndef rules():\n return render_template('rules.html', title='Правила')\n\n\[email protected]('/', methods=['GET', 'POST'])\[email protected]('/index/<category>', 
methods=['GET', 'POST'])\ndef main_page(category='Всекатегории'):\n form = FindObjectForm()\n sort_asc_form = SortAscending()\n sort_desc_form = SortDescending()\n sessions = db_session.create_session()\n what_we_want_to_find = ''\n if sort_desc_form.sort_descending.data:\n if category != 'Всекатегории':\n objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0, objects.Object.category == category).order_by(\n objects.Object.price.desc())\n else:\n objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0).order_by(objects.Object.price.desc())\n return render_template('main_page.html', category=category,\n current_user=current_user, title='DinoTrade', objects=objs,\n form=form, sort_asc_form=sort_asc_form, sort_desc_form=\n sort_desc_form, name='', find=False)\n if sort_asc_form.sort_ascending.data:\n if category != 'Всекатегории':\n objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0, objects.Object.category == category).order_by(\n objects.Object.price)\n else:\n objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0).order_by(objects.Object.price)\n return render_template('main_page.html', category=category,\n current_user=current_user, title='DinoTrade', objects=objs,\n form=form, sort_asc_form=sort_asc_form, sort_desc_form=\n sort_desc_form, name='', find=False)\n if form.find_line.data:\n what_we_want_to_find = form.find_line.data\n if request.method == 'GET':\n if category != 'Всекатегории':\n objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0, objects.Object.category == category)\n else:\n objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0)\n return render_template('main_page.html', category=category,\n current_user=current_user, title='DinoTrade', objects=objs,\n form=form, sort_asc_form=sort_asc_form, sort_desc_form=\n sort_desc_form, name='', find=False)\n if form.validate_on_submit():\n objs = sessions.query(objects.Object).filter(objects.Object.\n name_for_find.like(f'%{what_we_want_to_find.lower()}%'), \n objects.Object.sold == 0)\n return render_template('main_page.html', category=category,\n current_user=current_user, title='DinoTrade', sort_asc_form=\n sort_asc_form, sort_desc_form=sort_desc_form, objects=objs,\n form=form)\n\n\nif __name__ == '__main__':\n app.run(port=8080, host='127.0.0.1')\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef return_date(user):\n months = {(1): 'января', (2): 'февраля', (3): 'марта', (4): 'апреля', (\n 5): 'мая', (6): 'июня', (7): 'июля', (8): 'августа', (9):\n 'сентября', (10): 'октября', (11): 'ноября', (12): 'декабря'}\n return (\n f'{user.created_date.date().day} {months[user.created_date.date().month]} {user.created_date.date().year} года'\n )\n\n\ndef log():\n logging.info('Info')\n logging.warning('Warning')\n logging.error('Error')\n logging.critical('Critical or Fatal')\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n sessions = db_session.create_session()\n return sessions.query(users.User).get(user_id)\n\n\[email protected](404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)\n\n\ndef open_file(id, type):\n file = request.files['file']\n if file.filename.split('.')[-1] not in ALLOWED_TYPES:\n return False, False\n if type == 'avatar':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/avatar_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return filename\n if type == 'object':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/object_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return file, '../' + '/'.join(filename.split('/')[-4:])\n\n\[email protected]('/object_delete/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef object_delete(id):\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id).first(\n )\n if obj:\n sessions.delete(obj)\n sessions.commit()\n else:\n abort(404)\n return redirect('/')\n\n\[email protected]('/edit_object/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_obj(id):\n form = ObjectsForm()\n if request.method == 'GET':\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if obj:\n form.name.data = obj.name\n form.price.data = obj.price\n form.description.data = obj.description\n form.category.data = obj.category\n form.sold.data = obj.sold\n else:\n abort(404)\n if form.validate_on_submit():\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n if obj:\n obj.name = form.name.data\n if form.price.data > 10000000000:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку'\n )\n elif form.price.data < 0:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.name_for_find = form.name.data.lower()\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n sessions.commit()\n return redirect('/')\n else:\n abort(404)\n return render_template('add_objects.html', 
title=\n 'Редактирование объекта', form=form, id=id)\n\n\[email protected]('/change_avatar', methods=['GET', 'POST'])\n@login_required\ndef change_avatar():\n if request.method == 'POST':\n session = db_session.create_session()\n filename = open_file(current_user.id, 'avatar')\n if not filename[0]:\n files = current_user.avatar\n return render_template('change_avatar.html', title=\n 'Смена аватарки', files=files)\n current_user.avatar = '../' + '/'.join(filename.split('/')[-4:])\n session.merge(current_user)\n session.commit()\n files = current_user.avatar\n return render_template('change_avatar.html', title='Смена аватарки',\n files=files)\n\n\[email protected]('/users_list')\n@login_required\ndef users_list():\n sessions = db_session.create_session()\n users_list = sessions.query(users.User).all()\n return render_template('users_list.html', users_list=users_list, title=\n 'Список всех пользователей')\n\n\[email protected]('/profile/<int:id>')\ndef profile(id):\n sessions = db_session.create_session()\n user = sessions.query(users.User).filter(users.User.id == id).first()\n not_sold_objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0, objects.Object.user_id == user.id)\n sold_objs = sessions.query(objects.Object).filter(objects.Object.sold ==\n 1, objects.Object.user_id == user.id)\n if request.method == 'POST':\n session = db_session.create_session()\n filename = open_file(current_user.id, 'avatar')\n user.avatar = '../' + '/'.join(filename.split('/')[-4:])\n session.merge(user)\n session.commit()\n files = user.avatar\n if str(user.objects) == '[]':\n kolvo = 0\n else:\n kolvo = len(str(user.objects).split('|, '))\n return render_template('profile_page.html', kolvo=kolvo, title=user.\n name, files=files, id=id, user=user, not_sold_objs=not_sold_objs,\n sold_objs=sold_objs, date=return_date(user))\n\n\[email protected]('/confirm_password/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef confirm_password(id):\n form = ConfirmPasswordForm()\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if form.validate_on_submit():\n if form.password.data == new.password:\n return redirect(f'/edit_profile/{new.id}')\n else:\n return render_template('confirm_password.html', message=\n 'Неправильный пароль', title='Подтверждение пароля', form=form)\n return render_template('confirm_password.html', title=\n 'Подтверждение пароля', form=form)\n\n\[email protected]('/obj/<int:id>', methods=['GET', 'POST'])\ndef show_obj(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n if request.method == 'POST':\n file, filename = open_file(id, 'object')\n if not file:\n files = obj.pictures.split()\n return render_template('object_page.html', files=files, author=\n obj.user, object=obj, title=f'Объявление {obj.name}', date=\n return_date(obj.user))\n if obj:\n if filename not in obj.pictures:\n obj.pictures = str(obj.pictures) + ' ' + filename + ' '\n session.merge(obj)\n session.commit()\n files = obj.pictures.split()\n return render_template('object_page.html', files=files, author=obj.user,\n object=obj, title=f'Объявление {obj.name}', date=return_date(obj.user))\n\n\[email protected]('/object_delete_photos/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef delete_photos(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n obj.pictures = ' '\n session.commit()\n return 
redirect(f'/obj/{id}')\n\n\[email protected]('/edit_profile/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_profile(id):\n if str(current_user.objects) == '[]':\n kolvo = 0\n else:\n kolvo = len(str(current_user.objects).split('|, '))\n form = EditProfileForm()\n if request.method == 'GET':\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if new:\n form.new_name.data = new.name\n form.new_email.data = new.email\n form.new_password.data = new.password\n form.new_town.data = new.town\n form.new_phone.data = new.phone\n if form.validate_on_submit():\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if new:\n if not chek_password_combination(form.new_password.data):\n return render_template('edit_profile.html', form=form,\n title='Регистрация', pass_message='Слишком слабый пароль')\n if form.new_password.data != form.new_password_again.data:\n return render_template('edit_profile.html', name=\n current_user.name, email=current_user.email, password=\n current_user.password, town=current_user.town, phone=\n current_user.phone, message='Пароли не совпадают',\n title='Редактирование профиля', form=form)\n if not check_phone(form.new_phone.data)[0]:\n return render_template('edit_profile.html', form=form,\n title='Регистрация', phone_message=check_phone(form.\n new_phone.data)[1])\n if sessions.query(users.User).filter(users.User.email == form.\n new_email.data, form.new_email.data != current_user.email\n ).first():\n return render_template('edit_profile.html', form=form,\n title='Регистрация', email_message=\n 'Пользователь с такой почтой уже существует')\n else:\n new.name = form.new_name.data\n new.email = form.new_email.data\n new.password = form.new_password.data\n new.town = form.new_town.data\n new.phone = form.new_phone.data\n sessions.commit()\n return redirect(f'/profile/{current_user.id}')\n else:\n abort(404)\n return render_template('edit_profile.html', name=current_user.name,\n email=current_user.email, password=current_user.password, town=\n current_user.town, phone=current_user.phone, kolvo=kolvo, title=\n 'Редактирование профиля', form=form)\n\n\[email protected]('/add_obj', methods=['GET', 'POST'])\n@login_required\ndef add_obj():\n form = ObjectsForm()\n if form.validate_on_submit():\n sessions = db_session.create_session()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n obj = objects.Object()\n obj.name = form.name.data\n obj.name_for_find = form.name.data.lower()\n if form.price.data > 10000000000:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку')\n elif form.price.data <= 0:\n print(form.price.data)\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n current_user.objects.append(obj)\n sessions.merge(current_user)\n sessions.commit()\n return redirect('/')\n return render_template('add_objects.html', title='Новое объявление',\n form=form, files=files, id=None)\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef 
login():\n form = LoginForm()\n if form.validate_on_submit():\n sessions = db_session.create_session()\n user = sessions.query(users.User).filter(users.User.email == form.\n email.data).first()\n if user and user.password == form.password.data:\n if user.block:\n return render_template('login.html', message=\n 'Ваша страница заблокирована за нарушение правил сайта.',\n title='Вход', form=form)\n login_user(user, remember=form.remember_me.data)\n return redirect('/')\n return render_template('login.html', message=\n 'Неправильный логин или пароль', title='Вход', form=form)\n return render_template('login.html', title='Вход', form=form)\n\n\[email protected]('/logout')\ndef logout():\n logout_user()\n return redirect('/')\n\n\[email protected]('/block/<int:id>')\ndef block(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.block = True\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/unblock/<int:id>')\ndef unblock(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.block = False\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/promote/<int:id>')\ndef promote(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 1\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/drop/<int:id>')\ndef drop(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 0\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/register', methods=['GET', 'POST'])\ndef reqister():\n form = RegisterForm()\n if form.validate_on_submit():\n if not chek_password_combination(form.password.data):\n return render_template('register.html', form=form, title=\n 'Регистрация', pass_message='Слишком слабый пароль')\n if form.password.data != form.password_again.data:\n return render_template('register.html', form=form, title=\n 'Регистрация', pass_message='Пароли не совпадают')\n if not check_phone(form.phone.data)[0]:\n return render_template('register.html', form=form, title=\n 'Регистрация', phone_message=check_phone(form.phone.data)[1])\n sessions = db_session.create_session()\n if sessions.query(users.User).filter(users.User.email == form.email\n .data).first():\n return render_template('register.html', form=form, title=\n 'Регистрация', email_message=\n 'Пользователь с такой почтой уже существует')\n user = users.User(name=form.name.data, email=form.email.data,\n password=form.password.data, town=form.town.data, phone=form.\n phone.data)\n user.set_password(form.password.data)\n sessions.add(user)\n sessions.commit()\n return redirect('/login')\n return render_template('register.html', title='Регистрация', form=form)\n\n\[email protected]('/rules', methods=['GET', 'POST'])\ndef rules():\n return render_template('rules.html', title='Правила')\n\n\[email protected]('/', methods=['GET', 'POST'])\[email protected]('/index/<category>', methods=['GET', 'POST'])\ndef main_page(category='Всекатегории'):\n form = FindObjectForm()\n sort_asc_form = SortAscending()\n sort_desc_form = SortDescending()\n sessions = db_session.create_session()\n what_we_want_to_find = ''\n if sort_desc_form.sort_descending.data:\n if category != 'Всекатегории':\n objs = 
sessions.query(objects.Object).filter(objects.Object.\n sold == 0, objects.Object.category == category).order_by(\n objects.Object.price.desc())\n else:\n objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0).order_by(objects.Object.price.desc())\n return render_template('main_page.html', category=category,\n current_user=current_user, title='DinoTrade', objects=objs,\n form=form, sort_asc_form=sort_asc_form, sort_desc_form=\n sort_desc_form, name='', find=False)\n if sort_asc_form.sort_ascending.data:\n if category != 'Всекатегории':\n objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0, objects.Object.category == category).order_by(\n objects.Object.price)\n else:\n objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0).order_by(objects.Object.price)\n return render_template('main_page.html', category=category,\n current_user=current_user, title='DinoTrade', objects=objs,\n form=form, sort_asc_form=sort_asc_form, sort_desc_form=\n sort_desc_form, name='', find=False)\n if form.find_line.data:\n what_we_want_to_find = form.find_line.data\n if request.method == 'GET':\n if category != 'Всекатегории':\n objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0, objects.Object.category == category)\n else:\n objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0)\n return render_template('main_page.html', category=category,\n current_user=current_user, title='DinoTrade', objects=objs,\n form=form, sort_asc_form=sort_asc_form, sort_desc_form=\n sort_desc_form, name='', find=False)\n if form.validate_on_submit():\n objs = sessions.query(objects.Object).filter(objects.Object.\n name_for_find.like(f'%{what_we_want_to_find.lower()}%'), \n objects.Object.sold == 0)\n return render_template('main_page.html', category=category,\n current_user=current_user, title='DinoTrade', sort_asc_form=\n sort_asc_form, sort_desc_form=sort_desc_form, objects=objs,\n form=form)\n\n\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef return_date(user):\n months = {(1): 'января', (2): 'февраля', (3): 'марта', (4): 'апреля', (\n 5): 'мая', (6): 'июня', (7): 'июля', (8): 'августа', (9):\n 'сентября', (10): 'октября', (11): 'ноября', (12): 'декабря'}\n return (\n f'{user.created_date.date().day} {months[user.created_date.date().month]} {user.created_date.date().year} года'\n )\n\n\ndef log():\n logging.info('Info')\n logging.warning('Warning')\n logging.error('Error')\n logging.critical('Critical or Fatal')\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n sessions = db_session.create_session()\n return sessions.query(users.User).get(user_id)\n\n\[email protected](404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)\n\n\ndef open_file(id, type):\n file = request.files['file']\n if file.filename.split('.')[-1] not in ALLOWED_TYPES:\n return False, False\n if type == 'avatar':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/avatar_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return filename\n if type == 'object':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/object_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return file, '../' + '/'.join(filename.split('/')[-4:])\n\n\[email protected]('/object_delete/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef object_delete(id):\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id).first(\n )\n if obj:\n sessions.delete(obj)\n sessions.commit()\n else:\n abort(404)\n return redirect('/')\n\n\[email protected]('/edit_object/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_obj(id):\n form = ObjectsForm()\n if request.method == 'GET':\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if obj:\n form.name.data = obj.name\n form.price.data = obj.price\n form.description.data = obj.description\n form.category.data = obj.category\n form.sold.data = obj.sold\n else:\n abort(404)\n if form.validate_on_submit():\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n if obj:\n obj.name = form.name.data\n if form.price.data > 10000000000:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку'\n )\n elif form.price.data < 0:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.name_for_find = form.name.data.lower()\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n sessions.commit()\n return redirect('/')\n else:\n abort(404)\n return render_template('add_objects.html', 
title=\n 'Редактирование объекта', form=form, id=id)\n\n\[email protected]('/change_avatar', methods=['GET', 'POST'])\n@login_required\ndef change_avatar():\n if request.method == 'POST':\n session = db_session.create_session()\n filename = open_file(current_user.id, 'avatar')\n if not filename[0]:\n files = current_user.avatar\n return render_template('change_avatar.html', title=\n 'Смена аватарки', files=files)\n current_user.avatar = '../' + '/'.join(filename.split('/')[-4:])\n session.merge(current_user)\n session.commit()\n files = current_user.avatar\n return render_template('change_avatar.html', title='Смена аватарки',\n files=files)\n\n\[email protected]('/users_list')\n@login_required\ndef users_list():\n sessions = db_session.create_session()\n users_list = sessions.query(users.User).all()\n return render_template('users_list.html', users_list=users_list, title=\n 'Список всех пользователей')\n\n\[email protected]('/profile/<int:id>')\ndef profile(id):\n sessions = db_session.create_session()\n user = sessions.query(users.User).filter(users.User.id == id).first()\n not_sold_objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0, objects.Object.user_id == user.id)\n sold_objs = sessions.query(objects.Object).filter(objects.Object.sold ==\n 1, objects.Object.user_id == user.id)\n if request.method == 'POST':\n session = db_session.create_session()\n filename = open_file(current_user.id, 'avatar')\n user.avatar = '../' + '/'.join(filename.split('/')[-4:])\n session.merge(user)\n session.commit()\n files = user.avatar\n if str(user.objects) == '[]':\n kolvo = 0\n else:\n kolvo = len(str(user.objects).split('|, '))\n return render_template('profile_page.html', kolvo=kolvo, title=user.\n name, files=files, id=id, user=user, not_sold_objs=not_sold_objs,\n sold_objs=sold_objs, date=return_date(user))\n\n\[email protected]('/confirm_password/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef confirm_password(id):\n form = ConfirmPasswordForm()\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if form.validate_on_submit():\n if form.password.data == new.password:\n return redirect(f'/edit_profile/{new.id}')\n else:\n return render_template('confirm_password.html', message=\n 'Неправильный пароль', title='Подтверждение пароля', form=form)\n return render_template('confirm_password.html', title=\n 'Подтверждение пароля', form=form)\n\n\[email protected]('/obj/<int:id>', methods=['GET', 'POST'])\ndef show_obj(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n if request.method == 'POST':\n file, filename = open_file(id, 'object')\n if not file:\n files = obj.pictures.split()\n return render_template('object_page.html', files=files, author=\n obj.user, object=obj, title=f'Объявление {obj.name}', date=\n return_date(obj.user))\n if obj:\n if filename not in obj.pictures:\n obj.pictures = str(obj.pictures) + ' ' + filename + ' '\n session.merge(obj)\n session.commit()\n files = obj.pictures.split()\n return render_template('object_page.html', files=files, author=obj.user,\n object=obj, title=f'Объявление {obj.name}', date=return_date(obj.user))\n\n\[email protected]('/object_delete_photos/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef delete_photos(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n obj.pictures = ' '\n session.commit()\n return 
redirect(f'/obj/{id}')\n\n\[email protected]('/edit_profile/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_profile(id):\n if str(current_user.objects) == '[]':\n kolvo = 0\n else:\n kolvo = len(str(current_user.objects).split('|, '))\n form = EditProfileForm()\n if request.method == 'GET':\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if new:\n form.new_name.data = new.name\n form.new_email.data = new.email\n form.new_password.data = new.password\n form.new_town.data = new.town\n form.new_phone.data = new.phone\n if form.validate_on_submit():\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if new:\n if not chek_password_combination(form.new_password.data):\n return render_template('edit_profile.html', form=form,\n title='Регистрация', pass_message='Слишком слабый пароль')\n if form.new_password.data != form.new_password_again.data:\n return render_template('edit_profile.html', name=\n current_user.name, email=current_user.email, password=\n current_user.password, town=current_user.town, phone=\n current_user.phone, message='Пароли не совпадают',\n title='Редактирование профиля', form=form)\n if not check_phone(form.new_phone.data)[0]:\n return render_template('edit_profile.html', form=form,\n title='Регистрация', phone_message=check_phone(form.\n new_phone.data)[1])\n if sessions.query(users.User).filter(users.User.email == form.\n new_email.data, form.new_email.data != current_user.email\n ).first():\n return render_template('edit_profile.html', form=form,\n title='Регистрация', email_message=\n 'Пользователь с такой почтой уже существует')\n else:\n new.name = form.new_name.data\n new.email = form.new_email.data\n new.password = form.new_password.data\n new.town = form.new_town.data\n new.phone = form.new_phone.data\n sessions.commit()\n return redirect(f'/profile/{current_user.id}')\n else:\n abort(404)\n return render_template('edit_profile.html', name=current_user.name,\n email=current_user.email, password=current_user.password, town=\n current_user.town, phone=current_user.phone, kolvo=kolvo, title=\n 'Редактирование профиля', form=form)\n\n\[email protected]('/add_obj', methods=['GET', 'POST'])\n@login_required\ndef add_obj():\n form = ObjectsForm()\n if form.validate_on_submit():\n sessions = db_session.create_session()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n obj = objects.Object()\n obj.name = form.name.data\n obj.name_for_find = form.name.data.lower()\n if form.price.data > 10000000000:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку')\n elif form.price.data <= 0:\n print(form.price.data)\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n current_user.objects.append(obj)\n sessions.merge(current_user)\n sessions.commit()\n return redirect('/')\n return render_template('add_objects.html', title='Новое объявление',\n form=form, files=files, id=None)\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef 
login():\n form = LoginForm()\n if form.validate_on_submit():\n sessions = db_session.create_session()\n user = sessions.query(users.User).filter(users.User.email == form.\n email.data).first()\n if user and user.password == form.password.data:\n if user.block:\n return render_template('login.html', message=\n 'Ваша страница заблокирована за нарушение правил сайта.',\n title='Вход', form=form)\n login_user(user, remember=form.remember_me.data)\n return redirect('/')\n return render_template('login.html', message=\n 'Неправильный логин или пароль', title='Вход', form=form)\n return render_template('login.html', title='Вход', form=form)\n\n\n<function token>\n\n\[email protected]('/block/<int:id>')\ndef block(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.block = True\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/unblock/<int:id>')\ndef unblock(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.block = False\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/promote/<int:id>')\ndef promote(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 1\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/drop/<int:id>')\ndef drop(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 0\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/register', methods=['GET', 'POST'])\ndef reqister():\n form = RegisterForm()\n if form.validate_on_submit():\n if not chek_password_combination(form.password.data):\n return render_template('register.html', form=form, title=\n 'Регистрация', pass_message='Слишком слабый пароль')\n if form.password.data != form.password_again.data:\n return render_template('register.html', form=form, title=\n 'Регистрация', pass_message='Пароли не совпадают')\n if not check_phone(form.phone.data)[0]:\n return render_template('register.html', form=form, title=\n 'Регистрация', phone_message=check_phone(form.phone.data)[1])\n sessions = db_session.create_session()\n if sessions.query(users.User).filter(users.User.email == form.email\n .data).first():\n return render_template('register.html', form=form, title=\n 'Регистрация', email_message=\n 'Пользователь с такой почтой уже существует')\n user = users.User(name=form.name.data, email=form.email.data,\n password=form.password.data, town=form.town.data, phone=form.\n phone.data)\n user.set_password(form.password.data)\n sessions.add(user)\n sessions.commit()\n return redirect('/login')\n return render_template('register.html', title='Регистрация', form=form)\n\n\[email protected]('/rules', methods=['GET', 'POST'])\ndef rules():\n return render_template('rules.html', title='Правила')\n\n\[email protected]('/', methods=['GET', 'POST'])\[email protected]('/index/<category>', methods=['GET', 'POST'])\ndef main_page(category='Всекатегории'):\n form = FindObjectForm()\n sort_asc_form = SortAscending()\n sort_desc_form = SortDescending()\n sessions = db_session.create_session()\n what_we_want_to_find = ''\n if sort_desc_form.sort_descending.data:\n if category != 'Всекатегории':\n objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0, 
objects.Object.category == category).order_by(\n objects.Object.price.desc())\n else:\n objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0).order_by(objects.Object.price.desc())\n return render_template('main_page.html', category=category,\n current_user=current_user, title='DinoTrade', objects=objs,\n form=form, sort_asc_form=sort_asc_form, sort_desc_form=\n sort_desc_form, name='', find=False)\n if sort_asc_form.sort_ascending.data:\n if category != 'Всекатегории':\n objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0, objects.Object.category == category).order_by(\n objects.Object.price)\n else:\n objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0).order_by(objects.Object.price)\n return render_template('main_page.html', category=category,\n current_user=current_user, title='DinoTrade', objects=objs,\n form=form, sort_asc_form=sort_asc_form, sort_desc_form=\n sort_desc_form, name='', find=False)\n if form.find_line.data:\n what_we_want_to_find = form.find_line.data\n if request.method == 'GET':\n if category != 'Всекатегории':\n objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0, objects.Object.category == category)\n else:\n objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0)\n return render_template('main_page.html', category=category,\n current_user=current_user, title='DinoTrade', objects=objs,\n form=form, sort_asc_form=sort_asc_form, sort_desc_form=\n sort_desc_form, name='', find=False)\n if form.validate_on_submit():\n objs = sessions.query(objects.Object).filter(objects.Object.\n name_for_find.like(f'%{what_we_want_to_find.lower()}%'), \n objects.Object.sold == 0)\n return render_template('main_page.html', category=category,\n current_user=current_user, title='DinoTrade', sort_asc_form=\n sort_asc_form, sort_desc_form=sort_desc_form, objects=objs,\n form=form)\n\n\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef return_date(user):\n months = {(1): 'января', (2): 'февраля', (3): 'марта', (4): 'апреля', (\n 5): 'мая', (6): 'июня', (7): 'июля', (8): 'августа', (9):\n 'сентября', (10): 'октября', (11): 'ноября', (12): 'декабря'}\n return (\n f'{user.created_date.date().day} {months[user.created_date.date().month]} {user.created_date.date().year} года'\n )\n\n\ndef log():\n logging.info('Info')\n logging.warning('Warning')\n logging.error('Error')\n logging.critical('Critical or Fatal')\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n sessions = db_session.create_session()\n return sessions.query(users.User).get(user_id)\n\n\[email protected](404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)\n\n\ndef open_file(id, type):\n file = request.files['file']\n if file.filename.split('.')[-1] not in ALLOWED_TYPES:\n return False, False\n if type == 'avatar':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/avatar_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return filename\n if type == 'object':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/object_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return file, '../' + '/'.join(filename.split('/')[-4:])\n\n\[email protected]('/object_delete/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef object_delete(id):\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id).first(\n )\n if obj:\n sessions.delete(obj)\n sessions.commit()\n else:\n abort(404)\n return redirect('/')\n\n\[email protected]('/edit_object/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_obj(id):\n form = ObjectsForm()\n if request.method == 'GET':\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if obj:\n form.name.data = obj.name\n form.price.data = obj.price\n form.description.data = obj.description\n form.category.data = obj.category\n form.sold.data = obj.sold\n else:\n abort(404)\n if form.validate_on_submit():\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n if obj:\n obj.name = form.name.data\n if form.price.data > 10000000000:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку'\n )\n elif form.price.data < 0:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.name_for_find = form.name.data.lower()\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n sessions.commit()\n return redirect('/')\n else:\n abort(404)\n return render_template('add_objects.html', 
title=\n 'Редактирование объекта', form=form, id=id)\n\n\[email protected]('/change_avatar', methods=['GET', 'POST'])\n@login_required\ndef change_avatar():\n if request.method == 'POST':\n session = db_session.create_session()\n filename = open_file(current_user.id, 'avatar')\n if not filename[0]:\n files = current_user.avatar\n return render_template('change_avatar.html', title=\n 'Смена аватарки', files=files)\n current_user.avatar = '../' + '/'.join(filename.split('/')[-4:])\n session.merge(current_user)\n session.commit()\n files = current_user.avatar\n return render_template('change_avatar.html', title='Смена аватарки',\n files=files)\n\n\[email protected]('/users_list')\n@login_required\ndef users_list():\n sessions = db_session.create_session()\n users_list = sessions.query(users.User).all()\n return render_template('users_list.html', users_list=users_list, title=\n 'Список всех пользователей')\n\n\[email protected]('/profile/<int:id>')\ndef profile(id):\n sessions = db_session.create_session()\n user = sessions.query(users.User).filter(users.User.id == id).first()\n not_sold_objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0, objects.Object.user_id == user.id)\n sold_objs = sessions.query(objects.Object).filter(objects.Object.sold ==\n 1, objects.Object.user_id == user.id)\n if request.method == 'POST':\n session = db_session.create_session()\n filename = open_file(current_user.id, 'avatar')\n user.avatar = '../' + '/'.join(filename.split('/')[-4:])\n session.merge(user)\n session.commit()\n files = user.avatar\n if str(user.objects) == '[]':\n kolvo = 0\n else:\n kolvo = len(str(user.objects).split('|, '))\n return render_template('profile_page.html', kolvo=kolvo, title=user.\n name, files=files, id=id, user=user, not_sold_objs=not_sold_objs,\n sold_objs=sold_objs, date=return_date(user))\n\n\[email protected]('/confirm_password/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef confirm_password(id):\n form = ConfirmPasswordForm()\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if form.validate_on_submit():\n if form.password.data == new.password:\n return redirect(f'/edit_profile/{new.id}')\n else:\n return render_template('confirm_password.html', message=\n 'Неправильный пароль', title='Подтверждение пароля', form=form)\n return render_template('confirm_password.html', title=\n 'Подтверждение пароля', form=form)\n\n\[email protected]('/obj/<int:id>', methods=['GET', 'POST'])\ndef show_obj(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n if request.method == 'POST':\n file, filename = open_file(id, 'object')\n if not file:\n files = obj.pictures.split()\n return render_template('object_page.html', files=files, author=\n obj.user, object=obj, title=f'Объявление {obj.name}', date=\n return_date(obj.user))\n if obj:\n if filename not in obj.pictures:\n obj.pictures = str(obj.pictures) + ' ' + filename + ' '\n session.merge(obj)\n session.commit()\n files = obj.pictures.split()\n return render_template('object_page.html', files=files, author=obj.user,\n object=obj, title=f'Объявление {obj.name}', date=return_date(obj.user))\n\n\[email protected]('/object_delete_photos/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef delete_photos(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n obj.pictures = ' '\n session.commit()\n return 
redirect(f'/obj/{id}')\n\n\[email protected]('/edit_profile/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_profile(id):\n if str(current_user.objects) == '[]':\n kolvo = 0\n else:\n kolvo = len(str(current_user.objects).split('|, '))\n form = EditProfileForm()\n if request.method == 'GET':\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if new:\n form.new_name.data = new.name\n form.new_email.data = new.email\n form.new_password.data = new.password\n form.new_town.data = new.town\n form.new_phone.data = new.phone\n if form.validate_on_submit():\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if new:\n if not chek_password_combination(form.new_password.data):\n return render_template('edit_profile.html', form=form,\n title='Регистрация', pass_message='Слишком слабый пароль')\n if form.new_password.data != form.new_password_again.data:\n return render_template('edit_profile.html', name=\n current_user.name, email=current_user.email, password=\n current_user.password, town=current_user.town, phone=\n current_user.phone, message='Пароли не совпадают',\n title='Редактирование профиля', form=form)\n if not check_phone(form.new_phone.data)[0]:\n return render_template('edit_profile.html', form=form,\n title='Регистрация', phone_message=check_phone(form.\n new_phone.data)[1])\n if sessions.query(users.User).filter(users.User.email == form.\n new_email.data, form.new_email.data != current_user.email\n ).first():\n return render_template('edit_profile.html', form=form,\n title='Регистрация', email_message=\n 'Пользователь с такой почтой уже существует')\n else:\n new.name = form.new_name.data\n new.email = form.new_email.data\n new.password = form.new_password.data\n new.town = form.new_town.data\n new.phone = form.new_phone.data\n sessions.commit()\n return redirect(f'/profile/{current_user.id}')\n else:\n abort(404)\n return render_template('edit_profile.html', name=current_user.name,\n email=current_user.email, password=current_user.password, town=\n current_user.town, phone=current_user.phone, kolvo=kolvo, title=\n 'Редактирование профиля', form=form)\n\n\[email protected]('/add_obj', methods=['GET', 'POST'])\n@login_required\ndef add_obj():\n form = ObjectsForm()\n if form.validate_on_submit():\n sessions = db_session.create_session()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n obj = objects.Object()\n obj.name = form.name.data\n obj.name_for_find = form.name.data.lower()\n if form.price.data > 10000000000:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку')\n elif form.price.data <= 0:\n print(form.price.data)\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n current_user.objects.append(obj)\n sessions.merge(current_user)\n sessions.commit()\n return redirect('/')\n return render_template('add_objects.html', title='Новое объявление',\n form=form, files=files, id=None)\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef 
login():\n form = LoginForm()\n if form.validate_on_submit():\n sessions = db_session.create_session()\n user = sessions.query(users.User).filter(users.User.email == form.\n email.data).first()\n if user and user.password == form.password.data:\n if user.block:\n return render_template('login.html', message=\n 'Ваша страница заблокирована за нарушение правил сайта.',\n title='Вход', form=form)\n login_user(user, remember=form.remember_me.data)\n return redirect('/')\n return render_template('login.html', message=\n 'Неправильный логин или пароль', title='Вход', form=form)\n return render_template('login.html', title='Вход', form=form)\n\n\n<function token>\n\n\[email protected]('/block/<int:id>')\ndef block(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.block = True\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/unblock/<int:id>')\ndef unblock(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.block = False\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/promote/<int:id>')\ndef promote(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 1\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/drop/<int:id>')\ndef drop(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 0\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/register', methods=['GET', 'POST'])\ndef reqister():\n form = RegisterForm()\n if form.validate_on_submit():\n if not chek_password_combination(form.password.data):\n return render_template('register.html', form=form, title=\n 'Регистрация', pass_message='Слишком слабый пароль')\n if form.password.data != form.password_again.data:\n return render_template('register.html', form=form, title=\n 'Регистрация', pass_message='Пароли не совпадают')\n if not check_phone(form.phone.data)[0]:\n return render_template('register.html', form=form, title=\n 'Регистрация', phone_message=check_phone(form.phone.data)[1])\n sessions = db_session.create_session()\n if sessions.query(users.User).filter(users.User.email == form.email\n .data).first():\n return render_template('register.html', form=form, title=\n 'Регистрация', email_message=\n 'Пользователь с такой почтой уже существует')\n user = users.User(name=form.name.data, email=form.email.data,\n password=form.password.data, town=form.town.data, phone=form.\n phone.data)\n user.set_password(form.password.data)\n sessions.add(user)\n sessions.commit()\n return redirect('/login')\n return render_template('register.html', title='Регистрация', form=form)\n\n\[email protected]('/rules', methods=['GET', 'POST'])\ndef rules():\n return render_template('rules.html', title='Правила')\n\n\n<function token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef return_date(user):\n months = {(1): 'января', (2): 'февраля', (3): 'марта', (4): 'апреля', (\n 5): 'мая', (6): 'июня', (7): 'июля', (8): 'августа', (9):\n 'сентября', (10): 'октября', (11): 'ноября', (12): 'декабря'}\n return (\n f'{user.created_date.date().day} {months[user.created_date.date().month]} {user.created_date.date().year} года'\n )\n\n\ndef log():\n logging.info('Info')\n logging.warning('Warning')\n logging.error('Error')\n logging.critical('Critical or Fatal')\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n sessions = db_session.create_session()\n return sessions.query(users.User).get(user_id)\n\n\[email protected](404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)\n\n\ndef open_file(id, type):\n file = request.files['file']\n if file.filename.split('.')[-1] not in ALLOWED_TYPES:\n return False, False\n if type == 'avatar':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/avatar_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return filename\n if type == 'object':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/object_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return file, '../' + '/'.join(filename.split('/')[-4:])\n\n\[email protected]('/object_delete/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef object_delete(id):\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id).first(\n )\n if obj:\n sessions.delete(obj)\n sessions.commit()\n else:\n abort(404)\n return redirect('/')\n\n\[email protected]('/edit_object/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_obj(id):\n form = ObjectsForm()\n if request.method == 'GET':\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if obj:\n form.name.data = obj.name\n form.price.data = obj.price\n form.description.data = obj.description\n form.category.data = obj.category\n form.sold.data = obj.sold\n else:\n abort(404)\n if form.validate_on_submit():\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n if obj:\n obj.name = form.name.data\n if form.price.data > 10000000000:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку'\n )\n elif form.price.data < 0:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.name_for_find = form.name.data.lower()\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n sessions.commit()\n return redirect('/')\n else:\n abort(404)\n return render_template('add_objects.html', 
title=\n 'Редактирование объекта', form=form, id=id)\n\n\[email protected]('/change_avatar', methods=['GET', 'POST'])\n@login_required\ndef change_avatar():\n if request.method == 'POST':\n session = db_session.create_session()\n filename = open_file(current_user.id, 'avatar')\n if not filename[0]:\n files = current_user.avatar\n return render_template('change_avatar.html', title=\n 'Смена аватарки', files=files)\n current_user.avatar = '../' + '/'.join(filename.split('/')[-4:])\n session.merge(current_user)\n session.commit()\n files = current_user.avatar\n return render_template('change_avatar.html', title='Смена аватарки',\n files=files)\n\n\[email protected]('/users_list')\n@login_required\ndef users_list():\n sessions = db_session.create_session()\n users_list = sessions.query(users.User).all()\n return render_template('users_list.html', users_list=users_list, title=\n 'Список всех пользователей')\n\n\[email protected]('/profile/<int:id>')\ndef profile(id):\n sessions = db_session.create_session()\n user = sessions.query(users.User).filter(users.User.id == id).first()\n not_sold_objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0, objects.Object.user_id == user.id)\n sold_objs = sessions.query(objects.Object).filter(objects.Object.sold ==\n 1, objects.Object.user_id == user.id)\n if request.method == 'POST':\n session = db_session.create_session()\n filename = open_file(current_user.id, 'avatar')\n user.avatar = '../' + '/'.join(filename.split('/')[-4:])\n session.merge(user)\n session.commit()\n files = user.avatar\n if str(user.objects) == '[]':\n kolvo = 0\n else:\n kolvo = len(str(user.objects).split('|, '))\n return render_template('profile_page.html', kolvo=kolvo, title=user.\n name, files=files, id=id, user=user, not_sold_objs=not_sold_objs,\n sold_objs=sold_objs, date=return_date(user))\n\n\[email protected]('/confirm_password/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef confirm_password(id):\n form = ConfirmPasswordForm()\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if form.validate_on_submit():\n if form.password.data == new.password:\n return redirect(f'/edit_profile/{new.id}')\n else:\n return render_template('confirm_password.html', message=\n 'Неправильный пароль', title='Подтверждение пароля', form=form)\n return render_template('confirm_password.html', title=\n 'Подтверждение пароля', form=form)\n\n\[email protected]('/obj/<int:id>', methods=['GET', 'POST'])\ndef show_obj(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n if request.method == 'POST':\n file, filename = open_file(id, 'object')\n if not file:\n files = obj.pictures.split()\n return render_template('object_page.html', files=files, author=\n obj.user, object=obj, title=f'Объявление {obj.name}', date=\n return_date(obj.user))\n if obj:\n if filename not in obj.pictures:\n obj.pictures = str(obj.pictures) + ' ' + filename + ' '\n session.merge(obj)\n session.commit()\n files = obj.pictures.split()\n return render_template('object_page.html', files=files, author=obj.user,\n object=obj, title=f'Объявление {obj.name}', date=return_date(obj.user))\n\n\[email protected]('/object_delete_photos/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef delete_photos(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n obj.pictures = ' '\n session.commit()\n return 
redirect(f'/obj/{id}')\n\n\[email protected]('/edit_profile/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_profile(id):\n if str(current_user.objects) == '[]':\n kolvo = 0\n else:\n kolvo = len(str(current_user.objects).split('|, '))\n form = EditProfileForm()\n if request.method == 'GET':\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if new:\n form.new_name.data = new.name\n form.new_email.data = new.email\n form.new_password.data = new.password\n form.new_town.data = new.town\n form.new_phone.data = new.phone\n if form.validate_on_submit():\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if new:\n if not chek_password_combination(form.new_password.data):\n return render_template('edit_profile.html', form=form,\n title='Регистрация', pass_message='Слишком слабый пароль')\n if form.new_password.data != form.new_password_again.data:\n return render_template('edit_profile.html', name=\n current_user.name, email=current_user.email, password=\n current_user.password, town=current_user.town, phone=\n current_user.phone, message='Пароли не совпадают',\n title='Редактирование профиля', form=form)\n if not check_phone(form.new_phone.data)[0]:\n return render_template('edit_profile.html', form=form,\n title='Регистрация', phone_message=check_phone(form.\n new_phone.data)[1])\n if sessions.query(users.User).filter(users.User.email == form.\n new_email.data, form.new_email.data != current_user.email\n ).first():\n return render_template('edit_profile.html', form=form,\n title='Регистрация', email_message=\n 'Пользователь с такой почтой уже существует')\n else:\n new.name = form.new_name.data\n new.email = form.new_email.data\n new.password = form.new_password.data\n new.town = form.new_town.data\n new.phone = form.new_phone.data\n sessions.commit()\n return redirect(f'/profile/{current_user.id}')\n else:\n abort(404)\n return render_template('edit_profile.html', name=current_user.name,\n email=current_user.email, password=current_user.password, town=\n current_user.town, phone=current_user.phone, kolvo=kolvo, title=\n 'Редактирование профиля', form=form)\n\n\[email protected]('/add_obj', methods=['GET', 'POST'])\n@login_required\ndef add_obj():\n form = ObjectsForm()\n if form.validate_on_submit():\n sessions = db_session.create_session()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n obj = objects.Object()\n obj.name = form.name.data\n obj.name_for_find = form.name.data.lower()\n if form.price.data > 10000000000:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку')\n elif form.price.data <= 0:\n print(form.price.data)\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n current_user.objects.append(obj)\n sessions.merge(current_user)\n sessions.commit()\n return redirect('/')\n return render_template('add_objects.html', title='Новое объявление',\n form=form, files=files, id=None)\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef 
login():\n form = LoginForm()\n if form.validate_on_submit():\n sessions = db_session.create_session()\n user = sessions.query(users.User).filter(users.User.email == form.\n email.data).first()\n if user and user.password == form.password.data:\n if user.block:\n return render_template('login.html', message=\n 'Ваша страница заблокирована за нарушение правил сайта.',\n title='Вход', form=form)\n login_user(user, remember=form.remember_me.data)\n return redirect('/')\n return render_template('login.html', message=\n 'Неправильный логин или пароль', title='Вход', form=form)\n return render_template('login.html', title='Вход', form=form)\n\n\n<function token>\n\n\[email protected]('/block/<int:id>')\ndef block(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.block = True\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/unblock/<int:id>')\ndef unblock(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.block = False\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/promote/<int:id>')\ndef promote(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 1\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/drop/<int:id>')\ndef drop(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 0\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/register', methods=['GET', 'POST'])\ndef reqister():\n form = RegisterForm()\n if form.validate_on_submit():\n if not chek_password_combination(form.password.data):\n return render_template('register.html', form=form, title=\n 'Регистрация', pass_message='Слишком слабый пароль')\n if form.password.data != form.password_again.data:\n return render_template('register.html', form=form, title=\n 'Регистрация', pass_message='Пароли не совпадают')\n if not check_phone(form.phone.data)[0]:\n return render_template('register.html', form=form, title=\n 'Регистрация', phone_message=check_phone(form.phone.data)[1])\n sessions = db_session.create_session()\n if sessions.query(users.User).filter(users.User.email == form.email\n .data).first():\n return render_template('register.html', form=form, title=\n 'Регистрация', email_message=\n 'Пользователь с такой почтой уже существует')\n user = users.User(name=form.name.data, email=form.email.data,\n password=form.password.data, town=form.town.data, phone=form.\n phone.data)\n user.set_password(form.password.data)\n sessions.add(user)\n sessions.commit()\n return redirect('/login')\n return render_template('register.html', title='Регистрация', form=form)\n\n\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef return_date(user):\n months = {(1): 'января', (2): 'февраля', (3): 'марта', (4): 'апреля', (\n 5): 'мая', (6): 'июня', (7): 'июля', (8): 'августа', (9):\n 'сентября', (10): 'октября', (11): 'ноября', (12): 'декабря'}\n return (\n f'{user.created_date.date().day} {months[user.created_date.date().month]} {user.created_date.date().year} года'\n )\n\n\ndef log():\n logging.info('Info')\n logging.warning('Warning')\n logging.error('Error')\n logging.critical('Critical or Fatal')\n\n\n<function token>\n\n\[email protected](404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)\n\n\ndef open_file(id, type):\n file = request.files['file']\n if file.filename.split('.')[-1] not in ALLOWED_TYPES:\n return False, False\n if type == 'avatar':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/avatar_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return filename\n if type == 'object':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/object_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return file, '../' + '/'.join(filename.split('/')[-4:])\n\n\[email protected]('/object_delete/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef object_delete(id):\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id).first(\n )\n if obj:\n sessions.delete(obj)\n sessions.commit()\n else:\n abort(404)\n return redirect('/')\n\n\[email protected]('/edit_object/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_obj(id):\n form = ObjectsForm()\n if request.method == 'GET':\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if obj:\n form.name.data = obj.name\n form.price.data = obj.price\n form.description.data = obj.description\n form.category.data = obj.category\n form.sold.data = obj.sold\n else:\n abort(404)\n if form.validate_on_submit():\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n if obj:\n obj.name = form.name.data\n if form.price.data > 10000000000:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку'\n )\n elif form.price.data < 0:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.name_for_find = form.name.data.lower()\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n sessions.commit()\n return redirect('/')\n else:\n abort(404)\n return render_template('add_objects.html', title=\n 'Редактирование объекта', form=form, id=id)\n\n\[email protected]('/change_avatar', methods=['GET', 
'POST'])\n@login_required\ndef change_avatar():\n if request.method == 'POST':\n session = db_session.create_session()\n filename = open_file(current_user.id, 'avatar')\n if not filename[0]:\n files = current_user.avatar\n return render_template('change_avatar.html', title=\n 'Смена аватарки', files=files)\n current_user.avatar = '../' + '/'.join(filename.split('/')[-4:])\n session.merge(current_user)\n session.commit()\n files = current_user.avatar\n return render_template('change_avatar.html', title='Смена аватарки',\n files=files)\n\n\[email protected]('/users_list')\n@login_required\ndef users_list():\n sessions = db_session.create_session()\n users_list = sessions.query(users.User).all()\n return render_template('users_list.html', users_list=users_list, title=\n 'Список всех пользователей')\n\n\[email protected]('/profile/<int:id>')\ndef profile(id):\n sessions = db_session.create_session()\n user = sessions.query(users.User).filter(users.User.id == id).first()\n not_sold_objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0, objects.Object.user_id == user.id)\n sold_objs = sessions.query(objects.Object).filter(objects.Object.sold ==\n 1, objects.Object.user_id == user.id)\n if request.method == 'POST':\n session = db_session.create_session()\n filename = open_file(current_user.id, 'avatar')\n user.avatar = '../' + '/'.join(filename.split('/')[-4:])\n session.merge(user)\n session.commit()\n files = user.avatar\n if str(user.objects) == '[]':\n kolvo = 0\n else:\n kolvo = len(str(user.objects).split('|, '))\n return render_template('profile_page.html', kolvo=kolvo, title=user.\n name, files=files, id=id, user=user, not_sold_objs=not_sold_objs,\n sold_objs=sold_objs, date=return_date(user))\n\n\[email protected]('/confirm_password/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef confirm_password(id):\n form = ConfirmPasswordForm()\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if form.validate_on_submit():\n if form.password.data == new.password:\n return redirect(f'/edit_profile/{new.id}')\n else:\n return render_template('confirm_password.html', message=\n 'Неправильный пароль', title='Подтверждение пароля', form=form)\n return render_template('confirm_password.html', title=\n 'Подтверждение пароля', form=form)\n\n\[email protected]('/obj/<int:id>', methods=['GET', 'POST'])\ndef show_obj(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n if request.method == 'POST':\n file, filename = open_file(id, 'object')\n if not file:\n files = obj.pictures.split()\n return render_template('object_page.html', files=files, author=\n obj.user, object=obj, title=f'Объявление {obj.name}', date=\n return_date(obj.user))\n if obj:\n if filename not in obj.pictures:\n obj.pictures = str(obj.pictures) + ' ' + filename + ' '\n session.merge(obj)\n session.commit()\n files = obj.pictures.split()\n return render_template('object_page.html', files=files, author=obj.user,\n object=obj, title=f'Объявление {obj.name}', date=return_date(obj.user))\n\n\[email protected]('/object_delete_photos/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef delete_photos(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n obj.pictures = ' '\n session.commit()\n return redirect(f'/obj/{id}')\n\n\[email protected]('/edit_profile/<int:id>', methods=['GET', 
'POST'])\n@login_required\ndef edit_profile(id):\n if str(current_user.objects) == '[]':\n kolvo = 0\n else:\n kolvo = len(str(current_user.objects).split('|, '))\n form = EditProfileForm()\n if request.method == 'GET':\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if new:\n form.new_name.data = new.name\n form.new_email.data = new.email\n form.new_password.data = new.password\n form.new_town.data = new.town\n form.new_phone.data = new.phone\n if form.validate_on_submit():\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if new:\n if not chek_password_combination(form.new_password.data):\n return render_template('edit_profile.html', form=form,\n title='Регистрация', pass_message='Слишком слабый пароль')\n if form.new_password.data != form.new_password_again.data:\n return render_template('edit_profile.html', name=\n current_user.name, email=current_user.email, password=\n current_user.password, town=current_user.town, phone=\n current_user.phone, message='Пароли не совпадают',\n title='Редактирование профиля', form=form)\n if not check_phone(form.new_phone.data)[0]:\n return render_template('edit_profile.html', form=form,\n title='Регистрация', phone_message=check_phone(form.\n new_phone.data)[1])\n if sessions.query(users.User).filter(users.User.email == form.\n new_email.data, form.new_email.data != current_user.email\n ).first():\n return render_template('edit_profile.html', form=form,\n title='Регистрация', email_message=\n 'Пользователь с такой почтой уже существует')\n else:\n new.name = form.new_name.data\n new.email = form.new_email.data\n new.password = form.new_password.data\n new.town = form.new_town.data\n new.phone = form.new_phone.data\n sessions.commit()\n return redirect(f'/profile/{current_user.id}')\n else:\n abort(404)\n return render_template('edit_profile.html', name=current_user.name,\n email=current_user.email, password=current_user.password, town=\n current_user.town, phone=current_user.phone, kolvo=kolvo, title=\n 'Редактирование профиля', form=form)\n\n\[email protected]('/add_obj', methods=['GET', 'POST'])\n@login_required\ndef add_obj():\n form = ObjectsForm()\n if form.validate_on_submit():\n sessions = db_session.create_session()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n obj = objects.Object()\n obj.name = form.name.data\n obj.name_for_find = form.name.data.lower()\n if form.price.data > 10000000000:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку')\n elif form.price.data <= 0:\n print(form.price.data)\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n current_user.objects.append(obj)\n sessions.merge(current_user)\n sessions.commit()\n return redirect('/')\n return render_template('add_objects.html', title='Новое объявление',\n form=form, files=files, id=None)\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n sessions = 
db_session.create_session()\n user = sessions.query(users.User).filter(users.User.email == form.\n email.data).first()\n if user and user.password == form.password.data:\n if user.block:\n return render_template('login.html', message=\n 'Ваша страница заблокирована за нарушение правил сайта.',\n title='Вход', form=form)\n login_user(user, remember=form.remember_me.data)\n return redirect('/')\n return render_template('login.html', message=\n 'Неправильный логин или пароль', title='Вход', form=form)\n return render_template('login.html', title='Вход', form=form)\n\n\n<function token>\n\n\[email protected]('/block/<int:id>')\ndef block(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.block = True\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/unblock/<int:id>')\ndef unblock(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.block = False\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/promote/<int:id>')\ndef promote(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 1\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/drop/<int:id>')\ndef drop(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 0\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/register', methods=['GET', 'POST'])\ndef reqister():\n form = RegisterForm()\n if form.validate_on_submit():\n if not chek_password_combination(form.password.data):\n return render_template('register.html', form=form, title=\n 'Регистрация', pass_message='Слишком слабый пароль')\n if form.password.data != form.password_again.data:\n return render_template('register.html', form=form, title=\n 'Регистрация', pass_message='Пароли не совпадают')\n if not check_phone(form.phone.data)[0]:\n return render_template('register.html', form=form, title=\n 'Регистрация', phone_message=check_phone(form.phone.data)[1])\n sessions = db_session.create_session()\n if sessions.query(users.User).filter(users.User.email == form.email\n .data).first():\n return render_template('register.html', form=form, title=\n 'Регистрация', email_message=\n 'Пользователь с такой почтой уже существует')\n user = users.User(name=form.name.data, email=form.email.data,\n password=form.password.data, town=form.town.data, phone=form.\n phone.data)\n user.set_password(form.password.data)\n sessions.add(user)\n sessions.commit()\n return redirect('/login')\n return render_template('register.html', title='Регистрация', form=form)\n\n\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef return_date(user):\n months = {(1): 'января', (2): 'февраля', (3): 'марта', (4): 'апреля', (\n 5): 'мая', (6): 'июня', (7): 'июля', (8): 'августа', (9):\n 'сентября', (10): 'октября', (11): 'ноября', (12): 'декабря'}\n return (\n f'{user.created_date.date().day} {months[user.created_date.date().month]} {user.created_date.date().year} года'\n )\n\n\ndef log():\n logging.info('Info')\n logging.warning('Warning')\n logging.error('Error')\n logging.critical('Critical or Fatal')\n\n\n<function token>\n\n\[email protected](404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)\n\n\ndef open_file(id, type):\n file = request.files['file']\n if file.filename.split('.')[-1] not in ALLOWED_TYPES:\n return False, False\n if type == 'avatar':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/avatar_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return filename\n if type == 'object':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/object_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return file, '../' + '/'.join(filename.split('/')[-4:])\n\n\[email protected]('/object_delete/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef object_delete(id):\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id).first(\n )\n if obj:\n sessions.delete(obj)\n sessions.commit()\n else:\n abort(404)\n return redirect('/')\n\n\[email protected]('/edit_object/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_obj(id):\n form = ObjectsForm()\n if request.method == 'GET':\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if obj:\n form.name.data = obj.name\n form.price.data = obj.price\n form.description.data = obj.description\n form.category.data = obj.category\n form.sold.data = obj.sold\n else:\n abort(404)\n if form.validate_on_submit():\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n if obj:\n obj.name = form.name.data\n if form.price.data > 10000000000:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку'\n )\n elif form.price.data < 0:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.name_for_find = form.name.data.lower()\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n sessions.commit()\n return redirect('/')\n else:\n abort(404)\n return render_template('add_objects.html', title=\n 'Редактирование объекта', form=form, id=id)\n\n\[email protected]('/change_avatar', methods=['GET', 
'POST'])\n@login_required\ndef change_avatar():\n if request.method == 'POST':\n session = db_session.create_session()\n filename = open_file(current_user.id, 'avatar')\n if not filename[0]:\n files = current_user.avatar\n return render_template('change_avatar.html', title=\n 'Смена аватарки', files=files)\n current_user.avatar = '../' + '/'.join(filename.split('/')[-4:])\n session.merge(current_user)\n session.commit()\n files = current_user.avatar\n return render_template('change_avatar.html', title='Смена аватарки',\n files=files)\n\n\[email protected]('/users_list')\n@login_required\ndef users_list():\n sessions = db_session.create_session()\n users_list = sessions.query(users.User).all()\n return render_template('users_list.html', users_list=users_list, title=\n 'Список всех пользователей')\n\n\[email protected]('/profile/<int:id>')\ndef profile(id):\n sessions = db_session.create_session()\n user = sessions.query(users.User).filter(users.User.id == id).first()\n not_sold_objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0, objects.Object.user_id == user.id)\n sold_objs = sessions.query(objects.Object).filter(objects.Object.sold ==\n 1, objects.Object.user_id == user.id)\n if request.method == 'POST':\n session = db_session.create_session()\n filename = open_file(current_user.id, 'avatar')\n user.avatar = '../' + '/'.join(filename.split('/')[-4:])\n session.merge(user)\n session.commit()\n files = user.avatar\n if str(user.objects) == '[]':\n kolvo = 0\n else:\n kolvo = len(str(user.objects).split('|, '))\n return render_template('profile_page.html', kolvo=kolvo, title=user.\n name, files=files, id=id, user=user, not_sold_objs=not_sold_objs,\n sold_objs=sold_objs, date=return_date(user))\n\n\[email protected]('/confirm_password/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef confirm_password(id):\n form = ConfirmPasswordForm()\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if form.validate_on_submit():\n if form.password.data == new.password:\n return redirect(f'/edit_profile/{new.id}')\n else:\n return render_template('confirm_password.html', message=\n 'Неправильный пароль', title='Подтверждение пароля', form=form)\n return render_template('confirm_password.html', title=\n 'Подтверждение пароля', form=form)\n\n\[email protected]('/obj/<int:id>', methods=['GET', 'POST'])\ndef show_obj(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n if request.method == 'POST':\n file, filename = open_file(id, 'object')\n if not file:\n files = obj.pictures.split()\n return render_template('object_page.html', files=files, author=\n obj.user, object=obj, title=f'Объявление {obj.name}', date=\n return_date(obj.user))\n if obj:\n if filename not in obj.pictures:\n obj.pictures = str(obj.pictures) + ' ' + filename + ' '\n session.merge(obj)\n session.commit()\n files = obj.pictures.split()\n return render_template('object_page.html', files=files, author=obj.user,\n object=obj, title=f'Объявление {obj.name}', date=return_date(obj.user))\n\n\[email protected]('/object_delete_photos/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef delete_photos(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n obj.pictures = ' '\n session.commit()\n return redirect(f'/obj/{id}')\n\n\n<function token>\n\n\[email protected]('/add_obj', methods=['GET', 
'POST'])\n@login_required\ndef add_obj():\n form = ObjectsForm()\n if form.validate_on_submit():\n sessions = db_session.create_session()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n obj = objects.Object()\n obj.name = form.name.data\n obj.name_for_find = form.name.data.lower()\n if form.price.data > 10000000000:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку')\n elif form.price.data <= 0:\n print(form.price.data)\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n current_user.objects.append(obj)\n sessions.merge(current_user)\n sessions.commit()\n return redirect('/')\n return render_template('add_objects.html', title='Новое объявление',\n form=form, files=files, id=None)\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n sessions = db_session.create_session()\n user = sessions.query(users.User).filter(users.User.email == form.\n email.data).first()\n if user and user.password == form.password.data:\n if user.block:\n return render_template('login.html', message=\n 'Ваша страница заблокирована за нарушение правил сайта.',\n title='Вход', form=form)\n login_user(user, remember=form.remember_me.data)\n return redirect('/')\n return render_template('login.html', message=\n 'Неправильный логин или пароль', title='Вход', form=form)\n return render_template('login.html', title='Вход', form=form)\n\n\n<function token>\n\n\[email protected]('/block/<int:id>')\ndef block(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.block = True\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/unblock/<int:id>')\ndef unblock(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.block = False\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/promote/<int:id>')\ndef promote(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 1\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/drop/<int:id>')\ndef drop(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 0\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/register', methods=['GET', 'POST'])\ndef reqister():\n form = RegisterForm()\n if form.validate_on_submit():\n if not chek_password_combination(form.password.data):\n return render_template('register.html', form=form, title=\n 'Регистрация', pass_message='Слишком слабый пароль')\n if form.password.data != form.password_again.data:\n return render_template('register.html', form=form, title=\n 'Регистрация', pass_message='Пароли не совпадают')\n if not check_phone(form.phone.data)[0]:\n return render_template('register.html', form=form, title=\n 
'Регистрация', phone_message=check_phone(form.phone.data)[1])\n sessions = db_session.create_session()\n if sessions.query(users.User).filter(users.User.email == form.email\n .data).first():\n return render_template('register.html', form=form, title=\n 'Регистрация', email_message=\n 'Пользователь с такой почтой уже существует')\n user = users.User(name=form.name.data, email=form.email.data,\n password=form.password.data, town=form.town.data, phone=form.\n phone.data)\n user.set_password(form.password.data)\n sessions.add(user)\n sessions.commit()\n return redirect('/login')\n return render_template('register.html', title='Регистрация', form=form)\n\n\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef return_date(user):\n months = {(1): 'января', (2): 'февраля', (3): 'марта', (4): 'апреля', (\n 5): 'мая', (6): 'июня', (7): 'июля', (8): 'августа', (9):\n 'сентября', (10): 'октября', (11): 'ноября', (12): 'декабря'}\n return (\n f'{user.created_date.date().day} {months[user.created_date.date().month]} {user.created_date.date().year} года'\n )\n\n\ndef log():\n logging.info('Info')\n logging.warning('Warning')\n logging.error('Error')\n logging.critical('Critical or Fatal')\n\n\n<function token>\n<function token>\n\n\ndef open_file(id, type):\n file = request.files['file']\n if file.filename.split('.')[-1] not in ALLOWED_TYPES:\n return False, False\n if type == 'avatar':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/avatar_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return filename\n if type == 'object':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/object_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return file, '../' + '/'.join(filename.split('/')[-4:])\n\n\[email protected]('/object_delete/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef object_delete(id):\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id).first(\n )\n if obj:\n sessions.delete(obj)\n sessions.commit()\n else:\n abort(404)\n return redirect('/')\n\n\[email protected]('/edit_object/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_obj(id):\n form = ObjectsForm()\n if request.method == 'GET':\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if obj:\n form.name.data = obj.name\n form.price.data = obj.price\n form.description.data = obj.description\n form.category.data = obj.category\n form.sold.data = obj.sold\n else:\n abort(404)\n if form.validate_on_submit():\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n if obj:\n obj.name = form.name.data\n if form.price.data > 10000000000:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку'\n )\n elif form.price.data < 0:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.name_for_find = form.name.data.lower()\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n sessions.commit()\n return redirect('/')\n else:\n abort(404)\n return render_template('add_objects.html', title=\n 'Редактирование объекта', form=form, id=id)\n\n\[email protected]('/change_avatar', methods=['GET', 'POST'])\n@login_required\ndef change_avatar():\n if request.method == 'POST':\n session = 
db_session.create_session()\n filename = open_file(current_user.id, 'avatar')\n if not filename[0]:\n files = current_user.avatar\n return render_template('change_avatar.html', title=\n 'Смена аватарки', files=files)\n current_user.avatar = '../' + '/'.join(filename.split('/')[-4:])\n session.merge(current_user)\n session.commit()\n files = current_user.avatar\n return render_template('change_avatar.html', title='Смена аватарки',\n files=files)\n\n\[email protected]('/users_list')\n@login_required\ndef users_list():\n sessions = db_session.create_session()\n users_list = sessions.query(users.User).all()\n return render_template('users_list.html', users_list=users_list, title=\n 'Список всех пользователей')\n\n\[email protected]('/profile/<int:id>')\ndef profile(id):\n sessions = db_session.create_session()\n user = sessions.query(users.User).filter(users.User.id == id).first()\n not_sold_objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0, objects.Object.user_id == user.id)\n sold_objs = sessions.query(objects.Object).filter(objects.Object.sold ==\n 1, objects.Object.user_id == user.id)\n if request.method == 'POST':\n session = db_session.create_session()\n filename = open_file(current_user.id, 'avatar')\n user.avatar = '../' + '/'.join(filename.split('/')[-4:])\n session.merge(user)\n session.commit()\n files = user.avatar\n if str(user.objects) == '[]':\n kolvo = 0\n else:\n kolvo = len(str(user.objects).split('|, '))\n return render_template('profile_page.html', kolvo=kolvo, title=user.\n name, files=files, id=id, user=user, not_sold_objs=not_sold_objs,\n sold_objs=sold_objs, date=return_date(user))\n\n\[email protected]('/confirm_password/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef confirm_password(id):\n form = ConfirmPasswordForm()\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if form.validate_on_submit():\n if form.password.data == new.password:\n return redirect(f'/edit_profile/{new.id}')\n else:\n return render_template('confirm_password.html', message=\n 'Неправильный пароль', title='Подтверждение пароля', form=form)\n return render_template('confirm_password.html', title=\n 'Подтверждение пароля', form=form)\n\n\[email protected]('/obj/<int:id>', methods=['GET', 'POST'])\ndef show_obj(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n if request.method == 'POST':\n file, filename = open_file(id, 'object')\n if not file:\n files = obj.pictures.split()\n return render_template('object_page.html', files=files, author=\n obj.user, object=obj, title=f'Объявление {obj.name}', date=\n return_date(obj.user))\n if obj:\n if filename not in obj.pictures:\n obj.pictures = str(obj.pictures) + ' ' + filename + ' '\n session.merge(obj)\n session.commit()\n files = obj.pictures.split()\n return render_template('object_page.html', files=files, author=obj.user,\n object=obj, title=f'Объявление {obj.name}', date=return_date(obj.user))\n\n\[email protected]('/object_delete_photos/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef delete_photos(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n obj.pictures = ' '\n session.commit()\n return redirect(f'/obj/{id}')\n\n\n<function token>\n\n\[email protected]('/add_obj', methods=['GET', 'POST'])\n@login_required\ndef add_obj():\n form = ObjectsForm()\n if form.validate_on_submit():\n sessions = 
db_session.create_session()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n obj = objects.Object()\n obj.name = form.name.data\n obj.name_for_find = form.name.data.lower()\n if form.price.data > 10000000000:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку')\n elif form.price.data <= 0:\n print(form.price.data)\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n current_user.objects.append(obj)\n sessions.merge(current_user)\n sessions.commit()\n return redirect('/')\n return render_template('add_objects.html', title='Новое объявление',\n form=form, files=files, id=None)\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n sessions = db_session.create_session()\n user = sessions.query(users.User).filter(users.User.email == form.\n email.data).first()\n if user and user.password == form.password.data:\n if user.block:\n return render_template('login.html', message=\n 'Ваша страница заблокирована за нарушение правил сайта.',\n title='Вход', form=form)\n login_user(user, remember=form.remember_me.data)\n return redirect('/')\n return render_template('login.html', message=\n 'Неправильный логин или пароль', title='Вход', form=form)\n return render_template('login.html', title='Вход', form=form)\n\n\n<function token>\n\n\[email protected]('/block/<int:id>')\ndef block(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.block = True\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/unblock/<int:id>')\ndef unblock(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.block = False\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/promote/<int:id>')\ndef promote(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 1\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/drop/<int:id>')\ndef drop(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 0\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/register', methods=['GET', 'POST'])\ndef reqister():\n form = RegisterForm()\n if form.validate_on_submit():\n if not chek_password_combination(form.password.data):\n return render_template('register.html', form=form, title=\n 'Регистрация', pass_message='Слишком слабый пароль')\n if form.password.data != form.password_again.data:\n return render_template('register.html', form=form, title=\n 'Регистрация', pass_message='Пароли не совпадают')\n if not check_phone(form.phone.data)[0]:\n return render_template('register.html', form=form, title=\n 'Регистрация', phone_message=check_phone(form.phone.data)[1])\n sessions = db_session.create_session()\n if 
sessions.query(users.User).filter(users.User.email == form.email\n .data).first():\n return render_template('register.html', form=form, title=\n 'Регистрация', email_message=\n 'Пользователь с такой почтой уже существует')\n user = users.User(name=form.name.data, email=form.email.data,\n password=form.password.data, town=form.town.data, phone=form.\n phone.data)\n user.set_password(form.password.data)\n sessions.add(user)\n sessions.commit()\n return redirect('/login')\n return render_template('register.html', title='Регистрация', form=form)\n\n\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef return_date(user):\n months = {(1): 'января', (2): 'февраля', (3): 'марта', (4): 'апреля', (\n 5): 'мая', (6): 'июня', (7): 'июля', (8): 'августа', (9):\n 'сентября', (10): 'октября', (11): 'ноября', (12): 'декабря'}\n return (\n f'{user.created_date.date().day} {months[user.created_date.date().month]} {user.created_date.date().year} года'\n )\n\n\ndef log():\n logging.info('Info')\n logging.warning('Warning')\n logging.error('Error')\n logging.critical('Critical or Fatal')\n\n\n<function token>\n<function token>\n\n\ndef open_file(id, type):\n file = request.files['file']\n if file.filename.split('.')[-1] not in ALLOWED_TYPES:\n return False, False\n if type == 'avatar':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/avatar_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return filename\n if type == 'object':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/object_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return file, '../' + '/'.join(filename.split('/')[-4:])\n\n\[email protected]('/object_delete/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef object_delete(id):\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id).first(\n )\n if obj:\n sessions.delete(obj)\n sessions.commit()\n else:\n abort(404)\n return redirect('/')\n\n\[email protected]('/edit_object/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_obj(id):\n form = ObjectsForm()\n if request.method == 'GET':\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if obj:\n form.name.data = obj.name\n form.price.data = obj.price\n form.description.data = obj.description\n form.category.data = obj.category\n form.sold.data = obj.sold\n else:\n abort(404)\n if form.validate_on_submit():\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n if obj:\n obj.name = form.name.data\n if form.price.data > 10000000000:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку'\n )\n elif form.price.data < 0:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.name_for_find = form.name.data.lower()\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n sessions.commit()\n return redirect('/')\n else:\n abort(404)\n return render_template('add_objects.html', title=\n 'Редактирование объекта', form=form, id=id)\n\n\[email protected]('/change_avatar', methods=['GET', 'POST'])\n@login_required\ndef change_avatar():\n if request.method == 'POST':\n session = 
db_session.create_session()\n filename = open_file(current_user.id, 'avatar')\n if not filename[0]:\n files = current_user.avatar\n return render_template('change_avatar.html', title=\n 'Смена аватарки', files=files)\n current_user.avatar = '../' + '/'.join(filename.split('/')[-4:])\n session.merge(current_user)\n session.commit()\n files = current_user.avatar\n return render_template('change_avatar.html', title='Смена аватарки',\n files=files)\n\n\[email protected]('/users_list')\n@login_required\ndef users_list():\n sessions = db_session.create_session()\n users_list = sessions.query(users.User).all()\n return render_template('users_list.html', users_list=users_list, title=\n 'Список всех пользователей')\n\n\[email protected]('/profile/<int:id>')\ndef profile(id):\n sessions = db_session.create_session()\n user = sessions.query(users.User).filter(users.User.id == id).first()\n not_sold_objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0, objects.Object.user_id == user.id)\n sold_objs = sessions.query(objects.Object).filter(objects.Object.sold ==\n 1, objects.Object.user_id == user.id)\n if request.method == 'POST':\n session = db_session.create_session()\n filename = open_file(current_user.id, 'avatar')\n user.avatar = '../' + '/'.join(filename.split('/')[-4:])\n session.merge(user)\n session.commit()\n files = user.avatar\n if str(user.objects) == '[]':\n kolvo = 0\n else:\n kolvo = len(str(user.objects).split('|, '))\n return render_template('profile_page.html', kolvo=kolvo, title=user.\n name, files=files, id=id, user=user, not_sold_objs=not_sold_objs,\n sold_objs=sold_objs, date=return_date(user))\n\n\[email protected]('/confirm_password/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef confirm_password(id):\n form = ConfirmPasswordForm()\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if form.validate_on_submit():\n if form.password.data == new.password:\n return redirect(f'/edit_profile/{new.id}')\n else:\n return render_template('confirm_password.html', message=\n 'Неправильный пароль', title='Подтверждение пароля', form=form)\n return render_template('confirm_password.html', title=\n 'Подтверждение пароля', form=form)\n\n\n<function token>\n\n\[email protected]('/object_delete_photos/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef delete_photos(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n obj.pictures = ' '\n session.commit()\n return redirect(f'/obj/{id}')\n\n\n<function token>\n\n\[email protected]('/add_obj', methods=['GET', 'POST'])\n@login_required\ndef add_obj():\n form = ObjectsForm()\n if form.validate_on_submit():\n sessions = db_session.create_session()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n obj = objects.Object()\n obj.name = form.name.data\n obj.name_for_find = form.name.data.lower()\n if form.price.data > 10000000000:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку')\n elif form.price.data <= 0:\n print(form.price.data)\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n 
obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n current_user.objects.append(obj)\n sessions.merge(current_user)\n sessions.commit()\n return redirect('/')\n return render_template('add_objects.html', title='Новое объявление',\n form=form, files=files, id=None)\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n sessions = db_session.create_session()\n user = sessions.query(users.User).filter(users.User.email == form.\n email.data).first()\n if user and user.password == form.password.data:\n if user.block:\n return render_template('login.html', message=\n 'Ваша страница заблокирована за нарушение правил сайта.',\n title='Вход', form=form)\n login_user(user, remember=form.remember_me.data)\n return redirect('/')\n return render_template('login.html', message=\n 'Неправильный логин или пароль', title='Вход', form=form)\n return render_template('login.html', title='Вход', form=form)\n\n\n<function token>\n\n\[email protected]('/block/<int:id>')\ndef block(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.block = True\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/unblock/<int:id>')\ndef unblock(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.block = False\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/promote/<int:id>')\ndef promote(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 1\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/drop/<int:id>')\ndef drop(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 0\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/register', methods=['GET', 'POST'])\ndef reqister():\n form = RegisterForm()\n if form.validate_on_submit():\n if not chek_password_combination(form.password.data):\n return render_template('register.html', form=form, title=\n 'Регистрация', pass_message='Слишком слабый пароль')\n if form.password.data != form.password_again.data:\n return render_template('register.html', form=form, title=\n 'Регистрация', pass_message='Пароли не совпадают')\n if not check_phone(form.phone.data)[0]:\n return render_template('register.html', form=form, title=\n 'Регистрация', phone_message=check_phone(form.phone.data)[1])\n sessions = db_session.create_session()\n if sessions.query(users.User).filter(users.User.email == form.email\n .data).first():\n return render_template('register.html', form=form, title=\n 'Регистрация', email_message=\n 'Пользователь с такой почтой уже существует')\n user = users.User(name=form.name.data, email=form.email.data,\n password=form.password.data, town=form.town.data, phone=form.\n phone.data)\n user.set_password(form.password.data)\n sessions.add(user)\n sessions.commit()\n return redirect('/login')\n return render_template('register.html', title='Регистрация', form=form)\n\n\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef return_date(user):\n months = {(1): 'января', (2): 'февраля', (3): 'марта', (4): 'апреля', (\n 5): 'мая', (6): 'июня', (7): 'июля', (8): 'августа', (9):\n 'сентября', (10): 'октября', (11): 'ноября', (12): 'декабря'}\n return (\n f'{user.created_date.date().day} {months[user.created_date.date().month]} {user.created_date.date().year} года'\n )\n\n\ndef log():\n logging.info('Info')\n logging.warning('Warning')\n logging.error('Error')\n logging.critical('Critical or Fatal')\n\n\n<function token>\n<function token>\n\n\ndef open_file(id, type):\n file = request.files['file']\n if file.filename.split('.')[-1] not in ALLOWED_TYPES:\n return False, False\n if type == 'avatar':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/avatar_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return filename\n if type == 'object':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/object_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return file, '../' + '/'.join(filename.split('/')[-4:])\n\n\[email protected]('/object_delete/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef object_delete(id):\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id).first(\n )\n if obj:\n sessions.delete(obj)\n sessions.commit()\n else:\n abort(404)\n return redirect('/')\n\n\[email protected]('/edit_object/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_obj(id):\n form = ObjectsForm()\n if request.method == 'GET':\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if obj:\n form.name.data = obj.name\n form.price.data = obj.price\n form.description.data = obj.description\n form.category.data = obj.category\n form.sold.data = obj.sold\n else:\n abort(404)\n if form.validate_on_submit():\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n if obj:\n obj.name = form.name.data\n if form.price.data > 10000000000:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку'\n )\n elif form.price.data < 0:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.name_for_find = form.name.data.lower()\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n sessions.commit()\n return redirect('/')\n else:\n abort(404)\n return render_template('add_objects.html', title=\n 'Редактирование объекта', form=form, id=id)\n\n\[email protected]('/change_avatar', methods=['GET', 'POST'])\n@login_required\ndef change_avatar():\n if request.method == 'POST':\n session = 
db_session.create_session()\n filename = open_file(current_user.id, 'avatar')\n if not filename[0]:\n files = current_user.avatar\n return render_template('change_avatar.html', title=\n 'Смена аватарки', files=files)\n current_user.avatar = '../' + '/'.join(filename.split('/')[-4:])\n session.merge(current_user)\n session.commit()\n files = current_user.avatar\n return render_template('change_avatar.html', title='Смена аватарки',\n files=files)\n\n\[email protected]('/users_list')\n@login_required\ndef users_list():\n sessions = db_session.create_session()\n users_list = sessions.query(users.User).all()\n return render_template('users_list.html', users_list=users_list, title=\n 'Список всех пользователей')\n\n\[email protected]('/profile/<int:id>')\ndef profile(id):\n sessions = db_session.create_session()\n user = sessions.query(users.User).filter(users.User.id == id).first()\n not_sold_objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0, objects.Object.user_id == user.id)\n sold_objs = sessions.query(objects.Object).filter(objects.Object.sold ==\n 1, objects.Object.user_id == user.id)\n if request.method == 'POST':\n session = db_session.create_session()\n filename = open_file(current_user.id, 'avatar')\n user.avatar = '../' + '/'.join(filename.split('/')[-4:])\n session.merge(user)\n session.commit()\n files = user.avatar\n if str(user.objects) == '[]':\n kolvo = 0\n else:\n kolvo = len(str(user.objects).split('|, '))\n return render_template('profile_page.html', kolvo=kolvo, title=user.\n name, files=files, id=id, user=user, not_sold_objs=not_sold_objs,\n sold_objs=sold_objs, date=return_date(user))\n\n\[email protected]('/confirm_password/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef confirm_password(id):\n form = ConfirmPasswordForm()\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if form.validate_on_submit():\n if form.password.data == new.password:\n return redirect(f'/edit_profile/{new.id}')\n else:\n return render_template('confirm_password.html', message=\n 'Неправильный пароль', title='Подтверждение пароля', form=form)\n return render_template('confirm_password.html', title=\n 'Подтверждение пароля', form=form)\n\n\n<function token>\n\n\[email protected]('/object_delete_photos/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef delete_photos(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n obj.pictures = ' '\n session.commit()\n return redirect(f'/obj/{id}')\n\n\n<function token>\n<function token>\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n sessions = db_session.create_session()\n user = sessions.query(users.User).filter(users.User.email == form.\n email.data).first()\n if user and user.password == form.password.data:\n if user.block:\n return render_template('login.html', message=\n 'Ваша страница заблокирована за нарушение правил сайта.',\n title='Вход', form=form)\n login_user(user, remember=form.remember_me.data)\n return redirect('/')\n return render_template('login.html', message=\n 'Неправильный логин или пароль', title='Вход', form=form)\n return render_template('login.html', title='Вход', form=form)\n\n\n<function token>\n\n\[email protected]('/block/<int:id>')\ndef block(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.block = True\n 
session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/unblock/<int:id>')\ndef unblock(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.block = False\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/promote/<int:id>')\ndef promote(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 1\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/drop/<int:id>')\ndef drop(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 0\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/register', methods=['GET', 'POST'])\ndef reqister():\n form = RegisterForm()\n if form.validate_on_submit():\n if not chek_password_combination(form.password.data):\n return render_template('register.html', form=form, title=\n 'Регистрация', pass_message='Слишком слабый пароль')\n if form.password.data != form.password_again.data:\n return render_template('register.html', form=form, title=\n 'Регистрация', pass_message='Пароли не совпадают')\n if not check_phone(form.phone.data)[0]:\n return render_template('register.html', form=form, title=\n 'Регистрация', phone_message=check_phone(form.phone.data)[1])\n sessions = db_session.create_session()\n if sessions.query(users.User).filter(users.User.email == form.email\n .data).first():\n return render_template('register.html', form=form, title=\n 'Регистрация', email_message=\n 'Пользователь с такой почтой уже существует')\n user = users.User(name=form.name.data, email=form.email.data,\n password=form.password.data, town=form.town.data, phone=form.\n phone.data)\n user.set_password(form.password.data)\n sessions.add(user)\n sessions.commit()\n return redirect('/login')\n return render_template('register.html', title='Регистрация', form=form)\n\n\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef return_date(user):\n months = {(1): 'января', (2): 'февраля', (3): 'марта', (4): 'апреля', (\n 5): 'мая', (6): 'июня', (7): 'июля', (8): 'августа', (9):\n 'сентября', (10): 'октября', (11): 'ноября', (12): 'декабря'}\n return (\n f'{user.created_date.date().day} {months[user.created_date.date().month]} {user.created_date.date().year} года'\n )\n\n\ndef log():\n logging.info('Info')\n logging.warning('Warning')\n logging.error('Error')\n logging.critical('Critical or Fatal')\n\n\n<function token>\n<function token>\n\n\ndef open_file(id, type):\n file = request.files['file']\n if file.filename.split('.')[-1] not in ALLOWED_TYPES:\n return False, False\n if type == 'avatar':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/avatar_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return filename\n if type == 'object':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/object_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return file, '../' + '/'.join(filename.split('/')[-4:])\n\n\[email protected]('/object_delete/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef object_delete(id):\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id).first(\n )\n if obj:\n sessions.delete(obj)\n sessions.commit()\n else:\n abort(404)\n return redirect('/')\n\n\[email protected]('/edit_object/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_obj(id):\n form = ObjectsForm()\n if request.method == 'GET':\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if obj:\n form.name.data = obj.name\n form.price.data = obj.price\n form.description.data = obj.description\n form.category.data = obj.category\n form.sold.data = obj.sold\n else:\n abort(404)\n if form.validate_on_submit():\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n if obj:\n obj.name = form.name.data\n if form.price.data > 10000000000:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку'\n )\n elif form.price.data < 0:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.name_for_find = form.name.data.lower()\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n sessions.commit()\n return redirect('/')\n else:\n abort(404)\n return render_template('add_objects.html', title=\n 'Редактирование объекта', form=form, id=id)\n\n\[email protected]('/change_avatar', methods=['GET', 'POST'])\n@login_required\ndef change_avatar():\n if request.method == 'POST':\n session = 
db_session.create_session()\n filename = open_file(current_user.id, 'avatar')\n if not filename[0]:\n files = current_user.avatar\n return render_template('change_avatar.html', title=\n 'Смена аватарки', files=files)\n current_user.avatar = '../' + '/'.join(filename.split('/')[-4:])\n session.merge(current_user)\n session.commit()\n files = current_user.avatar\n return render_template('change_avatar.html', title='Смена аватарки',\n files=files)\n\n\[email protected]('/users_list')\n@login_required\ndef users_list():\n sessions = db_session.create_session()\n users_list = sessions.query(users.User).all()\n return render_template('users_list.html', users_list=users_list, title=\n 'Список всех пользователей')\n\n\[email protected]('/profile/<int:id>')\ndef profile(id):\n sessions = db_session.create_session()\n user = sessions.query(users.User).filter(users.User.id == id).first()\n not_sold_objs = sessions.query(objects.Object).filter(objects.Object.\n sold == 0, objects.Object.user_id == user.id)\n sold_objs = sessions.query(objects.Object).filter(objects.Object.sold ==\n 1, objects.Object.user_id == user.id)\n if request.method == 'POST':\n session = db_session.create_session()\n filename = open_file(current_user.id, 'avatar')\n user.avatar = '../' + '/'.join(filename.split('/')[-4:])\n session.merge(user)\n session.commit()\n files = user.avatar\n if str(user.objects) == '[]':\n kolvo = 0\n else:\n kolvo = len(str(user.objects).split('|, '))\n return render_template('profile_page.html', kolvo=kolvo, title=user.\n name, files=files, id=id, user=user, not_sold_objs=not_sold_objs,\n sold_objs=sold_objs, date=return_date(user))\n\n\[email protected]('/confirm_password/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef confirm_password(id):\n form = ConfirmPasswordForm()\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if form.validate_on_submit():\n if form.password.data == new.password:\n return redirect(f'/edit_profile/{new.id}')\n else:\n return render_template('confirm_password.html', message=\n 'Неправильный пароль', title='Подтверждение пароля', form=form)\n return render_template('confirm_password.html', title=\n 'Подтверждение пароля', form=form)\n\n\n<function token>\n\n\[email protected]('/object_delete_photos/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef delete_photos(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n obj.pictures = ' '\n session.commit()\n return redirect(f'/obj/{id}')\n\n\n<function token>\n<function token>\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n sessions = db_session.create_session()\n user = sessions.query(users.User).filter(users.User.email == form.\n email.data).first()\n if user and user.password == form.password.data:\n if user.block:\n return render_template('login.html', message=\n 'Ваша страница заблокирована за нарушение правил сайта.',\n title='Вход', form=form)\n login_user(user, remember=form.remember_me.data)\n return redirect('/')\n return render_template('login.html', message=\n 'Неправильный логин или пароль', title='Вход', form=form)\n return render_template('login.html', title='Вход', form=form)\n\n\n<function token>\n\n\[email protected]('/block/<int:id>')\ndef block(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.block = True\n 
session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\n<function token>\n\n\[email protected]('/promote/<int:id>')\ndef promote(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 1\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/drop/<int:id>')\ndef drop(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 0\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/register', methods=['GET', 'POST'])\ndef reqister():\n form = RegisterForm()\n if form.validate_on_submit():\n if not chek_password_combination(form.password.data):\n return render_template('register.html', form=form, title=\n 'Регистрация', pass_message='Слишком слабый пароль')\n if form.password.data != form.password_again.data:\n return render_template('register.html', form=form, title=\n 'Регистрация', pass_message='Пароли не совпадают')\n if not check_phone(form.phone.data)[0]:\n return render_template('register.html', form=form, title=\n 'Регистрация', phone_message=check_phone(form.phone.data)[1])\n sessions = db_session.create_session()\n if sessions.query(users.User).filter(users.User.email == form.email\n .data).first():\n return render_template('register.html', form=form, title=\n 'Регистрация', email_message=\n 'Пользователь с такой почтой уже существует')\n user = users.User(name=form.name.data, email=form.email.data,\n password=form.password.data, town=form.town.data, phone=form.\n phone.data)\n user.set_password(form.password.data)\n sessions.add(user)\n sessions.commit()\n return redirect('/login')\n return render_template('register.html', title='Регистрация', form=form)\n\n\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef return_date(user):\n months = {(1): 'января', (2): 'февраля', (3): 'марта', (4): 'апреля', (\n 5): 'мая', (6): 'июня', (7): 'июля', (8): 'августа', (9):\n 'сентября', (10): 'октября', (11): 'ноября', (12): 'декабря'}\n return (\n f'{user.created_date.date().day} {months[user.created_date.date().month]} {user.created_date.date().year} года'\n )\n\n\ndef log():\n logging.info('Info')\n logging.warning('Warning')\n logging.error('Error')\n logging.critical('Critical or Fatal')\n\n\n<function token>\n<function token>\n\n\ndef open_file(id, type):\n file = request.files['file']\n if file.filename.split('.')[-1] not in ALLOWED_TYPES:\n return False, False\n if type == 'avatar':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/avatar_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return filename\n if type == 'object':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/object_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return file, '../' + '/'.join(filename.split('/')[-4:])\n\n\[email protected]('/object_delete/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef object_delete(id):\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id).first(\n )\n if obj:\n sessions.delete(obj)\n sessions.commit()\n else:\n abort(404)\n return redirect('/')\n\n\[email protected]('/edit_object/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_obj(id):\n form = ObjectsForm()\n if request.method == 'GET':\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if obj:\n form.name.data = obj.name\n form.price.data = obj.price\n form.description.data = obj.description\n form.category.data = obj.category\n form.sold.data = obj.sold\n else:\n abort(404)\n if form.validate_on_submit():\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n if obj:\n obj.name = form.name.data\n if form.price.data > 10000000000:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку'\n )\n elif form.price.data < 0:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.name_for_find = form.name.data.lower()\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n sessions.commit()\n return redirect('/')\n else:\n abort(404)\n return render_template('add_objects.html', title=\n 'Редактирование объекта', form=form, id=id)\n\n\[email protected]('/change_avatar', methods=['GET', 'POST'])\n@login_required\ndef change_avatar():\n if request.method == 'POST':\n session = 
db_session.create_session()\n filename = open_file(current_user.id, 'avatar')\n if not filename[0]:\n files = current_user.avatar\n return render_template('change_avatar.html', title=\n 'Смена аватарки', files=files)\n current_user.avatar = '../' + '/'.join(filename.split('/')[-4:])\n session.merge(current_user)\n session.commit()\n files = current_user.avatar\n return render_template('change_avatar.html', title='Смена аватарки',\n files=files)\n\n\[email protected]('/users_list')\n@login_required\ndef users_list():\n sessions = db_session.create_session()\n users_list = sessions.query(users.User).all()\n return render_template('users_list.html', users_list=users_list, title=\n 'Список всех пользователей')\n\n\n<function token>\n\n\[email protected]('/confirm_password/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef confirm_password(id):\n form = ConfirmPasswordForm()\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if form.validate_on_submit():\n if form.password.data == new.password:\n return redirect(f'/edit_profile/{new.id}')\n else:\n return render_template('confirm_password.html', message=\n 'Неправильный пароль', title='Подтверждение пароля', form=form)\n return render_template('confirm_password.html', title=\n 'Подтверждение пароля', form=form)\n\n\n<function token>\n\n\[email protected]('/object_delete_photos/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef delete_photos(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n obj.pictures = ' '\n session.commit()\n return redirect(f'/obj/{id}')\n\n\n<function token>\n<function token>\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n sessions = db_session.create_session()\n user = sessions.query(users.User).filter(users.User.email == form.\n email.data).first()\n if user and user.password == form.password.data:\n if user.block:\n return render_template('login.html', message=\n 'Ваша страница заблокирована за нарушение правил сайта.',\n title='Вход', form=form)\n login_user(user, remember=form.remember_me.data)\n return redirect('/')\n return render_template('login.html', message=\n 'Неправильный логин или пароль', title='Вход', form=form)\n return render_template('login.html', title='Вход', form=form)\n\n\n<function token>\n\n\[email protected]('/block/<int:id>')\ndef block(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.block = True\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\n<function token>\n\n\[email protected]('/promote/<int:id>')\ndef promote(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 1\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/drop/<int:id>')\ndef drop(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 0\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/register', methods=['GET', 'POST'])\ndef reqister():\n form = RegisterForm()\n if form.validate_on_submit():\n if not chek_password_combination(form.password.data):\n return render_template('register.html', form=form, title=\n 'Регистрация', pass_message='Слишком слабый пароль')\n if 
form.password.data != form.password_again.data:\n return render_template('register.html', form=form, title=\n 'Регистрация', pass_message='Пароли не совпадают')\n if not check_phone(form.phone.data)[0]:\n return render_template('register.html', form=form, title=\n 'Регистрация', phone_message=check_phone(form.phone.data)[1])\n sessions = db_session.create_session()\n if sessions.query(users.User).filter(users.User.email == form.email\n .data).first():\n return render_template('register.html', form=form, title=\n 'Регистрация', email_message=\n 'Пользователь с такой почтой уже существует')\n user = users.User(name=form.name.data, email=form.email.data,\n password=form.password.data, town=form.town.data, phone=form.\n phone.data)\n user.set_password(form.password.data)\n sessions.add(user)\n sessions.commit()\n return redirect('/login')\n return render_template('register.html', title='Регистрация', form=form)\n\n\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef return_date(user):\n months = {(1): 'января', (2): 'февраля', (3): 'марта', (4): 'апреля', (\n 5): 'мая', (6): 'июня', (7): 'июля', (8): 'августа', (9):\n 'сентября', (10): 'октября', (11): 'ноября', (12): 'декабря'}\n return (\n f'{user.created_date.date().day} {months[user.created_date.date().month]} {user.created_date.date().year} года'\n )\n\n\ndef log():\n logging.info('Info')\n logging.warning('Warning')\n logging.error('Error')\n logging.critical('Critical or Fatal')\n\n\n<function token>\n<function token>\n\n\ndef open_file(id, type):\n file = request.files['file']\n if file.filename.split('.')[-1] not in ALLOWED_TYPES:\n return False, False\n if type == 'avatar':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/avatar_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return filename\n if type == 'object':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/object_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return file, '../' + '/'.join(filename.split('/')[-4:])\n\n\[email protected]('/object_delete/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef object_delete(id):\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id).first(\n )\n if obj:\n sessions.delete(obj)\n sessions.commit()\n else:\n abort(404)\n return redirect('/')\n\n\[email protected]('/edit_object/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_obj(id):\n form = ObjectsForm()\n if request.method == 'GET':\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if obj:\n form.name.data = obj.name\n form.price.data = obj.price\n form.description.data = obj.description\n form.category.data = obj.category\n form.sold.data = obj.sold\n else:\n abort(404)\n if form.validate_on_submit():\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n if obj:\n obj.name = form.name.data\n if form.price.data > 10000000000:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку'\n )\n elif form.price.data < 0:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.name_for_find = form.name.data.lower()\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n sessions.commit()\n return redirect('/')\n else:\n abort(404)\n return render_template('add_objects.html', title=\n 'Редактирование объекта', form=form, id=id)\n\n\[email protected]('/change_avatar', methods=['GET', 'POST'])\n@login_required\ndef change_avatar():\n if request.method == 'POST':\n session = 
db_session.create_session()\n filename = open_file(current_user.id, 'avatar')\n if not filename[0]:\n files = current_user.avatar\n return render_template('change_avatar.html', title=\n 'Смена аватарки', files=files)\n current_user.avatar = '../' + '/'.join(filename.split('/')[-4:])\n session.merge(current_user)\n session.commit()\n files = current_user.avatar\n return render_template('change_avatar.html', title='Смена аватарки',\n files=files)\n\n\[email protected]('/users_list')\n@login_required\ndef users_list():\n sessions = db_session.create_session()\n users_list = sessions.query(users.User).all()\n return render_template('users_list.html', users_list=users_list, title=\n 'Список всех пользователей')\n\n\n<function token>\n\n\[email protected]('/confirm_password/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef confirm_password(id):\n form = ConfirmPasswordForm()\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if form.validate_on_submit():\n if form.password.data == new.password:\n return redirect(f'/edit_profile/{new.id}')\n else:\n return render_template('confirm_password.html', message=\n 'Неправильный пароль', title='Подтверждение пароля', form=form)\n return render_template('confirm_password.html', title=\n 'Подтверждение пароля', form=form)\n\n\n<function token>\n\n\[email protected]('/object_delete_photos/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef delete_photos(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n obj.pictures = ' '\n session.commit()\n return redirect(f'/obj/{id}')\n\n\n<function token>\n<function token>\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n sessions = db_session.create_session()\n user = sessions.query(users.User).filter(users.User.email == form.\n email.data).first()\n if user and user.password == form.password.data:\n if user.block:\n return render_template('login.html', message=\n 'Ваша страница заблокирована за нарушение правил сайта.',\n title='Вход', form=form)\n login_user(user, remember=form.remember_me.data)\n return redirect('/')\n return render_template('login.html', message=\n 'Неправильный логин или пароль', title='Вход', form=form)\n return render_template('login.html', title='Вход', form=form)\n\n\n<function token>\n\n\[email protected]('/block/<int:id>')\ndef block(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.block = True\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\n<function token>\n\n\[email protected]('/promote/<int:id>')\ndef promote(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 1\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/drop/<int:id>')\ndef drop(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 0\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef return_date(user):\n months = {(1): 'января', (2): 'февраля', (3): 'марта', (4): 'апреля', (\n 5): 'мая', (6): 'июня', (7): 'июля', (8): 'августа', (9):\n 'сентября', (10): 'октября', (11): 'ноября', (12): 'декабря'}\n return (\n f'{user.created_date.date().day} {months[user.created_date.date().month]} {user.created_date.date().year} года'\n )\n\n\ndef log():\n logging.info('Info')\n logging.warning('Warning')\n logging.error('Error')\n logging.critical('Critical or Fatal')\n\n\n<function token>\n<function token>\n\n\ndef open_file(id, type):\n file = request.files['file']\n if file.filename.split('.')[-1] not in ALLOWED_TYPES:\n return False, False\n if type == 'avatar':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/avatar_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return filename\n if type == 'object':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/object_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return file, '../' + '/'.join(filename.split('/')[-4:])\n\n\[email protected]('/object_delete/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef object_delete(id):\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id).first(\n )\n if obj:\n sessions.delete(obj)\n sessions.commit()\n else:\n abort(404)\n return redirect('/')\n\n\[email protected]('/edit_object/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_obj(id):\n form = ObjectsForm()\n if request.method == 'GET':\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if obj:\n form.name.data = obj.name\n form.price.data = obj.price\n form.description.data = obj.description\n form.category.data = obj.category\n form.sold.data = obj.sold\n else:\n abort(404)\n if form.validate_on_submit():\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n if obj:\n obj.name = form.name.data\n if form.price.data > 10000000000:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку'\n )\n elif form.price.data < 0:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.name_for_find = form.name.data.lower()\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n sessions.commit()\n return redirect('/')\n else:\n abort(404)\n return render_template('add_objects.html', title=\n 'Редактирование объекта', form=form, id=id)\n\n\n<function token>\n\n\[email protected]('/users_list')\n@login_required\ndef users_list():\n sessions = db_session.create_session()\n users_list = 
sessions.query(users.User).all()\n return render_template('users_list.html', users_list=users_list, title=\n 'Список всех пользователей')\n\n\n<function token>\n\n\[email protected]('/confirm_password/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef confirm_password(id):\n form = ConfirmPasswordForm()\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if form.validate_on_submit():\n if form.password.data == new.password:\n return redirect(f'/edit_profile/{new.id}')\n else:\n return render_template('confirm_password.html', message=\n 'Неправильный пароль', title='Подтверждение пароля', form=form)\n return render_template('confirm_password.html', title=\n 'Подтверждение пароля', form=form)\n\n\n<function token>\n\n\[email protected]('/object_delete_photos/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef delete_photos(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n obj.pictures = ' '\n session.commit()\n return redirect(f'/obj/{id}')\n\n\n<function token>\n<function token>\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n sessions = db_session.create_session()\n user = sessions.query(users.User).filter(users.User.email == form.\n email.data).first()\n if user and user.password == form.password.data:\n if user.block:\n return render_template('login.html', message=\n 'Ваша страница заблокирована за нарушение правил сайта.',\n title='Вход', form=form)\n login_user(user, remember=form.remember_me.data)\n return redirect('/')\n return render_template('login.html', message=\n 'Неправильный логин или пароль', title='Вход', form=form)\n return render_template('login.html', title='Вход', form=form)\n\n\n<function token>\n\n\[email protected]('/block/<int:id>')\ndef block(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.block = True\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\n<function token>\n\n\[email protected]('/promote/<int:id>')\ndef promote(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 1\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/drop/<int:id>')\ndef drop(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 0\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef return_date(user):\n months = {(1): 'января', (2): 'февраля', (3): 'марта', (4): 'апреля', (\n 5): 'мая', (6): 'июня', (7): 'июля', (8): 'августа', (9):\n 'сентября', (10): 'октября', (11): 'ноября', (12): 'декабря'}\n return (\n f'{user.created_date.date().day} {months[user.created_date.date().month]} {user.created_date.date().year} года'\n )\n\n\ndef log():\n logging.info('Info')\n logging.warning('Warning')\n logging.error('Error')\n logging.critical('Critical or Fatal')\n\n\n<function token>\n<function token>\n\n\ndef open_file(id, type):\n file = request.files['file']\n if file.filename.split('.')[-1] not in ALLOWED_TYPES:\n return False, False\n if type == 'avatar':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/avatar_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return filename\n if type == 'object':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/object_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return file, '../' + '/'.join(filename.split('/')[-4:])\n\n\[email protected]('/object_delete/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef object_delete(id):\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id).first(\n )\n if obj:\n sessions.delete(obj)\n sessions.commit()\n else:\n abort(404)\n return redirect('/')\n\n\[email protected]('/edit_object/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_obj(id):\n form = ObjectsForm()\n if request.method == 'GET':\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if obj:\n form.name.data = obj.name\n form.price.data = obj.price\n form.description.data = obj.description\n form.category.data = obj.category\n form.sold.data = obj.sold\n else:\n abort(404)\n if form.validate_on_submit():\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n if obj:\n obj.name = form.name.data\n if form.price.data > 10000000000:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку'\n )\n elif form.price.data < 0:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.name_for_find = form.name.data.lower()\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n sessions.commit()\n return redirect('/')\n else:\n abort(404)\n return render_template('add_objects.html', title=\n 'Редактирование объекта', form=form, id=id)\n\n\n<function token>\n\n\[email protected]('/users_list')\n@login_required\ndef users_list():\n sessions = db_session.create_session()\n users_list = 
sessions.query(users.User).all()\n return render_template('users_list.html', users_list=users_list, title=\n 'Список всех пользователей')\n\n\n<function token>\n\n\[email protected]('/confirm_password/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef confirm_password(id):\n form = ConfirmPasswordForm()\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if form.validate_on_submit():\n if form.password.data == new.password:\n return redirect(f'/edit_profile/{new.id}')\n else:\n return render_template('confirm_password.html', message=\n 'Неправильный пароль', title='Подтверждение пароля', form=form)\n return render_template('confirm_password.html', title=\n 'Подтверждение пароля', form=form)\n\n\n<function token>\n\n\[email protected]('/object_delete_photos/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef delete_photos(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n obj.pictures = ' '\n session.commit()\n return redirect(f'/obj/{id}')\n\n\n<function token>\n<function token>\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n sessions = db_session.create_session()\n user = sessions.query(users.User).filter(users.User.email == form.\n email.data).first()\n if user and user.password == form.password.data:\n if user.block:\n return render_template('login.html', message=\n 'Ваша страница заблокирована за нарушение правил сайта.',\n title='Вход', form=form)\n login_user(user, remember=form.remember_me.data)\n return redirect('/')\n return render_template('login.html', message=\n 'Неправильный логин или пароль', title='Вход', form=form)\n return render_template('login.html', title='Вход', form=form)\n\n\n<function token>\n<function token>\n<function token>\n\n\[email protected]('/promote/<int:id>')\ndef promote(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 1\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\[email protected]('/drop/<int:id>')\ndef drop(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 0\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef return_date(user):\n months = {(1): 'января', (2): 'февраля', (3): 'марта', (4): 'апреля', (\n 5): 'мая', (6): 'июня', (7): 'июля', (8): 'августа', (9):\n 'сентября', (10): 'октября', (11): 'ноября', (12): 'декабря'}\n return (\n f'{user.created_date.date().day} {months[user.created_date.date().month]} {user.created_date.date().year} года'\n )\n\n\ndef log():\n logging.info('Info')\n logging.warning('Warning')\n logging.error('Error')\n logging.critical('Critical or Fatal')\n\n\n<function token>\n<function token>\n\n\ndef open_file(id, type):\n file = request.files['file']\n if file.filename.split('.')[-1] not in ALLOWED_TYPES:\n return False, False\n if type == 'avatar':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/avatar_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return filename\n if type == 'object':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/object_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return file, '../' + '/'.join(filename.split('/')[-4:])\n\n\[email protected]('/object_delete/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef object_delete(id):\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id).first(\n )\n if obj:\n sessions.delete(obj)\n sessions.commit()\n else:\n abort(404)\n return redirect('/')\n\n\[email protected]('/edit_object/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_obj(id):\n form = ObjectsForm()\n if request.method == 'GET':\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if obj:\n form.name.data = obj.name\n form.price.data = obj.price\n form.description.data = obj.description\n form.category.data = obj.category\n form.sold.data = obj.sold\n else:\n abort(404)\n if form.validate_on_submit():\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n if obj:\n obj.name = form.name.data\n if form.price.data > 10000000000:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку'\n )\n elif form.price.data < 0:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.name_for_find = form.name.data.lower()\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n sessions.commit()\n return redirect('/')\n else:\n abort(404)\n return render_template('add_objects.html', title=\n 'Редактирование объекта', form=form, id=id)\n\n\n<function token>\n\n\[email protected]('/users_list')\n@login_required\ndef users_list():\n sessions = db_session.create_session()\n users_list = 
sessions.query(users.User).all()\n return render_template('users_list.html', users_list=users_list, title=\n 'Список всех пользователей')\n\n\n<function token>\n\n\[email protected]('/confirm_password/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef confirm_password(id):\n form = ConfirmPasswordForm()\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if form.validate_on_submit():\n if form.password.data == new.password:\n return redirect(f'/edit_profile/{new.id}')\n else:\n return render_template('confirm_password.html', message=\n 'Неправильный пароль', title='Подтверждение пароля', form=form)\n return render_template('confirm_password.html', title=\n 'Подтверждение пароля', form=form)\n\n\n<function token>\n\n\[email protected]('/object_delete_photos/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef delete_photos(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n obj.pictures = ' '\n session.commit()\n return redirect(f'/obj/{id}')\n\n\n<function token>\n<function token>\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n sessions = db_session.create_session()\n user = sessions.query(users.User).filter(users.User.email == form.\n email.data).first()\n if user and user.password == form.password.data:\n if user.block:\n return render_template('login.html', message=\n 'Ваша страница заблокирована за нарушение правил сайта.',\n title='Вход', form=form)\n login_user(user, remember=form.remember_me.data)\n return redirect('/')\n return render_template('login.html', message=\n 'Неправильный логин или пароль', title='Вход', form=form)\n return render_template('login.html', title='Вход', form=form)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\[email protected]('/drop/<int:id>')\ndef drop(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 0\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef log():\n logging.info('Info')\n logging.warning('Warning')\n logging.error('Error')\n logging.critical('Critical or Fatal')\n\n\n<function token>\n<function token>\n\n\ndef open_file(id, type):\n file = request.files['file']\n if file.filename.split('.')[-1] not in ALLOWED_TYPES:\n return False, False\n if type == 'avatar':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/avatar_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return filename\n if type == 'object':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/object_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return file, '../' + '/'.join(filename.split('/')[-4:])\n\n\[email protected]('/object_delete/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef object_delete(id):\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id).first(\n )\n if obj:\n sessions.delete(obj)\n sessions.commit()\n else:\n abort(404)\n return redirect('/')\n\n\[email protected]('/edit_object/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_obj(id):\n form = ObjectsForm()\n if request.method == 'GET':\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if obj:\n form.name.data = obj.name\n form.price.data = obj.price\n form.description.data = obj.description\n form.category.data = obj.category\n form.sold.data = obj.sold\n else:\n abort(404)\n if form.validate_on_submit():\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n if obj:\n obj.name = form.name.data\n if form.price.data > 10000000000:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку'\n )\n elif form.price.data < 0:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.name_for_find = form.name.data.lower()\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n sessions.commit()\n return redirect('/')\n else:\n abort(404)\n return render_template('add_objects.html', title=\n 'Редактирование объекта', form=form, id=id)\n\n\n<function token>\n\n\[email protected]('/users_list')\n@login_required\ndef users_list():\n sessions = db_session.create_session()\n users_list = sessions.query(users.User).all()\n return render_template('users_list.html', users_list=users_list, title=\n 'Список всех пользователей')\n\n\n<function token>\n\n\[email protected]('/confirm_password/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef confirm_password(id):\n form = ConfirmPasswordForm()\n sessions = db_session.create_session()\n 
new = sessions.query(users.User).filter(users.User.id == id).first()\n if form.validate_on_submit():\n if form.password.data == new.password:\n return redirect(f'/edit_profile/{new.id}')\n else:\n return render_template('confirm_password.html', message=\n 'Неправильный пароль', title='Подтверждение пароля', form=form)\n return render_template('confirm_password.html', title=\n 'Подтверждение пароля', form=form)\n\n\n<function token>\n\n\[email protected]('/object_delete_photos/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef delete_photos(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n obj.pictures = ' '\n session.commit()\n return redirect(f'/obj/{id}')\n\n\n<function token>\n<function token>\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n sessions = db_session.create_session()\n user = sessions.query(users.User).filter(users.User.email == form.\n email.data).first()\n if user and user.password == form.password.data:\n if user.block:\n return render_template('login.html', message=\n 'Ваша страница заблокирована за нарушение правил сайта.',\n title='Вход', form=form)\n login_user(user, remember=form.remember_me.data)\n return redirect('/')\n return render_template('login.html', message=\n 'Неправильный логин или пароль', title='Вход', form=form)\n return render_template('login.html', title='Вход', form=form)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\[email protected]('/drop/<int:id>')\ndef drop(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 0\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef log():\n logging.info('Info')\n logging.warning('Warning')\n logging.error('Error')\n logging.critical('Critical or Fatal')\n\n\n<function token>\n<function token>\n\n\ndef open_file(id, type):\n file = request.files['file']\n if file.filename.split('.')[-1] not in ALLOWED_TYPES:\n return False, False\n if type == 'avatar':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/avatar_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return filename\n if type == 'object':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/object_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return file, '../' + '/'.join(filename.split('/')[-4:])\n\n\[email protected]('/object_delete/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef object_delete(id):\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id).first(\n )\n if obj:\n sessions.delete(obj)\n sessions.commit()\n else:\n abort(404)\n return redirect('/')\n\n\[email protected]('/edit_object/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_obj(id):\n form = ObjectsForm()\n if request.method == 'GET':\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if obj:\n form.name.data = obj.name\n form.price.data = obj.price\n form.description.data = obj.description\n form.category.data = obj.category\n form.sold.data = obj.sold\n else:\n abort(404)\n if form.validate_on_submit():\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n if obj:\n obj.name = form.name.data\n if form.price.data > 10000000000:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку'\n )\n elif form.price.data < 0:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.name_for_find = form.name.data.lower()\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n sessions.commit()\n return redirect('/')\n else:\n abort(404)\n return render_template('add_objects.html', title=\n 'Редактирование объекта', form=form, id=id)\n\n\n<function token>\n<function token>\n<function token>\n\n\[email protected]('/confirm_password/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef confirm_password(id):\n form = ConfirmPasswordForm()\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if form.validate_on_submit():\n if form.password.data == new.password:\n return redirect(f'/edit_profile/{new.id}')\n else:\n return render_template('confirm_password.html', 
message=\n 'Неправильный пароль', title='Подтверждение пароля', form=form)\n return render_template('confirm_password.html', title=\n 'Подтверждение пароля', form=form)\n\n\n<function token>\n\n\[email protected]('/object_delete_photos/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef delete_photos(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n obj.pictures = ' '\n session.commit()\n return redirect(f'/obj/{id}')\n\n\n<function token>\n<function token>\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n sessions = db_session.create_session()\n user = sessions.query(users.User).filter(users.User.email == form.\n email.data).first()\n if user and user.password == form.password.data:\n if user.block:\n return render_template('login.html', message=\n 'Ваша страница заблокирована за нарушение правил сайта.',\n title='Вход', form=form)\n login_user(user, remember=form.remember_me.data)\n return redirect('/')\n return render_template('login.html', message=\n 'Неправильный логин или пароль', title='Вход', form=form)\n return render_template('login.html', title='Вход', form=form)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\[email protected]('/drop/<int:id>')\ndef drop(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 0\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef log():\n logging.info('Info')\n logging.warning('Warning')\n logging.error('Error')\n logging.critical('Critical or Fatal')\n\n\n<function token>\n<function token>\n\n\ndef open_file(id, type):\n file = request.files['file']\n if file.filename.split('.')[-1] not in ALLOWED_TYPES:\n return False, False\n if type == 'avatar':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/avatar_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return filename\n if type == 'object':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/object_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return file, '../' + '/'.join(filename.split('/')[-4:])\n\n\[email protected]('/object_delete/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef object_delete(id):\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id).first(\n )\n if obj:\n sessions.delete(obj)\n sessions.commit()\n else:\n abort(404)\n return redirect('/')\n\n\[email protected]('/edit_object/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_obj(id):\n form = ObjectsForm()\n if request.method == 'GET':\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if obj:\n form.name.data = obj.name\n form.price.data = obj.price\n form.description.data = obj.description\n form.category.data = obj.category\n form.sold.data = obj.sold\n else:\n abort(404)\n if form.validate_on_submit():\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n if obj:\n obj.name = form.name.data\n if form.price.data > 10000000000:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку'\n )\n elif form.price.data < 0:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.name_for_find = form.name.data.lower()\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n sessions.commit()\n return redirect('/')\n else:\n abort(404)\n return render_template('add_objects.html', title=\n 'Редактирование объекта', form=form, id=id)\n\n\n<function token>\n<function token>\n<function token>\n\n\[email protected]('/confirm_password/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef confirm_password(id):\n form = ConfirmPasswordForm()\n sessions = db_session.create_session()\n new = sessions.query(users.User).filter(users.User.id == id).first()\n if form.validate_on_submit():\n if form.password.data == new.password:\n return redirect(f'/edit_profile/{new.id}')\n else:\n return render_template('confirm_password.html', 
message=\n 'Неправильный пароль', title='Подтверждение пароля', form=form)\n return render_template('confirm_password.html', title=\n 'Подтверждение пароля', form=form)\n\n\n<function token>\n\n\[email protected]('/object_delete_photos/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef delete_photos(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n obj.pictures = ' '\n session.commit()\n return redirect(f'/obj/{id}')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\[email protected]('/drop/<int:id>')\ndef drop(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 0\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef log():\n logging.info('Info')\n logging.warning('Warning')\n logging.error('Error')\n logging.critical('Critical or Fatal')\n\n\n<function token>\n<function token>\n\n\ndef open_file(id, type):\n file = request.files['file']\n if file.filename.split('.')[-1] not in ALLOWED_TYPES:\n return False, False\n if type == 'avatar':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/avatar_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return filename\n if type == 'object':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/object_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return file, '../' + '/'.join(filename.split('/')[-4:])\n\n\[email protected]('/object_delete/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef object_delete(id):\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id).first(\n )\n if obj:\n sessions.delete(obj)\n sessions.commit()\n else:\n abort(404)\n return redirect('/')\n\n\[email protected]('/edit_object/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_obj(id):\n form = ObjectsForm()\n if request.method == 'GET':\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if obj:\n form.name.data = obj.name\n form.price.data = obj.price\n form.description.data = obj.description\n form.category.data = obj.category\n form.sold.data = obj.sold\n else:\n abort(404)\n if form.validate_on_submit():\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n if obj:\n obj.name = form.name.data\n if form.price.data > 10000000000:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку'\n )\n elif form.price.data < 0:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.name_for_find = form.name.data.lower()\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n sessions.commit()\n return redirect('/')\n else:\n abort(404)\n return render_template('add_objects.html', title=\n 'Редактирование объекта', form=form, id=id)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\[email protected]('/object_delete_photos/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef delete_photos(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n obj.pictures = ' '\n session.commit()\n return redirect(f'/obj/{id}')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function 
token>\n<function token>\n<function token>\n\n\[email protected]('/drop/<int:id>')\ndef drop(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 0\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef log():\n logging.info('Info')\n logging.warning('Warning')\n logging.error('Error')\n logging.critical('Critical or Fatal')\n\n\n<function token>\n<function token>\n\n\ndef open_file(id, type):\n file = request.files['file']\n if file.filename.split('.')[-1] not in ALLOWED_TYPES:\n return False, False\n if type == 'avatar':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/avatar_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return filename\n if type == 'object':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/object_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return file, '../' + '/'.join(filename.split('/')[-4:])\n\n\n<function token>\n\n\[email protected]('/edit_object/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_obj(id):\n form = ObjectsForm()\n if request.method == 'GET':\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if obj:\n form.name.data = obj.name\n form.price.data = obj.price\n form.description.data = obj.description\n form.category.data = obj.category\n form.sold.data = obj.sold\n else:\n abort(404)\n if form.validate_on_submit():\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n if obj:\n obj.name = form.name.data\n if form.price.data > 10000000000:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку'\n )\n elif form.price.data < 0:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.name_for_find = form.name.data.lower()\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n sessions.commit()\n return redirect('/')\n else:\n abort(404)\n return render_template('add_objects.html', title=\n 'Редактирование объекта', form=form, id=id)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\[email protected]('/object_delete_photos/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef delete_photos(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n obj.pictures = ' '\n session.commit()\n return redirect(f'/obj/{id}')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\[email protected]('/drop/<int:id>')\ndef drop(id):\n session = db_session.create_session()\n user = session.query(users.User).filter(users.User.id == id).first()\n user.admin = 0\n session.merge(current_user)\n session.commit()\n return redirect('/')\n\n\n<function 
token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef log():\n logging.info('Info')\n logging.warning('Warning')\n logging.error('Error')\n logging.critical('Critical or Fatal')\n\n\n<function token>\n<function token>\n\n\ndef open_file(id, type):\n file = request.files['file']\n if file.filename.split('.')[-1] not in ALLOWED_TYPES:\n return False, False\n if type == 'avatar':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/avatar_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return filename\n if type == 'object':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/object_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return file, '../' + '/'.join(filename.split('/')[-4:])\n\n\n<function token>\n\n\[email protected]('/edit_object/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_obj(id):\n form = ObjectsForm()\n if request.method == 'GET':\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if obj:\n form.name.data = obj.name\n form.price.data = obj.price\n form.description.data = obj.description\n form.category.data = obj.category\n form.sold.data = obj.sold\n else:\n abort(404)\n if form.validate_on_submit():\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n if obj:\n obj.name = form.name.data\n if form.price.data > 10000000000:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку'\n )\n elif form.price.data < 0:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.name_for_find = form.name.data.lower()\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n sessions.commit()\n return redirect('/')\n else:\n abort(404)\n return render_template('add_objects.html', title=\n 'Редактирование объекта', form=form, id=id)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\[email protected]('/object_delete_photos/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef delete_photos(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n obj.pictures = ' '\n session.commit()\n return redirect(f'/obj/{id}')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef open_file(id, type):\n file = request.files['file']\n if file.filename.split('.')[-1] not in ALLOWED_TYPES:\n return False, False\n if type == 'avatar':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/avatar_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return filename\n if type == 'object':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/object_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return file, '../' + '/'.join(filename.split('/')[-4:])\n\n\n<function token>\n\n\[email protected]('/edit_object/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_obj(id):\n form = ObjectsForm()\n if request.method == 'GET':\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if obj:\n form.name.data = obj.name\n form.price.data = obj.price\n form.description.data = obj.description\n form.category.data = obj.category\n form.sold.data = obj.sold\n else:\n abort(404)\n if form.validate_on_submit():\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n if obj:\n obj.name = form.name.data\n if form.price.data > 10000000000:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку'\n )\n elif form.price.data < 0:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.name_for_find = form.name.data.lower()\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n sessions.commit()\n return redirect('/')\n else:\n abort(404)\n return render_template('add_objects.html', title=\n 'Редактирование объекта', form=form, id=id)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\[email protected]('/object_delete_photos/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef delete_photos(id):\n session = db_session.create_session()\n obj = session.query(objects.Object).filter(objects.Object.id == id).first()\n obj.pictures = ' '\n session.commit()\n return redirect(f'/obj/{id}')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef open_file(id, type):\n file = request.files['file']\n if file.filename.split('.')[-1] not in ALLOWED_TYPES:\n return False, False\n if type == 'avatar':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/avatar_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return filename\n if type == 'object':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/object_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return file, '../' + '/'.join(filename.split('/')[-4:])\n\n\n<function token>\n\n\[email protected]('/edit_object/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_obj(id):\n form = ObjectsForm()\n if request.method == 'GET':\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if obj:\n form.name.data = obj.name\n form.price.data = obj.price\n form.description.data = obj.description\n form.category.data = obj.category\n form.sold.data = obj.sold\n else:\n abort(404)\n if form.validate_on_submit():\n sessions = db_session.create_session()\n obj = sessions.query(objects.Object).filter(objects.Object.id == id\n ).first()\n if not form.rules.data:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n rules_message='Вы должны согласиться с правилами сайта!')\n if obj:\n obj.name = form.name.data\n if form.price.data > 10000000000:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln=\n 'Мы не можем брать ответственность за столь серьёзную сделку'\n )\n elif form.price.data < 0:\n return render_template('add_objects.html', title=\n 'Новое объявление', form=form, files=files, id=None,\n incor_ln='Укажите корректную цену')\n else:\n obj.price = form.price.data\n obj.name_for_find = form.name.data.lower()\n obj.description = form.description.data\n obj.category = form.category.data\n obj.sold = form.sold.data\n sessions.commit()\n return redirect('/')\n else:\n abort(404)\n return render_template('add_objects.html', title=\n 'Редактирование объекта', form=form, id=id)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef open_file(id, type):\n file = request.files['file']\n if file.filename.split('.')[-1] not in ALLOWED_TYPES:\n return False, False\n if type == 'avatar':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/avatar_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return filename\n if type == 'object':\n path_of_folder = '/'.join(UPLOAD_FOLDER.split('\\\\')\n ) + '/object_' + str(id) + '/'\n try:\n os.mkdir(path_of_folder)\n except FileExistsError:\n pass\n filename = path_of_folder + file.filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return file, '../' + '/'.join(filename.split('/')[-4:])\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
98,589 |
ec02b7774a5e5318d39259f8b9311f80075d065c
|
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import spearmanr
tips = sns.load_dataset('tips')
print("The Tips Dataset looks like :\n", tips.head(), "\n")
iris = sns.load_dataset('iris')
print("The Iris Dataset Looks like :\n", iris.head(), "\n")
sns.jointplot(data=tips, x='total_bill', y='tip',kind='kde',color='green')
sns.jointplot(data=tips, x='total_bill', y='tip',stat_func=spearmanr,kind='reg', color='red',height=9,ratio=2)
plt.show()
|
[
"import seaborn as sns\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.stats import spearmanr\r\n\r\ntips = sns.load_dataset('tips')\r\nprint(\"The Tips Dataset looks like :\\n\", tips.head(), \"\\n\")\r\n\r\niris = sns.load_dataset('iris')\r\nprint(\"The Iris Dataset Looks like :\\n\", iris.head(), \"\\n\")\r\n\r\nsns.jointplot(data=tips, x='total_bill', y='tip',kind='kde',color='green')\r\n\r\nsns.jointplot(data=tips, x='total_bill', y='tip',stat_func=spearmanr,kind='reg', color='red',height=9,ratio=2)\r\n\r\nplt.show()",
"import seaborn as sns\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy.stats import spearmanr\ntips = sns.load_dataset('tips')\nprint('The Tips Dataset looks like :\\n', tips.head(), '\\n')\niris = sns.load_dataset('iris')\nprint('The Iris Dataset Looks like :\\n', iris.head(), '\\n')\nsns.jointplot(data=tips, x='total_bill', y='tip', kind='kde', color='green')\nsns.jointplot(data=tips, x='total_bill', y='tip', stat_func=spearmanr, kind\n ='reg', color='red', height=9, ratio=2)\nplt.show()\n",
"<import token>\ntips = sns.load_dataset('tips')\nprint('The Tips Dataset looks like :\\n', tips.head(), '\\n')\niris = sns.load_dataset('iris')\nprint('The Iris Dataset Looks like :\\n', iris.head(), '\\n')\nsns.jointplot(data=tips, x='total_bill', y='tip', kind='kde', color='green')\nsns.jointplot(data=tips, x='total_bill', y='tip', stat_func=spearmanr, kind\n ='reg', color='red', height=9, ratio=2)\nplt.show()\n",
"<import token>\n<assignment token>\nprint('The Tips Dataset looks like :\\n', tips.head(), '\\n')\n<assignment token>\nprint('The Iris Dataset Looks like :\\n', iris.head(), '\\n')\nsns.jointplot(data=tips, x='total_bill', y='tip', kind='kde', color='green')\nsns.jointplot(data=tips, x='total_bill', y='tip', stat_func=spearmanr, kind\n ='reg', color='red', height=9, ratio=2)\nplt.show()\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,590 |
86824e7eea0047b4e5b7b4ee16796e44d0392fbd
|
from django.apps import AppConfig
class SummitConfig(AppConfig):
name = 'summit'
|
[
"from django.apps import AppConfig\n\n\nclass SummitConfig(AppConfig):\n name = 'summit'\n",
"<import token>\n\n\nclass SummitConfig(AppConfig):\n name = 'summit'\n",
"<import token>\n\n\nclass SummitConfig(AppConfig):\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
98,591 |
8ff04d06e3dd955b4fc29bcb76ee5a9274efb462
|
from backend_restful.DBHandler import DBHandler
from bson.objectid import ObjectId
from pprint import pprint
from rest_framework import serializers
import json
class TagSerializer(serializers.Serializer):
associated = serializers.CharField()
designation = serializers.CharField()
class tags:
response = {}
def __init__(self, data={}):
#data["associated"] = "5ddd62ffd8639286d599dcd6"
#data["designation"] = "5ddd62ffd8639286d599dcd7"
serializer = TagSerializer(data=data)
if serializer.is_valid():
#query database for dashboard info
client = DBHandler.get_database_client()
db = client.test
education = db.education
user_id = data.get("user_info")
#pprint(user_id)
query_result = education.find_one(
{"_id" : ObjectId(user_id),
"associated._id": ObjectId(serializer.validated_data.get("associated")),
"associated.designations._id": ObjectId(serializer.validated_data.get("designation"))
},
{
"associated.designations.tags.tasks":0
}
)
#this is to avoid ObjectId not serializer error
#query_result["_id"] = user_id
print("showing results")
pprint(query_result)
for associate in query_result["associated"]:
print(associate.get("_id"))
if data["associated"] == str(associate.get("_id")):
for designation in associate["designations"]:
if data["designation"] == str(designation.get("_id")):
self.response=json.loads(json.dumps(designation, default=str))
return
self.response=json.loads(json.dumps({"error": "does not exist"}, default=str))
else:
self.response = serializer.errors
|
[
"from backend_restful.DBHandler import DBHandler\nfrom bson.objectid import ObjectId\nfrom pprint import pprint\nfrom rest_framework import serializers\nimport json\n\nclass TagSerializer(serializers.Serializer):\n associated = serializers.CharField()\n designation = serializers.CharField()\n\nclass tags:\n response = {}\n def __init__(self, data={}):\n #data[\"associated\"] = \"5ddd62ffd8639286d599dcd6\"\n #data[\"designation\"] = \"5ddd62ffd8639286d599dcd7\"\n serializer = TagSerializer(data=data)\n if serializer.is_valid():\n #query database for dashboard info\n client = DBHandler.get_database_client()\n db = client.test\n education = db.education\n user_id = data.get(\"user_info\")\n #pprint(user_id)\n query_result = education.find_one(\n {\"_id\" : ObjectId(user_id),\n \"associated._id\": ObjectId(serializer.validated_data.get(\"associated\")),\n \"associated.designations._id\": ObjectId(serializer.validated_data.get(\"designation\"))\n },\n {\n \"associated.designations.tags.tasks\":0\n }\n )\n #this is to avoid ObjectId not serializer error\n #query_result[\"_id\"] = user_id\n print(\"showing results\")\n pprint(query_result)\n for associate in query_result[\"associated\"]:\n print(associate.get(\"_id\"))\n if data[\"associated\"] == str(associate.get(\"_id\")):\n for designation in associate[\"designations\"]:\n if data[\"designation\"] == str(designation.get(\"_id\")):\n self.response=json.loads(json.dumps(designation, default=str))\n return\n self.response=json.loads(json.dumps({\"error\": \"does not exist\"}, default=str))\n else:\n self.response = serializer.errors",
"from backend_restful.DBHandler import DBHandler\nfrom bson.objectid import ObjectId\nfrom pprint import pprint\nfrom rest_framework import serializers\nimport json\n\n\nclass TagSerializer(serializers.Serializer):\n associated = serializers.CharField()\n designation = serializers.CharField()\n\n\nclass tags:\n response = {}\n\n def __init__(self, data={}):\n serializer = TagSerializer(data=data)\n if serializer.is_valid():\n client = DBHandler.get_database_client()\n db = client.test\n education = db.education\n user_id = data.get('user_info')\n query_result = education.find_one({'_id': ObjectId(user_id),\n 'associated._id': ObjectId(serializer.validated_data.get(\n 'associated')), 'associated.designations._id': ObjectId(\n serializer.validated_data.get('designation'))}, {\n 'associated.designations.tags.tasks': 0})\n print('showing results')\n pprint(query_result)\n for associate in query_result['associated']:\n print(associate.get('_id'))\n if data['associated'] == str(associate.get('_id')):\n for designation in associate['designations']:\n if data['designation'] == str(designation.get('_id')):\n self.response = json.loads(json.dumps(\n designation, default=str))\n return\n self.response = json.loads(json.dumps({'error':\n 'does not exist'}, default=str))\n else:\n self.response = serializer.errors\n",
"<import token>\n\n\nclass TagSerializer(serializers.Serializer):\n associated = serializers.CharField()\n designation = serializers.CharField()\n\n\nclass tags:\n response = {}\n\n def __init__(self, data={}):\n serializer = TagSerializer(data=data)\n if serializer.is_valid():\n client = DBHandler.get_database_client()\n db = client.test\n education = db.education\n user_id = data.get('user_info')\n query_result = education.find_one({'_id': ObjectId(user_id),\n 'associated._id': ObjectId(serializer.validated_data.get(\n 'associated')), 'associated.designations._id': ObjectId(\n serializer.validated_data.get('designation'))}, {\n 'associated.designations.tags.tasks': 0})\n print('showing results')\n pprint(query_result)\n for associate in query_result['associated']:\n print(associate.get('_id'))\n if data['associated'] == str(associate.get('_id')):\n for designation in associate['designations']:\n if data['designation'] == str(designation.get('_id')):\n self.response = json.loads(json.dumps(\n designation, default=str))\n return\n self.response = json.loads(json.dumps({'error':\n 'does not exist'}, default=str))\n else:\n self.response = serializer.errors\n",
"<import token>\n\n\nclass TagSerializer(serializers.Serializer):\n <assignment token>\n <assignment token>\n\n\nclass tags:\n response = {}\n\n def __init__(self, data={}):\n serializer = TagSerializer(data=data)\n if serializer.is_valid():\n client = DBHandler.get_database_client()\n db = client.test\n education = db.education\n user_id = data.get('user_info')\n query_result = education.find_one({'_id': ObjectId(user_id),\n 'associated._id': ObjectId(serializer.validated_data.get(\n 'associated')), 'associated.designations._id': ObjectId(\n serializer.validated_data.get('designation'))}, {\n 'associated.designations.tags.tasks': 0})\n print('showing results')\n pprint(query_result)\n for associate in query_result['associated']:\n print(associate.get('_id'))\n if data['associated'] == str(associate.get('_id')):\n for designation in associate['designations']:\n if data['designation'] == str(designation.get('_id')):\n self.response = json.loads(json.dumps(\n designation, default=str))\n return\n self.response = json.loads(json.dumps({'error':\n 'does not exist'}, default=str))\n else:\n self.response = serializer.errors\n",
"<import token>\n<class token>\n\n\nclass tags:\n response = {}\n\n def __init__(self, data={}):\n serializer = TagSerializer(data=data)\n if serializer.is_valid():\n client = DBHandler.get_database_client()\n db = client.test\n education = db.education\n user_id = data.get('user_info')\n query_result = education.find_one({'_id': ObjectId(user_id),\n 'associated._id': ObjectId(serializer.validated_data.get(\n 'associated')), 'associated.designations._id': ObjectId(\n serializer.validated_data.get('designation'))}, {\n 'associated.designations.tags.tasks': 0})\n print('showing results')\n pprint(query_result)\n for associate in query_result['associated']:\n print(associate.get('_id'))\n if data['associated'] == str(associate.get('_id')):\n for designation in associate['designations']:\n if data['designation'] == str(designation.get('_id')):\n self.response = json.loads(json.dumps(\n designation, default=str))\n return\n self.response = json.loads(json.dumps({'error':\n 'does not exist'}, default=str))\n else:\n self.response = serializer.errors\n",
"<import token>\n<class token>\n\n\nclass tags:\n <assignment token>\n\n def __init__(self, data={}):\n serializer = TagSerializer(data=data)\n if serializer.is_valid():\n client = DBHandler.get_database_client()\n db = client.test\n education = db.education\n user_id = data.get('user_info')\n query_result = education.find_one({'_id': ObjectId(user_id),\n 'associated._id': ObjectId(serializer.validated_data.get(\n 'associated')), 'associated.designations._id': ObjectId(\n serializer.validated_data.get('designation'))}, {\n 'associated.designations.tags.tasks': 0})\n print('showing results')\n pprint(query_result)\n for associate in query_result['associated']:\n print(associate.get('_id'))\n if data['associated'] == str(associate.get('_id')):\n for designation in associate['designations']:\n if data['designation'] == str(designation.get('_id')):\n self.response = json.loads(json.dumps(\n designation, default=str))\n return\n self.response = json.loads(json.dumps({'error':\n 'does not exist'}, default=str))\n else:\n self.response = serializer.errors\n",
"<import token>\n<class token>\n\n\nclass tags:\n <assignment token>\n <function token>\n",
"<import token>\n<class token>\n<class token>\n"
] | false |
98,592 |
698493e589ae470a2f2705464970faa18ea370b0
|
import json
import pickle
import numpy as np
import matplotlib.colors as mcol
import matplotlib.pyplot as plt
def color_map_califa_old(option='json'):
if option == 'json':
        with open("code/cmap_cal_json.txt") as handle:
            cmap_cal_dic = json.load(handle)
elif option == 'pickle':
with open('cmap_cal_pickle.txt', 'rb') as handle:
cmap_cal_dic = pickle.loads(handle.read())
cmap_cal = mcol.LinearSegmentedColormap('cmap_CALIFA', cmap_cal_dic)
return cmap_cal
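# Editor's sketch (addition, not part of the original file): a minimal, illustrative
# use of the colormap returned above; it assumes 'code/cmap_cal_json.txt' exists and
# holds a valid LinearSegmentedColormap segment dictionary.
def _demo_color_map_califa_old():
    cmap_cal = color_map_califa_old(option='json')
    plt.imshow(np.random.rand(32, 32), cmap=cmap_cal)
    plt.colorbar()
    plt.show()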
def Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):
if ax is None:
ax_flag = True
fig, ax = plt.subplots()
else:
ax_flag = False
x_set_kauff = np.linspace(x_min, 0.0, 100)
ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03',
**kwargs)
def Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):
if ax is None:
ax_flag = True
fig, ax = plt.subplots()
else:
ax_flag = False
x_set_kw = np.linspace(x_min, 0.4, 100)
ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)
def Gr_curve_plot(ax=None, x_min=-2.0, **kwargs):
if ax is None:
ax_flag = True
fig, ax = plt.subplots()
else:
ax_flag = False
x_set_grazy = np.linspace(x_min, -0.2, 100)
ax.plot(x_set_grazy, grazy(x_set_grazy), label='Stasinska+03', **kwargs)
def SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):
if ax is None:
ax_flag = True
fig, ax = plt.subplots()
else:
ax_flag = False
x_set_line = np.linspace(x_min, 0.3, 100)
ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)
def SII_LINERS_curve_plot(ax=None, x_min=-0.3, **kwargs):
if ax is None:
ax_flag = True
fig, ax = plt.subplots()
else:
ax_flag = False
x_set_line = np.linspace(x_min, 0.5, 100)
ax.plot(x_set_line, LINSy2line(x_set_line), label='LINER/Sy2 line', **kwargs)
def OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):
if ax is None:
ax_flag = True
fig, ax = plt.subplots()
else:
ax_flag = False
x_set_line = np.linspace(x_min, -0.6, 100)
ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)
def OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):
if ax is None:
ax_flag = True
fig, ax = plt.subplots()
else:
ax_flag = False
x_set_line = np.linspace(x_min, 0.5, 100)
ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **kwargs)
def Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs):
if ax is None:
ax_flag = True
fig, ax = plt.subplots()
else:
ax_flag = False
x_set_line = np.linspace(x_min, 0.01, 100)
ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)
def Es_OI_curve_plot(ax=None, x_min=-3.5, label='Espinosa-Ponce+20', **kwargs):
if ax is None:
ax_flag = True
fig, ax = plt.subplots()
else:
ax_flag = False
x_set_line = np.linspace(x_min, -0.35)
ax.plot(x_set_line, O3O1_line_c(x_set_line), label=label, **kwargs)
def Es_curve_plot(ax=None, x_min=-2.0, label='Espinosa-Ponce+20', **kwargs):
if ax is None:
ax_flag = True
fig, ax = plt.subplots()
else:
ax_flag = False
x_set_line = np.linspace(x_min, 0.08, 100)
ax.plot(x_set_line, espinosa(x_set_line), label=label,**kwargs)
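# Editor's sketch (addition, not part of the original file): drawing the [NII]-BPT
# demarcation curves defined above on a single panel; the colors and line styles
# here are illustrative choices, not taken from the original script.
def _demo_bpt_panel():
    fig, ax = plt.subplots()
    Kf_curve_plot(ax=ax, color='k', ls='--')
    Kw_curve_plot(ax=ax, color='k', ls='-')
    Gr_curve_plot(ax=ax, color='k', ls=':')
    Es_curve_plot(ax=ax, color='r')
    ax.set_xlabel(r'$\log([NII]/H\alpha)$')
    ax.set_ylabel(r'$\log([OIII]/H\beta)$')
    ax.legend()
    plt.show()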
def kauffmann(x):
val = 0.61 / (x - 0.05) + 1.3
return val
def kewley(logNIIHa):
val = 0.61 / (logNIIHa - 0.47) + 1.19
return val
def grazy(logNIIHa):
x = logNIIHa
    val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409 * x) - 31.093
return val
def AGNline(logSIIHa):
val = 0.72 / (logSIIHa - 0.32) + 1.30
return val
def LINSy2line(logSIIHa):
val = 1.89 * logSIIHa + 0.76
return val
def AGNline2(logOIHa):
val = 0.73 / (logOIHa + 0.59) + 1.33
return val
def LINSy2line2(logOIHa):
val = 1.18 * logOIHa + 1.30
return val
def espinosa(logNIIHa):
# val = 0.39524936 / (logNIIHa - 0.19433616) + 0.83978817 -> script
# val = 0.271991 / (logNIIHa - 0.151508) + 0.707842 -> Paper_v1
# val = 0.1117129 / (logNIIHa - 0.00561609) + 0.5615803 ->Paper_v2
val = 0.12579066 / (logNIIHa - 0.00302777) + 0.56846872
return val
def O3S2_line_c(x):
# bins 50
# array([0.06873157, 0.0220047 , 0.63478451])
# val = 0.18304241 / (x - 0.0816372) + 0.84992528
# val = 0.06873157 / (x - 0.0220047) + 0.63478451 -> Paper_v1
# val = 0.05374055 / (x - 0.01436536) + 0.59681538 ->Paper_v2
val = 0.04074804 / (x + 0.01253238) + 0.58154113
return val
def O3O1_line_c(x):
# val = 0.17214096 / (x - (-0.19999267)) + 0.66782104#->95%b100
# val = 0.20010436 / (x - (-0.30512696)) + 0.65999132#->92%b75
# val = 0.13626915 / (x - (-0.34052757)) + 0.59185332 #->92%b100 -> Paper_v1
# val = 0.07056593 / (x - (-0.49660009)) + 0.55574729 #->92%b=60
# val = 0.07329029 / (x + 0.42586138) + 0.60909743 ->Paper_v2
# val = 0.06602301 / (x + 0.55165265) + 0.5308747
val = 0.05612915 / (x + 0.39641533) + 0.60969495
return val
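# Editor's note (addition, not part of the original file): the block below re-defines
# several of the plotting helpers above with slightly different x ranges; since later
# Python definitions shadow earlier ones, the versions below are the ones that take effect.
# Editor's sketch: how the demarcation curves above are conventionally used to classify a
# point in the log([NII]/Ha) vs log([OIII]/Hb) (BPT) plane -- below Kauffmann+03 is
# star-forming, above Kewley+01 is AGN, in between is composite. The sample point is
# illustrative and the check is only meaningful to the left of the curves' asymptotes.
def _demo_bpt_classification(log_nii_ha=-0.4, log_oiii_hb=0.1):
    if log_oiii_hb < kauffmann(log_nii_ha):
        return 'star-forming'
    elif log_oiii_hb > kewley(log_nii_ha):
        return 'AGN'
    else:
        return 'composite'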
def Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):
if ax is None:
ax_flag = True
fig, ax = plt.subplots()
else:
ax_flag = False
x_set_kauff = np.linspace(x_min, 0.0, 100)
ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03',
**kwargs)
def Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):
if ax is None:
ax_flag = True
fig, ax = plt.subplots()
else:
ax_flag = False
x_set_kw = np.linspace(x_min, 0.4, 100)
ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)
def Gr_curve_plot(ax=None, x_min=-2.0, **kwargs):
if ax is None:
ax_flag = True
fig, ax = plt.subplots()
else:
ax_flag = False
x_set_grazy = np.linspace(x_min, -0.2, 100)
ax.plot(x_set_grazy, grazy(x_set_grazy), label='Stasinska+03', **kwargs)
def SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):
if ax is None:
ax_flag = True
fig, ax = plt.subplots()
else:
ax_flag = False
x_set_line = np.linspace(x_min, 0.3, 100)
ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)
def SII_LINERS_curve_plot(ax=None, x_min=-0.3, **kwargs):
if ax is None:
ax_flag = True
fig, ax = plt.subplots()
else:
ax_flag = False
x_set_line = np.linspace(x_min, 0.5, 100)
ax.plot(x_set_line, LINSy2line(x_set_line), label='LINER/Sy2 line', **kwargs)
def OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):
if ax is None:
ax_flag = True
fig, ax = plt.subplots()
else:
ax_flag = False
x_set_line = np.linspace(x_min, -0.6, 100)
ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)
def OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):
if ax is None:
ax_flag = True
fig, ax = plt.subplots()
else:
ax_flag = False
x_set_line = np.linspace(x_min, 0.5, 100)
ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **kwargs)
def Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs):
if ax is None:
ax_flag = True
fig, ax = plt.subplots()
else:
ax_flag = False
x_set_line = np.linspace(x_min, -0.02, 100)
ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)
def Es_OI_curve_plot(ax=None, x_min=-3.5, label='Espinosa-Ponce+20', **kwargs):
if ax is None:
ax_flag = True
fig, ax = plt.subplots()
else:
ax_flag = False
x_set_line = np.linspace(x_min, -0.4, 100)
ax.plot(x_set_line, O3O1_line_c(x_set_line), label=label, **kwargs)
def Es_curve_plot(ax=None, x_min=-2.0, label='Espinosa-Ponce+20', **kwargs):
if ax is None:
ax_flag = True
fig, ax = plt.subplots()
else:
ax_flag = False
x_set_line = np.linspace(x_min, 0.00302, 100)
ax.plot(x_set_line, espinosa(x_set_line), label=label, **kwargs)
def kauffmann(x):
val = 0.61 / (x - 0.05) + 1.3
return val
def kewley(logNIIHa):
val = 0.61 / (logNIIHa - 0.47) + 1.19
return val
def grazy(logNIIHa):
x = logNIIHa
    val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409 * x) - 31.093
return val
def A_l(R_v, lw):
    # From Cardelli et al. (1989)
    # F_cor = F_obs * 10**(0.4 * Av * A_l(R_v, lw))
lw = lw / 10000
x = 1 / lw
if x > 1.1:
y = x - 1.82
a_x = 1.0 + 0.17699*y - 0.50447*y**2 - 0.02427*y**3 + 0.72085*y**4 \
+ 0.01979*y**5 - 0.77530*y**6 + 0.32999*y**7
b_x = 1.41338*y + 2.28305*y**2 + 1.07233*y**3 - 5.38434*y**4 \
- 0.62251*y**5 + 5.30260*y**6 - 2.09002*y**7
else:
a_x = 0.574 * x ** 1.61
b_x = -0.527 * x ** 1.61
A_l_ = a_x + b_x/R_v
return A_l_
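# Editor's sketch (addition, not part of the original file): applying A_l() to correct
# an observed flux for extinction, following the relation quoted in the comment above
# (F_cor = F_obs * 10**(0.4 * Av * A_l(R_v, lw))). The Av value and the H-alpha
# wavelength used here are illustrative; A_l() expects the wavelength in Angstroms.
def _demo_extinction_correction(flux_obs=1.0, Av=1.0, R_v=3.1, lw=6562.8):
    return flux_obs * 10 ** (0.4 * Av * A_l(R_v, lw))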
def color_map_califa(option='v'):
######## CALIFA CTs #############################
cdict = {
'red' : (
(0.0,0,0),
(0.00392156862745098,0,0),
(0.00784313725490196,0,0),
(0.0117647058823529,0,0),
(0.0156862745098039,0,0),
(0.0196078431372549,0,0),
(0.0235294117647059,0,0),
(0.0274509803921569,0,0),
(0.0313725490196078,0,0),
(0.0352941176470588,0,0),
(0.0392156862745098,0,0),
(0.0431372549019608,0,0),
(0.0470588235294118,0,0),
(0.0509803921568627,0,0),
(0.0549019607843137,0,0),
(0.0588235294117647,0,0),
(0.0627450980392157,0,0),
(0.0666666666666667,0,0),
(0.0705882352941176,0,0),
(0.0745098039215686,0,0),
(0.0784313725490196,0,0),
(0.0823529411764706,0,0),
(0.0862745098039216,0,0),
(0.0901960784313725,0,0),
(0.0941176470588235,0,0),
(0.0980392156862745,0,0),
(0.101960784313725,0,0),
(0.105882352941176,0,0),
(0.109803921568627,0,0),
(0.113725490196078,0,0),
(0.117647058823529,0,0),
(0.12156862745098,0,0),
(0.125490196078431,0,0),
(0.129411764705882,0,0),
(0.133333333333333,0,0),
(0.137254901960784,0,0),
(0.141176470588235,0,0),
(0.145098039215686,0,0),
(0.149019607843137,0,0),
(0.152941176470588,0,0),
(0.156862745098039,0,0),
(0.16078431372549,0,0),
(0.164705882352941,0,0),
(0.168627450980392,0,0),
(0.172549019607843,0,0),
(0.176470588235294,0,0),
(0.180392156862745,0,0),
(0.184313725490196,0,0),
(0.188235294117647,0,0),
(0.192156862745098,0,0),
(0.196078431372549,0.019921875,0.019921875),
(0.2,0.03984375,0.03984375),
(0.203921568627451,0.059765625,0.059765625),
(0.207843137254902,0.0796875,0.0796875),
(0.211764705882353,0.099609375,0.099609375),
(0.215686274509804,0.11953125,0.11953125),
(0.219607843137255,0.139453125,0.139453125),
(0.223529411764706,0.159375,0.159375),
(0.227450980392157,0.179296875,0.179296875),
(0.231372549019608,0.19921875,0.19921875),
(0.235294117647059,0.219140625,0.219140625),
(0.23921568627451,0.2390625,0.2390625),
(0.243137254901961,0.258984375,0.258984375),
(0.247058823529412,0.27890625,0.27890625),
(0.250980392156863,0.298828125,0.298828125),
(0.254901960784314,0.31875,0.31875),
(0.258823529411765,0.338671875,0.338671875),
(0.262745098039216,0.35859375,0.35859375),
(0.266666666666667,0.378515625,0.378515625),
(0.270588235294118,0.3984375,0.3984375),
(0.274509803921569,0.418359375,0.418359375),
(0.27843137254902,0.43828125,0.43828125),
(0.282352941176471,0.458203125,0.458203125),
(0.286274509803922,0.478125,0.478125),
(0.290196078431373,0.498046875,0.498046875),
(0.294117647058824,0.51796875,0.51796875),
(0.298039215686275,0.537890625,0.537890625),
(0.301960784313725,0.5578125,0.5578125),
(0.305882352941176,0.577734375,0.577734375),
(0.309803921568627,0.59765625,0.59765625),
(0.313725490196078,0.617578125,0.617578125),
(0.317647058823529,0.6375,0.6375),
(0.32156862745098,0.657421875,0.657421875),
(0.325490196078431,0.67734375,0.67734375),
(0.329411764705882,0.697265625,0.697265625),
(0.333333333333333,0.7171875,0.7171875),
(0.337254901960784,0.737109375,0.737109375),
(0.341176470588235,0.75703125,0.75703125),
(0.345098039215686,0.776953125,0.776953125),
(0.349019607843137,0.796875,0.796875),
(0.352941176470588,0.816796875,0.816796875),
(0.356862745098039,0.83671875,0.83671875),
(0.36078431372549,0.856640625,0.856640625),
(0.364705882352941,0.8765625,0.8765625),
(0.368627450980392,0.896484375,0.896484375),
(0.372549019607843,0.91640625,0.91640625),
(0.376470588235294,0.936328125,0.936328125),
(0.380392156862745,0.95625,0.95625),
(0.384313725490196,0.976171875,0.976171875),
(0.388235294117647,0.99609375,0.99609375),
(0.392156862745098,0.99609375,0.99609375),
(0.396078431372549,0.99609375,0.99609375),
(0.4,0.99609375,0.99609375),
(0.403921568627451,0.99609375,0.99609375),
(0.407843137254902,0.99609375,0.99609375),
(0.411764705882353,0.99609375,0.99609375),
(0.415686274509804,0.99609375,0.99609375),
(0.419607843137255,0.99609375,0.99609375),
(0.423529411764706,0.99609375,0.99609375),
(0.427450980392157,0.99609375,0.99609375),
(0.431372549019608,0.99609375,0.99609375),
(0.435294117647059,0.99609375,0.99609375),
(0.43921568627451,0.99609375,0.99609375),
(0.443137254901961,0.99609375,0.99609375),
(0.447058823529412,0.99609375,0.99609375),
(0.450980392156863,0.99609375,0.99609375),
(0.454901960784314,0.99609375,0.99609375),
(0.458823529411765,0.99609375,0.99609375),
(0.462745098039216,0.99609375,0.99609375),
(0.466666666666667,0.99609375,0.99609375),
(0.470588235294118,0.99609375,0.99609375),
(0.474509803921569,0.99609375,0.99609375),
(0.47843137254902,0.99609375,0.99609375),
(0.482352941176471,0.99609375,0.99609375),
(0.486274509803922,0.99609375,0.99609375),
(0.490196078431373,0.99609375,0.99609375),
(0.494117647058824,0.99609375,0.99609375),
(0.498039215686275,0.99609375,0.99609375),
(0.501960784313725,0.99609375,0.99609375),
(0.505882352941176,0.99609375,0.99609375),
(0.509803921568627,0.99609375,0.99609375),
(0.513725490196078,0.99609375,0.99609375),
(0.517647058823529,0.99609375,0.99609375),
(0.52156862745098,0.99609375,0.99609375),
(0.525490196078431,0.99609375,0.99609375),
(0.529411764705882,0.99609375,0.99609375),
(0.533333333333333,0.99609375,0.99609375),
(0.537254901960784,0.99609375,0.99609375),
(0.541176470588235,0.99609375,0.99609375),
(0.545098039215686,0.99609375,0.99609375),
(0.549019607843137,0.99609375,0.99609375),
(0.552941176470588,0.99609375,0.99609375),
(0.556862745098039,0.99609375,0.99609375),
(0.56078431372549,0.99609375,0.99609375),
(0.564705882352941,0.99609375,0.99609375),
(0.568627450980392,0.99609375,0.99609375),
(0.572549019607843,0.99609375,0.99609375),
(0.576470588235294,0.99609375,0.99609375),
(0.580392156862745,0.99609375,0.99609375),
(0.584313725490196,0.99609375,0.99609375),
(0.588235294117647,0.98046875,0.98046875),
(0.592156862745098,0.96484375,0.96484375),
(0.596078431372549,0.94921875,0.94921875),
(0.6,0.93359375,0.93359375),
(0.603921568627451,0.91796875,0.91796875),
(0.607843137254902,0.90234375,0.90234375),
(0.611764705882353,0.88671875,0.88671875),
(0.615686274509804,0.87109375,0.87109375),
(0.619607843137255,0.85546875,0.85546875),
(0.623529411764706,0.83984375,0.83984375),
(0.627450980392157,0.82421875,0.82421875),
(0.631372549019608,0.80859375,0.80859375),
(0.635294117647059,0.79296875,0.79296875),
(0.63921568627451,0.77734375,0.77734375),
(0.643137254901961,0.76171875,0.76171875),
(0.647058823529412,0.74609375,0.74609375),
(0.650980392156863,0.73046875,0.73046875),
(0.654901960784314,0.71484375,0.71484375),
(0.658823529411765,0.69921875,0.69921875),
(0.662745098039216,0.68359375,0.68359375),
(0.666666666666667,0.66796875,0.66796875),
(0.670588235294118,0.65234375,0.65234375),
(0.674509803921569,0.63671875,0.63671875),
(0.67843137254902,0.62109375,0.62109375),
(0.682352941176471,0.60546875,0.60546875),
(0.686274509803922,0.58984375,0.58984375),
(0.690196078431373,0.57421875,0.57421875),
(0.694117647058824,0.55859375,0.55859375),
(0.698039215686274,0.54296875,0.54296875),
(0.701960784313725,0.52734375,0.52734375),
(0.705882352941177,0.51171875,0.51171875),
(0.709803921568627,0.49609375,0.49609375),
(0.713725490196078,0.48046875,0.48046875),
(0.717647058823529,0.46484375,0.46484375),
(0.72156862745098,0.44921875,0.44921875),
(0.725490196078431,0.43359375,0.43359375),
(0.729411764705882,0.41796875,0.41796875),
(0.733333333333333,0.40234375,0.40234375),
(0.737254901960784,0.38671875,0.38671875),
(0.741176470588235,0.37109375,0.37109375),
(0.745098039215686,0.35546875,0.35546875),
(0.749019607843137,0.33984375,0.33984375),
(0.752941176470588,0.32421875,0.32421875),
(0.756862745098039,0.30859375,0.30859375),
(0.76078431372549,0.29296875,0.29296875),
(0.764705882352941,0.27734375,0.27734375),
(0.768627450980392,0.26171875,0.26171875),
(0.772549019607843,0.24609375,0.24609375),
(0.776470588235294,0.23046875,0.23046875),
(0.780392156862745,0.21484375,0.21484375),
(0.784313725490196,0.22663359375,0.22663359375),
(0.788235294117647,0.2384234375,0.2384234375),
(0.792156862745098,0.250212890625,0.250212890625),
(0.796078431372549,0.262002734375,0.262002734375),
(0.8,0.273792578125,0.273792578125),
(0.803921568627451,0.285582421875,0.285582421875),
(0.807843137254902,0.297372265625,0.297372265625),
(0.811764705882353,0.309162109375,0.309162109375),
(0.815686274509804,0.3209515625,0.3209515625),
(0.819607843137255,0.33274140625,0.33274140625),
(0.823529411764706,0.34453125,0.34453125),
(0.827450980392157,0.35632109375,0.35632109375),
(0.831372549019608,0.3681109375,0.3681109375),
(0.835294117647059,0.379900390625,0.379900390625),
(0.83921568627451,0.39169140625,0.39169140625),
(0.843137254901961,0.40348046875,0.40348046875),
(0.847058823529412,0.41526953125,0.41526953125),
(0.850980392156863,0.42705859375,0.42705859375),
(0.854901960784314,0.43884765625,0.43884765625),
(0.858823529411765,0.450640625,0.450640625),
(0.862745098039216,0.4624296875,0.4624296875),
(0.866666666666667,0.47421875,0.47421875),
(0.870588235294118,0.4860078125,0.4860078125),
(0.874509803921569,0.497796875,0.497796875),
(0.87843137254902,0.50958984375,0.50958984375),
(0.882352941176471,0.52137890625,0.52137890625),
(0.886274509803922,0.53316796875,0.53316796875),
(0.890196078431373,0.54495703125,0.54495703125),
(0.894117647058824,0.55674609375,0.55674609375),
(0.898039215686275,0.56853515625,0.56853515625),
(0.901960784313726,0.580328125,0.580328125),
(0.905882352941176,0.5921171875,0.5921171875),
(0.909803921568627,0.60390625,0.60390625),
(0.913725490196078,0.6156953125,0.6156953125),
(0.917647058823529,0.627484375,0.627484375),
(0.92156862745098,0.63927734375,0.63927734375),
(0.925490196078431,0.65106640625,0.65106640625),
(0.929411764705882,0.66285546875,0.66285546875),
(0.933333333333333,0.67464453125,0.67464453125),
(0.937254901960784,0.68643359375,0.68643359375),
(0.941176470588235,0.69822265625,0.69822265625),
(0.945098039215686,0.710015625,0.710015625),
(0.949019607843137,0.7218046875,0.7218046875),
(0.952941176470588,0.73359375,0.73359375),
(0.956862745098039,0.7453828125,0.7453828125),
(0.96078431372549,0.757171875,0.757171875),
(0.964705882352941,0.76896484375,0.76896484375),
(0.968627450980392,0.78075390625,0.78075390625),
(0.972549019607843,0.79254296875,0.79254296875),
(0.976470588235294,0.80433203125,0.80433203125),
(0.980392156862745,0.81612109375,0.81612109375),
(0.984313725490196,0.82791015625,0.82791015625),
(0.988235294117647,0.839703125,0.839703125),
(0.992156862745098,0.8514921875,0.8514921875),
(0.996078431372549,0.86328125,0.86328125),
(1.00000,0.86328125,0.86328125)),
'green' : (
(0.00,0.02984375,0.02984375),
(0.00392156862745098,0.02984375,0.02984375),
(0.00784313725490196,0.044765625,0.044765625),
(0.0117647058823529,0.0596875,0.0596875),
(0.0156862745098039,0.074609375,0.074609375),
(0.0196078431372549,0.08953125,0.08953125),
(0.0235294117647059,0.104453125,0.104453125),
(0.0274509803921569,0.119375,0.119375),
(0.0313725490196078,0.134296875,0.134296875),
(0.0352941176470588,0.14921875,0.14921875),
(0.0392156862745098,0.164140625,0.164140625),
(0.0431372549019608,0.1790625,0.1790625),
(0.0470588235294118,0.193984375,0.193984375),
(0.0509803921568627,0.20890625,0.20890625),
(0.0549019607843137,0.223828125,0.223828125),
(0.0588235294117647,0.23875,0.23875),
(0.0627450980392157,0.253671875,0.253671875),
(0.0666666666666667,0.26859375,0.26859375),
(0.0705882352941176,0.283515625,0.283515625),
(0.0745098039215686,0.2984375,0.2984375),
(0.0784313725490196,0.313359375,0.313359375),
(0.0823529411764706,0.32828125,0.32828125),
(0.0862745098039216,0.343203125,0.343203125),
(0.0901960784313725,0.358125,0.358125),
(0.0941176470588235,0.373046875,0.373046875),
(0.0980392156862745,0.38796875,0.38796875),
(0.101960784313725,0.402890625,0.402890625),
(0.105882352941176,0.4178125,0.4178125),
(0.109803921568627,0.432734375,0.432734375),
(0.113725490196078,0.44765625,0.44765625),
(0.117647058823529,0.462578125,0.462578125),
(0.12156862745098,0.4775,0.4775),
(0.125490196078431,0.492421875,0.492421875),
(0.129411764705882,0.50734375,0.50734375),
(0.133333333333333,0.522265625,0.522265625),
(0.137254901960784,0.5371875,0.5371875),
(0.141176470588235,0.552109375,0.552109375),
(0.145098039215686,0.56703125,0.56703125),
(0.149019607843137,0.581953125,0.581953125),
(0.152941176470588,0.596875,0.596875),
(0.156862745098039,0.611796875,0.611796875),
(0.16078431372549,0.62671875,0.62671875),
(0.164705882352941,0.641640625,0.641640625),
(0.168627450980392,0.6565625,0.6565625),
(0.172549019607843,0.671484375,0.671484375),
(0.176470588235294,0.68640625,0.68640625),
(0.180392156862745,0.701328125,0.701328125),
(0.184313725490196,0.71625,0.71625),
(0.188235294117647,0.731171875,0.731171875),
(0.192156862745098,0.74609375,0.74609375),
(0.196078431372549,0.731171875,0.731171875),
(0.2,0.71625,0.71625),
(0.203921568627451,0.701328125,0.701328125),
(0.207843137254902,0.68640625,0.68640625),
(0.211764705882353,0.671484375,0.671484375),
(0.215686274509804,0.6565625,0.6565625),
(0.219607843137255,0.641640625,0.641640625),
(0.223529411764706,0.62671875,0.62671875),
(0.227450980392157,0.611796875,0.611796875),
(0.231372549019608,0.596875,0.596875),
(0.235294117647059,0.581953125,0.581953125),
(0.23921568627451,0.56703125,0.56703125),
(0.243137254901961,0.552109375,0.552109375),
(0.247058823529412,0.5371875,0.5371875),
(0.250980392156863,0.522265625,0.522265625),
(0.254901960784314,0.50734375,0.50734375),
(0.258823529411765,0.492421875,0.492421875),
(0.262745098039216,0.4775,0.4775),
(0.266666666666667,0.462578125,0.462578125),
(0.270588235294118,0.44765625,0.44765625),
(0.274509803921569,0.432734375,0.432734375),
(0.27843137254902,0.4178125,0.4178125),
(0.282352941176471,0.402890625,0.402890625),
(0.286274509803922,0.38796875,0.38796875),
(0.290196078431373,0.373046875,0.373046875),
(0.294117647058824,0.358125,0.358125),
(0.298039215686275,0.343203125,0.343203125),
(0.301960784313725,0.32828125,0.32828125),
(0.305882352941176,0.313359375,0.313359375),
(0.309803921568627,0.2984375,0.2984375),
(0.313725490196078,0.283515625,0.283515625),
(0.317647058823529,0.26859375,0.26859375),
(0.32156862745098,0.253671875,0.253671875),
(0.325490196078431,0.23875,0.23875),
(0.329411764705882,0.223828125,0.223828125),
(0.333333333333333,0.20890625,0.20890625),
(0.337254901960784,0.193984375,0.193984375),
(0.341176470588235,0.1790625,0.1790625),
(0.345098039215686,0.164140625,0.164140625),
(0.349019607843137,0.14921875,0.14921875),
(0.352941176470588,0.134296875,0.134296875),
(0.356862745098039,0.119375,0.119375),
(0.36078431372549,0.104453125,0.104453125),
(0.364705882352941,0.08953125,0.08953125),
(0.368627450980392,0.074609375,0.074609375),
(0.372549019607843,0.0596875,0.0596875),
(0.376470588235294,0.044765625,0.044765625),
(0.380392156862745,0.0298437890625,0.0298437890625),
(0.384313725490196,0.014921875,0.014921875),
(0.388235294117647,0,0),
(0.392156862745098,0.012890625,0.012890625),
(0.396078431372549,0.02578125,0.02578125),
(0.4,0.038671875,0.038671875),
(0.403921568627451,0.0515625,0.0515625),
(0.407843137254902,0.064453125,0.064453125),
(0.411764705882353,0.07734375,0.07734375),
(0.415686274509804,0.090234375,0.090234375),
(0.419607843137255,0.103125,0.103125),
(0.423529411764706,0.116015625,0.116015625),
(0.427450980392157,0.12890625,0.12890625),
(0.431372549019608,0.141796875,0.141796875),
(0.435294117647059,0.1546875,0.1546875),
(0.43921568627451,0.167578125,0.167578125),
(0.443137254901961,0.18046875,0.18046875),
(0.447058823529412,0.193359375,0.193359375),
(0.450980392156863,0.20625,0.20625),
(0.454901960784314,0.219140625,0.219140625),
(0.458823529411765,0.23203125,0.23203125),
(0.462745098039216,0.244921875,0.244921875),
(0.466666666666667,0.2578125,0.2578125),
(0.470588235294118,0.270703125,0.270703125),
(0.474509803921569,0.28359375,0.28359375),
(0.47843137254902,0.296484375,0.296484375),
(0.482352941176471,0.309375,0.309375),
(0.486274509803922,0.322265625,0.322265625),
(0.490196078431373,0.33515625,0.33515625),
(0.494117647058824,0.348046875,0.348046875),
(0.498039215686275,0.3609375,0.3609375),
(0.501960784313725,0.373828125,0.373828125),
(0.505882352941176,0.38671875,0.38671875),
(0.509803921568627,0.399609375,0.399609375),
(0.513725490196078,0.4125,0.4125),
(0.517647058823529,0.425390625,0.425390625),
(0.52156862745098,0.43828125,0.43828125),
(0.525490196078431,0.451171875,0.451171875),
(0.529411764705882,0.4640625,0.4640625),
(0.533333333333333,0.476953125,0.476953125),
(0.537254901960784,0.48984375,0.48984375),
(0.541176470588235,0.502734375,0.502734375),
(0.545098039215686,0.515625,0.515625),
(0.549019607843137,0.528515625,0.528515625),
(0.552941176470588,0.54140625,0.54140625),
(0.556862745098039,0.554296875,0.554296875),
(0.56078431372549,0.5671875,0.5671875),
(0.564705882352941,0.580078125,0.580078125),
(0.568627450980392,0.59296875,0.59296875),
(0.572549019607843,0.605859375,0.605859375),
(0.576470588235294,0.61875,0.61875),
(0.580392156862745,0.631640625,0.631640625),
(0.584313725490196,0.64453125,0.64453125),
(0.588235294117647,0.6359375,0.6359375),
(0.592156862745098,0.62734375,0.62734375),
(0.596078431372549,0.61875,0.61875),
(0.6,0.61015625,0.61015625),
(0.603921568627451,0.6015625,0.6015625),
(0.607843137254902,0.59296875,0.59296875),
(0.611764705882353,0.584375,0.584375),
(0.615686274509804,0.57578125,0.57578125),
(0.619607843137255,0.5671875,0.5671875),
(0.623529411764706,0.55859375,0.55859375),
(0.627450980392157,0.55,0.55),
(0.631372549019608,0.54140625,0.54140625),
(0.635294117647059,0.5328125,0.5328125),
(0.63921568627451,0.52421875,0.52421875),
(0.643137254901961,0.515625,0.515625),
(0.647058823529412,0.50703125,0.50703125),
(0.650980392156863,0.4984375,0.4984375),
(0.654901960784314,0.48984375,0.48984375),
(0.658823529411765,0.48125,0.48125),
(0.662745098039216,0.47265625,0.47265625),
(0.666666666666667,0.4640625,0.4640625),
(0.670588235294118,0.45546875,0.45546875),
(0.674509803921569,0.446875,0.446875),
(0.67843137254902,0.43828125,0.43828125),
(0.682352941176471,0.4296875,0.4296875),
(0.686274509803922,0.42109375,0.42109375),
(0.690196078431373,0.4125,0.4125),
(0.694117647058824,0.40390625,0.40390625),
(0.698039215686274,0.3953125,0.3953125),
(0.701960784313725,0.38671875,0.38671875),
(0.705882352941177,0.378125,0.378125),
(0.709803921568627,0.36953125,0.36953125),
(0.713725490196078,0.3609375,0.3609375),
(0.717647058823529,0.35234375,0.35234375),
(0.72156862745098,0.34375,0.34375),
(0.725490196078431,0.33515625,0.33515625),
(0.729411764705882,0.3265625,0.3265625),
(0.733333333333333,0.31796875,0.31796875),
(0.737254901960784,0.309375,0.309375),
(0.741176470588235,0.30078125,0.30078125),
(0.745098039215686,0.2921875,0.2921875),
(0.749019607843137,0.28359375,0.28359375),
(0.752941176470588,0.275,0.275),
(0.756862745098039,0.26640625,0.26640625),
(0.76078431372549,0.2578125,0.2578125),
(0.764705882352941,0.24921875,0.24921875),
(0.768627450980392,0.240625,0.240625),
(0.772549019607843,0.23203125,0.23203125),
(0.776470588235294,0.2234375,0.2234375),
(0.780392156862745,0.21484375,0.21484375),
(0.784313725490196,0.222301171875,0.222301171875),
(0.788235294117647,0.22975859375,0.22975859375),
(0.792156862745098,0.237216015625,0.237216015625),
(0.796078431372549,0.2446734375,0.2446734375),
(0.8,0.252130859375,0.252130859375),
(0.803921568627451,0.259587890625,0.259587890625),
(0.807843137254902,0.2670453125,0.2670453125),
(0.811764705882353,0.274502734375,0.274502734375),
(0.815686274509804,0.28196015625,0.28196015625),
(0.819607843137255,0.289417578125,0.289417578125),
(0.823529411764706,0.296875,0.296875),
(0.827450980392157,0.304332421875,0.304332421875),
(0.831372549019608,0.31178984375,0.31178984375),
(0.835294117647059,0.319247265625,0.319247265625),
(0.83921568627451,0.3267046875,0.3267046875),
(0.843137254901961,0.334162109375,0.334162109375),
(0.847058823529412,0.34161953125,0.34161953125),
(0.850980392156863,0.3490765625,0.3490765625),
(0.854901960784314,0.356533984375,0.356533984375),
(0.858823529411765,0.36399140625,0.36399140625),
(0.862745098039216,0.371448828125,0.371448828125),
(0.866666666666667,0.37890625,0.37890625),
(0.870588235294118,0.386363671875,0.386363671875),
(0.874509803921569,0.3938203125,0.3938203125),
(0.87843137254902,0.40127734375,0.40127734375),
(0.882352941176471,0.408734375,0.408734375),
(0.886274509803922,0.41619140625,0.41619140625),
(0.890196078431373,0.42365234375,0.42365234375),
(0.894117647058824,0.431109375,0.431109375),
(0.898039215686275,0.43856640625,0.43856640625),
(0.901960784313726,0.4460234375,0.4460234375),
(0.905882352941176,0.45348046875,0.45348046875),
(0.909803921568627,0.4609375,0.4609375),
(0.913725490196078,0.46839453125,0.46839453125),
(0.917647058823529,0.4758515625,0.4758515625),
(0.92156862745098,0.48330859375,0.48330859375),
(0.925490196078431,0.490765625,0.490765625),
(0.929411764705882,0.49822265625,0.49822265625),
(0.933333333333333,0.50568359375,0.50568359375),
(0.937254901960784,0.513140625,0.513140625),
(0.941176470588235,0.52059765625,0.52059765625),
(0.945098039215686,0.5280546875,0.5280546875),
(0.949019607843137,0.53551171875,0.53551171875),
(0.952941176470588,0.54296875,0.54296875),
(0.956862745098039,0.55042578125,0.55042578125),
(0.96078431372549,0.5578828125,0.5578828125),
(0.964705882352941,0.56533984375,0.56533984375),
(0.968627450980392,0.572796875,0.572796875),
(0.972549019607843,0.58025390625,0.58025390625),
(0.976470588235294,0.58771484375,0.58771484375),
(0.980392156862745,0.595171875,0.595171875),
(0.984313725490196,0.60262890625,0.60262890625),
(0.988235294117647,0.6100859375,0.6100859375),
(0.992156862745098,0.61754296875,0.61754296875),
(0.996078431372549,0.625,0.625),
(1.0000,0.625,0.625)),
'blue' : (
(0.0,0.51984375,0.51984375),
(0.00392156862745098,0.51984375,0.51984375),
(0.00784313725490196,0.529765625,0.529765625),
(0.0117647058823529,0.5396875,0.5396875),
(0.0156862745098039,0.549609375,0.549609375),
(0.0196078431372549,0.55953125,0.55953125),
(0.0235294117647059,0.569453125,0.569453125),
(0.0274509803921569,0.579375,0.579375),
(0.0313725490196078,0.589296875,0.589296875),
(0.0352941176470588,0.59921875,0.59921875),
(0.0392156862745098,0.609140625,0.609140625),
(0.0431372549019608,0.6190625,0.6190625),
(0.0470588235294118,0.628984375,0.628984375),
(0.0509803921568627,0.63890625,0.63890625),
(0.0549019607843137,0.648828125,0.648828125),
(0.0588235294117647,0.65875,0.65875),
(0.0627450980392157,0.668671875,0.668671875),
(0.0666666666666667,0.67859375,0.67859375),
(0.0705882352941176,0.688515625,0.688515625),
(0.0745098039215686,0.6984375,0.6984375),
(0.0784313725490196,0.708359375,0.708359375),
(0.0823529411764706,0.71828125,0.71828125),
(0.0862745098039216,0.728203125,0.728203125),
(0.0901960784313725,0.738125,0.738125),
(0.0941176470588235,0.748046875,0.748046875),
(0.0980392156862745,0.75796875,0.75796875),
(0.101960784313725,0.767890625,0.767890625),
(0.105882352941176,0.7778125,0.7778125),
(0.109803921568627,0.787734375,0.787734375),
(0.113725490196078,0.79765625,0.79765625),
(0.117647058823529,0.807578125,0.807578125),
(0.12156862745098,0.8175,0.8175),
(0.125490196078431,0.827421875,0.827421875),
(0.129411764705882,0.83734375,0.83734375),
(0.133333333333333,0.847265625,0.847265625),
(0.137254901960784,0.8571875,0.8571875),
(0.141176470588235,0.867109375,0.867109375),
(0.145098039215686,0.87703125,0.87703125),
(0.149019607843137,0.886953125,0.886953125),
(0.152941176470588,0.896875,0.896875),
(0.156862745098039,0.906796875,0.906796875),
(0.16078431372549,0.91671875,0.91671875),
(0.164705882352941,0.926640625,0.926640625),
(0.168627450980392,0.9365625,0.9365625),
(0.172549019607843,0.946484375,0.946484375),
(0.176470588235294,0.95640625,0.95640625),
(0.180392156862745,0.966328125,0.966328125),
(0.184313725490196,0.97625,0.97625),
(0.188235294117647,0.986171875,0.986171875),
(0.192156862745098,0.99609375,0.99609375),
(0.196078431372549,0.976171875,0.976171875),
(0.2,0.95625,0.95625),
(0.203921568627451,0.936328125,0.936328125),
(0.207843137254902,0.91640625,0.91640625),
(0.211764705882353,0.896484375,0.896484375),
(0.215686274509804,0.8765625,0.8765625),
(0.219607843137255,0.856640625,0.856640625),
(0.223529411764706,0.83671875,0.83671875),
(0.227450980392157,0.816796875,0.816796875),
(0.231372549019608,0.796875,0.796875),
(0.235294117647059,0.776953125,0.776953125),
(0.23921568627451,0.75703125,0.75703125),
(0.243137254901961,0.737109375,0.737109375),
(0.247058823529412,0.7171875,0.7171875),
(0.250980392156863,0.697265625,0.697265625),
(0.254901960784314,0.67734375,0.67734375),
(0.258823529411765,0.657421875,0.657421875),
(0.262745098039216,0.6375,0.6375),
(0.266666666666667,0.617578125,0.617578125),
(0.270588235294118,0.59765625,0.59765625),
(0.274509803921569,0.577734375,0.577734375),
(0.27843137254902,0.5578125,0.5578125),
(0.282352941176471,0.537890625,0.537890625),
(0.286274509803922,0.51796875,0.51796875),
(0.290196078431373,0.498046875,0.498046875),
(0.294117647058824,0.478125,0.478125),
(0.298039215686275,0.458203125,0.458203125),
(0.301960784313725,0.43828125,0.43828125),
(0.305882352941176,0.418359375,0.418359375),
(0.309803921568627,0.3984375,0.3984375),
(0.313725490196078,0.378515625,0.378515625),
(0.317647058823529,0.35859375,0.35859375),
(0.32156862745098,0.338671875,0.338671875),
(0.325490196078431,0.31875,0.31875),
(0.329411764705882,0.298828125,0.298828125),
(0.333333333333333,0.27890625,0.27890625),
(0.337254901960784,0.258984375,0.258984375),
(0.341176470588235,0.2390625,0.2390625),
(0.345098039215686,0.219140625,0.219140625),
(0.349019607843137,0.19921875,0.19921875),
(0.352941176470588,0.179296875,0.179296875),
(0.356862745098039,0.159375,0.159375),
(0.36078431372549,0.139453125,0.139453125),
(0.364705882352941,0.11953125,0.11953125),
(0.368627450980392,0.099609375,0.099609375),
(0.372549019607843,0.0796875,0.0796875),
(0.376470588235294,0.059765625,0.059765625),
(0.380392156862745,0.03984375,0.03984375),
(0.384313725490196,0.019921875,0.019921875),
(0.388235294117647,0,0),
(0.392156862745098,0,0),
(0.396078431372549,0,0),
(0.4,0,0),
(0.403921568627451,0,0),
(0.407843137254902,0,0),
(0.411764705882353,0,0),
(0.415686274509804,0,0),
(0.419607843137255,0,0),
(0.423529411764706,0,0),
(0.427450980392157,0,0),
(0.431372549019608,0,0),
(0.435294117647059,0,0),
(0.43921568627451,0,0),
(0.443137254901961,0,0),
(0.447058823529412,0,0),
(0.450980392156863,0,0),
(0.454901960784314,0,0),
(0.458823529411765,0,0),
(0.462745098039216,0,0),
(0.466666666666667,0,0),
(0.470588235294118,0,0),
(0.474509803921569,0,0),
(0.47843137254902,0,0),
(0.482352941176471,0,0),
(0.486274509803922,0,0),
(0.490196078431373,0,0),
(0.494117647058824,0,0),
(0.498039215686275,0,0),
(0.501960784313725,0,0),
(0.505882352941176,0,0),
(0.509803921568627,0,0),
(0.513725490196078,0,0),
(0.517647058823529,0,0),
(0.52156862745098,0,0),
(0.525490196078431,0,0),
(0.529411764705882,0,0),
(0.533333333333333,0,0),
(0.537254901960784,0,0),
(0.541176470588235,0,0),
(0.545098039215686,0,0),
(0.549019607843137,0,0),
(0.552941176470588,0,0),
(0.556862745098039,0,0),
(0.56078431372549,0,0),
(0.564705882352941,0,0),
(0.568627450980392,0,0),
(0.572549019607843,0,0),
(0.576470588235294,0,0),
(0.580392156862745,0,0),
(0.584313725490196,0,0),
(0.588235294117647,0.004296875,0.004296875),
(0.592156862745098,0.00859375,0.00859375),
(0.596078431372549,0.012890625,0.012890625),
(0.6,0.0171875,0.0171875),
(0.603921568627451,0.021484375,0.021484375),
(0.607843137254902,0.02578125,0.02578125),
(0.611764705882353,0.030078125,0.030078125),
(0.615686274509804,0.034375,0.034375),
(0.619607843137255,0.038671875,0.038671875),
(0.623529411764706,0.04296875,0.04296875),
(0.627450980392157,0.047265625,0.047265625),
(0.631372549019608,0.0515625,0.0515625),
(0.635294117647059,0.055859375,0.055859375),
(0.63921568627451,0.06015625,0.06015625),
(0.643137254901961,0.064453125,0.064453125),
(0.647058823529412,0.06875,0.06875),
(0.650980392156863,0.073046875,0.073046875),
(0.654901960784314,0.07734375,0.07734375),
(0.658823529411765,0.081640625,0.081640625),
(0.662745098039216,0.0859375,0.0859375),
(0.666666666666667,0.090234375,0.090234375),
(0.670588235294118,0.09453125,0.09453125),
(0.674509803921569,0.098828125,0.098828125),
(0.67843137254902,0.103125,0.103125),
(0.682352941176471,0.107421875,0.107421875),
(0.686274509803922,0.11171875,0.11171875),
(0.690196078431373,0.116015625,0.116015625),
(0.694117647058824,0.1203125,0.1203125),
(0.698039215686274,0.124609375,0.124609375),
(0.701960784313725,0.12890625,0.12890625),
(0.705882352941177,0.133203125,0.133203125),
(0.709803921568627,0.1375,0.1375),
(0.713725490196078,0.141796875,0.141796875),
(0.717647058823529,0.14609375,0.14609375),
(0.72156862745098,0.150390625,0.150390625),
(0.725490196078431,0.1546875,0.1546875),
(0.729411764705882,0.158984375,0.158984375),
(0.733333333333333,0.16328125,0.16328125),
(0.737254901960784,0.167578125,0.167578125),
(0.741176470588235,0.171875,0.171875),
(0.745098039215686,0.176171875,0.176171875),
(0.749019607843137,0.18046875,0.18046875),
(0.752941176470588,0.184765625,0.184765625),
(0.756862745098039,0.1890625,0.1890625),
(0.76078431372549,0.193359375,0.193359375),
(0.764705882352941,0.19765625,0.19765625),
(0.768627450980392,0.201953125,0.201953125),
(0.772549019607843,0.20625,0.20625),
(0.776470588235294,0.210546875,0.210546875),
(0.780392156862745,0.21484375,0.21484375),
(0.784313725490196,0.22663359375,0.22663359375),
(0.788235294117647,0.2384234375,0.2384234375),
(0.792156862745098,0.250212890625,0.250212890625),
(0.796078431372549,0.262002734375,0.262002734375),
(0.8,0.273792578125,0.273792578125),
(0.803921568627451,0.285582421875,0.285582421875),
(0.807843137254902,0.297372265625,0.297372265625),
(0.811764705882353,0.309162109375,0.309162109375),
(0.815686274509804,0.3209515625,0.3209515625),
(0.819607843137255,0.33274140625,0.33274140625),
(0.823529411764706,0.34453125,0.34453125),
(0.827450980392157,0.35632109375,0.35632109375),
(0.831372549019608,0.3681109375,0.3681109375),
(0.835294117647059,0.379900390625,0.379900390625),
(0.83921568627451,0.39169140625,0.39169140625),
(0.843137254901961,0.40348046875,0.40348046875),
(0.847058823529412,0.41526953125,0.41526953125),
(0.850980392156863,0.42705859375,0.42705859375),
(0.854901960784314,0.43884765625,0.43884765625),
(0.858823529411765,0.450640625,0.450640625),
(0.862745098039216,0.4624296875,0.4624296875),
(0.866666666666667,0.47421875,0.47421875),
(0.870588235294118,0.4860078125,0.4860078125),
(0.874509803921569,0.497796875,0.497796875),
(0.87843137254902,0.50958984375,0.50958984375),
(0.882352941176471,0.52137890625,0.52137890625),
(0.886274509803922,0.53316796875,0.53316796875),
(0.890196078431373,0.54495703125,0.54495703125),
(0.894117647058824,0.55674609375,0.55674609375),
(0.898039215686275,0.56853515625,0.56853515625),
(0.901960784313726,0.580328125,0.580328125),
(0.905882352941176,0.5921171875,0.5921171875),
(0.909803921568627,0.60390625,0.60390625),
(0.913725490196078,0.6156953125,0.6156953125),
(0.917647058823529,0.627484375,0.627484375),
(0.92156862745098,0.63927734375,0.63927734375),
(0.925490196078431,0.65106640625,0.65106640625),
(0.929411764705882,0.66285546875,0.66285546875),
(0.933333333333333,0.67464453125,0.67464453125),
(0.937254901960784,0.68643359375,0.68643359375),
(0.941176470588235,0.69822265625,0.69822265625),
(0.945098039215686,0.710015625,0.710015625),
(0.949019607843137,0.7218046875,0.7218046875),
(0.952941176470588,0.73359375,0.73359375),
(0.956862745098039,0.7453828125,0.7453828125),
(0.96078431372549,0.757171875,0.757171875),
(0.964705882352941,0.76896484375,0.76896484375),
(0.968627450980392,0.78075390625,0.78075390625),
(0.972549019607843,0.79254296875,0.79254296875),
(0.976470588235294,0.80433203125,0.80433203125),
(0.980392156862745,0.81612109375,0.81612109375),
(0.984313725490196,0.82791015625,0.82791015625),
(0.988235294117647,0.839703125,0.839703125),
(0.992156862745098,0.8514921875,0.8514921875),
(0.996078431372549,0.86328125,0.86328125),
(1.00,0.86328125,0.86328125)),
}
vcdict = {
'red' : (
(0,1,1),
(0.00392156862745098,0.54508984375,0.54508984375),
(0.00784313725490196,0.5285703125,0.5285703125),
(0.0117647058823529,0.5120546875,0.5120546875),
(0.0156862745098039,0.49553515625,0.49553515625),
(0.0196078431372549,0.47901953125,0.47901953125),
(0.0235294117647059,0.4625,0.4625),
(0.0274509803921569,0.44598046875,0.44598046875),
(0.0313725490196078,0.42946484375,0.42946484375),
(0.0352941176470588,0.4129453125,0.4129453125),
(0.0392156862745098,0.3964296875,0.3964296875),
(0.0431372549019608,0.379910546875,0.379910546875),
(0.0470588235294118,0.36339296875,0.36339296875),
(0.0509803921568627,0.346875,0.346875),
(0.0549019607843137,0.33035703125,0.33035703125),
(0.0588235294117647,0.313839453125,0.313839453125),
(0.0627450980392157,0.297321484375,0.297321484375),
(0.0666666666666667,0.280803515625,0.280803515625),
(0.0705882352941176,0.2642859375,0.2642859375),
(0.0745098039215686,0.24776796875,0.24776796875),
(0.0784313725490196,0.23125,0.23125),
(0.0823529411764706,0.21473203125,0.21473203125),
(0.0862745098039216,0.198214453125,0.198214453125),
(0.0901960784313725,0.181696484375,0.181696484375),
(0.0941176470588235,0.165178515625,0.165178515625),
(0.0980392156862745,0.148660546875,0.148660546875),
(0.101960784313725,0.13214296875,0.13214296875),
(0.105882352941176,0.115625,0.115625),
(0.109803921568627,0.09910703125,0.09910703125),
(0.113725490196078,0.082589453125,0.082589453125),
(0.117647058823529,0.066071484375,0.066071484375),
(0.12156862745098,0.049553515625,0.049553515625),
(0.125490196078431,0.0330357421875,0.0330357421875),
(0.129411764705882,0.016517890625,0.016517890625),
(0.133333333333333,0,0),
(0.137254901960784,0,0),
(0.141176470588235,0,0),
(0.145098039215686,0,0),
(0.149019607843137,0,0),
(0.152941176470588,0,0),
(0.156862745098039,0,0),
(0.16078431372549,0,0),
(0.164705882352941,0,0),
(0.168627450980392,0,0),
(0.172549019607843,0,0),
(0.176470588235294,0,0),
(0.180392156862745,0,0),
(0.184313725490196,0,0),
(0.188235294117647,0,0),
(0.192156862745098,0,0),
(0.196078431372549,0,0),
(0.2,0,0),
(0.203921568627451,0,0),
(0.207843137254902,0,0),
(0.211764705882353,0,0),
(0.215686274509804,0,0),
(0.219607843137255,0,0),
(0.223529411764706,0,0),
(0.227450980392157,0,0),
(0.231372549019608,0,0),
(0.235294117647059,0,0),
(0.23921568627451,0,0),
(0.243137254901961,0,0),
(0.247058823529412,0,0),
(0.250980392156863,0,0),
(0.254901960784314,0,0),
(0.258823529411765,0,0),
(0.262745098039216,0,0),
(0.266666666666667,0,0),
(0.270588235294118,0,0),
(0.274509803921569,0,0),
(0.27843137254902,0,0),
(0.282352941176471,0,0),
(0.286274509803922,0,0),
(0.290196078431373,0,0),
(0.294117647058824,0,0),
(0.298039215686275,0,0),
(0.301960784313725,0,0),
(0.305882352941176,0,0),
(0.309803921568627,0,0),
(0.313725490196078,0,0),
(0.317647058823529,0,0),
(0.32156862745098,0,0),
(0.325490196078431,0,0),
(0.329411764705882,0,0),
(0.333333333333333,0,0),
(0.337254901960784,0,0),
(0.341176470588235,0,0),
(0.345098039215686,0,0),
(0.349019607843137,0,0),
(0.352941176470588,0.0061383984375,0.0061383984375),
(0.356862745098039,0.012276796875,0.012276796875),
(0.36078431372549,0.0184151953125,0.0184151953125),
(0.364705882352941,0.0245535546875,0.0245535546875),
(0.368627450980392,0.030691953125,0.030691953125),
(0.372549019607843,0.0368303515625,0.0368303515625),
(0.376470588235294,0.04296875,0.04296875),
(0.380392156862745,0.04910703125,0.04910703125),
(0.384313725490196,0.055245703125,0.055245703125),
(0.388235294117647,0.061383984375,0.061383984375),
(0.392156862745098,0.067522265625,0.067522265625),
(0.396078431372549,0.073660546875,0.073660546875),
(0.4,0.07979921875,0.07979921875),
(0.403921568627451,0.0859375,0.0859375),
(0.407843137254902,0.09207578125,0.09207578125),
(0.411764705882353,0.098214453125,0.098214453125),
(0.415686274509804,0.104352734375,0.104352734375),
(0.419607843137255,0.110491015625,0.110491015625),
(0.423529411764706,0.116629296875,0.116629296875),
(0.427450980392157,0.12276796875,0.12276796875),
(0.431372549019608,0.12890625,0.12890625),
(0.435294117647059,0.13504453125,0.13504453125),
(0.43921568627451,0.141183203125,0.141183203125),
(0.443137254901961,0.147321484375,0.147321484375),
(0.447058823529412,0.153459765625,0.153459765625),
(0.450980392156863,0.159598046875,0.159598046875),
(0.454901960784314,0.16573671875,0.16573671875),
(0.458823529411765,0.171875,0.171875),
(0.462745098039216,0.17801328125,0.17801328125),
(0.466666666666667,0.184151953125,0.184151953125),
(0.470588235294118,0.190290234375,0.190290234375),
(0.474509803921569,0.196428515625,0.196428515625),
(0.47843137254902,0.202566796875,0.202566796875),
(0.482352941176471,0.20870546875,0.20870546875),
(0.486274509803922,0.21484375,0.21484375),
(0.490196078431373,0.233370703125,0.233370703125),
(0.494117647058824,0.251897265625,0.251897265625),
(0.498039215686275,0.27042421875,0.27042421875),
(0.501960784313725,0.28895078125,0.28895078125),
(0.505882352941176,0.307477734375,0.307477734375),
(0.509803921568627,0.326004296875,0.326004296875),
(0.513725490196078,0.34453125,0.34453125),
(0.517647058823529,0.363058203125,0.363058203125),
(0.52156862745098,0.381584765625,0.381584765625),
(0.525490196078431,0.40011328125,0.40011328125),
(0.529411764705882,0.41863671875,0.41863671875),
(0.533333333333333,0.4371640625,0.4371640625),
(0.537254901960784,0.45569140625,0.45569140625),
(0.541176470588235,0.47421875,0.47421875),
(0.545098039215686,0.49274609375,0.49274609375),
(0.549019607843137,0.5112734375,0.5112734375),
(0.552941176470588,0.52980078125,0.52980078125),
(0.556862745098039,0.54832421875,0.54832421875),
(0.56078431372549,0.5668515625,0.5668515625),
(0.564705882352941,0.58537890625,0.58537890625),
(0.568627450980392,0.60390625,0.60390625),
(0.572549019607843,0.62243359375,0.62243359375),
(0.576470588235294,0.6409609375,0.6409609375),
(0.580392156862745,0.65948828125,0.65948828125),
(0.584313725490196,0.67801171875,0.67801171875),
(0.588235294117647,0.6965390625,0.6965390625),
(0.592156862745098,0.71506640625,0.71506640625),
(0.596078431372549,0.73359375,0.73359375),
(0.6,0.75212109375,0.75212109375),
(0.603921568627451,0.7706484375,0.7706484375),
(0.607843137254902,0.78917578125,0.78917578125),
(0.611764705882353,0.80769921875,0.80769921875),
(0.615686274509804,0.8262265625,0.8262265625),
(0.619607843137255,0.84475390625,0.84475390625),
(0.623529411764706,0.86328125,0.86328125),
(0.627450980392157,0.86549609375,0.86549609375),
(0.631372549019608,0.86770703125,0.86770703125),
(0.635294117647059,0.869921875,0.869921875),
(0.63921568627451,0.87213671875,0.87213671875),
(0.643137254901961,0.87434765625,0.87434765625),
(0.647058823529412,0.8765625,0.8765625),
(0.650980392156863,0.87877734375,0.87877734375),
(0.654901960784314,0.88098828125,0.88098828125),
(0.658823529411765,0.883203125,0.883203125),
(0.662745098039216,0.88541796875,0.88541796875),
(0.666666666666667,0.88762890625,0.88762890625),
(0.670588235294118,0.88984375,0.88984375),
(0.674509803921569,0.89205859375,0.89205859375),
(0.67843137254902,0.89426953125,0.89426953125),
(0.682352941176471,0.896484375,0.896484375),
(0.686274509803922,0.89869921875,0.89869921875),
(0.690196078431373,0.90091015625,0.90091015625),
(0.694117647058824,0.903125,0.903125),
(0.698039215686274,0.90533984375,0.90533984375),
(0.701960784313725,0.90755078125,0.90755078125),
(0.705882352941177,0.909765625,0.909765625),
(0.709803921568627,0.91198046875,0.91198046875),
(0.713725490196078,0.91419140625,0.91419140625),
(0.717647058823529,0.91640625,0.91640625),
(0.72156862745098,0.91862109375,0.91862109375),
(0.725490196078431,0.92083203125,0.92083203125),
(0.729411764705882,0.923046875,0.923046875),
(0.733333333333333,0.92526171875,0.92526171875),
(0.737254901960784,0.92747265625,0.92747265625),
(0.741176470588235,0.9296875,0.9296875),
(0.745098039215686,0.93190234375,0.93190234375),
(0.749019607843137,0.93411328125,0.93411328125),
(0.752941176470588,0.936328125,0.936328125),
(0.756862745098039,0.93854296875,0.93854296875),
(0.76078431372549,0.94075390625,0.94075390625),
(0.764705882352941,0.94296875,0.94296875),
(0.768627450980392,0.94518359375,0.94518359375),
(0.772549019607843,0.94739453125,0.94739453125),
(0.776470588235294,0.949609375,0.949609375),
(0.780392156862745,0.95182421875,0.95182421875),
(0.784313725490196,0.95403515625,0.95403515625),
(0.788235294117647,0.95625,0.95625),
(0.792156862745098,0.95846484375,0.95846484375),
(0.796078431372549,0.96067578125,0.96067578125),
(0.8,0.962890625,0.962890625),
(0.803921568627451,0.96510546875,0.96510546875),
(0.807843137254902,0.96731640625,0.96731640625),
(0.811764705882353,0.96953125,0.96953125),
(0.815686274509804,0.97174609375,0.97174609375),
(0.819607843137255,0.97395703125,0.97395703125),
(0.823529411764706,0.976171875,0.976171875),
(0.827450980392157,0.97838671875,0.97838671875),
(0.831372549019608,0.98059765625,0.98059765625),
(0.835294117647059,0.9828125,0.9828125),
(0.83921568627451,0.98502734375,0.98502734375),
(0.843137254901961,0.98723828125,0.98723828125),
(0.847058823529412,0.989453125,0.989453125),
(0.850980392156863,0.99166796875,0.99166796875),
(0.854901960784314,0.99387890625,0.99387890625),
(0.858823529411765,0.99609375,0.99609375),
(0.862745098039216,0.99609375,0.99609375),
(0.866666666666667,0.99609375,0.99609375),
(0.870588235294118,0.99609375,0.99609375),
(0.874509803921569,0.99609375,0.99609375),
(0.87843137254902,0.99609375,0.99609375),
(0.882352941176471,0.99609375,0.99609375),
(0.886274509803922,0.99609375,0.99609375),
(0.890196078431373,0.99609375,0.99609375),
(0.894117647058824,0.99609375,0.99609375),
(0.898039215686275,0.99609375,0.99609375),
(0.901960784313726,0.99609375,0.99609375),
(0.905882352941176,0.99609375,0.99609375),
(0.909803921568627,0.99609375,0.99609375),
(0.913725490196078,0.99609375,0.99609375),
(0.917647058823529,0.99609375,0.99609375),
(0.92156862745098,0.99609375,0.99609375),
(0.925490196078431,0.99609375,0.99609375),
(0.929411764705882,0.99609375,0.99609375),
(0.933333333333333,0.99609375,0.99609375),
(0.937254901960784,0.99609375,0.99609375),
(0.941176470588235,0.99609375,0.99609375),
(0.945098039215686,0.99609375,0.99609375),
(0.949019607843137,0.99609375,0.99609375),
(0.952941176470588,0.99609375,0.99609375),
(0.956862745098039,0.99609375,0.99609375),
(0.96078431372549,0.99609375,0.99609375),
(0.964705882352941,0.99609375,0.99609375),
(0.968627450980392,0.99609375,0.99609375),
(0.972549019607843,0.99609375,0.99609375),
(0.976470588235294,0.99609375,0.99609375),
(0.980392156862745,0.99609375,0.99609375),
(0.984313725490196,0.99609375,0.99609375),
(0.988235294117647,0.99609375,0.99609375),
(0.992156862745098,0.99609375,0.99609375),
(0.996078431372549,0.99609375,0.99609375),
(1,0.99609375,0.99609375)),
'green' : (
(0,1,1),
(0.00392156862745098,0,0),
(0.00784313725490196,0,0),
(0.0117647058823529,0,0),
(0.0156862745098039,0,0),
(0.0196078431372549,0,0),
(0.0235294117647059,0,0),
(0.0274509803921569,0,0),
(0.0313725490196078,0,0),
(0.0352941176470588,0,0),
(0.0392156862745098,0,0),
(0.0431372549019608,0,0),
(0.0470588235294118,0,0),
(0.0509803921568627,0,0),
(0.0549019607843137,0,0),
(0.0588235294117647,0,0),
(0.0627450980392157,0,0),
(0.0666666666666667,0,0),
(0.0705882352941176,0,0),
(0.0745098039215686,0,0),
(0.0784313725490196,0,0),
(0.0823529411764706,0,0),
(0.0862745098039216,0,0),
(0.0901960784313725,0,0),
(0.0941176470588235,0,0),
(0.0980392156862745,0,0),
(0.101960784313725,0,0),
(0.105882352941176,0,0),
(0.109803921568627,0,0),
(0.113725490196078,0,0),
(0.117647058823529,0,0),
(0.12156862745098,0,0),
(0.125490196078431,0,0),
(0.129411764705882,0,0),
(0.133333333333333,0,0),
(0.137254901960784,0.0135653515625,0.0135653515625),
(0.141176470588235,0.0271306640625,0.0271306640625),
(0.145098039215686,0.04069609375,0.04069609375),
(0.149019607843137,0.054261328125,0.054261328125),
(0.152941176470588,0.0678265625,0.0678265625),
(0.156862745098039,0.0813921875,0.0813921875),
(0.16078431372549,0.094957421875,0.094957421875),
(0.164705882352941,0.10852265625,0.10852265625),
(0.168627450980392,0.122087890625,0.122087890625),
(0.172549019607843,0.135653515625,0.135653515625),
(0.176470588235294,0.14921875,0.14921875),
(0.180392156862745,0.162783984375,0.162783984375),
(0.184313725490196,0.176349609375,0.176349609375),
(0.188235294117647,0.18991484375,0.18991484375),
(0.192156862745098,0.203480078125,0.203480078125),
(0.196078431372549,0.2170453125,0.2170453125),
(0.2,0.2306109375,0.2306109375),
(0.203921568627451,0.244176171875,0.244176171875),
(0.207843137254902,0.25774140625,0.25774140625),
(0.211764705882353,0.27130703125,0.27130703125),
(0.215686274509804,0.284872265625,0.284872265625),
(0.219607843137255,0.2984375,0.2984375),
(0.223529411764706,0.312002734375,0.312002734375),
(0.227450980392157,0.325568359375,0.325568359375),
(0.231372549019608,0.33913359375,0.33913359375),
(0.235294117647059,0.352698828125,0.352698828125),
(0.23921568627451,0.3662640625,0.3662640625),
(0.243137254901961,0.3798296875,0.3798296875),
(0.247058823529412,0.39339453125,0.39339453125),
(0.250980392156863,0.4069609375,0.4069609375),
(0.254901960784314,0.42052734375,0.42052734375),
(0.258823529411765,0.43408984375,0.43408984375),
(0.262745098039216,0.44765625,0.44765625),
(0.266666666666667,0.46122265625,0.46122265625),
(0.270588235294118,0.47478515625,0.47478515625),
(0.274509803921569,0.4883515625,0.4883515625),
(0.27843137254902,0.50191796875,0.50191796875),
(0.282352941176471,0.515484375,0.515484375),
(0.286274509803922,0.529046875,0.529046875),
(0.290196078431373,0.54261328125,0.54261328125),
(0.294117647058824,0.5561796875,0.5561796875),
(0.298039215686275,0.56974609375,0.56974609375),
(0.301960784313725,0.58330859375,0.58330859375),
(0.305882352941176,0.596875,0.596875),
(0.309803921568627,0.61044140625,0.61044140625),
(0.313725490196078,0.62400390625,0.62400390625),
(0.317647058823529,0.6375703125,0.6375703125),
(0.32156862745098,0.65113671875,0.65113671875),
(0.325490196078431,0.664703125,0.664703125),
(0.329411764705882,0.678265625,0.678265625),
(0.333333333333333,0.69183203125,0.69183203125),
(0.337254901960784,0.7053984375,0.7053984375),
(0.341176470588235,0.71896484375,0.71896484375),
(0.345098039215686,0.73252734375,0.73252734375),
(0.349019607843137,0.74609375,0.74609375),
(0.352941176470588,0.7309140625,0.7309140625),
(0.356862745098039,0.71573828125,0.71573828125),
(0.36078431372549,0.70055859375,0.70055859375),
(0.364705882352941,0.68537890625,0.68537890625),
(0.368627450980392,0.67019921875,0.67019921875),
(0.372549019607843,0.6550234375,0.6550234375),
(0.376470588235294,0.63984375,0.63984375),
(0.380392156862745,0.6246640625,0.6246640625),
(0.384313725490196,0.60948828125,0.60948828125),
(0.388235294117647,0.59430859375,0.59430859375),
(0.392156862745098,0.57912890625,0.57912890625),
(0.396078431372549,0.56394921875,0.56394921875),
(0.4,0.5487734375,0.5487734375),
(0.403921568627451,0.53359375,0.53359375),
(0.407843137254902,0.5184140625,0.5184140625),
(0.411764705882353,0.50323828125,0.50323828125),
(0.415686274509804,0.48805859375,0.48805859375),
(0.419607843137255,0.47287890625,0.47287890625),
(0.423529411764706,0.45769921875,0.45769921875),
(0.427450980392157,0.4425234375,0.4425234375),
(0.431372549019608,0.42734375,0.42734375),
(0.435294117647059,0.4121640625,0.4121640625),
(0.43921568627451,0.39698828125,0.39698828125),
(0.443137254901961,0.381808203125,0.381808203125),
(0.447058823529412,0.366629296875,0.366629296875),
(0.450980392156863,0.35145078125,0.35145078125),
(0.454901960784314,0.336272265625,0.336272265625),
(0.458823529411765,0.32109375,0.32109375),
(0.462745098039216,0.305915234375,0.305915234375),
(0.466666666666667,0.29073671875,0.29073671875),
(0.470588235294118,0.2755578125,0.2755578125),
(0.474509803921569,0.260379296875,0.260379296875),
(0.47843137254902,0.24520078125,0.24520078125),
(0.482352941176471,0.230022265625,0.230022265625),
(0.486274509803922,0.21484375,0.21484375),
(0.490196078431373,0.2265625,0.2265625),
(0.494117647058824,0.23828125,0.23828125),
(0.498039215686275,0.25,0.25),
(0.501960784313725,0.26171875,0.26171875),
(0.505882352941176,0.2734375,0.2734375),
(0.509803921568627,0.28515625,0.28515625),
(0.513725490196078,0.296875,0.296875),
(0.517647058823529,0.30859375,0.30859375),
(0.52156862745098,0.3203125,0.3203125),
(0.525490196078431,0.33203125,0.33203125),
(0.529411764705882,0.34375,0.34375),
(0.533333333333333,0.35546875,0.35546875),
(0.537254901960784,0.3671875,0.3671875),
(0.541176470588235,0.37890625,0.37890625),
(0.545098039215686,0.390625,0.390625),
(0.549019607843137,0.40234375,0.40234375),
(0.552941176470588,0.4140625,0.4140625),
(0.556862745098039,0.42578125,0.42578125),
(0.56078431372549,0.4375,0.4375),
(0.564705882352941,0.44921875,0.44921875),
(0.568627450980392,0.4609375,0.4609375),
(0.572549019607843,0.47265625,0.47265625),
(0.576470588235294,0.484375,0.484375),
(0.580392156862745,0.49609375,0.49609375),
(0.584313725490196,0.5078125,0.5078125),
(0.588235294117647,0.51953125,0.51953125),
(0.592156862745098,0.53125,0.53125),
(0.596078431372549,0.54296875,0.54296875),
(0.6,0.5546875,0.5546875),
(0.603921568627451,0.56640625,0.56640625),
(0.607843137254902,0.578125,0.578125),
(0.611764705882353,0.58984375,0.58984375),
(0.615686274509804,0.6015625,0.6015625),
(0.619607843137255,0.61328125,0.61328125),
(0.623529411764706,0.625,0.625),
(0.627450980392157,0.61458203125,0.61458203125),
(0.631372549019608,0.60416796875,0.60416796875),
(0.635294117647059,0.59375,0.59375),
(0.63921568627451,0.58333203125,0.58333203125),
(0.643137254901961,0.57291796875,0.57291796875),
(0.647058823529412,0.5625,0.5625),
(0.650980392156863,0.55208203125,0.55208203125),
(0.654901960784314,0.54166796875,0.54166796875),
(0.658823529411765,0.53125,0.53125),
(0.662745098039216,0.52083203125,0.52083203125),
(0.666666666666667,0.51041796875,0.51041796875),
(0.670588235294118,0.5,0.5),
(0.674509803921569,0.48958203125,0.48958203125),
(0.67843137254902,0.47916796875,0.47916796875),
(0.682352941176471,0.46875,0.46875),
(0.686274509803922,0.45833203125,0.45833203125),
(0.690196078431373,0.44791796875,0.44791796875),
(0.694117647058824,0.4375,0.4375),
(0.698039215686274,0.42708203125,0.42708203125),
(0.701960784313725,0.41666796875,0.41666796875),
(0.705882352941177,0.40625,0.40625),
(0.709803921568627,0.39583203125,0.39583203125),
(0.713725490196078,0.385416796875,0.385416796875),
(0.717647058823529,0.375,0.375),
(0.72156862745098,0.364583203125,0.364583203125),
(0.725490196078431,0.354166796875,0.354166796875),
(0.729411764705882,0.34375,0.34375),
(0.733333333333333,0.333333203125,0.333333203125),
(0.737254901960784,0.322916796875,0.322916796875),
(0.741176470588235,0.3125,0.3125),
(0.745098039215686,0.302083203125,0.302083203125),
(0.749019607843137,0.291666796875,0.291666796875),
(0.752941176470588,0.28125,0.28125),
(0.756862745098039,0.270833203125,0.270833203125),
(0.76078431372549,0.260416796875,0.260416796875),
(0.764705882352941,0.25,0.25),
(0.768627450980392,0.239583203125,0.239583203125),
(0.772549019607843,0.229166796875,0.229166796875),
(0.776470588235294,0.21875,0.21875),
(0.780392156862745,0.208333203125,0.208333203125),
(0.784313725490196,0.197916796875,0.197916796875),
(0.788235294117647,0.1875,0.1875),
(0.792156862745098,0.177083203125,0.177083203125),
(0.796078431372549,0.166666796875,0.166666796875),
(0.8,0.15625,0.15625),
(0.803921568627451,0.145833203125,0.145833203125),
(0.807843137254902,0.135416796875,0.135416796875),
(0.811764705882353,0.125,0.125),
(0.815686274509804,0.114583203125,0.114583203125),
(0.819607843137255,0.104166796875,0.104166796875),
(0.823529411764706,0.09375,0.09375),
(0.827450980392157,0.083333203125,0.083333203125),
(0.831372549019608,0.072916796875,0.072916796875),
(0.835294117647059,0.0625,0.0625),
(0.83921568627451,0.052083203125,0.052083203125),
(0.843137254901961,0.041666796875,0.041666796875),
(0.847058823529412,0.03125,0.03125),
(0.850980392156863,0.0208333203125,0.0208333203125),
(0.854901960784314,0.0104166796875,0.0104166796875),
(0.858823529411765,0,0),
(0.862745098039216,0.0184151953125,0.0184151953125),
(0.866666666666667,0.0368303515625,0.0368303515625),
(0.870588235294118,0.055245703125,0.055245703125),
(0.874509803921569,0.073660546875,0.073660546875),
(0.87843137254902,0.09207578125,0.09207578125),
(0.882352941176471,0.110491015625,0.110491015625),
(0.886274509803922,0.12890625,0.12890625),
(0.890196078431373,0.147321484375,0.147321484375),
(0.894117647058824,0.16573671875,0.16573671875),
(0.898039215686275,0.184151953125,0.184151953125),
(0.901960784313726,0.202566796875,0.202566796875),
(0.905882352941176,0.22098203125,0.22098203125),
(0.909803921568627,0.239397265625,0.239397265625),
(0.913725490196078,0.2578125,0.2578125),
(0.917647058823529,0.276227734375,0.276227734375),
(0.92156862745098,0.29464296875,0.29464296875),
(0.925490196078431,0.313058203125,0.313058203125),
(0.929411764705882,0.331473046875,0.331473046875),
(0.933333333333333,0.34988828125,0.34988828125),
(0.937254901960784,0.368303515625,0.368303515625),
(0.941176470588235,0.38671875,0.38671875),
(0.945098039215686,0.4051328125,0.4051328125),
(0.949019607843137,0.42355078125,0.42355078125),
(0.952941176470588,0.44196484375,0.44196484375),
(0.956862745098039,0.46037890625,0.46037890625),
(0.96078431372549,0.47879296875,0.47879296875),
(0.964705882352941,0.4972109375,0.4972109375),
(0.968627450980392,0.515625,0.515625),
(0.972549019607843,0.5340390625,0.5340390625),
(0.976470588235294,0.55245703125,0.55245703125),
(0.980392156862745,0.57087109375,0.57087109375),
(0.984313725490196,0.58928515625,0.58928515625),
(0.988235294117647,0.60769921875,0.60769921875),
(0.992156862745098,0.6261171875,0.6261171875),
(0.996078431372549,0.64453125,0.64453125),
(1,0.64453125,0.64453125)),
'blue' : (
(0,1,1),
(0.00392156862745098,0.80569140625,0.80569140625),
(0.00784313725490196,0.7964296875,0.7964296875),
(0.0117647058823529,0.7871640625,0.7871640625),
(0.0156862745098039,0.77790234375,0.77790234375),
(0.0196078431372549,0.76863671875,0.76863671875),
(0.0235294117647059,0.759375,0.759375),
(0.0274509803921569,0.75011328125,0.75011328125),
(0.0313725490196078,0.74084765625,0.74084765625),
(0.0352941176470588,0.7315859375,0.7315859375),
(0.0392156862745098,0.7223203125,0.7223203125),
(0.0431372549019608,0.71305859375,0.71305859375),
(0.0470588235294118,0.70379296875,0.70379296875),
(0.0509803921568627,0.69453125,0.69453125),
(0.0549019607843137,0.68526953125,0.68526953125),
(0.0588235294117647,0.67600390625,0.67600390625),
(0.0627450980392157,0.6667421875,0.6667421875),
(0.0666666666666667,0.6574765625,0.6574765625),
(0.0705882352941176,0.64821484375,0.64821484375),
(0.0745098039215686,0.63894921875,0.63894921875),
(0.0784313725490196,0.6296875,0.6296875),
(0.0823529411764706,0.62042578125,0.62042578125),
(0.0862745098039216,0.61116015625,0.61116015625),
(0.0901960784313725,0.6018984375,0.6018984375),
(0.0941176470588235,0.5926328125,0.5926328125),
(0.0980392156862745,0.58337109375,0.58337109375),
(0.101960784313725,0.57410546875,0.57410546875),
(0.105882352941176,0.56484375,0.56484375),
(0.109803921568627,0.55558203125,0.55558203125),
(0.113725490196078,0.54631640625,0.54631640625),
(0.117647058823529,0.5370546875,0.5370546875),
(0.12156862745098,0.5277890625,0.5277890625),
(0.125490196078431,0.51852734375,0.51852734375),
(0.129411764705882,0.50926171875,0.50926171875),
(0.133333333333333,0.5,0.5),
(0.137254901960784,0.50901953125,0.50901953125),
(0.141176470588235,0.5180390625,0.5180390625),
(0.145098039215686,0.52705859375,0.52705859375),
(0.149019607843137,0.536078125,0.536078125),
(0.152941176470588,0.54509765625,0.54509765625),
(0.156862745098039,0.55412109375,0.55412109375),
(0.16078431372549,0.563140625,0.563140625),
(0.164705882352941,0.57216015625,0.57216015625),
(0.168627450980392,0.5811796875,0.5811796875),
(0.172549019607843,0.59019921875,0.59019921875),
(0.176470588235294,0.59921875,0.59921875),
(0.180392156862745,0.60823828125,0.60823828125),
(0.184313725490196,0.6172578125,0.6172578125),
(0.188235294117647,0.62627734375,0.62627734375),
(0.192156862745098,0.635296875,0.635296875),
(0.196078431372549,0.64431640625,0.64431640625),
(0.2,0.65333984375,0.65333984375),
(0.203921568627451,0.662359375,0.662359375),
(0.207843137254902,0.67137890625,0.67137890625),
(0.211764705882353,0.6803984375,0.6803984375),
(0.215686274509804,0.68941796875,0.68941796875),
(0.219607843137255,0.6984375,0.6984375),
(0.223529411764706,0.70745703125,0.70745703125),
(0.227450980392157,0.7164765625,0.7164765625),
(0.231372549019608,0.72549609375,0.72549609375),
(0.235294117647059,0.734515625,0.734515625),
(0.23921568627451,0.74353515625,0.74353515625),
(0.243137254901961,0.75255859375,0.75255859375),
(0.247058823529412,0.761578125,0.761578125),
(0.250980392156863,0.77059765625,0.77059765625),
(0.254901960784314,0.7796171875,0.7796171875),
(0.258823529411765,0.78863671875,0.78863671875),
(0.262745098039216,0.79765625,0.79765625),
(0.266666666666667,0.80667578125,0.80667578125),
(0.270588235294118,0.8156953125,0.8156953125),
(0.274509803921569,0.82471484375,0.82471484375),
(0.27843137254902,0.833734375,0.833734375),
(0.282352941176471,0.84275390625,0.84275390625),
(0.286274509803922,0.85177734375,0.85177734375),
(0.290196078431373,0.860796875,0.860796875),
(0.294117647058824,0.86981640625,0.86981640625),
(0.298039215686275,0.8788359375,0.8788359375),
(0.301960784313725,0.88785546875,0.88785546875),
(0.305882352941176,0.896875,0.896875),
(0.309803921568627,0.90589453125,0.90589453125),
(0.313725490196078,0.9149140625,0.9149140625),
(0.317647058823529,0.92393359375,0.92393359375),
(0.32156862745098,0.932953125,0.932953125),
(0.325490196078431,0.94197265625,0.94197265625),
(0.329411764705882,0.95099609375,0.95099609375),
(0.333333333333333,0.960015625,0.960015625),
(0.337254901960784,0.96903515625,0.96903515625),
(0.341176470588235,0.9780546875,0.9780546875),
(0.345098039215686,0.98707421875,0.98707421875),
(0.349019607843137,0.99609375,0.99609375),
(0.352941176470588,0.9737734375,0.9737734375),
(0.356862745098039,0.95144921875,0.95144921875),
(0.36078431372549,0.92912890625,0.92912890625),
(0.364705882352941,0.90680859375,0.90680859375),
(0.368627450980392,0.88448828125,0.88448828125),
(0.372549019607843,0.8621640625,0.8621640625),
(0.376470588235294,0.83984375,0.83984375),
(0.380392156862745,0.8175234375,0.8175234375),
(0.384313725490196,0.79519921875,0.79519921875),
(0.388235294117647,0.77287890625,0.77287890625),
(0.392156862745098,0.75055859375,0.75055859375),
(0.396078431372549,0.72823828125,0.72823828125),
(0.4,0.7059140625,0.7059140625),
(0.403921568627451,0.68359375,0.68359375),
(0.407843137254902,0.6612734375,0.6612734375),
(0.411764705882353,0.63894921875,0.63894921875),
(0.415686274509804,0.61662890625,0.61662890625),
(0.419607843137255,0.59430859375,0.59430859375),
(0.423529411764706,0.57198828125,0.57198828125),
(0.427450980392157,0.5496640625,0.5496640625),
(0.431372549019608,0.52734375,0.52734375),
(0.435294117647059,0.5050234375,0.5050234375),
(0.43921568627451,0.48269921875,0.48269921875),
(0.443137254901961,0.46037890625,0.46037890625),
(0.447058823529412,0.43805859375,0.43805859375),
(0.450980392156863,0.41573828125,0.41573828125),
(0.454901960784314,0.3934140625,0.3934140625),
(0.458823529411765,0.37109375,0.37109375),
(0.462745098039216,0.348772265625,0.348772265625),
(0.466666666666667,0.32645078125,0.32645078125),
(0.470588235294118,0.304129296875,0.304129296875),
(0.474509803921569,0.281808203125,0.281808203125),
(0.47843137254902,0.25948671875,0.25948671875),
(0.482352941176471,0.237165234375,0.237165234375),
(0.486274509803922,0.21484375,0.21484375),
(0.490196078431373,0.233370703125,0.233370703125),
(0.494117647058824,0.251897265625,0.251897265625),
(0.498039215686275,0.27042421875,0.27042421875),
(0.501960784313725,0.28895078125,0.28895078125),
(0.505882352941176,0.307477734375,0.307477734375),
(0.509803921568627,0.326004296875,0.326004296875),
(0.513725490196078,0.34453125,0.34453125),
(0.517647058823529,0.363058203125,0.363058203125),
(0.52156862745098,0.381584765625,0.381584765625),
(0.525490196078431,0.40011328125,0.40011328125),
(0.529411764705882,0.41863671875,0.41863671875),
(0.533333333333333,0.4371640625,0.4371640625),
(0.537254901960784,0.45569140625,0.45569140625),
(0.541176470588235,0.47421875,0.47421875),
(0.545098039215686,0.49274609375,0.49274609375),
(0.549019607843137,0.5112734375,0.5112734375),
(0.552941176470588,0.52980078125,0.52980078125),
(0.556862745098039,0.54832421875,0.54832421875),
(0.56078431372549,0.5668515625,0.5668515625),
(0.564705882352941,0.58537890625,0.58537890625),
(0.568627450980392,0.60390625,0.60390625),
(0.572549019607843,0.62243359375,0.62243359375),
(0.576470588235294,0.6409609375,0.6409609375),
(0.580392156862745,0.65948828125,0.65948828125),
(0.584313725490196,0.67801171875,0.67801171875),
(0.588235294117647,0.6965390625,0.6965390625),
(0.592156862745098,0.71506640625,0.71506640625),
(0.596078431372549,0.73359375,0.73359375),
(0.6,0.75212109375,0.75212109375),
(0.603921568627451,0.7706484375,0.7706484375),
(0.607843137254902,0.78917578125,0.78917578125),
(0.611764705882353,0.80769921875,0.80769921875),
(0.615686274509804,0.8262265625,0.8262265625),
(0.619607843137255,0.84475390625,0.84475390625),
(0.623529411764706,0.86328125,0.86328125),
(0.627450980392157,0.84889453125,0.84889453125),
(0.631372549019608,0.83450390625,0.83450390625),
(0.635294117647059,0.8201171875,0.8201171875),
(0.63921568627451,0.80573046875,0.80573046875),
(0.643137254901961,0.79133984375,0.79133984375),
(0.647058823529412,0.776953125,0.776953125),
(0.650980392156863,0.76256640625,0.76256640625),
(0.654901960784314,0.74817578125,0.74817578125),
(0.658823529411765,0.7337890625,0.7337890625),
(0.662745098039216,0.71940234375,0.71940234375),
(0.666666666666667,0.70501171875,0.70501171875),
(0.670588235294118,0.690625,0.690625),
(0.674509803921569,0.67623828125,0.67623828125),
(0.67843137254902,0.66184765625,0.66184765625),
(0.682352941176471,0.6474609375,0.6474609375),
(0.686274509803922,0.63307421875,0.63307421875),
(0.690196078431373,0.61868359375,0.61868359375),
(0.694117647058824,0.604296875,0.604296875),
(0.698039215686274,0.58991015625,0.58991015625),
(0.701960784313725,0.57551953125,0.57551953125),
(0.705882352941177,0.5611328125,0.5611328125),
(0.709803921568627,0.54674609375,0.54674609375),
(0.713725490196078,0.53235546875,0.53235546875),
(0.717647058823529,0.51796875,0.51796875),
(0.72156862745098,0.50358203125,0.50358203125),
(0.725490196078431,0.48919140625,0.48919140625),
(0.729411764705882,0.4748046875,0.4748046875),
(0.733333333333333,0.46041796875,0.46041796875),
(0.737254901960784,0.44602734375,0.44602734375),
(0.741176470588235,0.431640625,0.431640625),
(0.745098039215686,0.41725390625,0.41725390625),
(0.749019607843137,0.40286328125,0.40286328125),
(0.752941176470588,0.3884765625,0.3884765625),
(0.756862745098039,0.374088671875,0.374088671875),
(0.76078431372549,0.359700390625,0.359700390625),
(0.764705882352941,0.3453125,0.3453125),
(0.768627450980392,0.330924609375,0.330924609375),
(0.772549019607843,0.316536328125,0.316536328125),
(0.776470588235294,0.3021484375,0.3021484375),
(0.780392156862745,0.287760546875,0.287760546875),
(0.784313725490196,0.273372265625,0.273372265625),
(0.788235294117647,0.258984375,0.258984375),
(0.792156862745098,0.244596484375,0.244596484375),
(0.796078431372549,0.230208203125,0.230208203125),
(0.8,0.2158203125,0.2158203125),
(0.803921568627451,0.201432421875,0.201432421875),
(0.807843137254902,0.187044140625,0.187044140625),
(0.811764705882353,0.17265625,0.17265625),
(0.815686274509804,0.158268359375,0.158268359375),
(0.819607843137255,0.143880078125,0.143880078125),
(0.823529411764706,0.1294921875,0.1294921875),
(0.827450980392157,0.115104296875,0.115104296875),
(0.831372549019608,0.100716015625,0.100716015625),
(0.835294117647059,0.086328125,0.086328125),
(0.83921568627451,0.071940234375,0.071940234375),
(0.843137254901961,0.057551953125,0.057551953125),
(0.847058823529412,0.0431640625,0.0431640625),
(0.850980392156863,0.028776015625,0.028776015625),
(0.854901960784314,0.01438796875,0.01438796875),
(0.858823529411765,0,0),
(0.862745098039216,0,0),
(0.866666666666667,0,0),
(0.870588235294118,0,0),
(0.874509803921569,0,0),
(0.87843137254902,0,0),
(0.882352941176471,0,0),
(0.886274509803922,0,0),
(0.890196078431373,0,0),
(0.894117647058824,0,0),
(0.898039215686275,0,0),
(0.901960784313726,0,0),
(0.905882352941176,0,0),
(0.909803921568627,0,0),
(0.913725490196078,0,0),
(0.917647058823529,0,0),
(0.92156862745098,0,0),
(0.925490196078431,0,0),
(0.929411764705882,0,0),
(0.933333333333333,0,0),
(0.937254901960784,0,0),
(0.941176470588235,0,0),
(0.945098039215686,0,0),
(0.949019607843137,0,0),
(0.952941176470588,0,0),
(0.956862745098039,0,0),
(0.96078431372549,0,0),
(0.964705882352941,0,0),
(0.968627450980392,0,0),
(0.972549019607843,0,0),
(0.976470588235294,0,0),
(0.980392156862745,0,0),
(0.984313725490196,0,0),
(0.988235294117647,0,0),
(0.992156862745098,0,0),
(0.996078431372549,0,0),
(1,0,0)),
}
califa = mcol.LinearSegmentedColormap('califa', cdict)
vcalifa = mcol.LinearSegmentedColormap('vcalifa', vcdict)
if option == 'v':
return vcalifa
else:
return califa
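
# Illustrative usage sketch (not part of the original file): `color_map_califa`
# returns a standard matplotlib LinearSegmentedColormap, so it can be passed
# directly as `cmap` to any plotting call. The helper below is a hypothetical
# demo added only for clarity; it assumes the module-level np/plt imports that
# this file already makes.
def _demo_color_map_califa():
    """Hedged example: display dummy data with the CALIFA colormap."""
    data = np.random.rand(64, 64)                 # dummy image, illustration only
    plt.imshow(data, cmap=color_map_califa('v'), origin='lower')
    plt.colorbar(label='arbitrary units')
    plt.show()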
def A_l(R_v, lw):
    # From Cardelli et al. (1989)
    # F_cor = F * 10**(0.4*Av*A_l(R_v,l))
lw = lw / 10000
x = 1 / lw
if x > 1.1:
y = x - 1.82
a_x = 1.0 + 0.17699*y - 0.50447*y**2 - 0.02427*y**3 + 0.72085*y**4 \
+ 0.01979*y**5 - 0.77530*y**6 + 0.32999*y**7
b_x = 1.41338*y + 2.28305*y**2 + 1.07233*y**3 - 5.38434*y**4 \
- 0.62251*y**5 + 5.30260*y**6 - 2.09002*y**7
else:
a_x = 0.574 * x ** 1.61
b_x = -0.527 * x ** 1.61
A_l_ = a_x + b_x/R_v
return A_l_
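
# Illustrative sketch (not part of the original file): how A_l is typically
# applied, following the comment above, F_cor = F * 10**(0.4*Av*A_l(R_v, l)).
# `flux`, `Av` and `wavelength` (in Angstrom, as A_l expects) are hypothetical
# inputs named only for this example; R_v = 3.1 is the usual Galactic default.
def _dust_correction_example(flux, Av, wavelength, R_v=3.1):
    """Hedged example: correct an observed flux for dust attenuation."""
    return flux * 10 ** (0.4 * Av * A_l(R_v, wavelength))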
|
[
"import json\nimport pickle\nimport numpy as np\nimport matplotlib.colors as mcol\nimport matplotlib.pyplot as plt\n\ndef color_map_califa_old(option='json'):\n if option == 'json':\n cmap_cal_dic = json.load(open(\"code/cmap_cal_json.txt\"))\n elif option == 'pickle':\n with open('cmap_cal_pickle.txt', 'rb') as handle:\n cmap_cal_dic = pickle.loads(handle.read())\n cmap_cal = mcol.LinearSegmentedColormap('cmap_CALIFA', cmap_cal_dic)\n return cmap_cal\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03',\n **kwargs)\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\ndef Gr_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_grazy = np.linspace(x_min, -0.2, 100)\n ax.plot(x_set_grazy, grazy(x_set_grazy), label='Stasinska+03', **kwargs)\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\ndef SII_LINERS_curve_plot(ax=None, x_min=-0.3, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line(x_set_line), label='LINER/Sy2 line', **kwargs)\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **kwargs)\n \n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.01, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_OI_curve_plot(ax=None, x_min=-3.5, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.35)\n ax.plot(x_set_line, O3O1_line_c(x_set_line), label=label, **kwargs)\n\ndef Es_curve_plot(ax=None, x_min=-2.0, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.08, 100)\n ax.plot(x_set_line, espinosa(x_set_line), label=label,**kwargs)\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\ndef kewley(logNIIHa):\n val = 0.61 / (logNIIHa - 0.47) + 1.19\n return val\n\n\ndef grazy(logNIIHa):\n x = logNIIHa\n val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409\n * x) - 31.093\n return val\n\ndef AGNline(logSIIHa):\n val = 0.72 / (logSIIHa - 0.32) + 1.30\n return val\n\n\ndef 
LINSy2line(logSIIHa):\n val = 1.89 * logSIIHa + 0.76\n return val\n\n\ndef AGNline2(logOIHa):\n val = 0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\ndef LINSy2line2(logOIHa):\n val = 1.18 * logOIHa + 1.30\n return val\n\ndef espinosa(logNIIHa):\n # val = 0.39524936 / (logNIIHa - 0.19433616) + 0.83978817 -> script\n # val = 0.271991 / (logNIIHa - 0.151508) + 0.707842 -> Paper_v1\n # val = 0.1117129 / (logNIIHa - 0.00561609) + 0.5615803 ->Paper_v2\n val = 0.12579066 / (logNIIHa - 0.00302777) + 0.56846872\n return val\n\ndef O3S2_line_c(x):\n # bins 50\n # array([0.06873157, 0.0220047 , 0.63478451])\n # val = 0.18304241 / (x - 0.0816372) + 0.84992528\n # val = 0.06873157 / (x - 0.0220047) + 0.63478451 -> Paper_v1\n # val = 0.05374055 / (x - 0.01436536) + 0.59681538 ->Paper_v2\n val = 0.04074804 / (x + 0.01253238) + 0.58154113\n return val\n\ndef O3O1_line_c(x):\n # val = 0.17214096 / (x - (-0.19999267)) + 0.66782104#->95%b100\n # val = 0.20010436 / (x - (-0.30512696)) + 0.65999132#->92%b75\n # val = 0.13626915 / (x - (-0.34052757)) + 0.59185332 #->92%b100 -> Paper_v1\n # val = 0.07056593 / (x - (-0.49660009)) + 0.55574729 #->92%b=60\n # val = 0.07329029 / (x + 0.42586138) + 0.60909743 ->Paper_v2\n # val = 0.06602301 / (x + 0.55165265) + 0.5308747\n val = 0.05612915 / (x + 0.39641533) + 0.60969495\n return val\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03',\n **kwargs)\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\ndef Gr_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_grazy = np.linspace(x_min, -0.2, 100)\n ax.plot(x_set_grazy, grazy(x_set_grazy), label='Stasinska+03', **kwargs)\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\ndef SII_LINERS_curve_plot(ax=None, x_min=-0.3, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line(x_set_line), label='LINER/Sy2 line', **kwargs)\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **kwargs)\n \n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.02, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_OI_curve_plot(ax=None, x_min=-3.5, label='Espinosa-Ponce+20', 
**kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.4, 100)\n ax.plot(x_set_line, O3O1_line_c(x_set_line), label=label, **kwargs)\n\ndef Es_curve_plot(ax=None, x_min=-2.0, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.00302, 100)\n ax.plot(x_set_line, espinosa(x_set_line), label=label, **kwargs)\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\ndef kewley(logNIIHa):\n val = 0.61 / (logNIIHa - 0.47) + 1.19\n return val\n\n\ndef grazy(logNIIHa):\n x = logNIIHa\n val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409\n * x) - 31.093\n return val\n\ndef A_l(R_v, lw):\n # From Cardelli,1989\n # F_cor = F * 10 ***(0.4*Av*A_l(R_v,l))\n lw = lw / 10000\n x = 1 / lw\n if x > 1.1:\n y = x - 1.82\n a_x = 1.0 + 0.17699*y - 0.50447*y**2 - 0.02427*y**3 + 0.72085*y**4 \\\n + 0.01979*y**5 - 0.77530*y**6 + 0.32999*y**7\n b_x = 1.41338*y + 2.28305*y**2 + 1.07233*y**3 - 5.38434*y**4 \\\n - 0.62251*y**5 + 5.30260*y**6 - 2.09002*y**7\n else:\n a_x = 0.574 * x ** 1.61\n b_x = -0.527 * x ** 1.61\n A_l_ = a_x + b_x/R_v\n return A_l_\n\ndef color_map_califa(option='v'):\n######## CALIFA CTs #############################\n cdict = {\n 'red' : (\n (0.0,0,0),\n (0.00392156862745098,0,0),\n (0.00784313725490196,0,0),\n (0.0117647058823529,0,0),\n (0.0156862745098039,0,0),\n (0.0196078431372549,0,0),\n (0.0235294117647059,0,0),\n (0.0274509803921569,0,0),\n (0.0313725490196078,0,0),\n (0.0352941176470588,0,0),\n (0.0392156862745098,0,0),\n (0.0431372549019608,0,0),\n (0.0470588235294118,0,0),\n (0.0509803921568627,0,0),\n (0.0549019607843137,0,0),\n (0.0588235294117647,0,0),\n (0.0627450980392157,0,0),\n (0.0666666666666667,0,0),\n (0.0705882352941176,0,0),\n (0.0745098039215686,0,0),\n (0.0784313725490196,0,0),\n (0.0823529411764706,0,0),\n (0.0862745098039216,0,0),\n (0.0901960784313725,0,0),\n (0.0941176470588235,0,0),\n (0.0980392156862745,0,0),\n (0.101960784313725,0,0),\n (0.105882352941176,0,0),\n (0.109803921568627,0,0),\n (0.113725490196078,0,0),\n (0.117647058823529,0,0),\n (0.12156862745098,0,0),\n (0.125490196078431,0,0),\n (0.129411764705882,0,0),\n (0.133333333333333,0,0),\n (0.137254901960784,0,0),\n (0.141176470588235,0,0),\n (0.145098039215686,0,0),\n (0.149019607843137,0,0),\n (0.152941176470588,0,0),\n (0.156862745098039,0,0),\n (0.16078431372549,0,0),\n (0.164705882352941,0,0),\n (0.168627450980392,0,0),\n (0.172549019607843,0,0),\n (0.176470588235294,0,0),\n (0.180392156862745,0,0),\n (0.184313725490196,0,0),\n (0.188235294117647,0,0),\n (0.192156862745098,0,0),\n (0.196078431372549,0.019921875,0.019921875),\n (0.2,0.03984375,0.03984375),\n (0.203921568627451,0.059765625,0.059765625),\n (0.207843137254902,0.0796875,0.0796875),\n (0.211764705882353,0.099609375,0.099609375),\n (0.215686274509804,0.11953125,0.11953125),\n (0.219607843137255,0.139453125,0.139453125),\n (0.223529411764706,0.159375,0.159375),\n (0.227450980392157,0.179296875,0.179296875),\n (0.231372549019608,0.19921875,0.19921875),\n (0.235294117647059,0.219140625,0.219140625),\n (0.23921568627451,0.2390625,0.2390625),\n (0.243137254901961,0.258984375,0.258984375),\n (0.247058823529412,0.27890625,0.27890625),\n (0.250980392156863,0.298828125,0.298828125),\n (0.254901960784314,0.31875,0.31875),\n (0.258823529411765,0.338671875,0.338671875),\n (0.262745098039216,0.35859375,0.35859375),\n 
(0.266666666666667,0.378515625,0.378515625),\n (0.270588235294118,0.3984375,0.3984375),\n (0.274509803921569,0.418359375,0.418359375),\n (0.27843137254902,0.43828125,0.43828125),\n (0.282352941176471,0.458203125,0.458203125),\n (0.286274509803922,0.478125,0.478125),\n (0.290196078431373,0.498046875,0.498046875),\n (0.294117647058824,0.51796875,0.51796875),\n (0.298039215686275,0.537890625,0.537890625),\n (0.301960784313725,0.5578125,0.5578125),\n (0.305882352941176,0.577734375,0.577734375),\n (0.309803921568627,0.59765625,0.59765625),\n (0.313725490196078,0.617578125,0.617578125),\n (0.317647058823529,0.6375,0.6375),\n (0.32156862745098,0.657421875,0.657421875),\n (0.325490196078431,0.67734375,0.67734375),\n (0.329411764705882,0.697265625,0.697265625),\n (0.333333333333333,0.7171875,0.7171875),\n (0.337254901960784,0.737109375,0.737109375),\n (0.341176470588235,0.75703125,0.75703125),\n (0.345098039215686,0.776953125,0.776953125),\n (0.349019607843137,0.796875,0.796875),\n (0.352941176470588,0.816796875,0.816796875),\n (0.356862745098039,0.83671875,0.83671875),\n (0.36078431372549,0.856640625,0.856640625),\n (0.364705882352941,0.8765625,0.8765625),\n (0.368627450980392,0.896484375,0.896484375),\n (0.372549019607843,0.91640625,0.91640625),\n (0.376470588235294,0.936328125,0.936328125),\n (0.380392156862745,0.95625,0.95625),\n (0.384313725490196,0.976171875,0.976171875),\n (0.388235294117647,0.99609375,0.99609375),\n (0.392156862745098,0.99609375,0.99609375),\n (0.396078431372549,0.99609375,0.99609375),\n (0.4,0.99609375,0.99609375),\n (0.403921568627451,0.99609375,0.99609375),\n (0.407843137254902,0.99609375,0.99609375),\n (0.411764705882353,0.99609375,0.99609375),\n (0.415686274509804,0.99609375,0.99609375),\n (0.419607843137255,0.99609375,0.99609375),\n (0.423529411764706,0.99609375,0.99609375),\n (0.427450980392157,0.99609375,0.99609375),\n (0.431372549019608,0.99609375,0.99609375),\n (0.435294117647059,0.99609375,0.99609375),\n (0.43921568627451,0.99609375,0.99609375),\n (0.443137254901961,0.99609375,0.99609375),\n (0.447058823529412,0.99609375,0.99609375),\n (0.450980392156863,0.99609375,0.99609375),\n (0.454901960784314,0.99609375,0.99609375),\n (0.458823529411765,0.99609375,0.99609375),\n (0.462745098039216,0.99609375,0.99609375),\n (0.466666666666667,0.99609375,0.99609375),\n (0.470588235294118,0.99609375,0.99609375),\n (0.474509803921569,0.99609375,0.99609375),\n (0.47843137254902,0.99609375,0.99609375),\n (0.482352941176471,0.99609375,0.99609375),\n (0.486274509803922,0.99609375,0.99609375),\n (0.490196078431373,0.99609375,0.99609375),\n (0.494117647058824,0.99609375,0.99609375),\n (0.498039215686275,0.99609375,0.99609375),\n (0.501960784313725,0.99609375,0.99609375),\n (0.505882352941176,0.99609375,0.99609375),\n (0.509803921568627,0.99609375,0.99609375),\n (0.513725490196078,0.99609375,0.99609375),\n (0.517647058823529,0.99609375,0.99609375),\n (0.52156862745098,0.99609375,0.99609375),\n (0.525490196078431,0.99609375,0.99609375),\n (0.529411764705882,0.99609375,0.99609375),\n (0.533333333333333,0.99609375,0.99609375),\n (0.537254901960784,0.99609375,0.99609375),\n (0.541176470588235,0.99609375,0.99609375),\n (0.545098039215686,0.99609375,0.99609375),\n (0.549019607843137,0.99609375,0.99609375),\n (0.552941176470588,0.99609375,0.99609375),\n (0.556862745098039,0.99609375,0.99609375),\n (0.56078431372549,0.99609375,0.99609375),\n (0.564705882352941,0.99609375,0.99609375),\n (0.568627450980392,0.99609375,0.99609375),\n (0.572549019607843,0.99609375,0.99609375),\n 
(0.576470588235294,0.99609375,0.99609375),\n (0.580392156862745,0.99609375,0.99609375),\n (0.584313725490196,0.99609375,0.99609375),\n (0.588235294117647,0.98046875,0.98046875),\n (0.592156862745098,0.96484375,0.96484375),\n (0.596078431372549,0.94921875,0.94921875),\n (0.6,0.93359375,0.93359375),\n (0.603921568627451,0.91796875,0.91796875),\n (0.607843137254902,0.90234375,0.90234375),\n (0.611764705882353,0.88671875,0.88671875),\n (0.615686274509804,0.87109375,0.87109375),\n (0.619607843137255,0.85546875,0.85546875),\n (0.623529411764706,0.83984375,0.83984375),\n (0.627450980392157,0.82421875,0.82421875),\n (0.631372549019608,0.80859375,0.80859375),\n (0.635294117647059,0.79296875,0.79296875),\n (0.63921568627451,0.77734375,0.77734375),\n (0.643137254901961,0.76171875,0.76171875),\n (0.647058823529412,0.74609375,0.74609375),\n (0.650980392156863,0.73046875,0.73046875),\n (0.654901960784314,0.71484375,0.71484375),\n (0.658823529411765,0.69921875,0.69921875),\n (0.662745098039216,0.68359375,0.68359375),\n (0.666666666666667,0.66796875,0.66796875),\n (0.670588235294118,0.65234375,0.65234375),\n (0.674509803921569,0.63671875,0.63671875),\n (0.67843137254902,0.62109375,0.62109375),\n (0.682352941176471,0.60546875,0.60546875),\n (0.686274509803922,0.58984375,0.58984375),\n (0.690196078431373,0.57421875,0.57421875),\n (0.694117647058824,0.55859375,0.55859375),\n (0.698039215686274,0.54296875,0.54296875),\n (0.701960784313725,0.52734375,0.52734375),\n (0.705882352941177,0.51171875,0.51171875),\n (0.709803921568627,0.49609375,0.49609375),\n (0.713725490196078,0.48046875,0.48046875),\n (0.717647058823529,0.46484375,0.46484375),\n (0.72156862745098,0.44921875,0.44921875),\n (0.725490196078431,0.43359375,0.43359375),\n (0.729411764705882,0.41796875,0.41796875),\n (0.733333333333333,0.40234375,0.40234375),\n (0.737254901960784,0.38671875,0.38671875),\n (0.741176470588235,0.37109375,0.37109375),\n (0.745098039215686,0.35546875,0.35546875),\n (0.749019607843137,0.33984375,0.33984375),\n (0.752941176470588,0.32421875,0.32421875),\n (0.756862745098039,0.30859375,0.30859375),\n (0.76078431372549,0.29296875,0.29296875),\n (0.764705882352941,0.27734375,0.27734375),\n (0.768627450980392,0.26171875,0.26171875),\n (0.772549019607843,0.24609375,0.24609375),\n (0.776470588235294,0.23046875,0.23046875),\n (0.780392156862745,0.21484375,0.21484375),\n (0.784313725490196,0.22663359375,0.22663359375),\n (0.788235294117647,0.2384234375,0.2384234375),\n (0.792156862745098,0.250212890625,0.250212890625),\n (0.796078431372549,0.262002734375,0.262002734375),\n (0.8,0.273792578125,0.273792578125),\n (0.803921568627451,0.285582421875,0.285582421875),\n (0.807843137254902,0.297372265625,0.297372265625),\n (0.811764705882353,0.309162109375,0.309162109375),\n (0.815686274509804,0.3209515625,0.3209515625),\n (0.819607843137255,0.33274140625,0.33274140625),\n (0.823529411764706,0.34453125,0.34453125),\n (0.827450980392157,0.35632109375,0.35632109375),\n (0.831372549019608,0.3681109375,0.3681109375),\n (0.835294117647059,0.379900390625,0.379900390625),\n (0.83921568627451,0.39169140625,0.39169140625),\n (0.843137254901961,0.40348046875,0.40348046875),\n (0.847058823529412,0.41526953125,0.41526953125),\n (0.850980392156863,0.42705859375,0.42705859375),\n (0.854901960784314,0.43884765625,0.43884765625),\n (0.858823529411765,0.450640625,0.450640625),\n (0.862745098039216,0.4624296875,0.4624296875),\n (0.866666666666667,0.47421875,0.47421875),\n (0.870588235294118,0.4860078125,0.4860078125),\n 
(0.874509803921569,0.497796875,0.497796875),\n (0.87843137254902,0.50958984375,0.50958984375),\n (0.882352941176471,0.52137890625,0.52137890625),\n (0.886274509803922,0.53316796875,0.53316796875),\n (0.890196078431373,0.54495703125,0.54495703125),\n (0.894117647058824,0.55674609375,0.55674609375),\n (0.898039215686275,0.56853515625,0.56853515625),\n (0.901960784313726,0.580328125,0.580328125),\n (0.905882352941176,0.5921171875,0.5921171875),\n (0.909803921568627,0.60390625,0.60390625),\n (0.913725490196078,0.6156953125,0.6156953125),\n (0.917647058823529,0.627484375,0.627484375),\n (0.92156862745098,0.63927734375,0.63927734375),\n (0.925490196078431,0.65106640625,0.65106640625),\n (0.929411764705882,0.66285546875,0.66285546875),\n (0.933333333333333,0.67464453125,0.67464453125),\n (0.937254901960784,0.68643359375,0.68643359375),\n (0.941176470588235,0.69822265625,0.69822265625),\n (0.945098039215686,0.710015625,0.710015625),\n (0.949019607843137,0.7218046875,0.7218046875),\n (0.952941176470588,0.73359375,0.73359375),\n (0.956862745098039,0.7453828125,0.7453828125),\n (0.96078431372549,0.757171875,0.757171875),\n (0.964705882352941,0.76896484375,0.76896484375),\n (0.968627450980392,0.78075390625,0.78075390625),\n (0.972549019607843,0.79254296875,0.79254296875),\n (0.976470588235294,0.80433203125,0.80433203125),\n (0.980392156862745,0.81612109375,0.81612109375),\n (0.984313725490196,0.82791015625,0.82791015625),\n (0.988235294117647,0.839703125,0.839703125),\n (0.992156862745098,0.8514921875,0.8514921875),\n (0.996078431372549,0.86328125,0.86328125),\n (1.00000,0.86328125,0.86328125)),\n 'green' : (\n (0.00,0.02984375,0.02984375),\n (0.00392156862745098,0.02984375,0.02984375),\n (0.00784313725490196,0.044765625,0.044765625),\n (0.0117647058823529,0.0596875,0.0596875),\n (0.0156862745098039,0.074609375,0.074609375),\n (0.0196078431372549,0.08953125,0.08953125),\n (0.0235294117647059,0.104453125,0.104453125),\n (0.0274509803921569,0.119375,0.119375),\n (0.0313725490196078,0.134296875,0.134296875),\n (0.0352941176470588,0.14921875,0.14921875),\n (0.0392156862745098,0.164140625,0.164140625),\n (0.0431372549019608,0.1790625,0.1790625),\n (0.0470588235294118,0.193984375,0.193984375),\n (0.0509803921568627,0.20890625,0.20890625),\n (0.0549019607843137,0.223828125,0.223828125),\n (0.0588235294117647,0.23875,0.23875),\n (0.0627450980392157,0.253671875,0.253671875),\n (0.0666666666666667,0.26859375,0.26859375),\n (0.0705882352941176,0.283515625,0.283515625),\n (0.0745098039215686,0.2984375,0.2984375),\n (0.0784313725490196,0.313359375,0.313359375),\n (0.0823529411764706,0.32828125,0.32828125),\n (0.0862745098039216,0.343203125,0.343203125),\n (0.0901960784313725,0.358125,0.358125),\n (0.0941176470588235,0.373046875,0.373046875),\n (0.0980392156862745,0.38796875,0.38796875),\n (0.101960784313725,0.402890625,0.402890625),\n (0.105882352941176,0.4178125,0.4178125),\n (0.109803921568627,0.432734375,0.432734375),\n (0.113725490196078,0.44765625,0.44765625),\n (0.117647058823529,0.462578125,0.462578125),\n (0.12156862745098,0.4775,0.4775),\n (0.125490196078431,0.492421875,0.492421875),\n (0.129411764705882,0.50734375,0.50734375),\n (0.133333333333333,0.522265625,0.522265625),\n (0.137254901960784,0.5371875,0.5371875),\n (0.141176470588235,0.552109375,0.552109375),\n (0.145098039215686,0.56703125,0.56703125),\n (0.149019607843137,0.581953125,0.581953125),\n (0.152941176470588,0.596875,0.596875),\n (0.156862745098039,0.611796875,0.611796875),\n (0.16078431372549,0.62671875,0.62671875),\n 
(0.164705882352941,0.641640625,0.641640625),\n (0.168627450980392,0.6565625,0.6565625),\n (0.172549019607843,0.671484375,0.671484375),\n (0.176470588235294,0.68640625,0.68640625),\n (0.180392156862745,0.701328125,0.701328125),\n (0.184313725490196,0.71625,0.71625),\n (0.188235294117647,0.731171875,0.731171875),\n (0.192156862745098,0.74609375,0.74609375),\n (0.196078431372549,0.731171875,0.731171875),\n (0.2,0.71625,0.71625),\n (0.203921568627451,0.701328125,0.701328125),\n (0.207843137254902,0.68640625,0.68640625),\n (0.211764705882353,0.671484375,0.671484375),\n (0.215686274509804,0.6565625,0.6565625),\n (0.219607843137255,0.641640625,0.641640625),\n (0.223529411764706,0.62671875,0.62671875),\n (0.227450980392157,0.611796875,0.611796875),\n (0.231372549019608,0.596875,0.596875),\n (0.235294117647059,0.581953125,0.581953125),\n (0.23921568627451,0.56703125,0.56703125),\n (0.243137254901961,0.552109375,0.552109375),\n (0.247058823529412,0.5371875,0.5371875),\n (0.250980392156863,0.522265625,0.522265625),\n (0.254901960784314,0.50734375,0.50734375),\n (0.258823529411765,0.492421875,0.492421875),\n (0.262745098039216,0.4775,0.4775),\n (0.266666666666667,0.462578125,0.462578125),\n (0.270588235294118,0.44765625,0.44765625),\n (0.274509803921569,0.432734375,0.432734375),\n (0.27843137254902,0.4178125,0.4178125),\n (0.282352941176471,0.402890625,0.402890625),\n (0.286274509803922,0.38796875,0.38796875),\n (0.290196078431373,0.373046875,0.373046875),\n (0.294117647058824,0.358125,0.358125),\n (0.298039215686275,0.343203125,0.343203125),\n (0.301960784313725,0.32828125,0.32828125),\n (0.305882352941176,0.313359375,0.313359375),\n (0.309803921568627,0.2984375,0.2984375),\n (0.313725490196078,0.283515625,0.283515625),\n (0.317647058823529,0.26859375,0.26859375),\n (0.32156862745098,0.253671875,0.253671875),\n (0.325490196078431,0.23875,0.23875),\n (0.329411764705882,0.223828125,0.223828125),\n (0.333333333333333,0.20890625,0.20890625),\n (0.337254901960784,0.193984375,0.193984375),\n (0.341176470588235,0.1790625,0.1790625),\n (0.345098039215686,0.164140625,0.164140625),\n (0.349019607843137,0.14921875,0.14921875),\n (0.352941176470588,0.134296875,0.134296875),\n (0.356862745098039,0.119375,0.119375),\n (0.36078431372549,0.104453125,0.104453125),\n (0.364705882352941,0.08953125,0.08953125),\n (0.368627450980392,0.074609375,0.074609375),\n (0.372549019607843,0.0596875,0.0596875),\n (0.376470588235294,0.044765625,0.044765625),\n (0.380392156862745,0.0298437890625,0.0298437890625),\n (0.384313725490196,0.014921875,0.014921875),\n (0.388235294117647,0,0),\n (0.392156862745098,0.012890625,0.012890625),\n (0.396078431372549,0.02578125,0.02578125),\n (0.4,0.038671875,0.038671875),\n (0.403921568627451,0.0515625,0.0515625),\n (0.407843137254902,0.064453125,0.064453125),\n (0.411764705882353,0.07734375,0.07734375),\n (0.415686274509804,0.090234375,0.090234375),\n (0.419607843137255,0.103125,0.103125),\n (0.423529411764706,0.116015625,0.116015625),\n (0.427450980392157,0.12890625,0.12890625),\n (0.431372549019608,0.141796875,0.141796875),\n (0.435294117647059,0.1546875,0.1546875),\n (0.43921568627451,0.167578125,0.167578125),\n (0.443137254901961,0.18046875,0.18046875),\n (0.447058823529412,0.193359375,0.193359375),\n (0.450980392156863,0.20625,0.20625),\n (0.454901960784314,0.219140625,0.219140625),\n (0.458823529411765,0.23203125,0.23203125),\n (0.462745098039216,0.244921875,0.244921875),\n (0.466666666666667,0.2578125,0.2578125),\n (0.470588235294118,0.270703125,0.270703125),\n 
(0.474509803921569,0.28359375,0.28359375),\n (0.47843137254902,0.296484375,0.296484375),\n (0.482352941176471,0.309375,0.309375),\n (0.486274509803922,0.322265625,0.322265625),\n (0.490196078431373,0.33515625,0.33515625),\n (0.494117647058824,0.348046875,0.348046875),\n (0.498039215686275,0.3609375,0.3609375),\n (0.501960784313725,0.373828125,0.373828125),\n (0.505882352941176,0.38671875,0.38671875),\n (0.509803921568627,0.399609375,0.399609375),\n (0.513725490196078,0.4125,0.4125),\n (0.517647058823529,0.425390625,0.425390625),\n (0.52156862745098,0.43828125,0.43828125),\n (0.525490196078431,0.451171875,0.451171875),\n (0.529411764705882,0.4640625,0.4640625),\n (0.533333333333333,0.476953125,0.476953125),\n (0.537254901960784,0.48984375,0.48984375),\n (0.541176470588235,0.502734375,0.502734375),\n (0.545098039215686,0.515625,0.515625),\n (0.549019607843137,0.528515625,0.528515625),\n (0.552941176470588,0.54140625,0.54140625),\n (0.556862745098039,0.554296875,0.554296875),\n (0.56078431372549,0.5671875,0.5671875),\n (0.564705882352941,0.580078125,0.580078125),\n (0.568627450980392,0.59296875,0.59296875),\n (0.572549019607843,0.605859375,0.605859375),\n (0.576470588235294,0.61875,0.61875),\n (0.580392156862745,0.631640625,0.631640625),\n (0.584313725490196,0.64453125,0.64453125),\n (0.588235294117647,0.6359375,0.6359375),\n (0.592156862745098,0.62734375,0.62734375),\n (0.596078431372549,0.61875,0.61875),\n (0.6,0.61015625,0.61015625),\n (0.603921568627451,0.6015625,0.6015625),\n (0.607843137254902,0.59296875,0.59296875),\n (0.611764705882353,0.584375,0.584375),\n (0.615686274509804,0.57578125,0.57578125),\n (0.619607843137255,0.5671875,0.5671875),\n (0.623529411764706,0.55859375,0.55859375),\n (0.627450980392157,0.55,0.55),\n (0.631372549019608,0.54140625,0.54140625),\n (0.635294117647059,0.5328125,0.5328125),\n (0.63921568627451,0.52421875,0.52421875),\n (0.643137254901961,0.515625,0.515625),\n (0.647058823529412,0.50703125,0.50703125),\n (0.650980392156863,0.4984375,0.4984375),\n (0.654901960784314,0.48984375,0.48984375),\n (0.658823529411765,0.48125,0.48125),\n (0.662745098039216,0.47265625,0.47265625),\n (0.666666666666667,0.4640625,0.4640625),\n (0.670588235294118,0.45546875,0.45546875),\n (0.674509803921569,0.446875,0.446875),\n (0.67843137254902,0.43828125,0.43828125),\n (0.682352941176471,0.4296875,0.4296875),\n (0.686274509803922,0.42109375,0.42109375),\n (0.690196078431373,0.4125,0.4125),\n (0.694117647058824,0.40390625,0.40390625),\n (0.698039215686274,0.3953125,0.3953125),\n (0.701960784313725,0.38671875,0.38671875),\n (0.705882352941177,0.378125,0.378125),\n (0.709803921568627,0.36953125,0.36953125),\n (0.713725490196078,0.3609375,0.3609375),\n (0.717647058823529,0.35234375,0.35234375),\n (0.72156862745098,0.34375,0.34375),\n (0.725490196078431,0.33515625,0.33515625),\n (0.729411764705882,0.3265625,0.3265625),\n (0.733333333333333,0.31796875,0.31796875),\n (0.737254901960784,0.309375,0.309375),\n (0.741176470588235,0.30078125,0.30078125),\n (0.745098039215686,0.2921875,0.2921875),\n (0.749019607843137,0.28359375,0.28359375),\n (0.752941176470588,0.275,0.275),\n (0.756862745098039,0.26640625,0.26640625),\n (0.76078431372549,0.2578125,0.2578125),\n (0.764705882352941,0.24921875,0.24921875),\n (0.768627450980392,0.240625,0.240625),\n (0.772549019607843,0.23203125,0.23203125),\n (0.776470588235294,0.2234375,0.2234375),\n (0.780392156862745,0.21484375,0.21484375),\n (0.784313725490196,0.222301171875,0.222301171875),\n (0.788235294117647,0.22975859375,0.22975859375),\n 
(0.792156862745098,0.237216015625,0.237216015625),\n (0.796078431372549,0.2446734375,0.2446734375),\n (0.8,0.252130859375,0.252130859375),\n (0.803921568627451,0.259587890625,0.259587890625),\n (0.807843137254902,0.2670453125,0.2670453125),\n (0.811764705882353,0.274502734375,0.274502734375),\n (0.815686274509804,0.28196015625,0.28196015625),\n (0.819607843137255,0.289417578125,0.289417578125),\n (0.823529411764706,0.296875,0.296875),\n (0.827450980392157,0.304332421875,0.304332421875),\n (0.831372549019608,0.31178984375,0.31178984375),\n (0.835294117647059,0.319247265625,0.319247265625),\n (0.83921568627451,0.3267046875,0.3267046875),\n (0.843137254901961,0.334162109375,0.334162109375),\n (0.847058823529412,0.34161953125,0.34161953125),\n (0.850980392156863,0.3490765625,0.3490765625),\n (0.854901960784314,0.356533984375,0.356533984375),\n (0.858823529411765,0.36399140625,0.36399140625),\n (0.862745098039216,0.371448828125,0.371448828125),\n (0.866666666666667,0.37890625,0.37890625),\n (0.870588235294118,0.386363671875,0.386363671875),\n (0.874509803921569,0.3938203125,0.3938203125),\n (0.87843137254902,0.40127734375,0.40127734375),\n (0.882352941176471,0.408734375,0.408734375),\n (0.886274509803922,0.41619140625,0.41619140625),\n (0.890196078431373,0.42365234375,0.42365234375),\n (0.894117647058824,0.431109375,0.431109375),\n (0.898039215686275,0.43856640625,0.43856640625),\n (0.901960784313726,0.4460234375,0.4460234375),\n (0.905882352941176,0.45348046875,0.45348046875),\n (0.909803921568627,0.4609375,0.4609375),\n (0.913725490196078,0.46839453125,0.46839453125),\n (0.917647058823529,0.4758515625,0.4758515625),\n (0.92156862745098,0.48330859375,0.48330859375),\n (0.925490196078431,0.490765625,0.490765625),\n (0.929411764705882,0.49822265625,0.49822265625),\n (0.933333333333333,0.50568359375,0.50568359375),\n (0.937254901960784,0.513140625,0.513140625),\n (0.941176470588235,0.52059765625,0.52059765625),\n (0.945098039215686,0.5280546875,0.5280546875),\n (0.949019607843137,0.53551171875,0.53551171875),\n (0.952941176470588,0.54296875,0.54296875),\n (0.956862745098039,0.55042578125,0.55042578125),\n (0.96078431372549,0.5578828125,0.5578828125),\n (0.964705882352941,0.56533984375,0.56533984375),\n (0.968627450980392,0.572796875,0.572796875),\n (0.972549019607843,0.58025390625,0.58025390625),\n (0.976470588235294,0.58771484375,0.58771484375),\n (0.980392156862745,0.595171875,0.595171875),\n (0.984313725490196,0.60262890625,0.60262890625),\n (0.988235294117647,0.6100859375,0.6100859375),\n (0.992156862745098,0.61754296875,0.61754296875),\n (0.996078431372549,0.625,0.625),\n (1.0000,0.625,0.625)),\n 'blue' : (\n (0.0,0.51984375,0.51984375),\n (0.00392156862745098,0.51984375,0.51984375),\n (0.00784313725490196,0.529765625,0.529765625),\n (0.0117647058823529,0.5396875,0.5396875),\n (0.0156862745098039,0.549609375,0.549609375),\n (0.0196078431372549,0.55953125,0.55953125),\n (0.0235294117647059,0.569453125,0.569453125),\n (0.0274509803921569,0.579375,0.579375),\n (0.0313725490196078,0.589296875,0.589296875),\n (0.0352941176470588,0.59921875,0.59921875),\n (0.0392156862745098,0.609140625,0.609140625),\n (0.0431372549019608,0.6190625,0.6190625),\n (0.0470588235294118,0.628984375,0.628984375),\n (0.0509803921568627,0.63890625,0.63890625),\n (0.0549019607843137,0.648828125,0.648828125),\n (0.0588235294117647,0.65875,0.65875),\n (0.0627450980392157,0.668671875,0.668671875),\n (0.0666666666666667,0.67859375,0.67859375),\n (0.0705882352941176,0.688515625,0.688515625),\n 
(0.0745098039215686,0.6984375,0.6984375),\n (0.0784313725490196,0.708359375,0.708359375),\n (0.0823529411764706,0.71828125,0.71828125),\n (0.0862745098039216,0.728203125,0.728203125),\n (0.0901960784313725,0.738125,0.738125),\n (0.0941176470588235,0.748046875,0.748046875),\n (0.0980392156862745,0.75796875,0.75796875),\n (0.101960784313725,0.767890625,0.767890625),\n (0.105882352941176,0.7778125,0.7778125),\n (0.109803921568627,0.787734375,0.787734375),\n (0.113725490196078,0.79765625,0.79765625),\n (0.117647058823529,0.807578125,0.807578125),\n (0.12156862745098,0.8175,0.8175),\n (0.125490196078431,0.827421875,0.827421875),\n (0.129411764705882,0.83734375,0.83734375),\n (0.133333333333333,0.847265625,0.847265625),\n (0.137254901960784,0.8571875,0.8571875),\n (0.141176470588235,0.867109375,0.867109375),\n (0.145098039215686,0.87703125,0.87703125),\n (0.149019607843137,0.886953125,0.886953125),\n (0.152941176470588,0.896875,0.896875),\n (0.156862745098039,0.906796875,0.906796875),\n (0.16078431372549,0.91671875,0.91671875),\n (0.164705882352941,0.926640625,0.926640625),\n (0.168627450980392,0.9365625,0.9365625),\n (0.172549019607843,0.946484375,0.946484375),\n (0.176470588235294,0.95640625,0.95640625),\n (0.180392156862745,0.966328125,0.966328125),\n (0.184313725490196,0.97625,0.97625),\n (0.188235294117647,0.986171875,0.986171875),\n (0.192156862745098,0.99609375,0.99609375),\n (0.196078431372549,0.976171875,0.976171875),\n (0.2,0.95625,0.95625),\n (0.203921568627451,0.936328125,0.936328125),\n (0.207843137254902,0.91640625,0.91640625),\n (0.211764705882353,0.896484375,0.896484375),\n (0.215686274509804,0.8765625,0.8765625),\n (0.219607843137255,0.856640625,0.856640625),\n (0.223529411764706,0.83671875,0.83671875),\n (0.227450980392157,0.816796875,0.816796875),\n (0.231372549019608,0.796875,0.796875),\n (0.235294117647059,0.776953125,0.776953125),\n (0.23921568627451,0.75703125,0.75703125),\n (0.243137254901961,0.737109375,0.737109375),\n (0.247058823529412,0.7171875,0.7171875),\n (0.250980392156863,0.697265625,0.697265625),\n (0.254901960784314,0.67734375,0.67734375),\n (0.258823529411765,0.657421875,0.657421875),\n (0.262745098039216,0.6375,0.6375),\n (0.266666666666667,0.617578125,0.617578125),\n (0.270588235294118,0.59765625,0.59765625),\n (0.274509803921569,0.577734375,0.577734375),\n (0.27843137254902,0.5578125,0.5578125),\n (0.282352941176471,0.537890625,0.537890625),\n (0.286274509803922,0.51796875,0.51796875),\n (0.290196078431373,0.498046875,0.498046875),\n (0.294117647058824,0.478125,0.478125),\n (0.298039215686275,0.458203125,0.458203125),\n (0.301960784313725,0.43828125,0.43828125),\n (0.305882352941176,0.418359375,0.418359375),\n (0.309803921568627,0.3984375,0.3984375),\n (0.313725490196078,0.378515625,0.378515625),\n (0.317647058823529,0.35859375,0.35859375),\n (0.32156862745098,0.338671875,0.338671875),\n (0.325490196078431,0.31875,0.31875),\n (0.329411764705882,0.298828125,0.298828125),\n (0.333333333333333,0.27890625,0.27890625),\n (0.337254901960784,0.258984375,0.258984375),\n (0.341176470588235,0.2390625,0.2390625),\n (0.345098039215686,0.219140625,0.219140625),\n (0.349019607843137,0.19921875,0.19921875),\n (0.352941176470588,0.179296875,0.179296875),\n (0.356862745098039,0.159375,0.159375),\n (0.36078431372549,0.139453125,0.139453125),\n (0.364705882352941,0.11953125,0.11953125),\n (0.368627450980392,0.099609375,0.099609375),\n (0.372549019607843,0.0796875,0.0796875),\n (0.376470588235294,0.059765625,0.059765625),\n (0.380392156862745,0.03984375,0.03984375),\n 
(0.384313725490196,0.019921875,0.019921875),\n (0.388235294117647,0,0),\n (0.392156862745098,0,0),\n (0.396078431372549,0,0),\n (0.4,0,0),\n (0.403921568627451,0,0),\n (0.407843137254902,0,0),\n (0.411764705882353,0,0),\n (0.415686274509804,0,0),\n (0.419607843137255,0,0),\n (0.423529411764706,0,0),\n (0.427450980392157,0,0),\n (0.431372549019608,0,0),\n (0.435294117647059,0,0),\n (0.43921568627451,0,0),\n (0.443137254901961,0,0),\n (0.447058823529412,0,0),\n (0.450980392156863,0,0),\n (0.454901960784314,0,0),\n (0.458823529411765,0,0),\n (0.462745098039216,0,0),\n (0.466666666666667,0,0),\n (0.470588235294118,0,0),\n (0.474509803921569,0,0),\n (0.47843137254902,0,0),\n (0.482352941176471,0,0),\n (0.486274509803922,0,0),\n (0.490196078431373,0,0),\n (0.494117647058824,0,0),\n (0.498039215686275,0,0),\n (0.501960784313725,0,0),\n (0.505882352941176,0,0),\n (0.509803921568627,0,0),\n (0.513725490196078,0,0),\n (0.517647058823529,0,0),\n (0.52156862745098,0,0),\n (0.525490196078431,0,0),\n (0.529411764705882,0,0),\n (0.533333333333333,0,0),\n (0.537254901960784,0,0),\n (0.541176470588235,0,0),\n (0.545098039215686,0,0),\n (0.549019607843137,0,0),\n (0.552941176470588,0,0),\n (0.556862745098039,0,0),\n (0.56078431372549,0,0),\n (0.564705882352941,0,0),\n (0.568627450980392,0,0),\n (0.572549019607843,0,0),\n (0.576470588235294,0,0),\n (0.580392156862745,0,0),\n (0.584313725490196,0,0),\n (0.588235294117647,0.004296875,0.004296875),\n (0.592156862745098,0.00859375,0.00859375),\n (0.596078431372549,0.012890625,0.012890625),\n (0.6,0.0171875,0.0171875),\n (0.603921568627451,0.021484375,0.021484375),\n (0.607843137254902,0.02578125,0.02578125),\n (0.611764705882353,0.030078125,0.030078125),\n (0.615686274509804,0.034375,0.034375),\n (0.619607843137255,0.038671875,0.038671875),\n (0.623529411764706,0.04296875,0.04296875),\n (0.627450980392157,0.047265625,0.047265625),\n (0.631372549019608,0.0515625,0.0515625),\n (0.635294117647059,0.055859375,0.055859375),\n (0.63921568627451,0.06015625,0.06015625),\n (0.643137254901961,0.064453125,0.064453125),\n (0.647058823529412,0.06875,0.06875),\n (0.650980392156863,0.073046875,0.073046875),\n (0.654901960784314,0.07734375,0.07734375),\n (0.658823529411765,0.081640625,0.081640625),\n (0.662745098039216,0.0859375,0.0859375),\n (0.666666666666667,0.090234375,0.090234375),\n (0.670588235294118,0.09453125,0.09453125),\n (0.674509803921569,0.098828125,0.098828125),\n (0.67843137254902,0.103125,0.103125),\n (0.682352941176471,0.107421875,0.107421875),\n (0.686274509803922,0.11171875,0.11171875),\n (0.690196078431373,0.116015625,0.116015625),\n (0.694117647058824,0.1203125,0.1203125),\n (0.698039215686274,0.124609375,0.124609375),\n (0.701960784313725,0.12890625,0.12890625),\n (0.705882352941177,0.133203125,0.133203125),\n (0.709803921568627,0.1375,0.1375),\n (0.713725490196078,0.141796875,0.141796875),\n (0.717647058823529,0.14609375,0.14609375),\n (0.72156862745098,0.150390625,0.150390625),\n (0.725490196078431,0.1546875,0.1546875),\n (0.729411764705882,0.158984375,0.158984375),\n (0.733333333333333,0.16328125,0.16328125),\n (0.737254901960784,0.167578125,0.167578125),\n (0.741176470588235,0.171875,0.171875),\n (0.745098039215686,0.176171875,0.176171875),\n (0.749019607843137,0.18046875,0.18046875),\n (0.752941176470588,0.184765625,0.184765625),\n (0.756862745098039,0.1890625,0.1890625),\n (0.76078431372549,0.193359375,0.193359375),\n (0.764705882352941,0.19765625,0.19765625),\n (0.768627450980392,0.201953125,0.201953125),\n (0.772549019607843,0.20625,0.20625),\n 
(0.776470588235294,0.210546875,0.210546875),\n (0.780392156862745,0.21484375,0.21484375),\n (0.784313725490196,0.22663359375,0.22663359375),\n (0.788235294117647,0.2384234375,0.2384234375),\n (0.792156862745098,0.250212890625,0.250212890625),\n (0.796078431372549,0.262002734375,0.262002734375),\n (0.8,0.273792578125,0.273792578125),\n (0.803921568627451,0.285582421875,0.285582421875),\n (0.807843137254902,0.297372265625,0.297372265625),\n (0.811764705882353,0.309162109375,0.309162109375),\n (0.815686274509804,0.3209515625,0.3209515625),\n (0.819607843137255,0.33274140625,0.33274140625),\n (0.823529411764706,0.34453125,0.34453125),\n (0.827450980392157,0.35632109375,0.35632109375),\n (0.831372549019608,0.3681109375,0.3681109375),\n (0.835294117647059,0.379900390625,0.379900390625),\n (0.83921568627451,0.39169140625,0.39169140625),\n (0.843137254901961,0.40348046875,0.40348046875),\n (0.847058823529412,0.41526953125,0.41526953125),\n (0.850980392156863,0.42705859375,0.42705859375),\n (0.854901960784314,0.43884765625,0.43884765625),\n (0.858823529411765,0.450640625,0.450640625),\n (0.862745098039216,0.4624296875,0.4624296875),\n (0.866666666666667,0.47421875,0.47421875),\n (0.870588235294118,0.4860078125,0.4860078125),\n (0.874509803921569,0.497796875,0.497796875),\n (0.87843137254902,0.50958984375,0.50958984375),\n (0.882352941176471,0.52137890625,0.52137890625),\n (0.886274509803922,0.53316796875,0.53316796875),\n (0.890196078431373,0.54495703125,0.54495703125),\n (0.894117647058824,0.55674609375,0.55674609375),\n (0.898039215686275,0.56853515625,0.56853515625),\n (0.901960784313726,0.580328125,0.580328125),\n (0.905882352941176,0.5921171875,0.5921171875),\n (0.909803921568627,0.60390625,0.60390625),\n (0.913725490196078,0.6156953125,0.6156953125),\n (0.917647058823529,0.627484375,0.627484375),\n (0.92156862745098,0.63927734375,0.63927734375),\n (0.925490196078431,0.65106640625,0.65106640625),\n (0.929411764705882,0.66285546875,0.66285546875),\n (0.933333333333333,0.67464453125,0.67464453125),\n (0.937254901960784,0.68643359375,0.68643359375),\n (0.941176470588235,0.69822265625,0.69822265625),\n (0.945098039215686,0.710015625,0.710015625),\n (0.949019607843137,0.7218046875,0.7218046875),\n (0.952941176470588,0.73359375,0.73359375),\n (0.956862745098039,0.7453828125,0.7453828125),\n (0.96078431372549,0.757171875,0.757171875),\n (0.964705882352941,0.76896484375,0.76896484375),\n (0.968627450980392,0.78075390625,0.78075390625),\n (0.972549019607843,0.79254296875,0.79254296875),\n (0.976470588235294,0.80433203125,0.80433203125),\n (0.980392156862745,0.81612109375,0.81612109375),\n (0.984313725490196,0.82791015625,0.82791015625),\n (0.988235294117647,0.839703125,0.839703125),\n (0.992156862745098,0.8514921875,0.8514921875),\n (0.996078431372549,0.86328125,0.86328125),\n (1.00,0.86328125,0.86328125)),\n }\n\n vcdict = {\n 'red' : (\n (0,1,1),\n (0.00392156862745098,0.54508984375,0.54508984375),\n (0.00784313725490196,0.5285703125,0.5285703125),\n (0.0117647058823529,0.5120546875,0.5120546875),\n (0.0156862745098039,0.49553515625,0.49553515625),\n (0.0196078431372549,0.47901953125,0.47901953125),\n (0.0235294117647059,0.4625,0.4625),\n (0.0274509803921569,0.44598046875,0.44598046875),\n (0.0313725490196078,0.42946484375,0.42946484375),\n (0.0352941176470588,0.4129453125,0.4129453125),\n (0.0392156862745098,0.3964296875,0.3964296875),\n (0.0431372549019608,0.379910546875,0.379910546875),\n (0.0470588235294118,0.36339296875,0.36339296875),\n (0.0509803921568627,0.346875,0.346875),\n 
(0.0549019607843137,0.33035703125,0.33035703125),\n (0.0588235294117647,0.313839453125,0.313839453125),\n (0.0627450980392157,0.297321484375,0.297321484375),\n (0.0666666666666667,0.280803515625,0.280803515625),\n (0.0705882352941176,0.2642859375,0.2642859375),\n (0.0745098039215686,0.24776796875,0.24776796875),\n (0.0784313725490196,0.23125,0.23125),\n (0.0823529411764706,0.21473203125,0.21473203125),\n (0.0862745098039216,0.198214453125,0.198214453125),\n (0.0901960784313725,0.181696484375,0.181696484375),\n (0.0941176470588235,0.165178515625,0.165178515625),\n (0.0980392156862745,0.148660546875,0.148660546875),\n (0.101960784313725,0.13214296875,0.13214296875),\n (0.105882352941176,0.115625,0.115625),\n (0.109803921568627,0.09910703125,0.09910703125),\n (0.113725490196078,0.082589453125,0.082589453125),\n (0.117647058823529,0.066071484375,0.066071484375),\n (0.12156862745098,0.049553515625,0.049553515625),\n (0.125490196078431,0.0330357421875,0.0330357421875),\n (0.129411764705882,0.016517890625,0.016517890625),\n (0.133333333333333,0,0),\n (0.137254901960784,0,0),\n (0.141176470588235,0,0),\n (0.145098039215686,0,0),\n (0.149019607843137,0,0),\n (0.152941176470588,0,0),\n (0.156862745098039,0,0),\n (0.16078431372549,0,0),\n (0.164705882352941,0,0),\n (0.168627450980392,0,0),\n (0.172549019607843,0,0),\n (0.176470588235294,0,0),\n (0.180392156862745,0,0),\n (0.184313725490196,0,0),\n (0.188235294117647,0,0),\n (0.192156862745098,0,0),\n (0.196078431372549,0,0),\n (0.2,0,0),\n (0.203921568627451,0,0),\n (0.207843137254902,0,0),\n (0.211764705882353,0,0),\n (0.215686274509804,0,0),\n (0.219607843137255,0,0),\n (0.223529411764706,0,0),\n (0.227450980392157,0,0),\n (0.231372549019608,0,0),\n (0.235294117647059,0,0),\n (0.23921568627451,0,0),\n (0.243137254901961,0,0),\n (0.247058823529412,0,0),\n (0.250980392156863,0,0),\n (0.254901960784314,0,0),\n (0.258823529411765,0,0),\n (0.262745098039216,0,0),\n (0.266666666666667,0,0),\n (0.270588235294118,0,0),\n (0.274509803921569,0,0),\n (0.27843137254902,0,0),\n (0.282352941176471,0,0),\n (0.286274509803922,0,0),\n (0.290196078431373,0,0),\n (0.294117647058824,0,0),\n (0.298039215686275,0,0),\n (0.301960784313725,0,0),\n (0.305882352941176,0,0),\n (0.309803921568627,0,0),\n (0.313725490196078,0,0),\n (0.317647058823529,0,0),\n (0.32156862745098,0,0),\n (0.325490196078431,0,0),\n (0.329411764705882,0,0),\n (0.333333333333333,0,0),\n (0.337254901960784,0,0),\n (0.341176470588235,0,0),\n (0.345098039215686,0,0),\n (0.349019607843137,0,0),\n (0.352941176470588,0.0061383984375,0.0061383984375),\n (0.356862745098039,0.012276796875,0.012276796875),\n (0.36078431372549,0.0184151953125,0.0184151953125),\n (0.364705882352941,0.0245535546875,0.0245535546875),\n (0.368627450980392,0.030691953125,0.030691953125),\n (0.372549019607843,0.0368303515625,0.0368303515625),\n (0.376470588235294,0.04296875,0.04296875),\n (0.380392156862745,0.04910703125,0.04910703125),\n (0.384313725490196,0.055245703125,0.055245703125),\n (0.388235294117647,0.061383984375,0.061383984375),\n (0.392156862745098,0.067522265625,0.067522265625),\n (0.396078431372549,0.073660546875,0.073660546875),\n (0.4,0.07979921875,0.07979921875),\n (0.403921568627451,0.0859375,0.0859375),\n (0.407843137254902,0.09207578125,0.09207578125),\n (0.411764705882353,0.098214453125,0.098214453125),\n (0.415686274509804,0.104352734375,0.104352734375),\n (0.419607843137255,0.110491015625,0.110491015625),\n (0.423529411764706,0.116629296875,0.116629296875),\n (0.427450980392157,0.12276796875,0.12276796875),\n 
(0.431372549019608,0.12890625,0.12890625),\n (0.435294117647059,0.13504453125,0.13504453125),\n (0.43921568627451,0.141183203125,0.141183203125),\n (0.443137254901961,0.147321484375,0.147321484375),\n (0.447058823529412,0.153459765625,0.153459765625),\n (0.450980392156863,0.159598046875,0.159598046875),\n (0.454901960784314,0.16573671875,0.16573671875),\n (0.458823529411765,0.171875,0.171875),\n (0.462745098039216,0.17801328125,0.17801328125),\n (0.466666666666667,0.184151953125,0.184151953125),\n (0.470588235294118,0.190290234375,0.190290234375),\n (0.474509803921569,0.196428515625,0.196428515625),\n (0.47843137254902,0.202566796875,0.202566796875),\n (0.482352941176471,0.20870546875,0.20870546875),\n (0.486274509803922,0.21484375,0.21484375),\n (0.490196078431373,0.233370703125,0.233370703125),\n (0.494117647058824,0.251897265625,0.251897265625),\n (0.498039215686275,0.27042421875,0.27042421875),\n (0.501960784313725,0.28895078125,0.28895078125),\n (0.505882352941176,0.307477734375,0.307477734375),\n (0.509803921568627,0.326004296875,0.326004296875),\n (0.513725490196078,0.34453125,0.34453125),\n (0.517647058823529,0.363058203125,0.363058203125),\n (0.52156862745098,0.381584765625,0.381584765625),\n (0.525490196078431,0.40011328125,0.40011328125),\n (0.529411764705882,0.41863671875,0.41863671875),\n (0.533333333333333,0.4371640625,0.4371640625),\n (0.537254901960784,0.45569140625,0.45569140625),\n (0.541176470588235,0.47421875,0.47421875),\n (0.545098039215686,0.49274609375,0.49274609375),\n (0.549019607843137,0.5112734375,0.5112734375),\n (0.552941176470588,0.52980078125,0.52980078125),\n (0.556862745098039,0.54832421875,0.54832421875),\n (0.56078431372549,0.5668515625,0.5668515625),\n (0.564705882352941,0.58537890625,0.58537890625),\n (0.568627450980392,0.60390625,0.60390625),\n (0.572549019607843,0.62243359375,0.62243359375),\n (0.576470588235294,0.6409609375,0.6409609375),\n (0.580392156862745,0.65948828125,0.65948828125),\n (0.584313725490196,0.67801171875,0.67801171875),\n (0.588235294117647,0.6965390625,0.6965390625),\n (0.592156862745098,0.71506640625,0.71506640625),\n (0.596078431372549,0.73359375,0.73359375),\n (0.6,0.75212109375,0.75212109375),\n (0.603921568627451,0.7706484375,0.7706484375),\n (0.607843137254902,0.78917578125,0.78917578125),\n (0.611764705882353,0.80769921875,0.80769921875),\n (0.615686274509804,0.8262265625,0.8262265625),\n (0.619607843137255,0.84475390625,0.84475390625),\n (0.623529411764706,0.86328125,0.86328125),\n (0.627450980392157,0.86549609375,0.86549609375),\n (0.631372549019608,0.86770703125,0.86770703125),\n (0.635294117647059,0.869921875,0.869921875),\n (0.63921568627451,0.87213671875,0.87213671875),\n (0.643137254901961,0.87434765625,0.87434765625),\n (0.647058823529412,0.8765625,0.8765625),\n (0.650980392156863,0.87877734375,0.87877734375),\n (0.654901960784314,0.88098828125,0.88098828125),\n (0.658823529411765,0.883203125,0.883203125),\n (0.662745098039216,0.88541796875,0.88541796875),\n (0.666666666666667,0.88762890625,0.88762890625),\n (0.670588235294118,0.88984375,0.88984375),\n (0.674509803921569,0.89205859375,0.89205859375),\n (0.67843137254902,0.89426953125,0.89426953125),\n (0.682352941176471,0.896484375,0.896484375),\n (0.686274509803922,0.89869921875,0.89869921875),\n (0.690196078431373,0.90091015625,0.90091015625),\n (0.694117647058824,0.903125,0.903125),\n (0.698039215686274,0.90533984375,0.90533984375),\n (0.701960784313725,0.90755078125,0.90755078125),\n (0.705882352941177,0.909765625,0.909765625),\n 
(0.709803921568627,0.91198046875,0.91198046875),\n (0.713725490196078,0.91419140625,0.91419140625),\n (0.717647058823529,0.91640625,0.91640625),\n (0.72156862745098,0.91862109375,0.91862109375),\n (0.725490196078431,0.92083203125,0.92083203125),\n (0.729411764705882,0.923046875,0.923046875),\n (0.733333333333333,0.92526171875,0.92526171875),\n (0.737254901960784,0.92747265625,0.92747265625),\n (0.741176470588235,0.9296875,0.9296875),\n (0.745098039215686,0.93190234375,0.93190234375),\n (0.749019607843137,0.93411328125,0.93411328125),\n (0.752941176470588,0.936328125,0.936328125),\n (0.756862745098039,0.93854296875,0.93854296875),\n (0.76078431372549,0.94075390625,0.94075390625),\n (0.764705882352941,0.94296875,0.94296875),\n (0.768627450980392,0.94518359375,0.94518359375),\n (0.772549019607843,0.94739453125,0.94739453125),\n (0.776470588235294,0.949609375,0.949609375),\n (0.780392156862745,0.95182421875,0.95182421875),\n (0.784313725490196,0.95403515625,0.95403515625),\n (0.788235294117647,0.95625,0.95625),\n (0.792156862745098,0.95846484375,0.95846484375),\n (0.796078431372549,0.96067578125,0.96067578125),\n (0.8,0.962890625,0.962890625),\n (0.803921568627451,0.96510546875,0.96510546875),\n (0.807843137254902,0.96731640625,0.96731640625),\n (0.811764705882353,0.96953125,0.96953125),\n (0.815686274509804,0.97174609375,0.97174609375),\n (0.819607843137255,0.97395703125,0.97395703125),\n (0.823529411764706,0.976171875,0.976171875),\n (0.827450980392157,0.97838671875,0.97838671875),\n (0.831372549019608,0.98059765625,0.98059765625),\n (0.835294117647059,0.9828125,0.9828125),\n (0.83921568627451,0.98502734375,0.98502734375),\n (0.843137254901961,0.98723828125,0.98723828125),\n (0.847058823529412,0.989453125,0.989453125),\n (0.850980392156863,0.99166796875,0.99166796875),\n (0.854901960784314,0.99387890625,0.99387890625),\n (0.858823529411765,0.99609375,0.99609375),\n (0.862745098039216,0.99609375,0.99609375),\n (0.866666666666667,0.99609375,0.99609375),\n (0.870588235294118,0.99609375,0.99609375),\n (0.874509803921569,0.99609375,0.99609375),\n (0.87843137254902,0.99609375,0.99609375),\n (0.882352941176471,0.99609375,0.99609375),\n (0.886274509803922,0.99609375,0.99609375),\n (0.890196078431373,0.99609375,0.99609375),\n (0.894117647058824,0.99609375,0.99609375),\n (0.898039215686275,0.99609375,0.99609375),\n (0.901960784313726,0.99609375,0.99609375),\n (0.905882352941176,0.99609375,0.99609375),\n (0.909803921568627,0.99609375,0.99609375),\n (0.913725490196078,0.99609375,0.99609375),\n (0.917647058823529,0.99609375,0.99609375),\n (0.92156862745098,0.99609375,0.99609375),\n (0.925490196078431,0.99609375,0.99609375),\n (0.929411764705882,0.99609375,0.99609375),\n (0.933333333333333,0.99609375,0.99609375),\n (0.937254901960784,0.99609375,0.99609375),\n (0.941176470588235,0.99609375,0.99609375),\n (0.945098039215686,0.99609375,0.99609375),\n (0.949019607843137,0.99609375,0.99609375),\n (0.952941176470588,0.99609375,0.99609375),\n (0.956862745098039,0.99609375,0.99609375),\n (0.96078431372549,0.99609375,0.99609375),\n (0.964705882352941,0.99609375,0.99609375),\n (0.968627450980392,0.99609375,0.99609375),\n (0.972549019607843,0.99609375,0.99609375),\n (0.976470588235294,0.99609375,0.99609375),\n (0.980392156862745,0.99609375,0.99609375),\n (0.984313725490196,0.99609375,0.99609375),\n (0.988235294117647,0.99609375,0.99609375),\n (0.992156862745098,0.99609375,0.99609375),\n (0.996078431372549,0.99609375,0.99609375),\n (1,0.99609375,0.99609375)),\n 'green' : (\n (0,1,1),\n (0.00392156862745098,0,0),\n 
(0.00784313725490196,0,0),\n (0.0117647058823529,0,0),\n (0.0156862745098039,0,0),\n (0.0196078431372549,0,0),\n (0.0235294117647059,0,0),\n (0.0274509803921569,0,0),\n (0.0313725490196078,0,0),\n (0.0352941176470588,0,0),\n (0.0392156862745098,0,0),\n (0.0431372549019608,0,0),\n (0.0470588235294118,0,0),\n (0.0509803921568627,0,0),\n (0.0549019607843137,0,0),\n (0.0588235294117647,0,0),\n (0.0627450980392157,0,0),\n (0.0666666666666667,0,0),\n (0.0705882352941176,0,0),\n (0.0745098039215686,0,0),\n (0.0784313725490196,0,0),\n (0.0823529411764706,0,0),\n (0.0862745098039216,0,0),\n (0.0901960784313725,0,0),\n (0.0941176470588235,0,0),\n (0.0980392156862745,0,0),\n (0.101960784313725,0,0),\n (0.105882352941176,0,0),\n (0.109803921568627,0,0),\n (0.113725490196078,0,0),\n (0.117647058823529,0,0),\n (0.12156862745098,0,0),\n (0.125490196078431,0,0),\n (0.129411764705882,0,0),\n (0.133333333333333,0,0),\n (0.137254901960784,0.0135653515625,0.0135653515625),\n (0.141176470588235,0.0271306640625,0.0271306640625),\n (0.145098039215686,0.04069609375,0.04069609375),\n (0.149019607843137,0.054261328125,0.054261328125),\n (0.152941176470588,0.0678265625,0.0678265625),\n (0.156862745098039,0.0813921875,0.0813921875),\n (0.16078431372549,0.094957421875,0.094957421875),\n (0.164705882352941,0.10852265625,0.10852265625),\n (0.168627450980392,0.122087890625,0.122087890625),\n (0.172549019607843,0.135653515625,0.135653515625),\n (0.176470588235294,0.14921875,0.14921875),\n (0.180392156862745,0.162783984375,0.162783984375),\n (0.184313725490196,0.176349609375,0.176349609375),\n (0.188235294117647,0.18991484375,0.18991484375),\n (0.192156862745098,0.203480078125,0.203480078125),\n (0.196078431372549,0.2170453125,0.2170453125),\n (0.2,0.2306109375,0.2306109375),\n (0.203921568627451,0.244176171875,0.244176171875),\n (0.207843137254902,0.25774140625,0.25774140625),\n (0.211764705882353,0.27130703125,0.27130703125),\n (0.215686274509804,0.284872265625,0.284872265625),\n (0.219607843137255,0.2984375,0.2984375),\n (0.223529411764706,0.312002734375,0.312002734375),\n (0.227450980392157,0.325568359375,0.325568359375),\n (0.231372549019608,0.33913359375,0.33913359375),\n (0.235294117647059,0.352698828125,0.352698828125),\n (0.23921568627451,0.3662640625,0.3662640625),\n (0.243137254901961,0.3798296875,0.3798296875),\n (0.247058823529412,0.39339453125,0.39339453125),\n (0.250980392156863,0.4069609375,0.4069609375),\n (0.254901960784314,0.42052734375,0.42052734375),\n (0.258823529411765,0.43408984375,0.43408984375),\n (0.262745098039216,0.44765625,0.44765625),\n (0.266666666666667,0.46122265625,0.46122265625),\n (0.270588235294118,0.47478515625,0.47478515625),\n (0.274509803921569,0.4883515625,0.4883515625),\n (0.27843137254902,0.50191796875,0.50191796875),\n (0.282352941176471,0.515484375,0.515484375),\n (0.286274509803922,0.529046875,0.529046875),\n (0.290196078431373,0.54261328125,0.54261328125),\n (0.294117647058824,0.5561796875,0.5561796875),\n (0.298039215686275,0.56974609375,0.56974609375),\n (0.301960784313725,0.58330859375,0.58330859375),\n (0.305882352941176,0.596875,0.596875),\n (0.309803921568627,0.61044140625,0.61044140625),\n (0.313725490196078,0.62400390625,0.62400390625),\n (0.317647058823529,0.6375703125,0.6375703125),\n (0.32156862745098,0.65113671875,0.65113671875),\n (0.325490196078431,0.664703125,0.664703125),\n (0.329411764705882,0.678265625,0.678265625),\n (0.333333333333333,0.69183203125,0.69183203125),\n (0.337254901960784,0.7053984375,0.7053984375),\n 
(0.341176470588235,0.71896484375,0.71896484375),\n (0.345098039215686,0.73252734375,0.73252734375),\n (0.349019607843137,0.74609375,0.74609375),\n (0.352941176470588,0.7309140625,0.7309140625),\n (0.356862745098039,0.71573828125,0.71573828125),\n (0.36078431372549,0.70055859375,0.70055859375),\n (0.364705882352941,0.68537890625,0.68537890625),\n (0.368627450980392,0.67019921875,0.67019921875),\n (0.372549019607843,0.6550234375,0.6550234375),\n (0.376470588235294,0.63984375,0.63984375),\n (0.380392156862745,0.6246640625,0.6246640625),\n (0.384313725490196,0.60948828125,0.60948828125),\n (0.388235294117647,0.59430859375,0.59430859375),\n (0.392156862745098,0.57912890625,0.57912890625),\n (0.396078431372549,0.56394921875,0.56394921875),\n (0.4,0.5487734375,0.5487734375),\n (0.403921568627451,0.53359375,0.53359375),\n (0.407843137254902,0.5184140625,0.5184140625),\n (0.411764705882353,0.50323828125,0.50323828125),\n (0.415686274509804,0.48805859375,0.48805859375),\n (0.419607843137255,0.47287890625,0.47287890625),\n (0.423529411764706,0.45769921875,0.45769921875),\n (0.427450980392157,0.4425234375,0.4425234375),\n (0.431372549019608,0.42734375,0.42734375),\n (0.435294117647059,0.4121640625,0.4121640625),\n (0.43921568627451,0.39698828125,0.39698828125),\n (0.443137254901961,0.381808203125,0.381808203125),\n (0.447058823529412,0.366629296875,0.366629296875),\n (0.450980392156863,0.35145078125,0.35145078125),\n (0.454901960784314,0.336272265625,0.336272265625),\n (0.458823529411765,0.32109375,0.32109375),\n (0.462745098039216,0.305915234375,0.305915234375),\n (0.466666666666667,0.29073671875,0.29073671875),\n (0.470588235294118,0.2755578125,0.2755578125),\n (0.474509803921569,0.260379296875,0.260379296875),\n (0.47843137254902,0.24520078125,0.24520078125),\n (0.482352941176471,0.230022265625,0.230022265625),\n (0.486274509803922,0.21484375,0.21484375),\n (0.490196078431373,0.2265625,0.2265625),\n (0.494117647058824,0.23828125,0.23828125),\n (0.498039215686275,0.25,0.25),\n (0.501960784313725,0.26171875,0.26171875),\n (0.505882352941176,0.2734375,0.2734375),\n (0.509803921568627,0.28515625,0.28515625),\n (0.513725490196078,0.296875,0.296875),\n (0.517647058823529,0.30859375,0.30859375),\n (0.52156862745098,0.3203125,0.3203125),\n (0.525490196078431,0.33203125,0.33203125),\n (0.529411764705882,0.34375,0.34375),\n (0.533333333333333,0.35546875,0.35546875),\n (0.537254901960784,0.3671875,0.3671875),\n (0.541176470588235,0.37890625,0.37890625),\n (0.545098039215686,0.390625,0.390625),\n (0.549019607843137,0.40234375,0.40234375),\n (0.552941176470588,0.4140625,0.4140625),\n (0.556862745098039,0.42578125,0.42578125),\n (0.56078431372549,0.4375,0.4375),\n (0.564705882352941,0.44921875,0.44921875),\n (0.568627450980392,0.4609375,0.4609375),\n (0.572549019607843,0.47265625,0.47265625),\n (0.576470588235294,0.484375,0.484375),\n (0.580392156862745,0.49609375,0.49609375),\n (0.584313725490196,0.5078125,0.5078125),\n (0.588235294117647,0.51953125,0.51953125),\n (0.592156862745098,0.53125,0.53125),\n (0.596078431372549,0.54296875,0.54296875),\n (0.6,0.5546875,0.5546875),\n (0.603921568627451,0.56640625,0.56640625),\n (0.607843137254902,0.578125,0.578125),\n (0.611764705882353,0.58984375,0.58984375),\n (0.615686274509804,0.6015625,0.6015625),\n (0.619607843137255,0.61328125,0.61328125),\n (0.623529411764706,0.625,0.625),\n (0.627450980392157,0.61458203125,0.61458203125),\n (0.631372549019608,0.60416796875,0.60416796875),\n (0.635294117647059,0.59375,0.59375),\n (0.63921568627451,0.58333203125,0.58333203125),\n 
(0.643137254901961,0.57291796875,0.57291796875),\n (0.647058823529412,0.5625,0.5625),\n (0.650980392156863,0.55208203125,0.55208203125),\n (0.654901960784314,0.54166796875,0.54166796875),\n (0.658823529411765,0.53125,0.53125),\n (0.662745098039216,0.52083203125,0.52083203125),\n (0.666666666666667,0.51041796875,0.51041796875),\n (0.670588235294118,0.5,0.5),\n (0.674509803921569,0.48958203125,0.48958203125),\n (0.67843137254902,0.47916796875,0.47916796875),\n (0.682352941176471,0.46875,0.46875),\n (0.686274509803922,0.45833203125,0.45833203125),\n (0.690196078431373,0.44791796875,0.44791796875),\n (0.694117647058824,0.4375,0.4375),\n (0.698039215686274,0.42708203125,0.42708203125),\n (0.701960784313725,0.41666796875,0.41666796875),\n (0.705882352941177,0.40625,0.40625),\n (0.709803921568627,0.39583203125,0.39583203125),\n (0.713725490196078,0.385416796875,0.385416796875),\n (0.717647058823529,0.375,0.375),\n (0.72156862745098,0.364583203125,0.364583203125),\n (0.725490196078431,0.354166796875,0.354166796875),\n (0.729411764705882,0.34375,0.34375),\n (0.733333333333333,0.333333203125,0.333333203125),\n (0.737254901960784,0.322916796875,0.322916796875),\n (0.741176470588235,0.3125,0.3125),\n (0.745098039215686,0.302083203125,0.302083203125),\n (0.749019607843137,0.291666796875,0.291666796875),\n (0.752941176470588,0.28125,0.28125),\n (0.756862745098039,0.270833203125,0.270833203125),\n (0.76078431372549,0.260416796875,0.260416796875),\n (0.764705882352941,0.25,0.25),\n (0.768627450980392,0.239583203125,0.239583203125),\n (0.772549019607843,0.229166796875,0.229166796875),\n (0.776470588235294,0.21875,0.21875),\n (0.780392156862745,0.208333203125,0.208333203125),\n (0.784313725490196,0.197916796875,0.197916796875),\n (0.788235294117647,0.1875,0.1875),\n (0.792156862745098,0.177083203125,0.177083203125),\n (0.796078431372549,0.166666796875,0.166666796875),\n (0.8,0.15625,0.15625),\n (0.803921568627451,0.145833203125,0.145833203125),\n (0.807843137254902,0.135416796875,0.135416796875),\n (0.811764705882353,0.125,0.125),\n (0.815686274509804,0.114583203125,0.114583203125),\n (0.819607843137255,0.104166796875,0.104166796875),\n (0.823529411764706,0.09375,0.09375),\n (0.827450980392157,0.083333203125,0.083333203125),\n (0.831372549019608,0.072916796875,0.072916796875),\n (0.835294117647059,0.0625,0.0625),\n (0.83921568627451,0.052083203125,0.052083203125),\n (0.843137254901961,0.041666796875,0.041666796875),\n (0.847058823529412,0.03125,0.03125),\n (0.850980392156863,0.0208333203125,0.0208333203125),\n (0.854901960784314,0.0104166796875,0.0104166796875),\n (0.858823529411765,0,0),\n (0.862745098039216,0.0184151953125,0.0184151953125),\n (0.866666666666667,0.0368303515625,0.0368303515625),\n (0.870588235294118,0.055245703125,0.055245703125),\n (0.874509803921569,0.073660546875,0.073660546875),\n (0.87843137254902,0.09207578125,0.09207578125),\n (0.882352941176471,0.110491015625,0.110491015625),\n (0.886274509803922,0.12890625,0.12890625),\n (0.890196078431373,0.147321484375,0.147321484375),\n (0.894117647058824,0.16573671875,0.16573671875),\n (0.898039215686275,0.184151953125,0.184151953125),\n (0.901960784313726,0.202566796875,0.202566796875),\n (0.905882352941176,0.22098203125,0.22098203125),\n (0.909803921568627,0.239397265625,0.239397265625),\n (0.913725490196078,0.2578125,0.2578125),\n (0.917647058823529,0.276227734375,0.276227734375),\n (0.92156862745098,0.29464296875,0.29464296875),\n (0.925490196078431,0.313058203125,0.313058203125),\n (0.929411764705882,0.331473046875,0.331473046875),\n 
(0.933333333333333,0.34988828125,0.34988828125),\n (0.937254901960784,0.368303515625,0.368303515625),\n (0.941176470588235,0.38671875,0.38671875),\n (0.945098039215686,0.4051328125,0.4051328125),\n (0.949019607843137,0.42355078125,0.42355078125),\n (0.952941176470588,0.44196484375,0.44196484375),\n (0.956862745098039,0.46037890625,0.46037890625),\n (0.96078431372549,0.47879296875,0.47879296875),\n (0.964705882352941,0.4972109375,0.4972109375),\n (0.968627450980392,0.515625,0.515625),\n (0.972549019607843,0.5340390625,0.5340390625),\n (0.976470588235294,0.55245703125,0.55245703125),\n (0.980392156862745,0.57087109375,0.57087109375),\n (0.984313725490196,0.58928515625,0.58928515625),\n (0.988235294117647,0.60769921875,0.60769921875),\n (0.992156862745098,0.6261171875,0.6261171875),\n (0.996078431372549,0.64453125,0.64453125),\n (1,0.64453125,0.64453125)),\n 'blue' : (\n (0,1,1),\n (0.00392156862745098,0.80569140625,0.80569140625),\n (0.00784313725490196,0.7964296875,0.7964296875),\n (0.0117647058823529,0.7871640625,0.7871640625),\n (0.0156862745098039,0.77790234375,0.77790234375),\n (0.0196078431372549,0.76863671875,0.76863671875),\n (0.0235294117647059,0.759375,0.759375),\n (0.0274509803921569,0.75011328125,0.75011328125),\n (0.0313725490196078,0.74084765625,0.74084765625),\n (0.0352941176470588,0.7315859375,0.7315859375),\n (0.0392156862745098,0.7223203125,0.7223203125),\n (0.0431372549019608,0.71305859375,0.71305859375),\n (0.0470588235294118,0.70379296875,0.70379296875),\n (0.0509803921568627,0.69453125,0.69453125),\n (0.0549019607843137,0.68526953125,0.68526953125),\n (0.0588235294117647,0.67600390625,0.67600390625),\n (0.0627450980392157,0.6667421875,0.6667421875),\n (0.0666666666666667,0.6574765625,0.6574765625),\n (0.0705882352941176,0.64821484375,0.64821484375),\n (0.0745098039215686,0.63894921875,0.63894921875),\n (0.0784313725490196,0.6296875,0.6296875),\n (0.0823529411764706,0.62042578125,0.62042578125),\n (0.0862745098039216,0.61116015625,0.61116015625),\n (0.0901960784313725,0.6018984375,0.6018984375),\n (0.0941176470588235,0.5926328125,0.5926328125),\n (0.0980392156862745,0.58337109375,0.58337109375),\n (0.101960784313725,0.57410546875,0.57410546875),\n (0.105882352941176,0.56484375,0.56484375),\n (0.109803921568627,0.55558203125,0.55558203125),\n (0.113725490196078,0.54631640625,0.54631640625),\n (0.117647058823529,0.5370546875,0.5370546875),\n (0.12156862745098,0.5277890625,0.5277890625),\n (0.125490196078431,0.51852734375,0.51852734375),\n (0.129411764705882,0.50926171875,0.50926171875),\n (0.133333333333333,0.5,0.5),\n (0.137254901960784,0.50901953125,0.50901953125),\n (0.141176470588235,0.5180390625,0.5180390625),\n (0.145098039215686,0.52705859375,0.52705859375),\n (0.149019607843137,0.536078125,0.536078125),\n (0.152941176470588,0.54509765625,0.54509765625),\n (0.156862745098039,0.55412109375,0.55412109375),\n (0.16078431372549,0.563140625,0.563140625),\n (0.164705882352941,0.57216015625,0.57216015625),\n (0.168627450980392,0.5811796875,0.5811796875),\n (0.172549019607843,0.59019921875,0.59019921875),\n (0.176470588235294,0.59921875,0.59921875),\n (0.180392156862745,0.60823828125,0.60823828125),\n (0.184313725490196,0.6172578125,0.6172578125),\n (0.188235294117647,0.62627734375,0.62627734375),\n (0.192156862745098,0.635296875,0.635296875),\n (0.196078431372549,0.64431640625,0.64431640625),\n (0.2,0.65333984375,0.65333984375),\n (0.203921568627451,0.662359375,0.662359375),\n (0.207843137254902,0.67137890625,0.67137890625),\n 
(0.211764705882353,0.6803984375,0.6803984375),\n (0.215686274509804,0.68941796875,0.68941796875),\n (0.219607843137255,0.6984375,0.6984375),\n (0.223529411764706,0.70745703125,0.70745703125),\n (0.227450980392157,0.7164765625,0.7164765625),\n (0.231372549019608,0.72549609375,0.72549609375),\n (0.235294117647059,0.734515625,0.734515625),\n (0.23921568627451,0.74353515625,0.74353515625),\n (0.243137254901961,0.75255859375,0.75255859375),\n (0.247058823529412,0.761578125,0.761578125),\n (0.250980392156863,0.77059765625,0.77059765625),\n (0.254901960784314,0.7796171875,0.7796171875),\n (0.258823529411765,0.78863671875,0.78863671875),\n (0.262745098039216,0.79765625,0.79765625),\n (0.266666666666667,0.80667578125,0.80667578125),\n (0.270588235294118,0.8156953125,0.8156953125),\n (0.274509803921569,0.82471484375,0.82471484375),\n (0.27843137254902,0.833734375,0.833734375),\n (0.282352941176471,0.84275390625,0.84275390625),\n (0.286274509803922,0.85177734375,0.85177734375),\n (0.290196078431373,0.860796875,0.860796875),\n (0.294117647058824,0.86981640625,0.86981640625),\n (0.298039215686275,0.8788359375,0.8788359375),\n (0.301960784313725,0.88785546875,0.88785546875),\n (0.305882352941176,0.896875,0.896875),\n (0.309803921568627,0.90589453125,0.90589453125),\n (0.313725490196078,0.9149140625,0.9149140625),\n (0.317647058823529,0.92393359375,0.92393359375),\n (0.32156862745098,0.932953125,0.932953125),\n (0.325490196078431,0.94197265625,0.94197265625),\n (0.329411764705882,0.95099609375,0.95099609375),\n (0.333333333333333,0.960015625,0.960015625),\n (0.337254901960784,0.96903515625,0.96903515625),\n (0.341176470588235,0.9780546875,0.9780546875),\n (0.345098039215686,0.98707421875,0.98707421875),\n (0.349019607843137,0.99609375,0.99609375),\n (0.352941176470588,0.9737734375,0.9737734375),\n (0.356862745098039,0.95144921875,0.95144921875),\n (0.36078431372549,0.92912890625,0.92912890625),\n (0.364705882352941,0.90680859375,0.90680859375),\n (0.368627450980392,0.88448828125,0.88448828125),\n (0.372549019607843,0.8621640625,0.8621640625),\n (0.376470588235294,0.83984375,0.83984375),\n (0.380392156862745,0.8175234375,0.8175234375),\n (0.384313725490196,0.79519921875,0.79519921875),\n (0.388235294117647,0.77287890625,0.77287890625),\n (0.392156862745098,0.75055859375,0.75055859375),\n (0.396078431372549,0.72823828125,0.72823828125),\n (0.4,0.7059140625,0.7059140625),\n (0.403921568627451,0.68359375,0.68359375),\n (0.407843137254902,0.6612734375,0.6612734375),\n (0.411764705882353,0.63894921875,0.63894921875),\n (0.415686274509804,0.61662890625,0.61662890625),\n (0.419607843137255,0.59430859375,0.59430859375),\n (0.423529411764706,0.57198828125,0.57198828125),\n (0.427450980392157,0.5496640625,0.5496640625),\n (0.431372549019608,0.52734375,0.52734375),\n (0.435294117647059,0.5050234375,0.5050234375),\n (0.43921568627451,0.48269921875,0.48269921875),\n (0.443137254901961,0.46037890625,0.46037890625),\n (0.447058823529412,0.43805859375,0.43805859375),\n (0.450980392156863,0.41573828125,0.41573828125),\n (0.454901960784314,0.3934140625,0.3934140625),\n (0.458823529411765,0.37109375,0.37109375),\n (0.462745098039216,0.348772265625,0.348772265625),\n (0.466666666666667,0.32645078125,0.32645078125),\n (0.470588235294118,0.304129296875,0.304129296875),\n (0.474509803921569,0.281808203125,0.281808203125),\n (0.47843137254902,0.25948671875,0.25948671875),\n (0.482352941176471,0.237165234375,0.237165234375),\n (0.486274509803922,0.21484375,0.21484375),\n (0.490196078431373,0.233370703125,0.233370703125),\n 
(0.494117647058824,0.251897265625,0.251897265625),\n (0.498039215686275,0.27042421875,0.27042421875),\n (0.501960784313725,0.28895078125,0.28895078125),\n (0.505882352941176,0.307477734375,0.307477734375),\n (0.509803921568627,0.326004296875,0.326004296875),\n (0.513725490196078,0.34453125,0.34453125),\n (0.517647058823529,0.363058203125,0.363058203125),\n (0.52156862745098,0.381584765625,0.381584765625),\n (0.525490196078431,0.40011328125,0.40011328125),\n (0.529411764705882,0.41863671875,0.41863671875),\n (0.533333333333333,0.4371640625,0.4371640625),\n (0.537254901960784,0.45569140625,0.45569140625),\n (0.541176470588235,0.47421875,0.47421875),\n (0.545098039215686,0.49274609375,0.49274609375),\n (0.549019607843137,0.5112734375,0.5112734375),\n (0.552941176470588,0.52980078125,0.52980078125),\n (0.556862745098039,0.54832421875,0.54832421875),\n (0.56078431372549,0.5668515625,0.5668515625),\n (0.564705882352941,0.58537890625,0.58537890625),\n (0.568627450980392,0.60390625,0.60390625),\n (0.572549019607843,0.62243359375,0.62243359375),\n (0.576470588235294,0.6409609375,0.6409609375),\n (0.580392156862745,0.65948828125,0.65948828125),\n (0.584313725490196,0.67801171875,0.67801171875),\n (0.588235294117647,0.6965390625,0.6965390625),\n (0.592156862745098,0.71506640625,0.71506640625),\n (0.596078431372549,0.73359375,0.73359375),\n (0.6,0.75212109375,0.75212109375),\n (0.603921568627451,0.7706484375,0.7706484375),\n (0.607843137254902,0.78917578125,0.78917578125),\n (0.611764705882353,0.80769921875,0.80769921875),\n (0.615686274509804,0.8262265625,0.8262265625),\n (0.619607843137255,0.84475390625,0.84475390625),\n (0.623529411764706,0.86328125,0.86328125),\n (0.627450980392157,0.84889453125,0.84889453125),\n (0.631372549019608,0.83450390625,0.83450390625),\n (0.635294117647059,0.8201171875,0.8201171875),\n (0.63921568627451,0.80573046875,0.80573046875),\n (0.643137254901961,0.79133984375,0.79133984375),\n (0.647058823529412,0.776953125,0.776953125),\n (0.650980392156863,0.76256640625,0.76256640625),\n (0.654901960784314,0.74817578125,0.74817578125),\n (0.658823529411765,0.7337890625,0.7337890625),\n (0.662745098039216,0.71940234375,0.71940234375),\n (0.666666666666667,0.70501171875,0.70501171875),\n (0.670588235294118,0.690625,0.690625),\n (0.674509803921569,0.67623828125,0.67623828125),\n (0.67843137254902,0.66184765625,0.66184765625),\n (0.682352941176471,0.6474609375,0.6474609375),\n (0.686274509803922,0.63307421875,0.63307421875),\n (0.690196078431373,0.61868359375,0.61868359375),\n (0.694117647058824,0.604296875,0.604296875),\n (0.698039215686274,0.58991015625,0.58991015625),\n (0.701960784313725,0.57551953125,0.57551953125),\n (0.705882352941177,0.5611328125,0.5611328125),\n (0.709803921568627,0.54674609375,0.54674609375),\n (0.713725490196078,0.53235546875,0.53235546875),\n (0.717647058823529,0.51796875,0.51796875),\n (0.72156862745098,0.50358203125,0.50358203125),\n (0.725490196078431,0.48919140625,0.48919140625),\n (0.729411764705882,0.4748046875,0.4748046875),\n (0.733333333333333,0.46041796875,0.46041796875),\n (0.737254901960784,0.44602734375,0.44602734375),\n (0.741176470588235,0.431640625,0.431640625),\n (0.745098039215686,0.41725390625,0.41725390625),\n (0.749019607843137,0.40286328125,0.40286328125),\n (0.752941176470588,0.3884765625,0.3884765625),\n (0.756862745098039,0.374088671875,0.374088671875),\n (0.76078431372549,0.359700390625,0.359700390625),\n (0.764705882352941,0.3453125,0.3453125),\n (0.768627450980392,0.330924609375,0.330924609375),\n 
(0.772549019607843,0.316536328125,0.316536328125),\n (0.776470588235294,0.3021484375,0.3021484375),\n (0.780392156862745,0.287760546875,0.287760546875),\n (0.784313725490196,0.273372265625,0.273372265625),\n (0.788235294117647,0.258984375,0.258984375),\n (0.792156862745098,0.244596484375,0.244596484375),\n (0.796078431372549,0.230208203125,0.230208203125),\n (0.8,0.2158203125,0.2158203125),\n (0.803921568627451,0.201432421875,0.201432421875),\n (0.807843137254902,0.187044140625,0.187044140625),\n (0.811764705882353,0.17265625,0.17265625),\n (0.815686274509804,0.158268359375,0.158268359375),\n (0.819607843137255,0.143880078125,0.143880078125),\n (0.823529411764706,0.1294921875,0.1294921875),\n (0.827450980392157,0.115104296875,0.115104296875),\n (0.831372549019608,0.100716015625,0.100716015625),\n (0.835294117647059,0.086328125,0.086328125),\n (0.83921568627451,0.071940234375,0.071940234375),\n (0.843137254901961,0.057551953125,0.057551953125),\n (0.847058823529412,0.0431640625,0.0431640625),\n (0.850980392156863,0.028776015625,0.028776015625),\n (0.854901960784314,0.01438796875,0.01438796875),\n (0.858823529411765,0,0),\n (0.862745098039216,0,0),\n (0.866666666666667,0,0),\n (0.870588235294118,0,0),\n (0.874509803921569,0,0),\n (0.87843137254902,0,0),\n (0.882352941176471,0,0),\n (0.886274509803922,0,0),\n (0.890196078431373,0,0),\n (0.894117647058824,0,0),\n (0.898039215686275,0,0),\n (0.901960784313726,0,0),\n (0.905882352941176,0,0),\n (0.909803921568627,0,0),\n (0.913725490196078,0,0),\n (0.917647058823529,0,0),\n (0.92156862745098,0,0),\n (0.925490196078431,0,0),\n (0.929411764705882,0,0),\n (0.933333333333333,0,0),\n (0.937254901960784,0,0),\n (0.941176470588235,0,0),\n (0.945098039215686,0,0),\n (0.949019607843137,0,0),\n (0.952941176470588,0,0),\n (0.956862745098039,0,0),\n (0.96078431372549,0,0),\n (0.964705882352941,0,0),\n (0.968627450980392,0,0),\n (0.972549019607843,0,0),\n (0.976470588235294,0,0),\n (0.980392156862745,0,0),\n (0.984313725490196,0,0),\n (0.988235294117647,0,0),\n (0.992156862745098,0,0),\n (0.996078431372549,0,0),\n (1,0,0)),\n }\n\n califa = mcol.LinearSegmentedColormap('califa', cdict)\n vcalifa = mcol.LinearSegmentedColormap('vcalifa', vcdict)\n\n if option == 'v':\n return vcalifa\n else:\n return califa\n\ndef A_l(R_v, lw):\n # From Cardelli,1989\n # F_cor = F * 10 ***(0.4*Av*A_l(R_v,l))\n lw = lw / 10000\n x = 1 / lw\n if x > 1.1:\n y = x - 1.82\n a_x = 1.0 + 0.17699*y - 0.50447*y**2 - 0.02427*y**3 + 0.72085*y**4 \\\n + 0.01979*y**5 - 0.77530*y**6 + 0.32999*y**7\n b_x = 1.41338*y + 2.28305*y**2 + 1.07233*y**3 - 5.38434*y**4 \\\n - 0.62251*y**5 + 5.30260*y**6 - 2.09002*y**7\n else:\n a_x = 0.574 * x ** 1.61\n b_x = -0.527 * x ** 1.61\n A_l_ = a_x + b_x/R_v\n return A_l_\n",
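A minimal usage sketch for the utilities serialized above (the CALIFA colormap, the Cardelli 1989 extinction term A_l, and the BPT demarcation curves kauffmann/kewley). This assumes the module above is saved as "bpt_utils.py"; that file name is hypothetical and not given in the source.

# Minimal usage sketch, assuming the serialized module above is importable as
# "bpt_utils" (hypothetical name, not stated in the source).
import numpy as np
import matplotlib.pyplot as plt

from bpt_utils import A_l, kauffmann, kewley, color_map_califa

# 1) Extinction correction, following the comment inside A_l:
#    F_cor = F * 10**(0.4 * Av * A_l(R_v, lw))
flux_obs = 1.0e-16                 # observed flux, arbitrary units
Av, R_v, lw = 1.0, 3.1, 6563.0     # V-band extinction, R_V, wavelength in Angstrom
flux_cor = flux_obs * 10 ** (0.4 * Av * A_l(R_v, lw))
print('corrected flux:', flux_cor)

# 2) Classic BPT demarcation curves on the log([NII]/Halpha) plane,
#    evaluated over the same ranges used by the plotting helpers above.
x_kauff = np.linspace(-2.0, 0.0, 100)
x_kew = np.linspace(-2.0, 0.4, 100)
plt.plot(x_kauff, kauffmann(x_kauff), 'k--', label='Kauffmann+03')
plt.plot(x_kew, kewley(x_kew), 'k-', label='Kewley+01')

# 3) The CALIFA colormap behaves like any other matplotlib colormap;
#    'v' returns the "vcalifa" variant built from vcdict above.
cmap = color_map_califa('v')
sc = plt.scatter(x_kauff, kauffmann(x_kauff), c=x_kauff, cmap=cmap, s=8)
plt.colorbar(sc, label=r'$\log([\mathrm{NII}]/\mathrm{H}\alpha)$')
plt.xlabel(r'$\log([\mathrm{NII}]/\mathrm{H}\alpha)$')
plt.ylabel(r'$\log([\mathrm{OIII}]/\mathrm{H}\beta)$')
plt.legend()
plt.show()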
"import json\nimport pickle\nimport numpy as np\nimport matplotlib.colors as mcol\nimport matplotlib.pyplot as plt\n\n\ndef color_map_califa_old(option='json'):\n if option == 'json':\n cmap_cal_dic = json.load(open('code/cmap_cal_json.txt'))\n elif option == 'pickle':\n with open('cmap_cal_pickle.txt', 'rb') as handle:\n cmap_cal_dic = pickle.loads(handle.read())\n cmap_cal = mcol.LinearSegmentedColormap('cmap_CALIFA', cmap_cal_dic)\n return cmap_cal\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\ndef Gr_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_grazy = np.linspace(x_min, -0.2, 100)\n ax.plot(x_set_grazy, grazy(x_set_grazy), label='Stasinska+03', **kwargs)\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\ndef SII_LINERS_curve_plot(ax=None, x_min=-0.3, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.01, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_OI_curve_plot(ax=None, x_min=-3.5, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.35)\n ax.plot(x_set_line, O3O1_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_curve_plot(ax=None, x_min=-2.0, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.08, 100)\n ax.plot(x_set_line, espinosa(x_set_line), label=label, **kwargs)\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\ndef kewley(logNIIHa):\n val = 0.61 / (logNIIHa - 0.47) + 1.19\n return val\n\n\ndef grazy(logNIIHa):\n x = logNIIHa\n val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409 * x\n ) - 31.093\n return val\n\n\ndef AGNline(logSIIHa):\n val = 0.72 / (logSIIHa - 0.32) + 1.3\n return val\n\n\ndef 
LINSy2line(logSIIHa):\n val = 1.89 * logSIIHa + 0.76\n return val\n\n\ndef AGNline2(logOIHa):\n val = 0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\ndef LINSy2line2(logOIHa):\n val = 1.18 * logOIHa + 1.3\n return val\n\n\ndef espinosa(logNIIHa):\n val = 0.12579066 / (logNIIHa - 0.00302777) + 0.56846872\n return val\n\n\ndef O3S2_line_c(x):\n val = 0.04074804 / (x + 0.01253238) + 0.58154113\n return val\n\n\ndef O3O1_line_c(x):\n val = 0.05612915 / (x + 0.39641533) + 0.60969495\n return val\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\ndef Gr_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_grazy = np.linspace(x_min, -0.2, 100)\n ax.plot(x_set_grazy, grazy(x_set_grazy), label='Stasinska+03', **kwargs)\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\ndef SII_LINERS_curve_plot(ax=None, x_min=-0.3, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.02, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_OI_curve_plot(ax=None, x_min=-3.5, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.4, 100)\n ax.plot(x_set_line, O3O1_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_curve_plot(ax=None, x_min=-2.0, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.00302, 100)\n ax.plot(x_set_line, espinosa(x_set_line), label=label, **kwargs)\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\ndef kewley(logNIIHa):\n val = 0.61 / (logNIIHa - 0.47) + 1.19\n return val\n\n\ndef grazy(logNIIHa):\n x = logNIIHa\n val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409 * x\n ) - 31.093\n return val\n\n\ndef A_l(R_v, lw):\n lw = lw / 10000\n x 
= 1 / lw\n if x > 1.1:\n y = x - 1.82\n a_x = (1.0 + 0.17699 * y - 0.50447 * y ** 2 - 0.02427 * y ** 3 + \n 0.72085 * y ** 4 + 0.01979 * y ** 5 - 0.7753 * y ** 6 + 0.32999 *\n y ** 7)\n b_x = (1.41338 * y + 2.28305 * y ** 2 + 1.07233 * y ** 3 - 5.38434 *\n y ** 4 - 0.62251 * y ** 5 + 5.3026 * y ** 6 - 2.09002 * y ** 7)\n else:\n a_x = 0.574 * x ** 1.61\n b_x = -0.527 * x ** 1.61\n A_l_ = a_x + b_x / R_v\n return A_l_\n\n\ndef color_map_califa(option='v'):\n cdict = {'red': ((0.0, 0, 0), (0.00392156862745098, 0, 0), (\n 0.00784313725490196, 0, 0), (0.0117647058823529, 0, 0), (\n 0.0156862745098039, 0, 0), (0.0196078431372549, 0, 0), (\n 0.0235294117647059, 0, 0), (0.0274509803921569, 0, 0), (\n 0.0313725490196078, 0, 0), (0.0352941176470588, 0, 0), (\n 0.0392156862745098, 0, 0), (0.0431372549019608, 0, 0), (\n 0.0470588235294118, 0, 0), (0.0509803921568627, 0, 0), (\n 0.0549019607843137, 0, 0), (0.0588235294117647, 0, 0), (\n 0.0627450980392157, 0, 0), (0.0666666666666667, 0, 0), (\n 0.0705882352941176, 0, 0), (0.0745098039215686, 0, 0), (\n 0.0784313725490196, 0, 0), (0.0823529411764706, 0, 0), (\n 0.0862745098039216, 0, 0), (0.0901960784313725, 0, 0), (\n 0.0941176470588235, 0, 0), (0.0980392156862745, 0, 0), (\n 0.101960784313725, 0, 0), (0.105882352941176, 0, 0), (\n 0.109803921568627, 0, 0), (0.113725490196078, 0, 0), (\n 0.117647058823529, 0, 0), (0.12156862745098, 0, 0), (\n 0.125490196078431, 0, 0), (0.129411764705882, 0, 0), (\n 0.133333333333333, 0, 0), (0.137254901960784, 0, 0), (\n 0.141176470588235, 0, 0), (0.145098039215686, 0, 0), (\n 0.149019607843137, 0, 0), (0.152941176470588, 0, 0), (\n 0.156862745098039, 0, 0), (0.16078431372549, 0, 0), (\n 0.164705882352941, 0, 0), (0.168627450980392, 0, 0), (\n 0.172549019607843, 0, 0), (0.176470588235294, 0, 0), (\n 0.180392156862745, 0, 0), (0.184313725490196, 0, 0), (\n 0.188235294117647, 0, 0), (0.192156862745098, 0, 0), (\n 0.196078431372549, 0.019921875, 0.019921875), (0.2, 0.03984375, \n 0.03984375), (0.203921568627451, 0.059765625, 0.059765625), (\n 0.207843137254902, 0.0796875, 0.0796875), (0.211764705882353, \n 0.099609375, 0.099609375), (0.215686274509804, 0.11953125, \n 0.11953125), (0.219607843137255, 0.139453125, 0.139453125), (\n 0.223529411764706, 0.159375, 0.159375), (0.227450980392157, \n 0.179296875, 0.179296875), (0.231372549019608, 0.19921875, \n 0.19921875), (0.235294117647059, 0.219140625, 0.219140625), (\n 0.23921568627451, 0.2390625, 0.2390625), (0.243137254901961, \n 0.258984375, 0.258984375), (0.247058823529412, 0.27890625, \n 0.27890625), (0.250980392156863, 0.298828125, 0.298828125), (\n 0.254901960784314, 0.31875, 0.31875), (0.258823529411765, \n 0.338671875, 0.338671875), (0.262745098039216, 0.35859375, \n 0.35859375), (0.266666666666667, 0.378515625, 0.378515625), (\n 0.270588235294118, 0.3984375, 0.3984375), (0.274509803921569, \n 0.418359375, 0.418359375), (0.27843137254902, 0.43828125, \n 0.43828125), (0.282352941176471, 0.458203125, 0.458203125), (\n 0.286274509803922, 0.478125, 0.478125), (0.290196078431373, \n 0.498046875, 0.498046875), (0.294117647058824, 0.51796875, \n 0.51796875), (0.298039215686275, 0.537890625, 0.537890625), (\n 0.301960784313725, 0.5578125, 0.5578125), (0.305882352941176, \n 0.577734375, 0.577734375), (0.309803921568627, 0.59765625, \n 0.59765625), (0.313725490196078, 0.617578125, 0.617578125), (\n 0.317647058823529, 0.6375, 0.6375), (0.32156862745098, 0.657421875,\n 0.657421875), (0.325490196078431, 0.67734375, 0.67734375), (\n 0.329411764705882, 0.697265625, 0.697265625), 
(0.333333333333333, \n 0.7171875, 0.7171875), (0.337254901960784, 0.737109375, 0.737109375\n ), (0.341176470588235, 0.75703125, 0.75703125), (0.345098039215686,\n 0.776953125, 0.776953125), (0.349019607843137, 0.796875, 0.796875),\n (0.352941176470588, 0.816796875, 0.816796875), (0.356862745098039, \n 0.83671875, 0.83671875), (0.36078431372549, 0.856640625, \n 0.856640625), (0.364705882352941, 0.8765625, 0.8765625), (\n 0.368627450980392, 0.896484375, 0.896484375), (0.372549019607843, \n 0.91640625, 0.91640625), (0.376470588235294, 0.936328125, \n 0.936328125), (0.380392156862745, 0.95625, 0.95625), (\n 0.384313725490196, 0.976171875, 0.976171875), (0.388235294117647, \n 0.99609375, 0.99609375), (0.392156862745098, 0.99609375, 0.99609375\n ), (0.396078431372549, 0.99609375, 0.99609375), (0.4, 0.99609375, \n 0.99609375), (0.403921568627451, 0.99609375, 0.99609375), (\n 0.407843137254902, 0.99609375, 0.99609375), (0.411764705882353, \n 0.99609375, 0.99609375), (0.415686274509804, 0.99609375, 0.99609375\n ), (0.419607843137255, 0.99609375, 0.99609375), (0.423529411764706,\n 0.99609375, 0.99609375), (0.427450980392157, 0.99609375, 0.99609375\n ), (0.431372549019608, 0.99609375, 0.99609375), (0.435294117647059,\n 0.99609375, 0.99609375), (0.43921568627451, 0.99609375, 0.99609375),\n (0.443137254901961, 0.99609375, 0.99609375), (0.447058823529412, \n 0.99609375, 0.99609375), (0.450980392156863, 0.99609375, 0.99609375\n ), (0.454901960784314, 0.99609375, 0.99609375), (0.458823529411765,\n 0.99609375, 0.99609375), (0.462745098039216, 0.99609375, 0.99609375\n ), (0.466666666666667, 0.99609375, 0.99609375), (0.470588235294118,\n 0.99609375, 0.99609375), (0.474509803921569, 0.99609375, 0.99609375\n ), (0.47843137254902, 0.99609375, 0.99609375), (0.482352941176471, \n 0.99609375, 0.99609375), (0.486274509803922, 0.99609375, 0.99609375\n ), (0.490196078431373, 0.99609375, 0.99609375), (0.494117647058824,\n 0.99609375, 0.99609375), (0.498039215686275, 0.99609375, 0.99609375\n ), (0.501960784313725, 0.99609375, 0.99609375), (0.505882352941176,\n 0.99609375, 0.99609375), (0.509803921568627, 0.99609375, 0.99609375\n ), (0.513725490196078, 0.99609375, 0.99609375), (0.517647058823529,\n 0.99609375, 0.99609375), (0.52156862745098, 0.99609375, 0.99609375),\n (0.525490196078431, 0.99609375, 0.99609375), (0.529411764705882, \n 0.99609375, 0.99609375), (0.533333333333333, 0.99609375, 0.99609375\n ), (0.537254901960784, 0.99609375, 0.99609375), (0.541176470588235,\n 0.99609375, 0.99609375), (0.545098039215686, 0.99609375, 0.99609375\n ), (0.549019607843137, 0.99609375, 0.99609375), (0.552941176470588,\n 0.99609375, 0.99609375), (0.556862745098039, 0.99609375, 0.99609375\n ), (0.56078431372549, 0.99609375, 0.99609375), (0.564705882352941, \n 0.99609375, 0.99609375), (0.568627450980392, 0.99609375, 0.99609375\n ), (0.572549019607843, 0.99609375, 0.99609375), (0.576470588235294,\n 0.99609375, 0.99609375), (0.580392156862745, 0.99609375, 0.99609375\n ), (0.584313725490196, 0.99609375, 0.99609375), (0.588235294117647,\n 0.98046875, 0.98046875), (0.592156862745098, 0.96484375, 0.96484375\n ), (0.596078431372549, 0.94921875, 0.94921875), (0.6, 0.93359375, \n 0.93359375), (0.603921568627451, 0.91796875, 0.91796875), (\n 0.607843137254902, 0.90234375, 0.90234375), (0.611764705882353, \n 0.88671875, 0.88671875), (0.615686274509804, 0.87109375, 0.87109375\n ), (0.619607843137255, 0.85546875, 0.85546875), (0.623529411764706,\n 0.83984375, 0.83984375), (0.627450980392157, 0.82421875, 0.82421875\n ), (0.631372549019608, 
0.80859375, 0.80859375), (0.635294117647059,\n 0.79296875, 0.79296875), (0.63921568627451, 0.77734375, 0.77734375),\n (0.643137254901961, 0.76171875, 0.76171875), (0.647058823529412, \n 0.74609375, 0.74609375), (0.650980392156863, 0.73046875, 0.73046875\n ), (0.654901960784314, 0.71484375, 0.71484375), (0.658823529411765,\n 0.69921875, 0.69921875), (0.662745098039216, 0.68359375, 0.68359375\n ), (0.666666666666667, 0.66796875, 0.66796875), (0.670588235294118,\n 0.65234375, 0.65234375), (0.674509803921569, 0.63671875, 0.63671875\n ), (0.67843137254902, 0.62109375, 0.62109375), (0.682352941176471, \n 0.60546875, 0.60546875), (0.686274509803922, 0.58984375, 0.58984375\n ), (0.690196078431373, 0.57421875, 0.57421875), (0.694117647058824,\n 0.55859375, 0.55859375), (0.698039215686274, 0.54296875, 0.54296875\n ), (0.701960784313725, 0.52734375, 0.52734375), (0.705882352941177,\n 0.51171875, 0.51171875), (0.709803921568627, 0.49609375, 0.49609375\n ), (0.713725490196078, 0.48046875, 0.48046875), (0.717647058823529,\n 0.46484375, 0.46484375), (0.72156862745098, 0.44921875, 0.44921875),\n (0.725490196078431, 0.43359375, 0.43359375), (0.729411764705882, \n 0.41796875, 0.41796875), (0.733333333333333, 0.40234375, 0.40234375\n ), (0.737254901960784, 0.38671875, 0.38671875), (0.741176470588235,\n 0.37109375, 0.37109375), (0.745098039215686, 0.35546875, 0.35546875\n ), (0.749019607843137, 0.33984375, 0.33984375), (0.752941176470588,\n 0.32421875, 0.32421875), (0.756862745098039, 0.30859375, 0.30859375\n ), (0.76078431372549, 0.29296875, 0.29296875), (0.764705882352941, \n 0.27734375, 0.27734375), (0.768627450980392, 0.26171875, 0.26171875\n ), (0.772549019607843, 0.24609375, 0.24609375), (0.776470588235294,\n 0.23046875, 0.23046875), (0.780392156862745, 0.21484375, 0.21484375\n ), (0.784313725490196, 0.22663359375, 0.22663359375), (\n 0.788235294117647, 0.2384234375, 0.2384234375), (0.792156862745098,\n 0.250212890625, 0.250212890625), (0.796078431372549, 0.262002734375,\n 0.262002734375), (0.8, 0.273792578125, 0.273792578125), (\n 0.803921568627451, 0.285582421875, 0.285582421875), (\n 0.807843137254902, 0.297372265625, 0.297372265625), (\n 0.811764705882353, 0.309162109375, 0.309162109375), (\n 0.815686274509804, 0.3209515625, 0.3209515625), (0.819607843137255,\n 0.33274140625, 0.33274140625), (0.823529411764706, 0.34453125, \n 0.34453125), (0.827450980392157, 0.35632109375, 0.35632109375), (\n 0.831372549019608, 0.3681109375, 0.3681109375), (0.835294117647059,\n 0.379900390625, 0.379900390625), (0.83921568627451, 0.39169140625, \n 0.39169140625), (0.843137254901961, 0.40348046875, 0.40348046875),\n (0.847058823529412, 0.41526953125, 0.41526953125), (\n 0.850980392156863, 0.42705859375, 0.42705859375), (\n 0.854901960784314, 0.43884765625, 0.43884765625), (\n 0.858823529411765, 0.450640625, 0.450640625), (0.862745098039216, \n 0.4624296875, 0.4624296875), (0.866666666666667, 0.47421875, \n 0.47421875), (0.870588235294118, 0.4860078125, 0.4860078125), (\n 0.874509803921569, 0.497796875, 0.497796875), (0.87843137254902, \n 0.50958984375, 0.50958984375), (0.882352941176471, 0.52137890625, \n 0.52137890625), (0.886274509803922, 0.53316796875, 0.53316796875),\n (0.890196078431373, 0.54495703125, 0.54495703125), (\n 0.894117647058824, 0.55674609375, 0.55674609375), (\n 0.898039215686275, 0.56853515625, 0.56853515625), (\n 0.901960784313726, 0.580328125, 0.580328125), (0.905882352941176, \n 0.5921171875, 0.5921171875), (0.909803921568627, 0.60390625, \n 0.60390625), (0.913725490196078, 0.6156953125, 
0.6156953125), (\n 0.917647058823529, 0.627484375, 0.627484375), (0.92156862745098, \n 0.63927734375, 0.63927734375), (0.925490196078431, 0.65106640625, \n 0.65106640625), (0.929411764705882, 0.66285546875, 0.66285546875),\n (0.933333333333333, 0.67464453125, 0.67464453125), (\n 0.937254901960784, 0.68643359375, 0.68643359375), (\n 0.941176470588235, 0.69822265625, 0.69822265625), (\n 0.945098039215686, 0.710015625, 0.710015625), (0.949019607843137, \n 0.7218046875, 0.7218046875), (0.952941176470588, 0.73359375, \n 0.73359375), (0.956862745098039, 0.7453828125, 0.7453828125), (\n 0.96078431372549, 0.757171875, 0.757171875), (0.964705882352941, \n 0.76896484375, 0.76896484375), (0.968627450980392, 0.78075390625, \n 0.78075390625), (0.972549019607843, 0.79254296875, 0.79254296875),\n (0.976470588235294, 0.80433203125, 0.80433203125), (\n 0.980392156862745, 0.81612109375, 0.81612109375), (\n 0.984313725490196, 0.82791015625, 0.82791015625), (\n 0.988235294117647, 0.839703125, 0.839703125), (0.992156862745098, \n 0.8514921875, 0.8514921875), (0.996078431372549, 0.86328125, \n 0.86328125), (1.0, 0.86328125, 0.86328125)), 'green': ((0.0, \n 0.02984375, 0.02984375), (0.00392156862745098, 0.02984375, \n 0.02984375), (0.00784313725490196, 0.044765625, 0.044765625), (\n 0.0117647058823529, 0.0596875, 0.0596875), (0.0156862745098039, \n 0.074609375, 0.074609375), (0.0196078431372549, 0.08953125, \n 0.08953125), (0.0235294117647059, 0.104453125, 0.104453125), (\n 0.0274509803921569, 0.119375, 0.119375), (0.0313725490196078, \n 0.134296875, 0.134296875), (0.0352941176470588, 0.14921875, \n 0.14921875), (0.0392156862745098, 0.164140625, 0.164140625), (\n 0.0431372549019608, 0.1790625, 0.1790625), (0.0470588235294118, \n 0.193984375, 0.193984375), (0.0509803921568627, 0.20890625, \n 0.20890625), (0.0549019607843137, 0.223828125, 0.223828125), (\n 0.0588235294117647, 0.23875, 0.23875), (0.0627450980392157, \n 0.253671875, 0.253671875), (0.0666666666666667, 0.26859375, \n 0.26859375), (0.0705882352941176, 0.283515625, 0.283515625), (\n 0.0745098039215686, 0.2984375, 0.2984375), (0.0784313725490196, \n 0.313359375, 0.313359375), (0.0823529411764706, 0.32828125, \n 0.32828125), (0.0862745098039216, 0.343203125, 0.343203125), (\n 0.0901960784313725, 0.358125, 0.358125), (0.0941176470588235, \n 0.373046875, 0.373046875), (0.0980392156862745, 0.38796875, \n 0.38796875), (0.101960784313725, 0.402890625, 0.402890625), (\n 0.105882352941176, 0.4178125, 0.4178125), (0.109803921568627, \n 0.432734375, 0.432734375), (0.113725490196078, 0.44765625, \n 0.44765625), (0.117647058823529, 0.462578125, 0.462578125), (\n 0.12156862745098, 0.4775, 0.4775), (0.125490196078431, 0.492421875,\n 0.492421875), (0.129411764705882, 0.50734375, 0.50734375), (\n 0.133333333333333, 0.522265625, 0.522265625), (0.137254901960784, \n 0.5371875, 0.5371875), (0.141176470588235, 0.552109375, 0.552109375\n ), (0.145098039215686, 0.56703125, 0.56703125), (0.149019607843137,\n 0.581953125, 0.581953125), (0.152941176470588, 0.596875, 0.596875),\n (0.156862745098039, 0.611796875, 0.611796875), (0.16078431372549, \n 0.62671875, 0.62671875), (0.164705882352941, 0.641640625, \n 0.641640625), (0.168627450980392, 0.6565625, 0.6565625), (\n 0.172549019607843, 0.671484375, 0.671484375), (0.176470588235294, \n 0.68640625, 0.68640625), (0.180392156862745, 0.701328125, \n 0.701328125), (0.184313725490196, 0.71625, 0.71625), (\n 0.188235294117647, 0.731171875, 0.731171875), (0.192156862745098, \n 0.74609375, 0.74609375), (0.196078431372549, 0.731171875, \n 
0.731171875), (0.2, 0.71625, 0.71625), (0.203921568627451, \n 0.701328125, 0.701328125), (0.207843137254902, 0.68640625, \n 0.68640625), (0.211764705882353, 0.671484375, 0.671484375), (\n 0.215686274509804, 0.6565625, 0.6565625), (0.219607843137255, \n 0.641640625, 0.641640625), (0.223529411764706, 0.62671875, \n 0.62671875), (0.227450980392157, 0.611796875, 0.611796875), (\n 0.231372549019608, 0.596875, 0.596875), (0.235294117647059, \n 0.581953125, 0.581953125), (0.23921568627451, 0.56703125, \n 0.56703125), (0.243137254901961, 0.552109375, 0.552109375), (\n 0.247058823529412, 0.5371875, 0.5371875), (0.250980392156863, \n 0.522265625, 0.522265625), (0.254901960784314, 0.50734375, \n 0.50734375), (0.258823529411765, 0.492421875, 0.492421875), (\n 0.262745098039216, 0.4775, 0.4775), (0.266666666666667, 0.462578125,\n 0.462578125), (0.270588235294118, 0.44765625, 0.44765625), (\n 0.274509803921569, 0.432734375, 0.432734375), (0.27843137254902, \n 0.4178125, 0.4178125), (0.282352941176471, 0.402890625, 0.402890625\n ), (0.286274509803922, 0.38796875, 0.38796875), (0.290196078431373,\n 0.373046875, 0.373046875), (0.294117647058824, 0.358125, 0.358125),\n (0.298039215686275, 0.343203125, 0.343203125), (0.301960784313725, \n 0.32828125, 0.32828125), (0.305882352941176, 0.313359375, \n 0.313359375), (0.309803921568627, 0.2984375, 0.2984375), (\n 0.313725490196078, 0.283515625, 0.283515625), (0.317647058823529, \n 0.26859375, 0.26859375), (0.32156862745098, 0.253671875, \n 0.253671875), (0.325490196078431, 0.23875, 0.23875), (\n 0.329411764705882, 0.223828125, 0.223828125), (0.333333333333333, \n 0.20890625, 0.20890625), (0.337254901960784, 0.193984375, \n 0.193984375), (0.341176470588235, 0.1790625, 0.1790625), (\n 0.345098039215686, 0.164140625, 0.164140625), (0.349019607843137, \n 0.14921875, 0.14921875), (0.352941176470588, 0.134296875, \n 0.134296875), (0.356862745098039, 0.119375, 0.119375), (\n 0.36078431372549, 0.104453125, 0.104453125), (0.364705882352941, \n 0.08953125, 0.08953125), (0.368627450980392, 0.074609375, \n 0.074609375), (0.372549019607843, 0.0596875, 0.0596875), (\n 0.376470588235294, 0.044765625, 0.044765625), (0.380392156862745, \n 0.0298437890625, 0.0298437890625), (0.384313725490196, 0.014921875,\n 0.014921875), (0.388235294117647, 0, 0), (0.392156862745098, \n 0.012890625, 0.012890625), (0.396078431372549, 0.02578125, \n 0.02578125), (0.4, 0.038671875, 0.038671875), (0.403921568627451, \n 0.0515625, 0.0515625), (0.407843137254902, 0.064453125, 0.064453125\n ), (0.411764705882353, 0.07734375, 0.07734375), (0.415686274509804,\n 0.090234375, 0.090234375), (0.419607843137255, 0.103125, 0.103125),\n (0.423529411764706, 0.116015625, 0.116015625), (0.427450980392157, \n 0.12890625, 0.12890625), (0.431372549019608, 0.141796875, \n 0.141796875), (0.435294117647059, 0.1546875, 0.1546875), (\n 0.43921568627451, 0.167578125, 0.167578125), (0.443137254901961, \n 0.18046875, 0.18046875), (0.447058823529412, 0.193359375, \n 0.193359375), (0.450980392156863, 0.20625, 0.20625), (\n 0.454901960784314, 0.219140625, 0.219140625), (0.458823529411765, \n 0.23203125, 0.23203125), (0.462745098039216, 0.244921875, \n 0.244921875), (0.466666666666667, 0.2578125, 0.2578125), (\n 0.470588235294118, 0.270703125, 0.270703125), (0.474509803921569, \n 0.28359375, 0.28359375), (0.47843137254902, 0.296484375, \n 0.296484375), (0.482352941176471, 0.309375, 0.309375), (\n 0.486274509803922, 0.322265625, 0.322265625), (0.490196078431373, \n 0.33515625, 0.33515625), (0.494117647058824, 0.348046875, \n 
0.348046875), (0.498039215686275, 0.3609375, 0.3609375), (\n 0.501960784313725, 0.373828125, 0.373828125), (0.505882352941176, \n 0.38671875, 0.38671875), (0.509803921568627, 0.399609375, \n 0.399609375), (0.513725490196078, 0.4125, 0.4125), (\n 0.517647058823529, 0.425390625, 0.425390625), (0.52156862745098, \n 0.43828125, 0.43828125), (0.525490196078431, 0.451171875, \n 0.451171875), (0.529411764705882, 0.4640625, 0.4640625), (\n 0.533333333333333, 0.476953125, 0.476953125), (0.537254901960784, \n 0.48984375, 0.48984375), (0.541176470588235, 0.502734375, \n 0.502734375), (0.545098039215686, 0.515625, 0.515625), (\n 0.549019607843137, 0.528515625, 0.528515625), (0.552941176470588, \n 0.54140625, 0.54140625), (0.556862745098039, 0.554296875, \n 0.554296875), (0.56078431372549, 0.5671875, 0.5671875), (\n 0.564705882352941, 0.580078125, 0.580078125), (0.568627450980392, \n 0.59296875, 0.59296875), (0.572549019607843, 0.605859375, \n 0.605859375), (0.576470588235294, 0.61875, 0.61875), (\n 0.580392156862745, 0.631640625, 0.631640625), (0.584313725490196, \n 0.64453125, 0.64453125), (0.588235294117647, 0.6359375, 0.6359375),\n (0.592156862745098, 0.62734375, 0.62734375), (0.596078431372549, \n 0.61875, 0.61875), (0.6, 0.61015625, 0.61015625), (\n 0.603921568627451, 0.6015625, 0.6015625), (0.607843137254902, \n 0.59296875, 0.59296875), (0.611764705882353, 0.584375, 0.584375), (\n 0.615686274509804, 0.57578125, 0.57578125), (0.619607843137255, \n 0.5671875, 0.5671875), (0.623529411764706, 0.55859375, 0.55859375),\n (0.627450980392157, 0.55, 0.55), (0.631372549019608, 0.54140625, \n 0.54140625), (0.635294117647059, 0.5328125, 0.5328125), (\n 0.63921568627451, 0.52421875, 0.52421875), (0.643137254901961, \n 0.515625, 0.515625), (0.647058823529412, 0.50703125, 0.50703125), (\n 0.650980392156863, 0.4984375, 0.4984375), (0.654901960784314, \n 0.48984375, 0.48984375), (0.658823529411765, 0.48125, 0.48125), (\n 0.662745098039216, 0.47265625, 0.47265625), (0.666666666666667, \n 0.4640625, 0.4640625), (0.670588235294118, 0.45546875, 0.45546875),\n (0.674509803921569, 0.446875, 0.446875), (0.67843137254902, \n 0.43828125, 0.43828125), (0.682352941176471, 0.4296875, 0.4296875),\n (0.686274509803922, 0.42109375, 0.42109375), (0.690196078431373, \n 0.4125, 0.4125), (0.694117647058824, 0.40390625, 0.40390625), (\n 0.698039215686274, 0.3953125, 0.3953125), (0.701960784313725, \n 0.38671875, 0.38671875), (0.705882352941177, 0.378125, 0.378125), (\n 0.709803921568627, 0.36953125, 0.36953125), (0.713725490196078, \n 0.3609375, 0.3609375), (0.717647058823529, 0.35234375, 0.35234375),\n (0.72156862745098, 0.34375, 0.34375), (0.725490196078431, \n 0.33515625, 0.33515625), (0.729411764705882, 0.3265625, 0.3265625),\n (0.733333333333333, 0.31796875, 0.31796875), (0.737254901960784, \n 0.309375, 0.309375), (0.741176470588235, 0.30078125, 0.30078125), (\n 0.745098039215686, 0.2921875, 0.2921875), (0.749019607843137, \n 0.28359375, 0.28359375), (0.752941176470588, 0.275, 0.275), (\n 0.756862745098039, 0.26640625, 0.26640625), (0.76078431372549, \n 0.2578125, 0.2578125), (0.764705882352941, 0.24921875, 0.24921875),\n (0.768627450980392, 0.240625, 0.240625), (0.772549019607843, \n 0.23203125, 0.23203125), (0.776470588235294, 0.2234375, 0.2234375),\n (0.780392156862745, 0.21484375, 0.21484375), (0.784313725490196, \n 0.222301171875, 0.222301171875), (0.788235294117647, 0.22975859375,\n 0.22975859375), (0.792156862745098, 0.237216015625, 0.237216015625),\n (0.796078431372549, 0.2446734375, 0.2446734375), (0.8, \n 
0.252130859375, 0.252130859375), (0.803921568627451, 0.259587890625,\n 0.259587890625), (0.807843137254902, 0.2670453125, 0.2670453125), (\n 0.811764705882353, 0.274502734375, 0.274502734375), (\n 0.815686274509804, 0.28196015625, 0.28196015625), (\n 0.819607843137255, 0.289417578125, 0.289417578125), (\n 0.823529411764706, 0.296875, 0.296875), (0.827450980392157, \n 0.304332421875, 0.304332421875), (0.831372549019608, 0.31178984375,\n 0.31178984375), (0.835294117647059, 0.319247265625, 0.319247265625),\n (0.83921568627451, 0.3267046875, 0.3267046875), (0.843137254901961,\n 0.334162109375, 0.334162109375), (0.847058823529412, 0.34161953125,\n 0.34161953125), (0.850980392156863, 0.3490765625, 0.3490765625), (\n 0.854901960784314, 0.356533984375, 0.356533984375), (\n 0.858823529411765, 0.36399140625, 0.36399140625), (\n 0.862745098039216, 0.371448828125, 0.371448828125), (\n 0.866666666666667, 0.37890625, 0.37890625), (0.870588235294118, \n 0.386363671875, 0.386363671875), (0.874509803921569, 0.3938203125, \n 0.3938203125), (0.87843137254902, 0.40127734375, 0.40127734375), (\n 0.882352941176471, 0.408734375, 0.408734375), (0.886274509803922, \n 0.41619140625, 0.41619140625), (0.890196078431373, 0.42365234375, \n 0.42365234375), (0.894117647058824, 0.431109375, 0.431109375), (\n 0.898039215686275, 0.43856640625, 0.43856640625), (\n 0.901960784313726, 0.4460234375, 0.4460234375), (0.905882352941176,\n 0.45348046875, 0.45348046875), (0.909803921568627, 0.4609375, \n 0.4609375), (0.913725490196078, 0.46839453125, 0.46839453125), (\n 0.917647058823529, 0.4758515625, 0.4758515625), (0.92156862745098, \n 0.48330859375, 0.48330859375), (0.925490196078431, 0.490765625, \n 0.490765625), (0.929411764705882, 0.49822265625, 0.49822265625), (\n 0.933333333333333, 0.50568359375, 0.50568359375), (\n 0.937254901960784, 0.513140625, 0.513140625), (0.941176470588235, \n 0.52059765625, 0.52059765625), (0.945098039215686, 0.5280546875, \n 0.5280546875), (0.949019607843137, 0.53551171875, 0.53551171875), (\n 0.952941176470588, 0.54296875, 0.54296875), (0.956862745098039, \n 0.55042578125, 0.55042578125), (0.96078431372549, 0.5578828125, \n 0.5578828125), (0.964705882352941, 0.56533984375, 0.56533984375), (\n 0.968627450980392, 0.572796875, 0.572796875), (0.972549019607843, \n 0.58025390625, 0.58025390625), (0.976470588235294, 0.58771484375, \n 0.58771484375), (0.980392156862745, 0.595171875, 0.595171875), (\n 0.984313725490196, 0.60262890625, 0.60262890625), (\n 0.988235294117647, 0.6100859375, 0.6100859375), (0.992156862745098,\n 0.61754296875, 0.61754296875), (0.996078431372549, 0.625, 0.625), (\n 1.0, 0.625, 0.625)), 'blue': ((0.0, 0.51984375, 0.51984375), (\n 0.00392156862745098, 0.51984375, 0.51984375), (0.00784313725490196,\n 0.529765625, 0.529765625), (0.0117647058823529, 0.5396875, \n 0.5396875), (0.0156862745098039, 0.549609375, 0.549609375), (\n 0.0196078431372549, 0.55953125, 0.55953125), (0.0235294117647059, \n 0.569453125, 0.569453125), (0.0274509803921569, 0.579375, 0.579375),\n (0.0313725490196078, 0.589296875, 0.589296875), (0.0352941176470588,\n 0.59921875, 0.59921875), (0.0392156862745098, 0.609140625, \n 0.609140625), (0.0431372549019608, 0.6190625, 0.6190625), (\n 0.0470588235294118, 0.628984375, 0.628984375), (0.0509803921568627,\n 0.63890625, 0.63890625), (0.0549019607843137, 0.648828125, \n 0.648828125), (0.0588235294117647, 0.65875, 0.65875), (\n 0.0627450980392157, 0.668671875, 0.668671875), (0.0666666666666667,\n 0.67859375, 0.67859375), (0.0705882352941176, 0.688515625, \n 0.688515625), 
(0.0745098039215686, 0.6984375, 0.6984375), (\n 0.0784313725490196, 0.708359375, 0.708359375), (0.0823529411764706,\n 0.71828125, 0.71828125), (0.0862745098039216, 0.728203125, \n 0.728203125), (0.0901960784313725, 0.738125, 0.738125), (\n 0.0941176470588235, 0.748046875, 0.748046875), (0.0980392156862745,\n 0.75796875, 0.75796875), (0.101960784313725, 0.767890625, \n 0.767890625), (0.105882352941176, 0.7778125, 0.7778125), (\n 0.109803921568627, 0.787734375, 0.787734375), (0.113725490196078, \n 0.79765625, 0.79765625), (0.117647058823529, 0.807578125, \n 0.807578125), (0.12156862745098, 0.8175, 0.8175), (\n 0.125490196078431, 0.827421875, 0.827421875), (0.129411764705882, \n 0.83734375, 0.83734375), (0.133333333333333, 0.847265625, \n 0.847265625), (0.137254901960784, 0.8571875, 0.8571875), (\n 0.141176470588235, 0.867109375, 0.867109375), (0.145098039215686, \n 0.87703125, 0.87703125), (0.149019607843137, 0.886953125, \n 0.886953125), (0.152941176470588, 0.896875, 0.896875), (\n 0.156862745098039, 0.906796875, 0.906796875), (0.16078431372549, \n 0.91671875, 0.91671875), (0.164705882352941, 0.926640625, \n 0.926640625), (0.168627450980392, 0.9365625, 0.9365625), (\n 0.172549019607843, 0.946484375, 0.946484375), (0.176470588235294, \n 0.95640625, 0.95640625), (0.180392156862745, 0.966328125, \n 0.966328125), (0.184313725490196, 0.97625, 0.97625), (\n 0.188235294117647, 0.986171875, 0.986171875), (0.192156862745098, \n 0.99609375, 0.99609375), (0.196078431372549, 0.976171875, \n 0.976171875), (0.2, 0.95625, 0.95625), (0.203921568627451, \n 0.936328125, 0.936328125), (0.207843137254902, 0.91640625, \n 0.91640625), (0.211764705882353, 0.896484375, 0.896484375), (\n 0.215686274509804, 0.8765625, 0.8765625), (0.219607843137255, \n 0.856640625, 0.856640625), (0.223529411764706, 0.83671875, \n 0.83671875), (0.227450980392157, 0.816796875, 0.816796875), (\n 0.231372549019608, 0.796875, 0.796875), (0.235294117647059, \n 0.776953125, 0.776953125), (0.23921568627451, 0.75703125, \n 0.75703125), (0.243137254901961, 0.737109375, 0.737109375), (\n 0.247058823529412, 0.7171875, 0.7171875), (0.250980392156863, \n 0.697265625, 0.697265625), (0.254901960784314, 0.67734375, \n 0.67734375), (0.258823529411765, 0.657421875, 0.657421875), (\n 0.262745098039216, 0.6375, 0.6375), (0.266666666666667, 0.617578125,\n 0.617578125), (0.270588235294118, 0.59765625, 0.59765625), (\n 0.274509803921569, 0.577734375, 0.577734375), (0.27843137254902, \n 0.5578125, 0.5578125), (0.282352941176471, 0.537890625, 0.537890625\n ), (0.286274509803922, 0.51796875, 0.51796875), (0.290196078431373,\n 0.498046875, 0.498046875), (0.294117647058824, 0.478125, 0.478125),\n (0.298039215686275, 0.458203125, 0.458203125), (0.301960784313725, \n 0.43828125, 0.43828125), (0.305882352941176, 0.418359375, \n 0.418359375), (0.309803921568627, 0.3984375, 0.3984375), (\n 0.313725490196078, 0.378515625, 0.378515625), (0.317647058823529, \n 0.35859375, 0.35859375), (0.32156862745098, 0.338671875, \n 0.338671875), (0.325490196078431, 0.31875, 0.31875), (\n 0.329411764705882, 0.298828125, 0.298828125), (0.333333333333333, \n 0.27890625, 0.27890625), (0.337254901960784, 0.258984375, \n 0.258984375), (0.341176470588235, 0.2390625, 0.2390625), (\n 0.345098039215686, 0.219140625, 0.219140625), (0.349019607843137, \n 0.19921875, 0.19921875), (0.352941176470588, 0.179296875, \n 0.179296875), (0.356862745098039, 0.159375, 0.159375), (\n 0.36078431372549, 0.139453125, 0.139453125), (0.364705882352941, \n 0.11953125, 0.11953125), (0.368627450980392, 
0.099609375, \n 0.099609375), (0.372549019607843, 0.0796875, 0.0796875), (\n 0.376470588235294, 0.059765625, 0.059765625), (0.380392156862745, \n 0.03984375, 0.03984375), (0.384313725490196, 0.019921875, \n 0.019921875), (0.388235294117647, 0, 0), (0.392156862745098, 0, 0),\n (0.396078431372549, 0, 0), (0.4, 0, 0), (0.403921568627451, 0, 0),\n (0.407843137254902, 0, 0), (0.411764705882353, 0, 0), (\n 0.415686274509804, 0, 0), (0.419607843137255, 0, 0), (\n 0.423529411764706, 0, 0), (0.427450980392157, 0, 0), (\n 0.431372549019608, 0, 0), (0.435294117647059, 0, 0), (\n 0.43921568627451, 0, 0), (0.443137254901961, 0, 0), (\n 0.447058823529412, 0, 0), (0.450980392156863, 0, 0), (\n 0.454901960784314, 0, 0), (0.458823529411765, 0, 0), (\n 0.462745098039216, 0, 0), (0.466666666666667, 0, 0), (\n 0.470588235294118, 0, 0), (0.474509803921569, 0, 0), (\n 0.47843137254902, 0, 0), (0.482352941176471, 0, 0), (\n 0.486274509803922, 0, 0), (0.490196078431373, 0, 0), (\n 0.494117647058824, 0, 0), (0.498039215686275, 0, 0), (\n 0.501960784313725, 0, 0), (0.505882352941176, 0, 0), (\n 0.509803921568627, 0, 0), (0.513725490196078, 0, 0), (\n 0.517647058823529, 0, 0), (0.52156862745098, 0, 0), (\n 0.525490196078431, 0, 0), (0.529411764705882, 0, 0), (\n 0.533333333333333, 0, 0), (0.537254901960784, 0, 0), (\n 0.541176470588235, 0, 0), (0.545098039215686, 0, 0), (\n 0.549019607843137, 0, 0), (0.552941176470588, 0, 0), (\n 0.556862745098039, 0, 0), (0.56078431372549, 0, 0), (\n 0.564705882352941, 0, 0), (0.568627450980392, 0, 0), (\n 0.572549019607843, 0, 0), (0.576470588235294, 0, 0), (\n 0.580392156862745, 0, 0), (0.584313725490196, 0, 0), (\n 0.588235294117647, 0.004296875, 0.004296875), (0.592156862745098, \n 0.00859375, 0.00859375), (0.596078431372549, 0.012890625, \n 0.012890625), (0.6, 0.0171875, 0.0171875), (0.603921568627451, \n 0.021484375, 0.021484375), (0.607843137254902, 0.02578125, \n 0.02578125), (0.611764705882353, 0.030078125, 0.030078125), (\n 0.615686274509804, 0.034375, 0.034375), (0.619607843137255, \n 0.038671875, 0.038671875), (0.623529411764706, 0.04296875, \n 0.04296875), (0.627450980392157, 0.047265625, 0.047265625), (\n 0.631372549019608, 0.0515625, 0.0515625), (0.635294117647059, \n 0.055859375, 0.055859375), (0.63921568627451, 0.06015625, \n 0.06015625), (0.643137254901961, 0.064453125, 0.064453125), (\n 0.647058823529412, 0.06875, 0.06875), (0.650980392156863, \n 0.073046875, 0.073046875), (0.654901960784314, 0.07734375, \n 0.07734375), (0.658823529411765, 0.081640625, 0.081640625), (\n 0.662745098039216, 0.0859375, 0.0859375), (0.666666666666667, \n 0.090234375, 0.090234375), (0.670588235294118, 0.09453125, \n 0.09453125), (0.674509803921569, 0.098828125, 0.098828125), (\n 0.67843137254902, 0.103125, 0.103125), (0.682352941176471, \n 0.107421875, 0.107421875), (0.686274509803922, 0.11171875, \n 0.11171875), (0.690196078431373, 0.116015625, 0.116015625), (\n 0.694117647058824, 0.1203125, 0.1203125), (0.698039215686274, \n 0.124609375, 0.124609375), (0.701960784313725, 0.12890625, \n 0.12890625), (0.705882352941177, 0.133203125, 0.133203125), (\n 0.709803921568627, 0.1375, 0.1375), (0.713725490196078, 0.141796875,\n 0.141796875), (0.717647058823529, 0.14609375, 0.14609375), (\n 0.72156862745098, 0.150390625, 0.150390625), (0.725490196078431, \n 0.1546875, 0.1546875), (0.729411764705882, 0.158984375, 0.158984375\n ), (0.733333333333333, 0.16328125, 0.16328125), (0.737254901960784,\n 0.167578125, 0.167578125), (0.741176470588235, 0.171875, 0.171875),\n (0.745098039215686, 
0.176171875, 0.176171875), (0.749019607843137, \n 0.18046875, 0.18046875), (0.752941176470588, 0.184765625, \n 0.184765625), (0.756862745098039, 0.1890625, 0.1890625), (\n 0.76078431372549, 0.193359375, 0.193359375), (0.764705882352941, \n 0.19765625, 0.19765625), (0.768627450980392, 0.201953125, \n 0.201953125), (0.772549019607843, 0.20625, 0.20625), (\n 0.776470588235294, 0.210546875, 0.210546875), (0.780392156862745, \n 0.21484375, 0.21484375), (0.784313725490196, 0.22663359375, \n 0.22663359375), (0.788235294117647, 0.2384234375, 0.2384234375), (\n 0.792156862745098, 0.250212890625, 0.250212890625), (\n 0.796078431372549, 0.262002734375, 0.262002734375), (0.8, \n 0.273792578125, 0.273792578125), (0.803921568627451, 0.285582421875,\n 0.285582421875), (0.807843137254902, 0.297372265625, 0.297372265625\n ), (0.811764705882353, 0.309162109375, 0.309162109375), (\n 0.815686274509804, 0.3209515625, 0.3209515625), (0.819607843137255,\n 0.33274140625, 0.33274140625), (0.823529411764706, 0.34453125, \n 0.34453125), (0.827450980392157, 0.35632109375, 0.35632109375), (\n 0.831372549019608, 0.3681109375, 0.3681109375), (0.835294117647059,\n 0.379900390625, 0.379900390625), (0.83921568627451, 0.39169140625, \n 0.39169140625), (0.843137254901961, 0.40348046875, 0.40348046875),\n (0.847058823529412, 0.41526953125, 0.41526953125), (\n 0.850980392156863, 0.42705859375, 0.42705859375), (\n 0.854901960784314, 0.43884765625, 0.43884765625), (\n 0.858823529411765, 0.450640625, 0.450640625), (0.862745098039216, \n 0.4624296875, 0.4624296875), (0.866666666666667, 0.47421875, \n 0.47421875), (0.870588235294118, 0.4860078125, 0.4860078125), (\n 0.874509803921569, 0.497796875, 0.497796875), (0.87843137254902, \n 0.50958984375, 0.50958984375), (0.882352941176471, 0.52137890625, \n 0.52137890625), (0.886274509803922, 0.53316796875, 0.53316796875),\n (0.890196078431373, 0.54495703125, 0.54495703125), (\n 0.894117647058824, 0.55674609375, 0.55674609375), (\n 0.898039215686275, 0.56853515625, 0.56853515625), (\n 0.901960784313726, 0.580328125, 0.580328125), (0.905882352941176, \n 0.5921171875, 0.5921171875), (0.909803921568627, 0.60390625, \n 0.60390625), (0.913725490196078, 0.6156953125, 0.6156953125), (\n 0.917647058823529, 0.627484375, 0.627484375), (0.92156862745098, \n 0.63927734375, 0.63927734375), (0.925490196078431, 0.65106640625, \n 0.65106640625), (0.929411764705882, 0.66285546875, 0.66285546875),\n (0.933333333333333, 0.67464453125, 0.67464453125), (\n 0.937254901960784, 0.68643359375, 0.68643359375), (\n 0.941176470588235, 0.69822265625, 0.69822265625), (\n 0.945098039215686, 0.710015625, 0.710015625), (0.949019607843137, \n 0.7218046875, 0.7218046875), (0.952941176470588, 0.73359375, \n 0.73359375), (0.956862745098039, 0.7453828125, 0.7453828125), (\n 0.96078431372549, 0.757171875, 0.757171875), (0.964705882352941, \n 0.76896484375, 0.76896484375), (0.968627450980392, 0.78075390625, \n 0.78075390625), (0.972549019607843, 0.79254296875, 0.79254296875),\n (0.976470588235294, 0.80433203125, 0.80433203125), (\n 0.980392156862745, 0.81612109375, 0.81612109375), (\n 0.984313725490196, 0.82791015625, 0.82791015625), (\n 0.988235294117647, 0.839703125, 0.839703125), (0.992156862745098, \n 0.8514921875, 0.8514921875), (0.996078431372549, 0.86328125, \n 0.86328125), (1.0, 0.86328125, 0.86328125))}\n vcdict = {'red': ((0, 1, 1), (0.00392156862745098, 0.54508984375, \n 0.54508984375), (0.00784313725490196, 0.5285703125, 0.5285703125),\n (0.0117647058823529, 0.5120546875, 0.5120546875), (\n 0.0156862745098039, 
0.49553515625, 0.49553515625), (\n 0.0196078431372549, 0.47901953125, 0.47901953125), (\n 0.0235294117647059, 0.4625, 0.4625), (0.0274509803921569, \n 0.44598046875, 0.44598046875), (0.0313725490196078, 0.42946484375, \n 0.42946484375), (0.0352941176470588, 0.4129453125, 0.4129453125), (\n 0.0392156862745098, 0.3964296875, 0.3964296875), (\n 0.0431372549019608, 0.379910546875, 0.379910546875), (\n 0.0470588235294118, 0.36339296875, 0.36339296875), (\n 0.0509803921568627, 0.346875, 0.346875), (0.0549019607843137, \n 0.33035703125, 0.33035703125), (0.0588235294117647, 0.313839453125,\n 0.313839453125), (0.0627450980392157, 0.297321484375, \n 0.297321484375), (0.0666666666666667, 0.280803515625, \n 0.280803515625), (0.0705882352941176, 0.2642859375, 0.2642859375),\n (0.0745098039215686, 0.24776796875, 0.24776796875), (\n 0.0784313725490196, 0.23125, 0.23125), (0.0823529411764706, \n 0.21473203125, 0.21473203125), (0.0862745098039216, 0.198214453125,\n 0.198214453125), (0.0901960784313725, 0.181696484375, \n 0.181696484375), (0.0941176470588235, 0.165178515625, \n 0.165178515625), (0.0980392156862745, 0.148660546875, \n 0.148660546875), (0.101960784313725, 0.13214296875, 0.13214296875),\n (0.105882352941176, 0.115625, 0.115625), (0.109803921568627, \n 0.09910703125, 0.09910703125), (0.113725490196078, 0.082589453125, \n 0.082589453125), (0.117647058823529, 0.066071484375, 0.066071484375\n ), (0.12156862745098, 0.049553515625, 0.049553515625), (\n 0.125490196078431, 0.0330357421875, 0.0330357421875), (\n 0.129411764705882, 0.016517890625, 0.016517890625), (\n 0.133333333333333, 0, 0), (0.137254901960784, 0, 0), (\n 0.141176470588235, 0, 0), (0.145098039215686, 0, 0), (\n 0.149019607843137, 0, 0), (0.152941176470588, 0, 0), (\n 0.156862745098039, 0, 0), (0.16078431372549, 0, 0), (\n 0.164705882352941, 0, 0), (0.168627450980392, 0, 0), (\n 0.172549019607843, 0, 0), (0.176470588235294, 0, 0), (\n 0.180392156862745, 0, 0), (0.184313725490196, 0, 0), (\n 0.188235294117647, 0, 0), (0.192156862745098, 0, 0), (\n 0.196078431372549, 0, 0), (0.2, 0, 0), (0.203921568627451, 0, 0), (\n 0.207843137254902, 0, 0), (0.211764705882353, 0, 0), (\n 0.215686274509804, 0, 0), (0.219607843137255, 0, 0), (\n 0.223529411764706, 0, 0), (0.227450980392157, 0, 0), (\n 0.231372549019608, 0, 0), (0.235294117647059, 0, 0), (\n 0.23921568627451, 0, 0), (0.243137254901961, 0, 0), (\n 0.247058823529412, 0, 0), (0.250980392156863, 0, 0), (\n 0.254901960784314, 0, 0), (0.258823529411765, 0, 0), (\n 0.262745098039216, 0, 0), (0.266666666666667, 0, 0), (\n 0.270588235294118, 0, 0), (0.274509803921569, 0, 0), (\n 0.27843137254902, 0, 0), (0.282352941176471, 0, 0), (\n 0.286274509803922, 0, 0), (0.290196078431373, 0, 0), (\n 0.294117647058824, 0, 0), (0.298039215686275, 0, 0), (\n 0.301960784313725, 0, 0), (0.305882352941176, 0, 0), (\n 0.309803921568627, 0, 0), (0.313725490196078, 0, 0), (\n 0.317647058823529, 0, 0), (0.32156862745098, 0, 0), (\n 0.325490196078431, 0, 0), (0.329411764705882, 0, 0), (\n 0.333333333333333, 0, 0), (0.337254901960784, 0, 0), (\n 0.341176470588235, 0, 0), (0.345098039215686, 0, 0), (\n 0.349019607843137, 0, 0), (0.352941176470588, 0.0061383984375, \n 0.0061383984375), (0.356862745098039, 0.012276796875, \n 0.012276796875), (0.36078431372549, 0.0184151953125, \n 0.0184151953125), (0.364705882352941, 0.0245535546875, \n 0.0245535546875), (0.368627450980392, 0.030691953125, \n 0.030691953125), (0.372549019607843, 0.0368303515625, \n 0.0368303515625), (0.376470588235294, 0.04296875, 0.04296875), (\n 
0.380392156862745, 0.04910703125, 0.04910703125), (\n 0.384313725490196, 0.055245703125, 0.055245703125), (\n 0.388235294117647, 0.061383984375, 0.061383984375), (\n 0.392156862745098, 0.067522265625, 0.067522265625), (\n 0.396078431372549, 0.073660546875, 0.073660546875), (0.4, \n 0.07979921875, 0.07979921875), (0.403921568627451, 0.0859375, \n 0.0859375), (0.407843137254902, 0.09207578125, 0.09207578125), (\n 0.411764705882353, 0.098214453125, 0.098214453125), (\n 0.415686274509804, 0.104352734375, 0.104352734375), (\n 0.419607843137255, 0.110491015625, 0.110491015625), (\n 0.423529411764706, 0.116629296875, 0.116629296875), (\n 0.427450980392157, 0.12276796875, 0.12276796875), (\n 0.431372549019608, 0.12890625, 0.12890625), (0.435294117647059, \n 0.13504453125, 0.13504453125), (0.43921568627451, 0.141183203125, \n 0.141183203125), (0.443137254901961, 0.147321484375, 0.147321484375\n ), (0.447058823529412, 0.153459765625, 0.153459765625), (\n 0.450980392156863, 0.159598046875, 0.159598046875), (\n 0.454901960784314, 0.16573671875, 0.16573671875), (\n 0.458823529411765, 0.171875, 0.171875), (0.462745098039216, \n 0.17801328125, 0.17801328125), (0.466666666666667, 0.184151953125, \n 0.184151953125), (0.470588235294118, 0.190290234375, 0.190290234375\n ), (0.474509803921569, 0.196428515625, 0.196428515625), (\n 0.47843137254902, 0.202566796875, 0.202566796875), (\n 0.482352941176471, 0.20870546875, 0.20870546875), (\n 0.486274509803922, 0.21484375, 0.21484375), (0.490196078431373, \n 0.233370703125, 0.233370703125), (0.494117647058824, 0.251897265625,\n 0.251897265625), (0.498039215686275, 0.27042421875, 0.27042421875),\n (0.501960784313725, 0.28895078125, 0.28895078125), (\n 0.505882352941176, 0.307477734375, 0.307477734375), (\n 0.509803921568627, 0.326004296875, 0.326004296875), (\n 0.513725490196078, 0.34453125, 0.34453125), (0.517647058823529, \n 0.363058203125, 0.363058203125), (0.52156862745098, 0.381584765625,\n 0.381584765625), (0.525490196078431, 0.40011328125, 0.40011328125),\n (0.529411764705882, 0.41863671875, 0.41863671875), (\n 0.533333333333333, 0.4371640625, 0.4371640625), (0.537254901960784,\n 0.45569140625, 0.45569140625), (0.541176470588235, 0.47421875, \n 0.47421875), (0.545098039215686, 0.49274609375, 0.49274609375), (\n 0.549019607843137, 0.5112734375, 0.5112734375), (0.552941176470588,\n 0.52980078125, 0.52980078125), (0.556862745098039, 0.54832421875, \n 0.54832421875), (0.56078431372549, 0.5668515625, 0.5668515625), (\n 0.564705882352941, 0.58537890625, 0.58537890625), (\n 0.568627450980392, 0.60390625, 0.60390625), (0.572549019607843, \n 0.62243359375, 0.62243359375), (0.576470588235294, 0.6409609375, \n 0.6409609375), (0.580392156862745, 0.65948828125, 0.65948828125), (\n 0.584313725490196, 0.67801171875, 0.67801171875), (\n 0.588235294117647, 0.6965390625, 0.6965390625), (0.592156862745098,\n 0.71506640625, 0.71506640625), (0.596078431372549, 0.73359375, \n 0.73359375), (0.6, 0.75212109375, 0.75212109375), (\n 0.603921568627451, 0.7706484375, 0.7706484375), (0.607843137254902,\n 0.78917578125, 0.78917578125), (0.611764705882353, 0.80769921875, \n 0.80769921875), (0.615686274509804, 0.8262265625, 0.8262265625), (\n 0.619607843137255, 0.84475390625, 0.84475390625), (\n 0.623529411764706, 0.86328125, 0.86328125), (0.627450980392157, \n 0.86549609375, 0.86549609375), (0.631372549019608, 0.86770703125, \n 0.86770703125), (0.635294117647059, 0.869921875, 0.869921875), (\n 0.63921568627451, 0.87213671875, 0.87213671875), (0.643137254901961,\n 0.87434765625, 
0.87434765625), (0.647058823529412, 0.8765625, \n 0.8765625), (0.650980392156863, 0.87877734375, 0.87877734375), (\n 0.654901960784314, 0.88098828125, 0.88098828125), (\n 0.658823529411765, 0.883203125, 0.883203125), (0.662745098039216, \n 0.88541796875, 0.88541796875), (0.666666666666667, 0.88762890625, \n 0.88762890625), (0.670588235294118, 0.88984375, 0.88984375), (\n 0.674509803921569, 0.89205859375, 0.89205859375), (0.67843137254902,\n 0.89426953125, 0.89426953125), (0.682352941176471, 0.896484375, \n 0.896484375), (0.686274509803922, 0.89869921875, 0.89869921875), (\n 0.690196078431373, 0.90091015625, 0.90091015625), (\n 0.694117647058824, 0.903125, 0.903125), (0.698039215686274, \n 0.90533984375, 0.90533984375), (0.701960784313725, 0.90755078125, \n 0.90755078125), (0.705882352941177, 0.909765625, 0.909765625), (\n 0.709803921568627, 0.91198046875, 0.91198046875), (\n 0.713725490196078, 0.91419140625, 0.91419140625), (\n 0.717647058823529, 0.91640625, 0.91640625), (0.72156862745098, \n 0.91862109375, 0.91862109375), (0.725490196078431, 0.92083203125, \n 0.92083203125), (0.729411764705882, 0.923046875, 0.923046875), (\n 0.733333333333333, 0.92526171875, 0.92526171875), (\n 0.737254901960784, 0.92747265625, 0.92747265625), (\n 0.741176470588235, 0.9296875, 0.9296875), (0.745098039215686, \n 0.93190234375, 0.93190234375), (0.749019607843137, 0.93411328125, \n 0.93411328125), (0.752941176470588, 0.936328125, 0.936328125), (\n 0.756862745098039, 0.93854296875, 0.93854296875), (0.76078431372549,\n 0.94075390625, 0.94075390625), (0.764705882352941, 0.94296875, \n 0.94296875), (0.768627450980392, 0.94518359375, 0.94518359375), (\n 0.772549019607843, 0.94739453125, 0.94739453125), (\n 0.776470588235294, 0.949609375, 0.949609375), (0.780392156862745, \n 0.95182421875, 0.95182421875), (0.784313725490196, 0.95403515625, \n 0.95403515625), (0.788235294117647, 0.95625, 0.95625), (\n 0.792156862745098, 0.95846484375, 0.95846484375), (\n 0.796078431372549, 0.96067578125, 0.96067578125), (0.8, 0.962890625,\n 0.962890625), (0.803921568627451, 0.96510546875, 0.96510546875), (\n 0.807843137254902, 0.96731640625, 0.96731640625), (\n 0.811764705882353, 0.96953125, 0.96953125), (0.815686274509804, \n 0.97174609375, 0.97174609375), (0.819607843137255, 0.97395703125, \n 0.97395703125), (0.823529411764706, 0.976171875, 0.976171875), (\n 0.827450980392157, 0.97838671875, 0.97838671875), (\n 0.831372549019608, 0.98059765625, 0.98059765625), (\n 0.835294117647059, 0.9828125, 0.9828125), (0.83921568627451, \n 0.98502734375, 0.98502734375), (0.843137254901961, 0.98723828125, \n 0.98723828125), (0.847058823529412, 0.989453125, 0.989453125), (\n 0.850980392156863, 0.99166796875, 0.99166796875), (\n 0.854901960784314, 0.99387890625, 0.99387890625), (\n 0.858823529411765, 0.99609375, 0.99609375), (0.862745098039216, \n 0.99609375, 0.99609375), (0.866666666666667, 0.99609375, 0.99609375\n ), (0.870588235294118, 0.99609375, 0.99609375), (0.874509803921569,\n 0.99609375, 0.99609375), (0.87843137254902, 0.99609375, 0.99609375),\n (0.882352941176471, 0.99609375, 0.99609375), (0.886274509803922, \n 0.99609375, 0.99609375), (0.890196078431373, 0.99609375, 0.99609375\n ), (0.894117647058824, 0.99609375, 0.99609375), (0.898039215686275,\n 0.99609375, 0.99609375), (0.901960784313726, 0.99609375, 0.99609375\n ), (0.905882352941176, 0.99609375, 0.99609375), (0.909803921568627,\n 0.99609375, 0.99609375), (0.913725490196078, 0.99609375, 0.99609375\n ), (0.917647058823529, 0.99609375, 0.99609375), (0.92156862745098, \n 0.99609375, 
0.99609375), (0.925490196078431, 0.99609375, 0.99609375\n ), (0.929411764705882, 0.99609375, 0.99609375), (0.933333333333333,\n 0.99609375, 0.99609375), (0.937254901960784, 0.99609375, 0.99609375\n ), (0.941176470588235, 0.99609375, 0.99609375), (0.945098039215686,\n 0.99609375, 0.99609375), (0.949019607843137, 0.99609375, 0.99609375\n ), (0.952941176470588, 0.99609375, 0.99609375), (0.956862745098039,\n 0.99609375, 0.99609375), (0.96078431372549, 0.99609375, 0.99609375),\n (0.964705882352941, 0.99609375, 0.99609375), (0.968627450980392, \n 0.99609375, 0.99609375), (0.972549019607843, 0.99609375, 0.99609375\n ), (0.976470588235294, 0.99609375, 0.99609375), (0.980392156862745,\n 0.99609375, 0.99609375), (0.984313725490196, 0.99609375, 0.99609375\n ), (0.988235294117647, 0.99609375, 0.99609375), (0.992156862745098,\n 0.99609375, 0.99609375), (0.996078431372549, 0.99609375, 0.99609375\n ), (1, 0.99609375, 0.99609375)), 'green': ((0, 1, 1), (\n 0.00392156862745098, 0, 0), (0.00784313725490196, 0, 0), (\n 0.0117647058823529, 0, 0), (0.0156862745098039, 0, 0), (\n 0.0196078431372549, 0, 0), (0.0235294117647059, 0, 0), (\n 0.0274509803921569, 0, 0), (0.0313725490196078, 0, 0), (\n 0.0352941176470588, 0, 0), (0.0392156862745098, 0, 0), (\n 0.0431372549019608, 0, 0), (0.0470588235294118, 0, 0), (\n 0.0509803921568627, 0, 0), (0.0549019607843137, 0, 0), (\n 0.0588235294117647, 0, 0), (0.0627450980392157, 0, 0), (\n 0.0666666666666667, 0, 0), (0.0705882352941176, 0, 0), (\n 0.0745098039215686, 0, 0), (0.0784313725490196, 0, 0), (\n 0.0823529411764706, 0, 0), (0.0862745098039216, 0, 0), (\n 0.0901960784313725, 0, 0), (0.0941176470588235, 0, 0), (\n 0.0980392156862745, 0, 0), (0.101960784313725, 0, 0), (\n 0.105882352941176, 0, 0), (0.109803921568627, 0, 0), (\n 0.113725490196078, 0, 0), (0.117647058823529, 0, 0), (\n 0.12156862745098, 0, 0), (0.125490196078431, 0, 0), (\n 0.129411764705882, 0, 0), (0.133333333333333, 0, 0), (\n 0.137254901960784, 0.0135653515625, 0.0135653515625), (\n 0.141176470588235, 0.0271306640625, 0.0271306640625), (\n 0.145098039215686, 0.04069609375, 0.04069609375), (\n 0.149019607843137, 0.054261328125, 0.054261328125), (\n 0.152941176470588, 0.0678265625, 0.0678265625), (0.156862745098039,\n 0.0813921875, 0.0813921875), (0.16078431372549, 0.094957421875, \n 0.094957421875), (0.164705882352941, 0.10852265625, 0.10852265625),\n (0.168627450980392, 0.122087890625, 0.122087890625), (\n 0.172549019607843, 0.135653515625, 0.135653515625), (\n 0.176470588235294, 0.14921875, 0.14921875), (0.180392156862745, \n 0.162783984375, 0.162783984375), (0.184313725490196, 0.176349609375,\n 0.176349609375), (0.188235294117647, 0.18991484375, 0.18991484375),\n (0.192156862745098, 0.203480078125, 0.203480078125), (\n 0.196078431372549, 0.2170453125, 0.2170453125), (0.2, 0.2306109375,\n 0.2306109375), (0.203921568627451, 0.244176171875, 0.244176171875),\n (0.207843137254902, 0.25774140625, 0.25774140625), (\n 0.211764705882353, 0.27130703125, 0.27130703125), (\n 0.215686274509804, 0.284872265625, 0.284872265625), (\n 0.219607843137255, 0.2984375, 0.2984375), (0.223529411764706, \n 0.312002734375, 0.312002734375), (0.227450980392157, 0.325568359375,\n 0.325568359375), (0.231372549019608, 0.33913359375, 0.33913359375),\n (0.235294117647059, 0.352698828125, 0.352698828125), (\n 0.23921568627451, 0.3662640625, 0.3662640625), (0.243137254901961, \n 0.3798296875, 0.3798296875), (0.247058823529412, 0.39339453125, \n 0.39339453125), (0.250980392156863, 0.4069609375, 0.4069609375), (\n 0.254901960784314, 
0.42052734375, 0.42052734375), (\n 0.258823529411765, 0.43408984375, 0.43408984375), (\n 0.262745098039216, 0.44765625, 0.44765625), (0.266666666666667, \n 0.46122265625, 0.46122265625), (0.270588235294118, 0.47478515625, \n 0.47478515625), (0.274509803921569, 0.4883515625, 0.4883515625), (\n 0.27843137254902, 0.50191796875, 0.50191796875), (0.282352941176471,\n 0.515484375, 0.515484375), (0.286274509803922, 0.529046875, \n 0.529046875), (0.290196078431373, 0.54261328125, 0.54261328125), (\n 0.294117647058824, 0.5561796875, 0.5561796875), (0.298039215686275,\n 0.56974609375, 0.56974609375), (0.301960784313725, 0.58330859375, \n 0.58330859375), (0.305882352941176, 0.596875, 0.596875), (\n 0.309803921568627, 0.61044140625, 0.61044140625), (\n 0.313725490196078, 0.62400390625, 0.62400390625), (\n 0.317647058823529, 0.6375703125, 0.6375703125), (0.32156862745098, \n 0.65113671875, 0.65113671875), (0.325490196078431, 0.664703125, \n 0.664703125), (0.329411764705882, 0.678265625, 0.678265625), (\n 0.333333333333333, 0.69183203125, 0.69183203125), (\n 0.337254901960784, 0.7053984375, 0.7053984375), (0.341176470588235,\n 0.71896484375, 0.71896484375), (0.345098039215686, 0.73252734375, \n 0.73252734375), (0.349019607843137, 0.74609375, 0.74609375), (\n 0.352941176470588, 0.7309140625, 0.7309140625), (0.356862745098039,\n 0.71573828125, 0.71573828125), (0.36078431372549, 0.70055859375, \n 0.70055859375), (0.364705882352941, 0.68537890625, 0.68537890625),\n (0.368627450980392, 0.67019921875, 0.67019921875), (\n 0.372549019607843, 0.6550234375, 0.6550234375), (0.376470588235294,\n 0.63984375, 0.63984375), (0.380392156862745, 0.6246640625, \n 0.6246640625), (0.384313725490196, 0.60948828125, 0.60948828125), (\n 0.388235294117647, 0.59430859375, 0.59430859375), (\n 0.392156862745098, 0.57912890625, 0.57912890625), (\n 0.396078431372549, 0.56394921875, 0.56394921875), (0.4, \n 0.5487734375, 0.5487734375), (0.403921568627451, 0.53359375, \n 0.53359375), (0.407843137254902, 0.5184140625, 0.5184140625), (\n 0.411764705882353, 0.50323828125, 0.50323828125), (\n 0.415686274509804, 0.48805859375, 0.48805859375), (\n 0.419607843137255, 0.47287890625, 0.47287890625), (\n 0.423529411764706, 0.45769921875, 0.45769921875), (\n 0.427450980392157, 0.4425234375, 0.4425234375), (0.431372549019608,\n 0.42734375, 0.42734375), (0.435294117647059, 0.4121640625, \n 0.4121640625), (0.43921568627451, 0.39698828125, 0.39698828125), (\n 0.443137254901961, 0.381808203125, 0.381808203125), (\n 0.447058823529412, 0.366629296875, 0.366629296875), (\n 0.450980392156863, 0.35145078125, 0.35145078125), (\n 0.454901960784314, 0.336272265625, 0.336272265625), (\n 0.458823529411765, 0.32109375, 0.32109375), (0.462745098039216, \n 0.305915234375, 0.305915234375), (0.466666666666667, 0.29073671875,\n 0.29073671875), (0.470588235294118, 0.2755578125, 0.2755578125), (\n 0.474509803921569, 0.260379296875, 0.260379296875), (\n 0.47843137254902, 0.24520078125, 0.24520078125), (0.482352941176471,\n 0.230022265625, 0.230022265625), (0.486274509803922, 0.21484375, \n 0.21484375), (0.490196078431373, 0.2265625, 0.2265625), (\n 0.494117647058824, 0.23828125, 0.23828125), (0.498039215686275, \n 0.25, 0.25), (0.501960784313725, 0.26171875, 0.26171875), (\n 0.505882352941176, 0.2734375, 0.2734375), (0.509803921568627, \n 0.28515625, 0.28515625), (0.513725490196078, 0.296875, 0.296875), (\n 0.517647058823529, 0.30859375, 0.30859375), (0.52156862745098, \n 0.3203125, 0.3203125), (0.525490196078431, 0.33203125, 0.33203125),\n (0.529411764705882, 0.34375, 
0.34375), (0.533333333333333, \n 0.35546875, 0.35546875), (0.537254901960784, 0.3671875, 0.3671875),\n (0.541176470588235, 0.37890625, 0.37890625), (0.545098039215686, \n 0.390625, 0.390625), (0.549019607843137, 0.40234375, 0.40234375), (\n 0.552941176470588, 0.4140625, 0.4140625), (0.556862745098039, \n 0.42578125, 0.42578125), (0.56078431372549, 0.4375, 0.4375), (\n 0.564705882352941, 0.44921875, 0.44921875), (0.568627450980392, \n 0.4609375, 0.4609375), (0.572549019607843, 0.47265625, 0.47265625),\n (0.576470588235294, 0.484375, 0.484375), (0.580392156862745, \n 0.49609375, 0.49609375), (0.584313725490196, 0.5078125, 0.5078125),\n (0.588235294117647, 0.51953125, 0.51953125), (0.592156862745098, \n 0.53125, 0.53125), (0.596078431372549, 0.54296875, 0.54296875), (\n 0.6, 0.5546875, 0.5546875), (0.603921568627451, 0.56640625, \n 0.56640625), (0.607843137254902, 0.578125, 0.578125), (\n 0.611764705882353, 0.58984375, 0.58984375), (0.615686274509804, \n 0.6015625, 0.6015625), (0.619607843137255, 0.61328125, 0.61328125),\n (0.623529411764706, 0.625, 0.625), (0.627450980392157, \n 0.61458203125, 0.61458203125), (0.631372549019608, 0.60416796875, \n 0.60416796875), (0.635294117647059, 0.59375, 0.59375), (\n 0.63921568627451, 0.58333203125, 0.58333203125), (0.643137254901961,\n 0.57291796875, 0.57291796875), (0.647058823529412, 0.5625, 0.5625),\n (0.650980392156863, 0.55208203125, 0.55208203125), (\n 0.654901960784314, 0.54166796875, 0.54166796875), (\n 0.658823529411765, 0.53125, 0.53125), (0.662745098039216, \n 0.52083203125, 0.52083203125), (0.666666666666667, 0.51041796875, \n 0.51041796875), (0.670588235294118, 0.5, 0.5), (0.674509803921569, \n 0.48958203125, 0.48958203125), (0.67843137254902, 0.47916796875, \n 0.47916796875), (0.682352941176471, 0.46875, 0.46875), (\n 0.686274509803922, 0.45833203125, 0.45833203125), (\n 0.690196078431373, 0.44791796875, 0.44791796875), (\n 0.694117647058824, 0.4375, 0.4375), (0.698039215686274, \n 0.42708203125, 0.42708203125), (0.701960784313725, 0.41666796875, \n 0.41666796875), (0.705882352941177, 0.40625, 0.40625), (\n 0.709803921568627, 0.39583203125, 0.39583203125), (\n 0.713725490196078, 0.385416796875, 0.385416796875), (\n 0.717647058823529, 0.375, 0.375), (0.72156862745098, 0.364583203125,\n 0.364583203125), (0.725490196078431, 0.354166796875, 0.354166796875\n ), (0.729411764705882, 0.34375, 0.34375), (0.733333333333333, \n 0.333333203125, 0.333333203125), (0.737254901960784, 0.322916796875,\n 0.322916796875), (0.741176470588235, 0.3125, 0.3125), (\n 0.745098039215686, 0.302083203125, 0.302083203125), (\n 0.749019607843137, 0.291666796875, 0.291666796875), (\n 0.752941176470588, 0.28125, 0.28125), (0.756862745098039, \n 0.270833203125, 0.270833203125), (0.76078431372549, 0.260416796875,\n 0.260416796875), (0.764705882352941, 0.25, 0.25), (\n 0.768627450980392, 0.239583203125, 0.239583203125), (\n 0.772549019607843, 0.229166796875, 0.229166796875), (\n 0.776470588235294, 0.21875, 0.21875), (0.780392156862745, \n 0.208333203125, 0.208333203125), (0.784313725490196, 0.197916796875,\n 0.197916796875), (0.788235294117647, 0.1875, 0.1875), (\n 0.792156862745098, 0.177083203125, 0.177083203125), (\n 0.796078431372549, 0.166666796875, 0.166666796875), (0.8, 0.15625, \n 0.15625), (0.803921568627451, 0.145833203125, 0.145833203125), (\n 0.807843137254902, 0.135416796875, 0.135416796875), (\n 0.811764705882353, 0.125, 0.125), (0.815686274509804, \n 0.114583203125, 0.114583203125), (0.819607843137255, 0.104166796875,\n 0.104166796875), (0.823529411764706, 
0.09375, 0.09375), (\n 0.827450980392157, 0.083333203125, 0.083333203125), (\n 0.831372549019608, 0.072916796875, 0.072916796875), (\n 0.835294117647059, 0.0625, 0.0625), (0.83921568627451, \n 0.052083203125, 0.052083203125), (0.843137254901961, 0.041666796875,\n 0.041666796875), (0.847058823529412, 0.03125, 0.03125), (\n 0.850980392156863, 0.0208333203125, 0.0208333203125), (\n 0.854901960784314, 0.0104166796875, 0.0104166796875), (\n 0.858823529411765, 0, 0), (0.862745098039216, 0.0184151953125, \n 0.0184151953125), (0.866666666666667, 0.0368303515625, \n 0.0368303515625), (0.870588235294118, 0.055245703125, \n 0.055245703125), (0.874509803921569, 0.073660546875, 0.073660546875\n ), (0.87843137254902, 0.09207578125, 0.09207578125), (\n 0.882352941176471, 0.110491015625, 0.110491015625), (\n 0.886274509803922, 0.12890625, 0.12890625), (0.890196078431373, \n 0.147321484375, 0.147321484375), (0.894117647058824, 0.16573671875,\n 0.16573671875), (0.898039215686275, 0.184151953125, 0.184151953125),\n (0.901960784313726, 0.202566796875, 0.202566796875), (\n 0.905882352941176, 0.22098203125, 0.22098203125), (\n 0.909803921568627, 0.239397265625, 0.239397265625), (\n 0.913725490196078, 0.2578125, 0.2578125), (0.917647058823529, \n 0.276227734375, 0.276227734375), (0.92156862745098, 0.29464296875, \n 0.29464296875), (0.925490196078431, 0.313058203125, 0.313058203125),\n (0.929411764705882, 0.331473046875, 0.331473046875), (\n 0.933333333333333, 0.34988828125, 0.34988828125), (\n 0.937254901960784, 0.368303515625, 0.368303515625), (\n 0.941176470588235, 0.38671875, 0.38671875), (0.945098039215686, \n 0.4051328125, 0.4051328125), (0.949019607843137, 0.42355078125, \n 0.42355078125), (0.952941176470588, 0.44196484375, 0.44196484375),\n (0.956862745098039, 0.46037890625, 0.46037890625), (\n 0.96078431372549, 0.47879296875, 0.47879296875), (0.964705882352941,\n 0.4972109375, 0.4972109375), (0.968627450980392, 0.515625, 0.515625\n ), (0.972549019607843, 0.5340390625, 0.5340390625), (\n 0.976470588235294, 0.55245703125, 0.55245703125), (\n 0.980392156862745, 0.57087109375, 0.57087109375), (\n 0.984313725490196, 0.58928515625, 0.58928515625), (\n 0.988235294117647, 0.60769921875, 0.60769921875), (\n 0.992156862745098, 0.6261171875, 0.6261171875), (0.996078431372549,\n 0.64453125, 0.64453125), (1, 0.64453125, 0.64453125)), 'blue': ((0,\n 1, 1), (0.00392156862745098, 0.80569140625, 0.80569140625), (\n 0.00784313725490196, 0.7964296875, 0.7964296875), (\n 0.0117647058823529, 0.7871640625, 0.7871640625), (\n 0.0156862745098039, 0.77790234375, 0.77790234375), (\n 0.0196078431372549, 0.76863671875, 0.76863671875), (\n 0.0235294117647059, 0.759375, 0.759375), (0.0274509803921569, \n 0.75011328125, 0.75011328125), (0.0313725490196078, 0.74084765625, \n 0.74084765625), (0.0352941176470588, 0.7315859375, 0.7315859375), (\n 0.0392156862745098, 0.7223203125, 0.7223203125), (\n 0.0431372549019608, 0.71305859375, 0.71305859375), (\n 0.0470588235294118, 0.70379296875, 0.70379296875), (\n 0.0509803921568627, 0.69453125, 0.69453125), (0.0549019607843137, \n 0.68526953125, 0.68526953125), (0.0588235294117647, 0.67600390625, \n 0.67600390625), (0.0627450980392157, 0.6667421875, 0.6667421875), (\n 0.0666666666666667, 0.6574765625, 0.6574765625), (\n 0.0705882352941176, 0.64821484375, 0.64821484375), (\n 0.0745098039215686, 0.63894921875, 0.63894921875), (\n 0.0784313725490196, 0.6296875, 0.6296875), (0.0823529411764706, \n 0.62042578125, 0.62042578125), (0.0862745098039216, 0.61116015625, \n 0.61116015625), 
(0.0901960784313725, 0.6018984375, 0.6018984375), (\n 0.0941176470588235, 0.5926328125, 0.5926328125), (\n 0.0980392156862745, 0.58337109375, 0.58337109375), (\n 0.101960784313725, 0.57410546875, 0.57410546875), (\n 0.105882352941176, 0.56484375, 0.56484375), (0.109803921568627, \n 0.55558203125, 0.55558203125), (0.113725490196078, 0.54631640625, \n 0.54631640625), (0.117647058823529, 0.5370546875, 0.5370546875), (\n 0.12156862745098, 0.5277890625, 0.5277890625), (0.125490196078431, \n 0.51852734375, 0.51852734375), (0.129411764705882, 0.50926171875, \n 0.50926171875), (0.133333333333333, 0.5, 0.5), (0.137254901960784, \n 0.50901953125, 0.50901953125), (0.141176470588235, 0.5180390625, \n 0.5180390625), (0.145098039215686, 0.52705859375, 0.52705859375), (\n 0.149019607843137, 0.536078125, 0.536078125), (0.152941176470588, \n 0.54509765625, 0.54509765625), (0.156862745098039, 0.55412109375, \n 0.55412109375), (0.16078431372549, 0.563140625, 0.563140625), (\n 0.164705882352941, 0.57216015625, 0.57216015625), (\n 0.168627450980392, 0.5811796875, 0.5811796875), (0.172549019607843,\n 0.59019921875, 0.59019921875), (0.176470588235294, 0.59921875, \n 0.59921875), (0.180392156862745, 0.60823828125, 0.60823828125), (\n 0.184313725490196, 0.6172578125, 0.6172578125), (0.188235294117647,\n 0.62627734375, 0.62627734375), (0.192156862745098, 0.635296875, \n 0.635296875), (0.196078431372549, 0.64431640625, 0.64431640625), (\n 0.2, 0.65333984375, 0.65333984375), (0.203921568627451, 0.662359375,\n 0.662359375), (0.207843137254902, 0.67137890625, 0.67137890625), (\n 0.211764705882353, 0.6803984375, 0.6803984375), (0.215686274509804,\n 0.68941796875, 0.68941796875), (0.219607843137255, 0.6984375, \n 0.6984375), (0.223529411764706, 0.70745703125, 0.70745703125), (\n 0.227450980392157, 0.7164765625, 0.7164765625), (0.231372549019608,\n 0.72549609375, 0.72549609375), (0.235294117647059, 0.734515625, \n 0.734515625), (0.23921568627451, 0.74353515625, 0.74353515625), (\n 0.243137254901961, 0.75255859375, 0.75255859375), (\n 0.247058823529412, 0.761578125, 0.761578125), (0.250980392156863, \n 0.77059765625, 0.77059765625), (0.254901960784314, 0.7796171875, \n 0.7796171875), (0.258823529411765, 0.78863671875, 0.78863671875), (\n 0.262745098039216, 0.79765625, 0.79765625), (0.266666666666667, \n 0.80667578125, 0.80667578125), (0.270588235294118, 0.8156953125, \n 0.8156953125), (0.274509803921569, 0.82471484375, 0.82471484375), (\n 0.27843137254902, 0.833734375, 0.833734375), (0.282352941176471, \n 0.84275390625, 0.84275390625), (0.286274509803922, 0.85177734375, \n 0.85177734375), (0.290196078431373, 0.860796875, 0.860796875), (\n 0.294117647058824, 0.86981640625, 0.86981640625), (\n 0.298039215686275, 0.8788359375, 0.8788359375), (0.301960784313725,\n 0.88785546875, 0.88785546875), (0.305882352941176, 0.896875, \n 0.896875), (0.309803921568627, 0.90589453125, 0.90589453125), (\n 0.313725490196078, 0.9149140625, 0.9149140625), (0.317647058823529,\n 0.92393359375, 0.92393359375), (0.32156862745098, 0.932953125, \n 0.932953125), (0.325490196078431, 0.94197265625, 0.94197265625), (\n 0.329411764705882, 0.95099609375, 0.95099609375), (\n 0.333333333333333, 0.960015625, 0.960015625), (0.337254901960784, \n 0.96903515625, 0.96903515625), (0.341176470588235, 0.9780546875, \n 0.9780546875), (0.345098039215686, 0.98707421875, 0.98707421875), (\n 0.349019607843137, 0.99609375, 0.99609375), (0.352941176470588, \n 0.9737734375, 0.9737734375), (0.356862745098039, 0.95144921875, \n 0.95144921875), (0.36078431372549, 
0.92912890625, 0.92912890625), (\n 0.364705882352941, 0.90680859375, 0.90680859375), (\n 0.368627450980392, 0.88448828125, 0.88448828125), (\n 0.372549019607843, 0.8621640625, 0.8621640625), (0.376470588235294,\n 0.83984375, 0.83984375), (0.380392156862745, 0.8175234375, \n 0.8175234375), (0.384313725490196, 0.79519921875, 0.79519921875), (\n 0.388235294117647, 0.77287890625, 0.77287890625), (\n 0.392156862745098, 0.75055859375, 0.75055859375), (\n 0.396078431372549, 0.72823828125, 0.72823828125), (0.4, \n 0.7059140625, 0.7059140625), (0.403921568627451, 0.68359375, \n 0.68359375), (0.407843137254902, 0.6612734375, 0.6612734375), (\n 0.411764705882353, 0.63894921875, 0.63894921875), (\n 0.415686274509804, 0.61662890625, 0.61662890625), (\n 0.419607843137255, 0.59430859375, 0.59430859375), (\n 0.423529411764706, 0.57198828125, 0.57198828125), (\n 0.427450980392157, 0.5496640625, 0.5496640625), (0.431372549019608,\n 0.52734375, 0.52734375), (0.435294117647059, 0.5050234375, \n 0.5050234375), (0.43921568627451, 0.48269921875, 0.48269921875), (\n 0.443137254901961, 0.46037890625, 0.46037890625), (\n 0.447058823529412, 0.43805859375, 0.43805859375), (\n 0.450980392156863, 0.41573828125, 0.41573828125), (\n 0.454901960784314, 0.3934140625, 0.3934140625), (0.458823529411765,\n 0.37109375, 0.37109375), (0.462745098039216, 0.348772265625, \n 0.348772265625), (0.466666666666667, 0.32645078125, 0.32645078125),\n (0.470588235294118, 0.304129296875, 0.304129296875), (\n 0.474509803921569, 0.281808203125, 0.281808203125), (\n 0.47843137254902, 0.25948671875, 0.25948671875), (0.482352941176471,\n 0.237165234375, 0.237165234375), (0.486274509803922, 0.21484375, \n 0.21484375), (0.490196078431373, 0.233370703125, 0.233370703125), (\n 0.494117647058824, 0.251897265625, 0.251897265625), (\n 0.498039215686275, 0.27042421875, 0.27042421875), (\n 0.501960784313725, 0.28895078125, 0.28895078125), (\n 0.505882352941176, 0.307477734375, 0.307477734375), (\n 0.509803921568627, 0.326004296875, 0.326004296875), (\n 0.513725490196078, 0.34453125, 0.34453125), (0.517647058823529, \n 0.363058203125, 0.363058203125), (0.52156862745098, 0.381584765625,\n 0.381584765625), (0.525490196078431, 0.40011328125, 0.40011328125),\n (0.529411764705882, 0.41863671875, 0.41863671875), (\n 0.533333333333333, 0.4371640625, 0.4371640625), (0.537254901960784,\n 0.45569140625, 0.45569140625), (0.541176470588235, 0.47421875, \n 0.47421875), (0.545098039215686, 0.49274609375, 0.49274609375), (\n 0.549019607843137, 0.5112734375, 0.5112734375), (0.552941176470588,\n 0.52980078125, 0.52980078125), (0.556862745098039, 0.54832421875, \n 0.54832421875), (0.56078431372549, 0.5668515625, 0.5668515625), (\n 0.564705882352941, 0.58537890625, 0.58537890625), (\n 0.568627450980392, 0.60390625, 0.60390625), (0.572549019607843, \n 0.62243359375, 0.62243359375), (0.576470588235294, 0.6409609375, \n 0.6409609375), (0.580392156862745, 0.65948828125, 0.65948828125), (\n 0.584313725490196, 0.67801171875, 0.67801171875), (\n 0.588235294117647, 0.6965390625, 0.6965390625), (0.592156862745098,\n 0.71506640625, 0.71506640625), (0.596078431372549, 0.73359375, \n 0.73359375), (0.6, 0.75212109375, 0.75212109375), (\n 0.603921568627451, 0.7706484375, 0.7706484375), (0.607843137254902,\n 0.78917578125, 0.78917578125), (0.611764705882353, 0.80769921875, \n 0.80769921875), (0.615686274509804, 0.8262265625, 0.8262265625), (\n 0.619607843137255, 0.84475390625, 0.84475390625), (\n 0.623529411764706, 0.86328125, 0.86328125), (0.627450980392157, \n 0.84889453125, 
0.84889453125), (0.631372549019608, 0.83450390625, \n 0.83450390625), (0.635294117647059, 0.8201171875, 0.8201171875), (\n 0.63921568627451, 0.80573046875, 0.80573046875), (0.643137254901961,\n 0.79133984375, 0.79133984375), (0.647058823529412, 0.776953125, \n 0.776953125), (0.650980392156863, 0.76256640625, 0.76256640625), (\n 0.654901960784314, 0.74817578125, 0.74817578125), (\n 0.658823529411765, 0.7337890625, 0.7337890625), (0.662745098039216,\n 0.71940234375, 0.71940234375), (0.666666666666667, 0.70501171875, \n 0.70501171875), (0.670588235294118, 0.690625, 0.690625), (\n 0.674509803921569, 0.67623828125, 0.67623828125), (0.67843137254902,\n 0.66184765625, 0.66184765625), (0.682352941176471, 0.6474609375, \n 0.6474609375), (0.686274509803922, 0.63307421875, 0.63307421875), (\n 0.690196078431373, 0.61868359375, 0.61868359375), (\n 0.694117647058824, 0.604296875, 0.604296875), (0.698039215686274, \n 0.58991015625, 0.58991015625), (0.701960784313725, 0.57551953125, \n 0.57551953125), (0.705882352941177, 0.5611328125, 0.5611328125), (\n 0.709803921568627, 0.54674609375, 0.54674609375), (\n 0.713725490196078, 0.53235546875, 0.53235546875), (\n 0.717647058823529, 0.51796875, 0.51796875), (0.72156862745098, \n 0.50358203125, 0.50358203125), (0.725490196078431, 0.48919140625, \n 0.48919140625), (0.729411764705882, 0.4748046875, 0.4748046875), (\n 0.733333333333333, 0.46041796875, 0.46041796875), (\n 0.737254901960784, 0.44602734375, 0.44602734375), (\n 0.741176470588235, 0.431640625, 0.431640625), (0.745098039215686, \n 0.41725390625, 0.41725390625), (0.749019607843137, 0.40286328125, \n 0.40286328125), (0.752941176470588, 0.3884765625, 0.3884765625), (\n 0.756862745098039, 0.374088671875, 0.374088671875), (\n 0.76078431372549, 0.359700390625, 0.359700390625), (\n 0.764705882352941, 0.3453125, 0.3453125), (0.768627450980392, \n 0.330924609375, 0.330924609375), (0.772549019607843, 0.316536328125,\n 0.316536328125), (0.776470588235294, 0.3021484375, 0.3021484375), (\n 0.780392156862745, 0.287760546875, 0.287760546875), (\n 0.784313725490196, 0.273372265625, 0.273372265625), (\n 0.788235294117647, 0.258984375, 0.258984375), (0.792156862745098, \n 0.244596484375, 0.244596484375), (0.796078431372549, 0.230208203125,\n 0.230208203125), (0.8, 0.2158203125, 0.2158203125), (\n 0.803921568627451, 0.201432421875, 0.201432421875), (\n 0.807843137254902, 0.187044140625, 0.187044140625), (\n 0.811764705882353, 0.17265625, 0.17265625), (0.815686274509804, \n 0.158268359375, 0.158268359375), (0.819607843137255, 0.143880078125,\n 0.143880078125), (0.823529411764706, 0.1294921875, 0.1294921875), (\n 0.827450980392157, 0.115104296875, 0.115104296875), (\n 0.831372549019608, 0.100716015625, 0.100716015625), (\n 0.835294117647059, 0.086328125, 0.086328125), (0.83921568627451, \n 0.071940234375, 0.071940234375), (0.843137254901961, 0.057551953125,\n 0.057551953125), (0.847058823529412, 0.0431640625, 0.0431640625), (\n 0.850980392156863, 0.028776015625, 0.028776015625), (\n 0.854901960784314, 0.01438796875, 0.01438796875), (\n 0.858823529411765, 0, 0), (0.862745098039216, 0, 0), (\n 0.866666666666667, 0, 0), (0.870588235294118, 0, 0), (\n 0.874509803921569, 0, 0), (0.87843137254902, 0, 0), (\n 0.882352941176471, 0, 0), (0.886274509803922, 0, 0), (\n 0.890196078431373, 0, 0), (0.894117647058824, 0, 0), (\n 0.898039215686275, 0, 0), (0.901960784313726, 0, 0), (\n 0.905882352941176, 0, 0), (0.909803921568627, 0, 0), (\n 0.913725490196078, 0, 0), (0.917647058823529, 0, 0), (\n 0.92156862745098, 0, 0), 
(0.925490196078431, 0, 0), (\n 0.929411764705882, 0, 0), (0.933333333333333, 0, 0), (\n 0.937254901960784, 0, 0), (0.941176470588235, 0, 0), (\n 0.945098039215686, 0, 0), (0.949019607843137, 0, 0), (\n 0.952941176470588, 0, 0), (0.956862745098039, 0, 0), (\n 0.96078431372549, 0, 0), (0.964705882352941, 0, 0), (\n 0.968627450980392, 0, 0), (0.972549019607843, 0, 0), (\n 0.976470588235294, 0, 0), (0.980392156862745, 0, 0), (\n 0.984313725490196, 0, 0), (0.988235294117647, 0, 0), (\n 0.992156862745098, 0, 0), (0.996078431372549, 0, 0), (1, 0, 0))}\n califa = mcol.LinearSegmentedColormap('califa', cdict)\n vcalifa = mcol.LinearSegmentedColormap('vcalifa', vcdict)\n if option == 'v':\n return vcalifa\n else:\n return califa\n\n\ndef A_l(R_v, lw):\n lw = lw / 10000\n x = 1 / lw\n if x > 1.1:\n y = x - 1.82\n a_x = (1.0 + 0.17699 * y - 0.50447 * y ** 2 - 0.02427 * y ** 3 + \n 0.72085 * y ** 4 + 0.01979 * y ** 5 - 0.7753 * y ** 6 + 0.32999 *\n y ** 7)\n b_x = (1.41338 * y + 2.28305 * y ** 2 + 1.07233 * y ** 3 - 5.38434 *\n y ** 4 - 0.62251 * y ** 5 + 5.3026 * y ** 6 - 2.09002 * y ** 7)\n else:\n a_x = 0.574 * x ** 1.61\n b_x = -0.527 * x ** 1.61\n A_l_ = a_x + b_x / R_v\n return A_l_\n",
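The step string above ends with a set of BPT demarcation-curve helpers (kauffmann, kewley, grazy, espinosa and related plotting wrappers) plus a Cardelli-style extinction term A_l. A minimal usage sketch only, not part of the stored row: it reuses the same kauffmann/kewley formulas and assumes log10([NII]/Ha) and log10([OIII]/Hb) line ratios as inputs; treating the band between the two curves as "composite" is a common convention and is not asserted by the stored code itself.

import numpy as np

def kauffmann(log_nii_ha):
    # Empirical star-forming boundary (same form as in the step string)
    return 0.61 / (log_nii_ha - 0.05) + 1.3

def kewley(log_nii_ha):
    # Theoretical maximum-starburst boundary (same form as in the step string)
    return 0.61 / (log_nii_ha - 0.47) + 1.19

def bpt_class(log_nii_ha, log_oiii_hb):
    """Rough NII-BPT classification for a single spaxel or galaxy."""
    # Below the Kauffmann curve (and left of its asymptote): star-forming
    if log_nii_ha < 0.05 and log_oiii_hb < kauffmann(log_nii_ha):
        return "star-forming"
    # Between the Kauffmann and Kewley curves: often labelled composite
    if log_nii_ha < 0.47 and log_oiii_hb < kewley(log_nii_ha):
        return "composite"
    # Above the Kewley curve: AGN-like ionisation
    return "AGN-like"

print(bpt_class(-0.5, 0.0))  # star-forming
print(bpt_class(0.1, 0.8))   # AGN-like

The same pattern extends to the [SII]- and [OI]-based curves (AGNline, LINSy2line, O3S2_line_c, O3O1_line_c) that the step string defines, swapping in the corresponding line ratio on the x-axis.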
"<import token>\n\n\ndef color_map_califa_old(option='json'):\n if option == 'json':\n cmap_cal_dic = json.load(open('code/cmap_cal_json.txt'))\n elif option == 'pickle':\n with open('cmap_cal_pickle.txt', 'rb') as handle:\n cmap_cal_dic = pickle.loads(handle.read())\n cmap_cal = mcol.LinearSegmentedColormap('cmap_CALIFA', cmap_cal_dic)\n return cmap_cal\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\ndef Gr_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_grazy = np.linspace(x_min, -0.2, 100)\n ax.plot(x_set_grazy, grazy(x_set_grazy), label='Stasinska+03', **kwargs)\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\ndef SII_LINERS_curve_plot(ax=None, x_min=-0.3, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.01, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_OI_curve_plot(ax=None, x_min=-3.5, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.35)\n ax.plot(x_set_line, O3O1_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_curve_plot(ax=None, x_min=-2.0, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.08, 100)\n ax.plot(x_set_line, espinosa(x_set_line), label=label, **kwargs)\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\ndef kewley(logNIIHa):\n val = 0.61 / (logNIIHa - 0.47) + 1.19\n return val\n\n\ndef grazy(logNIIHa):\n x = logNIIHa\n val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409 * x\n ) - 31.093\n return val\n\n\ndef AGNline(logSIIHa):\n val = 0.72 / (logSIIHa - 0.32) + 1.3\n return val\n\n\ndef LINSy2line(logSIIHa):\n val = 1.89 * logSIIHa + 0.76\n return val\n\n\ndef AGNline2(logOIHa):\n val = 
0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\ndef LINSy2line2(logOIHa):\n val = 1.18 * logOIHa + 1.3\n return val\n\n\ndef espinosa(logNIIHa):\n val = 0.12579066 / (logNIIHa - 0.00302777) + 0.56846872\n return val\n\n\ndef O3S2_line_c(x):\n val = 0.04074804 / (x + 0.01253238) + 0.58154113\n return val\n\n\ndef O3O1_line_c(x):\n val = 0.05612915 / (x + 0.39641533) + 0.60969495\n return val\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\ndef Gr_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_grazy = np.linspace(x_min, -0.2, 100)\n ax.plot(x_set_grazy, grazy(x_set_grazy), label='Stasinska+03', **kwargs)\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\ndef SII_LINERS_curve_plot(ax=None, x_min=-0.3, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.02, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_OI_curve_plot(ax=None, x_min=-3.5, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.4, 100)\n ax.plot(x_set_line, O3O1_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_curve_plot(ax=None, x_min=-2.0, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.00302, 100)\n ax.plot(x_set_line, espinosa(x_set_line), label=label, **kwargs)\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\ndef kewley(logNIIHa):\n val = 0.61 / (logNIIHa - 0.47) + 1.19\n return val\n\n\ndef grazy(logNIIHa):\n x = logNIIHa\n val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409 * x\n ) - 31.093\n return val\n\n\ndef A_l(R_v, lw):\n lw = lw / 10000\n x = 1 / lw\n if x > 1.1:\n y = x - 1.82\n a_x = (1.0 + 0.17699 * y - 0.50447 * y ** 2 - 0.02427 * y ** 
3 + \n 0.72085 * y ** 4 + 0.01979 * y ** 5 - 0.7753 * y ** 6 + 0.32999 *\n y ** 7)\n b_x = (1.41338 * y + 2.28305 * y ** 2 + 1.07233 * y ** 3 - 5.38434 *\n y ** 4 - 0.62251 * y ** 5 + 5.3026 * y ** 6 - 2.09002 * y ** 7)\n else:\n a_x = 0.574 * x ** 1.61\n b_x = -0.527 * x ** 1.61\n A_l_ = a_x + b_x / R_v\n return A_l_\n\n\ndef color_map_califa(option='v'):\n cdict = {'red': ((0.0, 0, 0), (0.00392156862745098, 0, 0), (\n 0.00784313725490196, 0, 0), (0.0117647058823529, 0, 0), (\n 0.0156862745098039, 0, 0), (0.0196078431372549, 0, 0), (\n 0.0235294117647059, 0, 0), (0.0274509803921569, 0, 0), (\n 0.0313725490196078, 0, 0), (0.0352941176470588, 0, 0), (\n 0.0392156862745098, 0, 0), (0.0431372549019608, 0, 0), (\n 0.0470588235294118, 0, 0), (0.0509803921568627, 0, 0), (\n 0.0549019607843137, 0, 0), (0.0588235294117647, 0, 0), (\n 0.0627450980392157, 0, 0), (0.0666666666666667, 0, 0), (\n 0.0705882352941176, 0, 0), (0.0745098039215686, 0, 0), (\n 0.0784313725490196, 0, 0), (0.0823529411764706, 0, 0), (\n 0.0862745098039216, 0, 0), (0.0901960784313725, 0, 0), (\n 0.0941176470588235, 0, 0), (0.0980392156862745, 0, 0), (\n 0.101960784313725, 0, 0), (0.105882352941176, 0, 0), (\n 0.109803921568627, 0, 0), (0.113725490196078, 0, 0), (\n 0.117647058823529, 0, 0), (0.12156862745098, 0, 0), (\n 0.125490196078431, 0, 0), (0.129411764705882, 0, 0), (\n 0.133333333333333, 0, 0), (0.137254901960784, 0, 0), (\n 0.141176470588235, 0, 0), (0.145098039215686, 0, 0), (\n 0.149019607843137, 0, 0), (0.152941176470588, 0, 0), (\n 0.156862745098039, 0, 0), (0.16078431372549, 0, 0), (\n 0.164705882352941, 0, 0), (0.168627450980392, 0, 0), (\n 0.172549019607843, 0, 0), (0.176470588235294, 0, 0), (\n 0.180392156862745, 0, 0), (0.184313725490196, 0, 0), (\n 0.188235294117647, 0, 0), (0.192156862745098, 0, 0), (\n 0.196078431372549, 0.019921875, 0.019921875), (0.2, 0.03984375, \n 0.03984375), (0.203921568627451, 0.059765625, 0.059765625), (\n 0.207843137254902, 0.0796875, 0.0796875), (0.211764705882353, \n 0.099609375, 0.099609375), (0.215686274509804, 0.11953125, \n 0.11953125), (0.219607843137255, 0.139453125, 0.139453125), (\n 0.223529411764706, 0.159375, 0.159375), (0.227450980392157, \n 0.179296875, 0.179296875), (0.231372549019608, 0.19921875, \n 0.19921875), (0.235294117647059, 0.219140625, 0.219140625), (\n 0.23921568627451, 0.2390625, 0.2390625), (0.243137254901961, \n 0.258984375, 0.258984375), (0.247058823529412, 0.27890625, \n 0.27890625), (0.250980392156863, 0.298828125, 0.298828125), (\n 0.254901960784314, 0.31875, 0.31875), (0.258823529411765, \n 0.338671875, 0.338671875), (0.262745098039216, 0.35859375, \n 0.35859375), (0.266666666666667, 0.378515625, 0.378515625), (\n 0.270588235294118, 0.3984375, 0.3984375), (0.274509803921569, \n 0.418359375, 0.418359375), (0.27843137254902, 0.43828125, \n 0.43828125), (0.282352941176471, 0.458203125, 0.458203125), (\n 0.286274509803922, 0.478125, 0.478125), (0.290196078431373, \n 0.498046875, 0.498046875), (0.294117647058824, 0.51796875, \n 0.51796875), (0.298039215686275, 0.537890625, 0.537890625), (\n 0.301960784313725, 0.5578125, 0.5578125), (0.305882352941176, \n 0.577734375, 0.577734375), (0.309803921568627, 0.59765625, \n 0.59765625), (0.313725490196078, 0.617578125, 0.617578125), (\n 0.317647058823529, 0.6375, 0.6375), (0.32156862745098, 0.657421875,\n 0.657421875), (0.325490196078431, 0.67734375, 0.67734375), (\n 0.329411764705882, 0.697265625, 0.697265625), (0.333333333333333, \n 0.7171875, 0.7171875), (0.337254901960784, 0.737109375, 0.737109375\n ), 
(0.341176470588235, 0.75703125, 0.75703125), (0.345098039215686,\n 0.776953125, 0.776953125), (0.349019607843137, 0.796875, 0.796875),\n (0.352941176470588, 0.816796875, 0.816796875), (0.356862745098039, \n 0.83671875, 0.83671875), (0.36078431372549, 0.856640625, \n 0.856640625), (0.364705882352941, 0.8765625, 0.8765625), (\n 0.368627450980392, 0.896484375, 0.896484375), (0.372549019607843, \n 0.91640625, 0.91640625), (0.376470588235294, 0.936328125, \n 0.936328125), (0.380392156862745, 0.95625, 0.95625), (\n 0.384313725490196, 0.976171875, 0.976171875), (0.388235294117647, \n 0.99609375, 0.99609375), (0.392156862745098, 0.99609375, 0.99609375\n ), (0.396078431372549, 0.99609375, 0.99609375), (0.4, 0.99609375, \n 0.99609375), (0.403921568627451, 0.99609375, 0.99609375), (\n 0.407843137254902, 0.99609375, 0.99609375), (0.411764705882353, \n 0.99609375, 0.99609375), (0.415686274509804, 0.99609375, 0.99609375\n ), (0.419607843137255, 0.99609375, 0.99609375), (0.423529411764706,\n 0.99609375, 0.99609375), (0.427450980392157, 0.99609375, 0.99609375\n ), (0.431372549019608, 0.99609375, 0.99609375), (0.435294117647059,\n 0.99609375, 0.99609375), (0.43921568627451, 0.99609375, 0.99609375),\n (0.443137254901961, 0.99609375, 0.99609375), (0.447058823529412, \n 0.99609375, 0.99609375), (0.450980392156863, 0.99609375, 0.99609375\n ), (0.454901960784314, 0.99609375, 0.99609375), (0.458823529411765,\n 0.99609375, 0.99609375), (0.462745098039216, 0.99609375, 0.99609375\n ), (0.466666666666667, 0.99609375, 0.99609375), (0.470588235294118,\n 0.99609375, 0.99609375), (0.474509803921569, 0.99609375, 0.99609375\n ), (0.47843137254902, 0.99609375, 0.99609375), (0.482352941176471, \n 0.99609375, 0.99609375), (0.486274509803922, 0.99609375, 0.99609375\n ), (0.490196078431373, 0.99609375, 0.99609375), (0.494117647058824,\n 0.99609375, 0.99609375), (0.498039215686275, 0.99609375, 0.99609375\n ), (0.501960784313725, 0.99609375, 0.99609375), (0.505882352941176,\n 0.99609375, 0.99609375), (0.509803921568627, 0.99609375, 0.99609375\n ), (0.513725490196078, 0.99609375, 0.99609375), (0.517647058823529,\n 0.99609375, 0.99609375), (0.52156862745098, 0.99609375, 0.99609375),\n (0.525490196078431, 0.99609375, 0.99609375), (0.529411764705882, \n 0.99609375, 0.99609375), (0.533333333333333, 0.99609375, 0.99609375\n ), (0.537254901960784, 0.99609375, 0.99609375), (0.541176470588235,\n 0.99609375, 0.99609375), (0.545098039215686, 0.99609375, 0.99609375\n ), (0.549019607843137, 0.99609375, 0.99609375), (0.552941176470588,\n 0.99609375, 0.99609375), (0.556862745098039, 0.99609375, 0.99609375\n ), (0.56078431372549, 0.99609375, 0.99609375), (0.564705882352941, \n 0.99609375, 0.99609375), (0.568627450980392, 0.99609375, 0.99609375\n ), (0.572549019607843, 0.99609375, 0.99609375), (0.576470588235294,\n 0.99609375, 0.99609375), (0.580392156862745, 0.99609375, 0.99609375\n ), (0.584313725490196, 0.99609375, 0.99609375), (0.588235294117647,\n 0.98046875, 0.98046875), (0.592156862745098, 0.96484375, 0.96484375\n ), (0.596078431372549, 0.94921875, 0.94921875), (0.6, 0.93359375, \n 0.93359375), (0.603921568627451, 0.91796875, 0.91796875), (\n 0.607843137254902, 0.90234375, 0.90234375), (0.611764705882353, \n 0.88671875, 0.88671875), (0.615686274509804, 0.87109375, 0.87109375\n ), (0.619607843137255, 0.85546875, 0.85546875), (0.623529411764706,\n 0.83984375, 0.83984375), (0.627450980392157, 0.82421875, 0.82421875\n ), (0.631372549019608, 0.80859375, 0.80859375), (0.635294117647059,\n 0.79296875, 0.79296875), (0.63921568627451, 0.77734375, 
0.77734375),\n (0.643137254901961, 0.76171875, 0.76171875), (0.647058823529412, \n 0.74609375, 0.74609375), (0.650980392156863, 0.73046875, 0.73046875\n ), (0.654901960784314, 0.71484375, 0.71484375), (0.658823529411765,\n 0.69921875, 0.69921875), (0.662745098039216, 0.68359375, 0.68359375\n ), (0.666666666666667, 0.66796875, 0.66796875), (0.670588235294118,\n 0.65234375, 0.65234375), (0.674509803921569, 0.63671875, 0.63671875\n ), (0.67843137254902, 0.62109375, 0.62109375), (0.682352941176471, \n 0.60546875, 0.60546875), (0.686274509803922, 0.58984375, 0.58984375\n ), (0.690196078431373, 0.57421875, 0.57421875), (0.694117647058824,\n 0.55859375, 0.55859375), (0.698039215686274, 0.54296875, 0.54296875\n ), (0.701960784313725, 0.52734375, 0.52734375), (0.705882352941177,\n 0.51171875, 0.51171875), (0.709803921568627, 0.49609375, 0.49609375\n ), (0.713725490196078, 0.48046875, 0.48046875), (0.717647058823529,\n 0.46484375, 0.46484375), (0.72156862745098, 0.44921875, 0.44921875),\n (0.725490196078431, 0.43359375, 0.43359375), (0.729411764705882, \n 0.41796875, 0.41796875), (0.733333333333333, 0.40234375, 0.40234375\n ), (0.737254901960784, 0.38671875, 0.38671875), (0.741176470588235,\n 0.37109375, 0.37109375), (0.745098039215686, 0.35546875, 0.35546875\n ), (0.749019607843137, 0.33984375, 0.33984375), (0.752941176470588,\n 0.32421875, 0.32421875), (0.756862745098039, 0.30859375, 0.30859375\n ), (0.76078431372549, 0.29296875, 0.29296875), (0.764705882352941, \n 0.27734375, 0.27734375), (0.768627450980392, 0.26171875, 0.26171875\n ), (0.772549019607843, 0.24609375, 0.24609375), (0.776470588235294,\n 0.23046875, 0.23046875), (0.780392156862745, 0.21484375, 0.21484375\n ), (0.784313725490196, 0.22663359375, 0.22663359375), (\n 0.788235294117647, 0.2384234375, 0.2384234375), (0.792156862745098,\n 0.250212890625, 0.250212890625), (0.796078431372549, 0.262002734375,\n 0.262002734375), (0.8, 0.273792578125, 0.273792578125), (\n 0.803921568627451, 0.285582421875, 0.285582421875), (\n 0.807843137254902, 0.297372265625, 0.297372265625), (\n 0.811764705882353, 0.309162109375, 0.309162109375), (\n 0.815686274509804, 0.3209515625, 0.3209515625), (0.819607843137255,\n 0.33274140625, 0.33274140625), (0.823529411764706, 0.34453125, \n 0.34453125), (0.827450980392157, 0.35632109375, 0.35632109375), (\n 0.831372549019608, 0.3681109375, 0.3681109375), (0.835294117647059,\n 0.379900390625, 0.379900390625), (0.83921568627451, 0.39169140625, \n 0.39169140625), (0.843137254901961, 0.40348046875, 0.40348046875),\n (0.847058823529412, 0.41526953125, 0.41526953125), (\n 0.850980392156863, 0.42705859375, 0.42705859375), (\n 0.854901960784314, 0.43884765625, 0.43884765625), (\n 0.858823529411765, 0.450640625, 0.450640625), (0.862745098039216, \n 0.4624296875, 0.4624296875), (0.866666666666667, 0.47421875, \n 0.47421875), (0.870588235294118, 0.4860078125, 0.4860078125), (\n 0.874509803921569, 0.497796875, 0.497796875), (0.87843137254902, \n 0.50958984375, 0.50958984375), (0.882352941176471, 0.52137890625, \n 0.52137890625), (0.886274509803922, 0.53316796875, 0.53316796875),\n (0.890196078431373, 0.54495703125, 0.54495703125), (\n 0.894117647058824, 0.55674609375, 0.55674609375), (\n 0.898039215686275, 0.56853515625, 0.56853515625), (\n 0.901960784313726, 0.580328125, 0.580328125), (0.905882352941176, \n 0.5921171875, 0.5921171875), (0.909803921568627, 0.60390625, \n 0.60390625), (0.913725490196078, 0.6156953125, 0.6156953125), (\n 0.917647058823529, 0.627484375, 0.627484375), (0.92156862745098, \n 0.63927734375, 
0.63927734375), (0.925490196078431, 0.65106640625, \n 0.65106640625), (0.929411764705882, 0.66285546875, 0.66285546875),\n (0.933333333333333, 0.67464453125, 0.67464453125), (\n 0.937254901960784, 0.68643359375, 0.68643359375), (\n 0.941176470588235, 0.69822265625, 0.69822265625), (\n 0.945098039215686, 0.710015625, 0.710015625), (0.949019607843137, \n 0.7218046875, 0.7218046875), (0.952941176470588, 0.73359375, \n 0.73359375), (0.956862745098039, 0.7453828125, 0.7453828125), (\n 0.96078431372549, 0.757171875, 0.757171875), (0.964705882352941, \n 0.76896484375, 0.76896484375), (0.968627450980392, 0.78075390625, \n 0.78075390625), (0.972549019607843, 0.79254296875, 0.79254296875),\n (0.976470588235294, 0.80433203125, 0.80433203125), (\n 0.980392156862745, 0.81612109375, 0.81612109375), (\n 0.984313725490196, 0.82791015625, 0.82791015625), (\n 0.988235294117647, 0.839703125, 0.839703125), (0.992156862745098, \n 0.8514921875, 0.8514921875), (0.996078431372549, 0.86328125, \n 0.86328125), (1.0, 0.86328125, 0.86328125)), 'green': ((0.0, \n 0.02984375, 0.02984375), (0.00392156862745098, 0.02984375, \n 0.02984375), (0.00784313725490196, 0.044765625, 0.044765625), (\n 0.0117647058823529, 0.0596875, 0.0596875), (0.0156862745098039, \n 0.074609375, 0.074609375), (0.0196078431372549, 0.08953125, \n 0.08953125), (0.0235294117647059, 0.104453125, 0.104453125), (\n 0.0274509803921569, 0.119375, 0.119375), (0.0313725490196078, \n 0.134296875, 0.134296875), (0.0352941176470588, 0.14921875, \n 0.14921875), (0.0392156862745098, 0.164140625, 0.164140625), (\n 0.0431372549019608, 0.1790625, 0.1790625), (0.0470588235294118, \n 0.193984375, 0.193984375), (0.0509803921568627, 0.20890625, \n 0.20890625), (0.0549019607843137, 0.223828125, 0.223828125), (\n 0.0588235294117647, 0.23875, 0.23875), (0.0627450980392157, \n 0.253671875, 0.253671875), (0.0666666666666667, 0.26859375, \n 0.26859375), (0.0705882352941176, 0.283515625, 0.283515625), (\n 0.0745098039215686, 0.2984375, 0.2984375), (0.0784313725490196, \n 0.313359375, 0.313359375), (0.0823529411764706, 0.32828125, \n 0.32828125), (0.0862745098039216, 0.343203125, 0.343203125), (\n 0.0901960784313725, 0.358125, 0.358125), (0.0941176470588235, \n 0.373046875, 0.373046875), (0.0980392156862745, 0.38796875, \n 0.38796875), (0.101960784313725, 0.402890625, 0.402890625), (\n 0.105882352941176, 0.4178125, 0.4178125), (0.109803921568627, \n 0.432734375, 0.432734375), (0.113725490196078, 0.44765625, \n 0.44765625), (0.117647058823529, 0.462578125, 0.462578125), (\n 0.12156862745098, 0.4775, 0.4775), (0.125490196078431, 0.492421875,\n 0.492421875), (0.129411764705882, 0.50734375, 0.50734375), (\n 0.133333333333333, 0.522265625, 0.522265625), (0.137254901960784, \n 0.5371875, 0.5371875), (0.141176470588235, 0.552109375, 0.552109375\n ), (0.145098039215686, 0.56703125, 0.56703125), (0.149019607843137,\n 0.581953125, 0.581953125), (0.152941176470588, 0.596875, 0.596875),\n (0.156862745098039, 0.611796875, 0.611796875), (0.16078431372549, \n 0.62671875, 0.62671875), (0.164705882352941, 0.641640625, \n 0.641640625), (0.168627450980392, 0.6565625, 0.6565625), (\n 0.172549019607843, 0.671484375, 0.671484375), (0.176470588235294, \n 0.68640625, 0.68640625), (0.180392156862745, 0.701328125, \n 0.701328125), (0.184313725490196, 0.71625, 0.71625), (\n 0.188235294117647, 0.731171875, 0.731171875), (0.192156862745098, \n 0.74609375, 0.74609375), (0.196078431372549, 0.731171875, \n 0.731171875), (0.2, 0.71625, 0.71625), (0.203921568627451, \n 0.701328125, 0.701328125), 
(0.207843137254902, 0.68640625, \n 0.68640625), (0.211764705882353, 0.671484375, 0.671484375), (\n 0.215686274509804, 0.6565625, 0.6565625), (0.219607843137255, \n 0.641640625, 0.641640625), (0.223529411764706, 0.62671875, \n 0.62671875), (0.227450980392157, 0.611796875, 0.611796875), (\n 0.231372549019608, 0.596875, 0.596875), (0.235294117647059, \n 0.581953125, 0.581953125), (0.23921568627451, 0.56703125, \n 0.56703125), (0.243137254901961, 0.552109375, 0.552109375), (\n 0.247058823529412, 0.5371875, 0.5371875), (0.250980392156863, \n 0.522265625, 0.522265625), (0.254901960784314, 0.50734375, \n 0.50734375), (0.258823529411765, 0.492421875, 0.492421875), (\n 0.262745098039216, 0.4775, 0.4775), (0.266666666666667, 0.462578125,\n 0.462578125), (0.270588235294118, 0.44765625, 0.44765625), (\n 0.274509803921569, 0.432734375, 0.432734375), (0.27843137254902, \n 0.4178125, 0.4178125), (0.282352941176471, 0.402890625, 0.402890625\n ), (0.286274509803922, 0.38796875, 0.38796875), (0.290196078431373,\n 0.373046875, 0.373046875), (0.294117647058824, 0.358125, 0.358125),\n (0.298039215686275, 0.343203125, 0.343203125), (0.301960784313725, \n 0.32828125, 0.32828125), (0.305882352941176, 0.313359375, \n 0.313359375), (0.309803921568627, 0.2984375, 0.2984375), (\n 0.313725490196078, 0.283515625, 0.283515625), (0.317647058823529, \n 0.26859375, 0.26859375), (0.32156862745098, 0.253671875, \n 0.253671875), (0.325490196078431, 0.23875, 0.23875), (\n 0.329411764705882, 0.223828125, 0.223828125), (0.333333333333333, \n 0.20890625, 0.20890625), (0.337254901960784, 0.193984375, \n 0.193984375), (0.341176470588235, 0.1790625, 0.1790625), (\n 0.345098039215686, 0.164140625, 0.164140625), (0.349019607843137, \n 0.14921875, 0.14921875), (0.352941176470588, 0.134296875, \n 0.134296875), (0.356862745098039, 0.119375, 0.119375), (\n 0.36078431372549, 0.104453125, 0.104453125), (0.364705882352941, \n 0.08953125, 0.08953125), (0.368627450980392, 0.074609375, \n 0.074609375), (0.372549019607843, 0.0596875, 0.0596875), (\n 0.376470588235294, 0.044765625, 0.044765625), (0.380392156862745, \n 0.0298437890625, 0.0298437890625), (0.384313725490196, 0.014921875,\n 0.014921875), (0.388235294117647, 0, 0), (0.392156862745098, \n 0.012890625, 0.012890625), (0.396078431372549, 0.02578125, \n 0.02578125), (0.4, 0.038671875, 0.038671875), (0.403921568627451, \n 0.0515625, 0.0515625), (0.407843137254902, 0.064453125, 0.064453125\n ), (0.411764705882353, 0.07734375, 0.07734375), (0.415686274509804,\n 0.090234375, 0.090234375), (0.419607843137255, 0.103125, 0.103125),\n (0.423529411764706, 0.116015625, 0.116015625), (0.427450980392157, \n 0.12890625, 0.12890625), (0.431372549019608, 0.141796875, \n 0.141796875), (0.435294117647059, 0.1546875, 0.1546875), (\n 0.43921568627451, 0.167578125, 0.167578125), (0.443137254901961, \n 0.18046875, 0.18046875), (0.447058823529412, 0.193359375, \n 0.193359375), (0.450980392156863, 0.20625, 0.20625), (\n 0.454901960784314, 0.219140625, 0.219140625), (0.458823529411765, \n 0.23203125, 0.23203125), (0.462745098039216, 0.244921875, \n 0.244921875), (0.466666666666667, 0.2578125, 0.2578125), (\n 0.470588235294118, 0.270703125, 0.270703125), (0.474509803921569, \n 0.28359375, 0.28359375), (0.47843137254902, 0.296484375, \n 0.296484375), (0.482352941176471, 0.309375, 0.309375), (\n 0.486274509803922, 0.322265625, 0.322265625), (0.490196078431373, \n 0.33515625, 0.33515625), (0.494117647058824, 0.348046875, \n 0.348046875), (0.498039215686275, 0.3609375, 0.3609375), (\n 0.501960784313725, 0.373828125, 
0.373828125), (0.505882352941176, \n 0.38671875, 0.38671875), (0.509803921568627, 0.399609375, \n 0.399609375), (0.513725490196078, 0.4125, 0.4125), (\n 0.517647058823529, 0.425390625, 0.425390625), (0.52156862745098, \n 0.43828125, 0.43828125), (0.525490196078431, 0.451171875, \n 0.451171875), (0.529411764705882, 0.4640625, 0.4640625), (\n 0.533333333333333, 0.476953125, 0.476953125), (0.537254901960784, \n 0.48984375, 0.48984375), (0.541176470588235, 0.502734375, \n 0.502734375), (0.545098039215686, 0.515625, 0.515625), (\n 0.549019607843137, 0.528515625, 0.528515625), (0.552941176470588, \n 0.54140625, 0.54140625), (0.556862745098039, 0.554296875, \n 0.554296875), (0.56078431372549, 0.5671875, 0.5671875), (\n 0.564705882352941, 0.580078125, 0.580078125), (0.568627450980392, \n 0.59296875, 0.59296875), (0.572549019607843, 0.605859375, \n 0.605859375), (0.576470588235294, 0.61875, 0.61875), (\n 0.580392156862745, 0.631640625, 0.631640625), (0.584313725490196, \n 0.64453125, 0.64453125), (0.588235294117647, 0.6359375, 0.6359375),\n (0.592156862745098, 0.62734375, 0.62734375), (0.596078431372549, \n 0.61875, 0.61875), (0.6, 0.61015625, 0.61015625), (\n 0.603921568627451, 0.6015625, 0.6015625), (0.607843137254902, \n 0.59296875, 0.59296875), (0.611764705882353, 0.584375, 0.584375), (\n 0.615686274509804, 0.57578125, 0.57578125), (0.619607843137255, \n 0.5671875, 0.5671875), (0.623529411764706, 0.55859375, 0.55859375),\n (0.627450980392157, 0.55, 0.55), (0.631372549019608, 0.54140625, \n 0.54140625), (0.635294117647059, 0.5328125, 0.5328125), (\n 0.63921568627451, 0.52421875, 0.52421875), (0.643137254901961, \n 0.515625, 0.515625), (0.647058823529412, 0.50703125, 0.50703125), (\n 0.650980392156863, 0.4984375, 0.4984375), (0.654901960784314, \n 0.48984375, 0.48984375), (0.658823529411765, 0.48125, 0.48125), (\n 0.662745098039216, 0.47265625, 0.47265625), (0.666666666666667, \n 0.4640625, 0.4640625), (0.670588235294118, 0.45546875, 0.45546875),\n (0.674509803921569, 0.446875, 0.446875), (0.67843137254902, \n 0.43828125, 0.43828125), (0.682352941176471, 0.4296875, 0.4296875),\n (0.686274509803922, 0.42109375, 0.42109375), (0.690196078431373, \n 0.4125, 0.4125), (0.694117647058824, 0.40390625, 0.40390625), (\n 0.698039215686274, 0.3953125, 0.3953125), (0.701960784313725, \n 0.38671875, 0.38671875), (0.705882352941177, 0.378125, 0.378125), (\n 0.709803921568627, 0.36953125, 0.36953125), (0.713725490196078, \n 0.3609375, 0.3609375), (0.717647058823529, 0.35234375, 0.35234375),\n (0.72156862745098, 0.34375, 0.34375), (0.725490196078431, \n 0.33515625, 0.33515625), (0.729411764705882, 0.3265625, 0.3265625),\n (0.733333333333333, 0.31796875, 0.31796875), (0.737254901960784, \n 0.309375, 0.309375), (0.741176470588235, 0.30078125, 0.30078125), (\n 0.745098039215686, 0.2921875, 0.2921875), (0.749019607843137, \n 0.28359375, 0.28359375), (0.752941176470588, 0.275, 0.275), (\n 0.756862745098039, 0.26640625, 0.26640625), (0.76078431372549, \n 0.2578125, 0.2578125), (0.764705882352941, 0.24921875, 0.24921875),\n (0.768627450980392, 0.240625, 0.240625), (0.772549019607843, \n 0.23203125, 0.23203125), (0.776470588235294, 0.2234375, 0.2234375),\n (0.780392156862745, 0.21484375, 0.21484375), (0.784313725490196, \n 0.222301171875, 0.222301171875), (0.788235294117647, 0.22975859375,\n 0.22975859375), (0.792156862745098, 0.237216015625, 0.237216015625),\n (0.796078431372549, 0.2446734375, 0.2446734375), (0.8, \n 0.252130859375, 0.252130859375), (0.803921568627451, 0.259587890625,\n 0.259587890625), 
(0.807843137254902, 0.2670453125, 0.2670453125), (\n 0.811764705882353, 0.274502734375, 0.274502734375), (\n 0.815686274509804, 0.28196015625, 0.28196015625), (\n 0.819607843137255, 0.289417578125, 0.289417578125), (\n 0.823529411764706, 0.296875, 0.296875), (0.827450980392157, \n 0.304332421875, 0.304332421875), (0.831372549019608, 0.31178984375,\n 0.31178984375), (0.835294117647059, 0.319247265625, 0.319247265625),\n (0.83921568627451, 0.3267046875, 0.3267046875), (0.843137254901961,\n 0.334162109375, 0.334162109375), (0.847058823529412, 0.34161953125,\n 0.34161953125), (0.850980392156863, 0.3490765625, 0.3490765625), (\n 0.854901960784314, 0.356533984375, 0.356533984375), (\n 0.858823529411765, 0.36399140625, 0.36399140625), (\n 0.862745098039216, 0.371448828125, 0.371448828125), (\n 0.866666666666667, 0.37890625, 0.37890625), (0.870588235294118, \n 0.386363671875, 0.386363671875), (0.874509803921569, 0.3938203125, \n 0.3938203125), (0.87843137254902, 0.40127734375, 0.40127734375), (\n 0.882352941176471, 0.408734375, 0.408734375), (0.886274509803922, \n 0.41619140625, 0.41619140625), (0.890196078431373, 0.42365234375, \n 0.42365234375), (0.894117647058824, 0.431109375, 0.431109375), (\n 0.898039215686275, 0.43856640625, 0.43856640625), (\n 0.901960784313726, 0.4460234375, 0.4460234375), (0.905882352941176,\n 0.45348046875, 0.45348046875), (0.909803921568627, 0.4609375, \n 0.4609375), (0.913725490196078, 0.46839453125, 0.46839453125), (\n 0.917647058823529, 0.4758515625, 0.4758515625), (0.92156862745098, \n 0.48330859375, 0.48330859375), (0.925490196078431, 0.490765625, \n 0.490765625), (0.929411764705882, 0.49822265625, 0.49822265625), (\n 0.933333333333333, 0.50568359375, 0.50568359375), (\n 0.937254901960784, 0.513140625, 0.513140625), (0.941176470588235, \n 0.52059765625, 0.52059765625), (0.945098039215686, 0.5280546875, \n 0.5280546875), (0.949019607843137, 0.53551171875, 0.53551171875), (\n 0.952941176470588, 0.54296875, 0.54296875), (0.956862745098039, \n 0.55042578125, 0.55042578125), (0.96078431372549, 0.5578828125, \n 0.5578828125), (0.964705882352941, 0.56533984375, 0.56533984375), (\n 0.968627450980392, 0.572796875, 0.572796875), (0.972549019607843, \n 0.58025390625, 0.58025390625), (0.976470588235294, 0.58771484375, \n 0.58771484375), (0.980392156862745, 0.595171875, 0.595171875), (\n 0.984313725490196, 0.60262890625, 0.60262890625), (\n 0.988235294117647, 0.6100859375, 0.6100859375), (0.992156862745098,\n 0.61754296875, 0.61754296875), (0.996078431372549, 0.625, 0.625), (\n 1.0, 0.625, 0.625)), 'blue': ((0.0, 0.51984375, 0.51984375), (\n 0.00392156862745098, 0.51984375, 0.51984375), (0.00784313725490196,\n 0.529765625, 0.529765625), (0.0117647058823529, 0.5396875, \n 0.5396875), (0.0156862745098039, 0.549609375, 0.549609375), (\n 0.0196078431372549, 0.55953125, 0.55953125), (0.0235294117647059, \n 0.569453125, 0.569453125), (0.0274509803921569, 0.579375, 0.579375),\n (0.0313725490196078, 0.589296875, 0.589296875), (0.0352941176470588,\n 0.59921875, 0.59921875), (0.0392156862745098, 0.609140625, \n 0.609140625), (0.0431372549019608, 0.6190625, 0.6190625), (\n 0.0470588235294118, 0.628984375, 0.628984375), (0.0509803921568627,\n 0.63890625, 0.63890625), (0.0549019607843137, 0.648828125, \n 0.648828125), (0.0588235294117647, 0.65875, 0.65875), (\n 0.0627450980392157, 0.668671875, 0.668671875), (0.0666666666666667,\n 0.67859375, 0.67859375), (0.0705882352941176, 0.688515625, \n 0.688515625), (0.0745098039215686, 0.6984375, 0.6984375), (\n 0.0784313725490196, 0.708359375, 
0.708359375), (0.0823529411764706,\n 0.71828125, 0.71828125), (0.0862745098039216, 0.728203125, \n 0.728203125), (0.0901960784313725, 0.738125, 0.738125), (\n 0.0941176470588235, 0.748046875, 0.748046875), (0.0980392156862745,\n 0.75796875, 0.75796875), (0.101960784313725, 0.767890625, \n 0.767890625), (0.105882352941176, 0.7778125, 0.7778125), (\n 0.109803921568627, 0.787734375, 0.787734375), (0.113725490196078, \n 0.79765625, 0.79765625), (0.117647058823529, 0.807578125, \n 0.807578125), (0.12156862745098, 0.8175, 0.8175), (\n 0.125490196078431, 0.827421875, 0.827421875), (0.129411764705882, \n 0.83734375, 0.83734375), (0.133333333333333, 0.847265625, \n 0.847265625), (0.137254901960784, 0.8571875, 0.8571875), (\n 0.141176470588235, 0.867109375, 0.867109375), (0.145098039215686, \n 0.87703125, 0.87703125), (0.149019607843137, 0.886953125, \n 0.886953125), (0.152941176470588, 0.896875, 0.896875), (\n 0.156862745098039, 0.906796875, 0.906796875), (0.16078431372549, \n 0.91671875, 0.91671875), (0.164705882352941, 0.926640625, \n 0.926640625), (0.168627450980392, 0.9365625, 0.9365625), (\n 0.172549019607843, 0.946484375, 0.946484375), (0.176470588235294, \n 0.95640625, 0.95640625), (0.180392156862745, 0.966328125, \n 0.966328125), (0.184313725490196, 0.97625, 0.97625), (\n 0.188235294117647, 0.986171875, 0.986171875), (0.192156862745098, \n 0.99609375, 0.99609375), (0.196078431372549, 0.976171875, \n 0.976171875), (0.2, 0.95625, 0.95625), (0.203921568627451, \n 0.936328125, 0.936328125), (0.207843137254902, 0.91640625, \n 0.91640625), (0.211764705882353, 0.896484375, 0.896484375), (\n 0.215686274509804, 0.8765625, 0.8765625), (0.219607843137255, \n 0.856640625, 0.856640625), (0.223529411764706, 0.83671875, \n 0.83671875), (0.227450980392157, 0.816796875, 0.816796875), (\n 0.231372549019608, 0.796875, 0.796875), (0.235294117647059, \n 0.776953125, 0.776953125), (0.23921568627451, 0.75703125, \n 0.75703125), (0.243137254901961, 0.737109375, 0.737109375), (\n 0.247058823529412, 0.7171875, 0.7171875), (0.250980392156863, \n 0.697265625, 0.697265625), (0.254901960784314, 0.67734375, \n 0.67734375), (0.258823529411765, 0.657421875, 0.657421875), (\n 0.262745098039216, 0.6375, 0.6375), (0.266666666666667, 0.617578125,\n 0.617578125), (0.270588235294118, 0.59765625, 0.59765625), (\n 0.274509803921569, 0.577734375, 0.577734375), (0.27843137254902, \n 0.5578125, 0.5578125), (0.282352941176471, 0.537890625, 0.537890625\n ), (0.286274509803922, 0.51796875, 0.51796875), (0.290196078431373,\n 0.498046875, 0.498046875), (0.294117647058824, 0.478125, 0.478125),\n (0.298039215686275, 0.458203125, 0.458203125), (0.301960784313725, \n 0.43828125, 0.43828125), (0.305882352941176, 0.418359375, \n 0.418359375), (0.309803921568627, 0.3984375, 0.3984375), (\n 0.313725490196078, 0.378515625, 0.378515625), (0.317647058823529, \n 0.35859375, 0.35859375), (0.32156862745098, 0.338671875, \n 0.338671875), (0.325490196078431, 0.31875, 0.31875), (\n 0.329411764705882, 0.298828125, 0.298828125), (0.333333333333333, \n 0.27890625, 0.27890625), (0.337254901960784, 0.258984375, \n 0.258984375), (0.341176470588235, 0.2390625, 0.2390625), (\n 0.345098039215686, 0.219140625, 0.219140625), (0.349019607843137, \n 0.19921875, 0.19921875), (0.352941176470588, 0.179296875, \n 0.179296875), (0.356862745098039, 0.159375, 0.159375), (\n 0.36078431372549, 0.139453125, 0.139453125), (0.364705882352941, \n 0.11953125, 0.11953125), (0.368627450980392, 0.099609375, \n 0.099609375), (0.372549019607843, 0.0796875, 0.0796875), (\n 
0.376470588235294, 0.059765625, 0.059765625), (0.380392156862745, \n 0.03984375, 0.03984375), (0.384313725490196, 0.019921875, \n 0.019921875), (0.388235294117647, 0, 0), (0.392156862745098, 0, 0),\n (0.396078431372549, 0, 0), (0.4, 0, 0), (0.403921568627451, 0, 0),\n (0.407843137254902, 0, 0), (0.411764705882353, 0, 0), (\n 0.415686274509804, 0, 0), (0.419607843137255, 0, 0), (\n 0.423529411764706, 0, 0), (0.427450980392157, 0, 0), (\n 0.431372549019608, 0, 0), (0.435294117647059, 0, 0), (\n 0.43921568627451, 0, 0), (0.443137254901961, 0, 0), (\n 0.447058823529412, 0, 0), (0.450980392156863, 0, 0), (\n 0.454901960784314, 0, 0), (0.458823529411765, 0, 0), (\n 0.462745098039216, 0, 0), (0.466666666666667, 0, 0), (\n 0.470588235294118, 0, 0), (0.474509803921569, 0, 0), (\n 0.47843137254902, 0, 0), (0.482352941176471, 0, 0), (\n 0.486274509803922, 0, 0), (0.490196078431373, 0, 0), (\n 0.494117647058824, 0, 0), (0.498039215686275, 0, 0), (\n 0.501960784313725, 0, 0), (0.505882352941176, 0, 0), (\n 0.509803921568627, 0, 0), (0.513725490196078, 0, 0), (\n 0.517647058823529, 0, 0), (0.52156862745098, 0, 0), (\n 0.525490196078431, 0, 0), (0.529411764705882, 0, 0), (\n 0.533333333333333, 0, 0), (0.537254901960784, 0, 0), (\n 0.541176470588235, 0, 0), (0.545098039215686, 0, 0), (\n 0.549019607843137, 0, 0), (0.552941176470588, 0, 0), (\n 0.556862745098039, 0, 0), (0.56078431372549, 0, 0), (\n 0.564705882352941, 0, 0), (0.568627450980392, 0, 0), (\n 0.572549019607843, 0, 0), (0.576470588235294, 0, 0), (\n 0.580392156862745, 0, 0), (0.584313725490196, 0, 0), (\n 0.588235294117647, 0.004296875, 0.004296875), (0.592156862745098, \n 0.00859375, 0.00859375), (0.596078431372549, 0.012890625, \n 0.012890625), (0.6, 0.0171875, 0.0171875), (0.603921568627451, \n 0.021484375, 0.021484375), (0.607843137254902, 0.02578125, \n 0.02578125), (0.611764705882353, 0.030078125, 0.030078125), (\n 0.615686274509804, 0.034375, 0.034375), (0.619607843137255, \n 0.038671875, 0.038671875), (0.623529411764706, 0.04296875, \n 0.04296875), (0.627450980392157, 0.047265625, 0.047265625), (\n 0.631372549019608, 0.0515625, 0.0515625), (0.635294117647059, \n 0.055859375, 0.055859375), (0.63921568627451, 0.06015625, \n 0.06015625), (0.643137254901961, 0.064453125, 0.064453125), (\n 0.647058823529412, 0.06875, 0.06875), (0.650980392156863, \n 0.073046875, 0.073046875), (0.654901960784314, 0.07734375, \n 0.07734375), (0.658823529411765, 0.081640625, 0.081640625), (\n 0.662745098039216, 0.0859375, 0.0859375), (0.666666666666667, \n 0.090234375, 0.090234375), (0.670588235294118, 0.09453125, \n 0.09453125), (0.674509803921569, 0.098828125, 0.098828125), (\n 0.67843137254902, 0.103125, 0.103125), (0.682352941176471, \n 0.107421875, 0.107421875), (0.686274509803922, 0.11171875, \n 0.11171875), (0.690196078431373, 0.116015625, 0.116015625), (\n 0.694117647058824, 0.1203125, 0.1203125), (0.698039215686274, \n 0.124609375, 0.124609375), (0.701960784313725, 0.12890625, \n 0.12890625), (0.705882352941177, 0.133203125, 0.133203125), (\n 0.709803921568627, 0.1375, 0.1375), (0.713725490196078, 0.141796875,\n 0.141796875), (0.717647058823529, 0.14609375, 0.14609375), (\n 0.72156862745098, 0.150390625, 0.150390625), (0.725490196078431, \n 0.1546875, 0.1546875), (0.729411764705882, 0.158984375, 0.158984375\n ), (0.733333333333333, 0.16328125, 0.16328125), (0.737254901960784,\n 0.167578125, 0.167578125), (0.741176470588235, 0.171875, 0.171875),\n (0.745098039215686, 0.176171875, 0.176171875), (0.749019607843137, \n 0.18046875, 0.18046875), 
(0.752941176470588, 0.184765625, \n 0.184765625), (0.756862745098039, 0.1890625, 0.1890625), (\n 0.76078431372549, 0.193359375, 0.193359375), (0.764705882352941, \n 0.19765625, 0.19765625), (0.768627450980392, 0.201953125, \n 0.201953125), (0.772549019607843, 0.20625, 0.20625), (\n 0.776470588235294, 0.210546875, 0.210546875), (0.780392156862745, \n 0.21484375, 0.21484375), (0.784313725490196, 0.22663359375, \n 0.22663359375), (0.788235294117647, 0.2384234375, 0.2384234375), (\n 0.792156862745098, 0.250212890625, 0.250212890625), (\n 0.796078431372549, 0.262002734375, 0.262002734375), (0.8, \n 0.273792578125, 0.273792578125), (0.803921568627451, 0.285582421875,\n 0.285582421875), (0.807843137254902, 0.297372265625, 0.297372265625\n ), (0.811764705882353, 0.309162109375, 0.309162109375), (\n 0.815686274509804, 0.3209515625, 0.3209515625), (0.819607843137255,\n 0.33274140625, 0.33274140625), (0.823529411764706, 0.34453125, \n 0.34453125), (0.827450980392157, 0.35632109375, 0.35632109375), (\n 0.831372549019608, 0.3681109375, 0.3681109375), (0.835294117647059,\n 0.379900390625, 0.379900390625), (0.83921568627451, 0.39169140625, \n 0.39169140625), (0.843137254901961, 0.40348046875, 0.40348046875),\n (0.847058823529412, 0.41526953125, 0.41526953125), (\n 0.850980392156863, 0.42705859375, 0.42705859375), (\n 0.854901960784314, 0.43884765625, 0.43884765625), (\n 0.858823529411765, 0.450640625, 0.450640625), (0.862745098039216, \n 0.4624296875, 0.4624296875), (0.866666666666667, 0.47421875, \n 0.47421875), (0.870588235294118, 0.4860078125, 0.4860078125), (\n 0.874509803921569, 0.497796875, 0.497796875), (0.87843137254902, \n 0.50958984375, 0.50958984375), (0.882352941176471, 0.52137890625, \n 0.52137890625), (0.886274509803922, 0.53316796875, 0.53316796875),\n (0.890196078431373, 0.54495703125, 0.54495703125), (\n 0.894117647058824, 0.55674609375, 0.55674609375), (\n 0.898039215686275, 0.56853515625, 0.56853515625), (\n 0.901960784313726, 0.580328125, 0.580328125), (0.905882352941176, \n 0.5921171875, 0.5921171875), (0.909803921568627, 0.60390625, \n 0.60390625), (0.913725490196078, 0.6156953125, 0.6156953125), (\n 0.917647058823529, 0.627484375, 0.627484375), (0.92156862745098, \n 0.63927734375, 0.63927734375), (0.925490196078431, 0.65106640625, \n 0.65106640625), (0.929411764705882, 0.66285546875, 0.66285546875),\n (0.933333333333333, 0.67464453125, 0.67464453125), (\n 0.937254901960784, 0.68643359375, 0.68643359375), (\n 0.941176470588235, 0.69822265625, 0.69822265625), (\n 0.945098039215686, 0.710015625, 0.710015625), (0.949019607843137, \n 0.7218046875, 0.7218046875), (0.952941176470588, 0.73359375, \n 0.73359375), (0.956862745098039, 0.7453828125, 0.7453828125), (\n 0.96078431372549, 0.757171875, 0.757171875), (0.964705882352941, \n 0.76896484375, 0.76896484375), (0.968627450980392, 0.78075390625, \n 0.78075390625), (0.972549019607843, 0.79254296875, 0.79254296875),\n (0.976470588235294, 0.80433203125, 0.80433203125), (\n 0.980392156862745, 0.81612109375, 0.81612109375), (\n 0.984313725490196, 0.82791015625, 0.82791015625), (\n 0.988235294117647, 0.839703125, 0.839703125), (0.992156862745098, \n 0.8514921875, 0.8514921875), (0.996078431372549, 0.86328125, \n 0.86328125), (1.0, 0.86328125, 0.86328125))}\n vcdict = {'red': ((0, 1, 1), (0.00392156862745098, 0.54508984375, \n 0.54508984375), (0.00784313725490196, 0.5285703125, 0.5285703125),\n (0.0117647058823529, 0.5120546875, 0.5120546875), (\n 0.0156862745098039, 0.49553515625, 0.49553515625), (\n 0.0196078431372549, 0.47901953125, 
0.47901953125), (\n 0.0235294117647059, 0.4625, 0.4625), (0.0274509803921569, \n 0.44598046875, 0.44598046875), (0.0313725490196078, 0.42946484375, \n 0.42946484375), (0.0352941176470588, 0.4129453125, 0.4129453125), (\n 0.0392156862745098, 0.3964296875, 0.3964296875), (\n 0.0431372549019608, 0.379910546875, 0.379910546875), (\n 0.0470588235294118, 0.36339296875, 0.36339296875), (\n 0.0509803921568627, 0.346875, 0.346875), (0.0549019607843137, \n 0.33035703125, 0.33035703125), (0.0588235294117647, 0.313839453125,\n 0.313839453125), (0.0627450980392157, 0.297321484375, \n 0.297321484375), (0.0666666666666667, 0.280803515625, \n 0.280803515625), (0.0705882352941176, 0.2642859375, 0.2642859375),\n (0.0745098039215686, 0.24776796875, 0.24776796875), (\n 0.0784313725490196, 0.23125, 0.23125), (0.0823529411764706, \n 0.21473203125, 0.21473203125), (0.0862745098039216, 0.198214453125,\n 0.198214453125), (0.0901960784313725, 0.181696484375, \n 0.181696484375), (0.0941176470588235, 0.165178515625, \n 0.165178515625), (0.0980392156862745, 0.148660546875, \n 0.148660546875), (0.101960784313725, 0.13214296875, 0.13214296875),\n (0.105882352941176, 0.115625, 0.115625), (0.109803921568627, \n 0.09910703125, 0.09910703125), (0.113725490196078, 0.082589453125, \n 0.082589453125), (0.117647058823529, 0.066071484375, 0.066071484375\n ), (0.12156862745098, 0.049553515625, 0.049553515625), (\n 0.125490196078431, 0.0330357421875, 0.0330357421875), (\n 0.129411764705882, 0.016517890625, 0.016517890625), (\n 0.133333333333333, 0, 0), (0.137254901960784, 0, 0), (\n 0.141176470588235, 0, 0), (0.145098039215686, 0, 0), (\n 0.149019607843137, 0, 0), (0.152941176470588, 0, 0), (\n 0.156862745098039, 0, 0), (0.16078431372549, 0, 0), (\n 0.164705882352941, 0, 0), (0.168627450980392, 0, 0), (\n 0.172549019607843, 0, 0), (0.176470588235294, 0, 0), (\n 0.180392156862745, 0, 0), (0.184313725490196, 0, 0), (\n 0.188235294117647, 0, 0), (0.192156862745098, 0, 0), (\n 0.196078431372549, 0, 0), (0.2, 0, 0), (0.203921568627451, 0, 0), (\n 0.207843137254902, 0, 0), (0.211764705882353, 0, 0), (\n 0.215686274509804, 0, 0), (0.219607843137255, 0, 0), (\n 0.223529411764706, 0, 0), (0.227450980392157, 0, 0), (\n 0.231372549019608, 0, 0), (0.235294117647059, 0, 0), (\n 0.23921568627451, 0, 0), (0.243137254901961, 0, 0), (\n 0.247058823529412, 0, 0), (0.250980392156863, 0, 0), (\n 0.254901960784314, 0, 0), (0.258823529411765, 0, 0), (\n 0.262745098039216, 0, 0), (0.266666666666667, 0, 0), (\n 0.270588235294118, 0, 0), (0.274509803921569, 0, 0), (\n 0.27843137254902, 0, 0), (0.282352941176471, 0, 0), (\n 0.286274509803922, 0, 0), (0.290196078431373, 0, 0), (\n 0.294117647058824, 0, 0), (0.298039215686275, 0, 0), (\n 0.301960784313725, 0, 0), (0.305882352941176, 0, 0), (\n 0.309803921568627, 0, 0), (0.313725490196078, 0, 0), (\n 0.317647058823529, 0, 0), (0.32156862745098, 0, 0), (\n 0.325490196078431, 0, 0), (0.329411764705882, 0, 0), (\n 0.333333333333333, 0, 0), (0.337254901960784, 0, 0), (\n 0.341176470588235, 0, 0), (0.345098039215686, 0, 0), (\n 0.349019607843137, 0, 0), (0.352941176470588, 0.0061383984375, \n 0.0061383984375), (0.356862745098039, 0.012276796875, \n 0.012276796875), (0.36078431372549, 0.0184151953125, \n 0.0184151953125), (0.364705882352941, 0.0245535546875, \n 0.0245535546875), (0.368627450980392, 0.030691953125, \n 0.030691953125), (0.372549019607843, 0.0368303515625, \n 0.0368303515625), (0.376470588235294, 0.04296875, 0.04296875), (\n 0.380392156862745, 0.04910703125, 0.04910703125), (\n 0.384313725490196, 
0.055245703125, 0.055245703125), (\n 0.388235294117647, 0.061383984375, 0.061383984375), (\n 0.392156862745098, 0.067522265625, 0.067522265625), (\n 0.396078431372549, 0.073660546875, 0.073660546875), (0.4, \n 0.07979921875, 0.07979921875), (0.403921568627451, 0.0859375, \n 0.0859375), (0.407843137254902, 0.09207578125, 0.09207578125), (\n 0.411764705882353, 0.098214453125, 0.098214453125), (\n 0.415686274509804, 0.104352734375, 0.104352734375), (\n 0.419607843137255, 0.110491015625, 0.110491015625), (\n 0.423529411764706, 0.116629296875, 0.116629296875), (\n 0.427450980392157, 0.12276796875, 0.12276796875), (\n 0.431372549019608, 0.12890625, 0.12890625), (0.435294117647059, \n 0.13504453125, 0.13504453125), (0.43921568627451, 0.141183203125, \n 0.141183203125), (0.443137254901961, 0.147321484375, 0.147321484375\n ), (0.447058823529412, 0.153459765625, 0.153459765625), (\n 0.450980392156863, 0.159598046875, 0.159598046875), (\n 0.454901960784314, 0.16573671875, 0.16573671875), (\n 0.458823529411765, 0.171875, 0.171875), (0.462745098039216, \n 0.17801328125, 0.17801328125), (0.466666666666667, 0.184151953125, \n 0.184151953125), (0.470588235294118, 0.190290234375, 0.190290234375\n ), (0.474509803921569, 0.196428515625, 0.196428515625), (\n 0.47843137254902, 0.202566796875, 0.202566796875), (\n 0.482352941176471, 0.20870546875, 0.20870546875), (\n 0.486274509803922, 0.21484375, 0.21484375), (0.490196078431373, \n 0.233370703125, 0.233370703125), (0.494117647058824, 0.251897265625,\n 0.251897265625), (0.498039215686275, 0.27042421875, 0.27042421875),\n (0.501960784313725, 0.28895078125, 0.28895078125), (\n 0.505882352941176, 0.307477734375, 0.307477734375), (\n 0.509803921568627, 0.326004296875, 0.326004296875), (\n 0.513725490196078, 0.34453125, 0.34453125), (0.517647058823529, \n 0.363058203125, 0.363058203125), (0.52156862745098, 0.381584765625,\n 0.381584765625), (0.525490196078431, 0.40011328125, 0.40011328125),\n (0.529411764705882, 0.41863671875, 0.41863671875), (\n 0.533333333333333, 0.4371640625, 0.4371640625), (0.537254901960784,\n 0.45569140625, 0.45569140625), (0.541176470588235, 0.47421875, \n 0.47421875), (0.545098039215686, 0.49274609375, 0.49274609375), (\n 0.549019607843137, 0.5112734375, 0.5112734375), (0.552941176470588,\n 0.52980078125, 0.52980078125), (0.556862745098039, 0.54832421875, \n 0.54832421875), (0.56078431372549, 0.5668515625, 0.5668515625), (\n 0.564705882352941, 0.58537890625, 0.58537890625), (\n 0.568627450980392, 0.60390625, 0.60390625), (0.572549019607843, \n 0.62243359375, 0.62243359375), (0.576470588235294, 0.6409609375, \n 0.6409609375), (0.580392156862745, 0.65948828125, 0.65948828125), (\n 0.584313725490196, 0.67801171875, 0.67801171875), (\n 0.588235294117647, 0.6965390625, 0.6965390625), (0.592156862745098,\n 0.71506640625, 0.71506640625), (0.596078431372549, 0.73359375, \n 0.73359375), (0.6, 0.75212109375, 0.75212109375), (\n 0.603921568627451, 0.7706484375, 0.7706484375), (0.607843137254902,\n 0.78917578125, 0.78917578125), (0.611764705882353, 0.80769921875, \n 0.80769921875), (0.615686274509804, 0.8262265625, 0.8262265625), (\n 0.619607843137255, 0.84475390625, 0.84475390625), (\n 0.623529411764706, 0.86328125, 0.86328125), (0.627450980392157, \n 0.86549609375, 0.86549609375), (0.631372549019608, 0.86770703125, \n 0.86770703125), (0.635294117647059, 0.869921875, 0.869921875), (\n 0.63921568627451, 0.87213671875, 0.87213671875), (0.643137254901961,\n 0.87434765625, 0.87434765625), (0.647058823529412, 0.8765625, \n 0.8765625), (0.650980392156863, 
0.87877734375, 0.87877734375), (\n 0.654901960784314, 0.88098828125, 0.88098828125), (\n 0.658823529411765, 0.883203125, 0.883203125), (0.662745098039216, \n 0.88541796875, 0.88541796875), (0.666666666666667, 0.88762890625, \n 0.88762890625), (0.670588235294118, 0.88984375, 0.88984375), (\n 0.674509803921569, 0.89205859375, 0.89205859375), (0.67843137254902,\n 0.89426953125, 0.89426953125), (0.682352941176471, 0.896484375, \n 0.896484375), (0.686274509803922, 0.89869921875, 0.89869921875), (\n 0.690196078431373, 0.90091015625, 0.90091015625), (\n 0.694117647058824, 0.903125, 0.903125), (0.698039215686274, \n 0.90533984375, 0.90533984375), (0.701960784313725, 0.90755078125, \n 0.90755078125), (0.705882352941177, 0.909765625, 0.909765625), (\n 0.709803921568627, 0.91198046875, 0.91198046875), (\n 0.713725490196078, 0.91419140625, 0.91419140625), (\n 0.717647058823529, 0.91640625, 0.91640625), (0.72156862745098, \n 0.91862109375, 0.91862109375), (0.725490196078431, 0.92083203125, \n 0.92083203125), (0.729411764705882, 0.923046875, 0.923046875), (\n 0.733333333333333, 0.92526171875, 0.92526171875), (\n 0.737254901960784, 0.92747265625, 0.92747265625), (\n 0.741176470588235, 0.9296875, 0.9296875), (0.745098039215686, \n 0.93190234375, 0.93190234375), (0.749019607843137, 0.93411328125, \n 0.93411328125), (0.752941176470588, 0.936328125, 0.936328125), (\n 0.756862745098039, 0.93854296875, 0.93854296875), (0.76078431372549,\n 0.94075390625, 0.94075390625), (0.764705882352941, 0.94296875, \n 0.94296875), (0.768627450980392, 0.94518359375, 0.94518359375), (\n 0.772549019607843, 0.94739453125, 0.94739453125), (\n 0.776470588235294, 0.949609375, 0.949609375), (0.780392156862745, \n 0.95182421875, 0.95182421875), (0.784313725490196, 0.95403515625, \n 0.95403515625), (0.788235294117647, 0.95625, 0.95625), (\n 0.792156862745098, 0.95846484375, 0.95846484375), (\n 0.796078431372549, 0.96067578125, 0.96067578125), (0.8, 0.962890625,\n 0.962890625), (0.803921568627451, 0.96510546875, 0.96510546875), (\n 0.807843137254902, 0.96731640625, 0.96731640625), (\n 0.811764705882353, 0.96953125, 0.96953125), (0.815686274509804, \n 0.97174609375, 0.97174609375), (0.819607843137255, 0.97395703125, \n 0.97395703125), (0.823529411764706, 0.976171875, 0.976171875), (\n 0.827450980392157, 0.97838671875, 0.97838671875), (\n 0.831372549019608, 0.98059765625, 0.98059765625), (\n 0.835294117647059, 0.9828125, 0.9828125), (0.83921568627451, \n 0.98502734375, 0.98502734375), (0.843137254901961, 0.98723828125, \n 0.98723828125), (0.847058823529412, 0.989453125, 0.989453125), (\n 0.850980392156863, 0.99166796875, 0.99166796875), (\n 0.854901960784314, 0.99387890625, 0.99387890625), (\n 0.858823529411765, 0.99609375, 0.99609375), (0.862745098039216, \n 0.99609375, 0.99609375), (0.866666666666667, 0.99609375, 0.99609375\n ), (0.870588235294118, 0.99609375, 0.99609375), (0.874509803921569,\n 0.99609375, 0.99609375), (0.87843137254902, 0.99609375, 0.99609375),\n (0.882352941176471, 0.99609375, 0.99609375), (0.886274509803922, \n 0.99609375, 0.99609375), (0.890196078431373, 0.99609375, 0.99609375\n ), (0.894117647058824, 0.99609375, 0.99609375), (0.898039215686275,\n 0.99609375, 0.99609375), (0.901960784313726, 0.99609375, 0.99609375\n ), (0.905882352941176, 0.99609375, 0.99609375), (0.909803921568627,\n 0.99609375, 0.99609375), (0.913725490196078, 0.99609375, 0.99609375\n ), (0.917647058823529, 0.99609375, 0.99609375), (0.92156862745098, \n 0.99609375, 0.99609375), (0.925490196078431, 0.99609375, 0.99609375\n ), (0.929411764705882, 
0.99609375, 0.99609375), (0.933333333333333,\n 0.99609375, 0.99609375), (0.937254901960784, 0.99609375, 0.99609375\n ), (0.941176470588235, 0.99609375, 0.99609375), (0.945098039215686,\n 0.99609375, 0.99609375), (0.949019607843137, 0.99609375, 0.99609375\n ), (0.952941176470588, 0.99609375, 0.99609375), (0.956862745098039,\n 0.99609375, 0.99609375), (0.96078431372549, 0.99609375, 0.99609375),\n (0.964705882352941, 0.99609375, 0.99609375), (0.968627450980392, \n 0.99609375, 0.99609375), (0.972549019607843, 0.99609375, 0.99609375\n ), (0.976470588235294, 0.99609375, 0.99609375), (0.980392156862745,\n 0.99609375, 0.99609375), (0.984313725490196, 0.99609375, 0.99609375\n ), (0.988235294117647, 0.99609375, 0.99609375), (0.992156862745098,\n 0.99609375, 0.99609375), (0.996078431372549, 0.99609375, 0.99609375\n ), (1, 0.99609375, 0.99609375)), 'green': ((0, 1, 1), (\n 0.00392156862745098, 0, 0), (0.00784313725490196, 0, 0), (\n 0.0117647058823529, 0, 0), (0.0156862745098039, 0, 0), (\n 0.0196078431372549, 0, 0), (0.0235294117647059, 0, 0), (\n 0.0274509803921569, 0, 0), (0.0313725490196078, 0, 0), (\n 0.0352941176470588, 0, 0), (0.0392156862745098, 0, 0), (\n 0.0431372549019608, 0, 0), (0.0470588235294118, 0, 0), (\n 0.0509803921568627, 0, 0), (0.0549019607843137, 0, 0), (\n 0.0588235294117647, 0, 0), (0.0627450980392157, 0, 0), (\n 0.0666666666666667, 0, 0), (0.0705882352941176, 0, 0), (\n 0.0745098039215686, 0, 0), (0.0784313725490196, 0, 0), (\n 0.0823529411764706, 0, 0), (0.0862745098039216, 0, 0), (\n 0.0901960784313725, 0, 0), (0.0941176470588235, 0, 0), (\n 0.0980392156862745, 0, 0), (0.101960784313725, 0, 0), (\n 0.105882352941176, 0, 0), (0.109803921568627, 0, 0), (\n 0.113725490196078, 0, 0), (0.117647058823529, 0, 0), (\n 0.12156862745098, 0, 0), (0.125490196078431, 0, 0), (\n 0.129411764705882, 0, 0), (0.133333333333333, 0, 0), (\n 0.137254901960784, 0.0135653515625, 0.0135653515625), (\n 0.141176470588235, 0.0271306640625, 0.0271306640625), (\n 0.145098039215686, 0.04069609375, 0.04069609375), (\n 0.149019607843137, 0.054261328125, 0.054261328125), (\n 0.152941176470588, 0.0678265625, 0.0678265625), (0.156862745098039,\n 0.0813921875, 0.0813921875), (0.16078431372549, 0.094957421875, \n 0.094957421875), (0.164705882352941, 0.10852265625, 0.10852265625),\n (0.168627450980392, 0.122087890625, 0.122087890625), (\n 0.172549019607843, 0.135653515625, 0.135653515625), (\n 0.176470588235294, 0.14921875, 0.14921875), (0.180392156862745, \n 0.162783984375, 0.162783984375), (0.184313725490196, 0.176349609375,\n 0.176349609375), (0.188235294117647, 0.18991484375, 0.18991484375),\n (0.192156862745098, 0.203480078125, 0.203480078125), (\n 0.196078431372549, 0.2170453125, 0.2170453125), (0.2, 0.2306109375,\n 0.2306109375), (0.203921568627451, 0.244176171875, 0.244176171875),\n (0.207843137254902, 0.25774140625, 0.25774140625), (\n 0.211764705882353, 0.27130703125, 0.27130703125), (\n 0.215686274509804, 0.284872265625, 0.284872265625), (\n 0.219607843137255, 0.2984375, 0.2984375), (0.223529411764706, \n 0.312002734375, 0.312002734375), (0.227450980392157, 0.325568359375,\n 0.325568359375), (0.231372549019608, 0.33913359375, 0.33913359375),\n (0.235294117647059, 0.352698828125, 0.352698828125), (\n 0.23921568627451, 0.3662640625, 0.3662640625), (0.243137254901961, \n 0.3798296875, 0.3798296875), (0.247058823529412, 0.39339453125, \n 0.39339453125), (0.250980392156863, 0.4069609375, 0.4069609375), (\n 0.254901960784314, 0.42052734375, 0.42052734375), (\n 0.258823529411765, 0.43408984375, 
0.43408984375), (\n 0.262745098039216, 0.44765625, 0.44765625), (0.266666666666667, \n 0.46122265625, 0.46122265625), (0.270588235294118, 0.47478515625, \n 0.47478515625), (0.274509803921569, 0.4883515625, 0.4883515625), (\n 0.27843137254902, 0.50191796875, 0.50191796875), (0.282352941176471,\n 0.515484375, 0.515484375), (0.286274509803922, 0.529046875, \n 0.529046875), (0.290196078431373, 0.54261328125, 0.54261328125), (\n 0.294117647058824, 0.5561796875, 0.5561796875), (0.298039215686275,\n 0.56974609375, 0.56974609375), (0.301960784313725, 0.58330859375, \n 0.58330859375), (0.305882352941176, 0.596875, 0.596875), (\n 0.309803921568627, 0.61044140625, 0.61044140625), (\n 0.313725490196078, 0.62400390625, 0.62400390625), (\n 0.317647058823529, 0.6375703125, 0.6375703125), (0.32156862745098, \n 0.65113671875, 0.65113671875), (0.325490196078431, 0.664703125, \n 0.664703125), (0.329411764705882, 0.678265625, 0.678265625), (\n 0.333333333333333, 0.69183203125, 0.69183203125), (\n 0.337254901960784, 0.7053984375, 0.7053984375), (0.341176470588235,\n 0.71896484375, 0.71896484375), (0.345098039215686, 0.73252734375, \n 0.73252734375), (0.349019607843137, 0.74609375, 0.74609375), (\n 0.352941176470588, 0.7309140625, 0.7309140625), (0.356862745098039,\n 0.71573828125, 0.71573828125), (0.36078431372549, 0.70055859375, \n 0.70055859375), (0.364705882352941, 0.68537890625, 0.68537890625),\n (0.368627450980392, 0.67019921875, 0.67019921875), (\n 0.372549019607843, 0.6550234375, 0.6550234375), (0.376470588235294,\n 0.63984375, 0.63984375), (0.380392156862745, 0.6246640625, \n 0.6246640625), (0.384313725490196, 0.60948828125, 0.60948828125), (\n 0.388235294117647, 0.59430859375, 0.59430859375), (\n 0.392156862745098, 0.57912890625, 0.57912890625), (\n 0.396078431372549, 0.56394921875, 0.56394921875), (0.4, \n 0.5487734375, 0.5487734375), (0.403921568627451, 0.53359375, \n 0.53359375), (0.407843137254902, 0.5184140625, 0.5184140625), (\n 0.411764705882353, 0.50323828125, 0.50323828125), (\n 0.415686274509804, 0.48805859375, 0.48805859375), (\n 0.419607843137255, 0.47287890625, 0.47287890625), (\n 0.423529411764706, 0.45769921875, 0.45769921875), (\n 0.427450980392157, 0.4425234375, 0.4425234375), (0.431372549019608,\n 0.42734375, 0.42734375), (0.435294117647059, 0.4121640625, \n 0.4121640625), (0.43921568627451, 0.39698828125, 0.39698828125), (\n 0.443137254901961, 0.381808203125, 0.381808203125), (\n 0.447058823529412, 0.366629296875, 0.366629296875), (\n 0.450980392156863, 0.35145078125, 0.35145078125), (\n 0.454901960784314, 0.336272265625, 0.336272265625), (\n 0.458823529411765, 0.32109375, 0.32109375), (0.462745098039216, \n 0.305915234375, 0.305915234375), (0.466666666666667, 0.29073671875,\n 0.29073671875), (0.470588235294118, 0.2755578125, 0.2755578125), (\n 0.474509803921569, 0.260379296875, 0.260379296875), (\n 0.47843137254902, 0.24520078125, 0.24520078125), (0.482352941176471,\n 0.230022265625, 0.230022265625), (0.486274509803922, 0.21484375, \n 0.21484375), (0.490196078431373, 0.2265625, 0.2265625), (\n 0.494117647058824, 0.23828125, 0.23828125), (0.498039215686275, \n 0.25, 0.25), (0.501960784313725, 0.26171875, 0.26171875), (\n 0.505882352941176, 0.2734375, 0.2734375), (0.509803921568627, \n 0.28515625, 0.28515625), (0.513725490196078, 0.296875, 0.296875), (\n 0.517647058823529, 0.30859375, 0.30859375), (0.52156862745098, \n 0.3203125, 0.3203125), (0.525490196078431, 0.33203125, 0.33203125),\n (0.529411764705882, 0.34375, 0.34375), (0.533333333333333, \n 0.35546875, 0.35546875), 
(0.537254901960784, 0.3671875, 0.3671875),\n (0.541176470588235, 0.37890625, 0.37890625), (0.545098039215686, \n 0.390625, 0.390625), (0.549019607843137, 0.40234375, 0.40234375), (\n 0.552941176470588, 0.4140625, 0.4140625), (0.556862745098039, \n 0.42578125, 0.42578125), (0.56078431372549, 0.4375, 0.4375), (\n 0.564705882352941, 0.44921875, 0.44921875), (0.568627450980392, \n 0.4609375, 0.4609375), (0.572549019607843, 0.47265625, 0.47265625),\n (0.576470588235294, 0.484375, 0.484375), (0.580392156862745, \n 0.49609375, 0.49609375), (0.584313725490196, 0.5078125, 0.5078125),\n (0.588235294117647, 0.51953125, 0.51953125), (0.592156862745098, \n 0.53125, 0.53125), (0.596078431372549, 0.54296875, 0.54296875), (\n 0.6, 0.5546875, 0.5546875), (0.603921568627451, 0.56640625, \n 0.56640625), (0.607843137254902, 0.578125, 0.578125), (\n 0.611764705882353, 0.58984375, 0.58984375), (0.615686274509804, \n 0.6015625, 0.6015625), (0.619607843137255, 0.61328125, 0.61328125),\n (0.623529411764706, 0.625, 0.625), (0.627450980392157, \n 0.61458203125, 0.61458203125), (0.631372549019608, 0.60416796875, \n 0.60416796875), (0.635294117647059, 0.59375, 0.59375), (\n 0.63921568627451, 0.58333203125, 0.58333203125), (0.643137254901961,\n 0.57291796875, 0.57291796875), (0.647058823529412, 0.5625, 0.5625),\n (0.650980392156863, 0.55208203125, 0.55208203125), (\n 0.654901960784314, 0.54166796875, 0.54166796875), (\n 0.658823529411765, 0.53125, 0.53125), (0.662745098039216, \n 0.52083203125, 0.52083203125), (0.666666666666667, 0.51041796875, \n 0.51041796875), (0.670588235294118, 0.5, 0.5), (0.674509803921569, \n 0.48958203125, 0.48958203125), (0.67843137254902, 0.47916796875, \n 0.47916796875), (0.682352941176471, 0.46875, 0.46875), (\n 0.686274509803922, 0.45833203125, 0.45833203125), (\n 0.690196078431373, 0.44791796875, 0.44791796875), (\n 0.694117647058824, 0.4375, 0.4375), (0.698039215686274, \n 0.42708203125, 0.42708203125), (0.701960784313725, 0.41666796875, \n 0.41666796875), (0.705882352941177, 0.40625, 0.40625), (\n 0.709803921568627, 0.39583203125, 0.39583203125), (\n 0.713725490196078, 0.385416796875, 0.385416796875), (\n 0.717647058823529, 0.375, 0.375), (0.72156862745098, 0.364583203125,\n 0.364583203125), (0.725490196078431, 0.354166796875, 0.354166796875\n ), (0.729411764705882, 0.34375, 0.34375), (0.733333333333333, \n 0.333333203125, 0.333333203125), (0.737254901960784, 0.322916796875,\n 0.322916796875), (0.741176470588235, 0.3125, 0.3125), (\n 0.745098039215686, 0.302083203125, 0.302083203125), (\n 0.749019607843137, 0.291666796875, 0.291666796875), (\n 0.752941176470588, 0.28125, 0.28125), (0.756862745098039, \n 0.270833203125, 0.270833203125), (0.76078431372549, 0.260416796875,\n 0.260416796875), (0.764705882352941, 0.25, 0.25), (\n 0.768627450980392, 0.239583203125, 0.239583203125), (\n 0.772549019607843, 0.229166796875, 0.229166796875), (\n 0.776470588235294, 0.21875, 0.21875), (0.780392156862745, \n 0.208333203125, 0.208333203125), (0.784313725490196, 0.197916796875,\n 0.197916796875), (0.788235294117647, 0.1875, 0.1875), (\n 0.792156862745098, 0.177083203125, 0.177083203125), (\n 0.796078431372549, 0.166666796875, 0.166666796875), (0.8, 0.15625, \n 0.15625), (0.803921568627451, 0.145833203125, 0.145833203125), (\n 0.807843137254902, 0.135416796875, 0.135416796875), (\n 0.811764705882353, 0.125, 0.125), (0.815686274509804, \n 0.114583203125, 0.114583203125), (0.819607843137255, 0.104166796875,\n 0.104166796875), (0.823529411764706, 0.09375, 0.09375), (\n 0.827450980392157, 0.083333203125, 
0.083333203125), (\n 0.831372549019608, 0.072916796875, 0.072916796875), (\n 0.835294117647059, 0.0625, 0.0625), (0.83921568627451, \n 0.052083203125, 0.052083203125), (0.843137254901961, 0.041666796875,\n 0.041666796875), (0.847058823529412, 0.03125, 0.03125), (\n 0.850980392156863, 0.0208333203125, 0.0208333203125), (\n 0.854901960784314, 0.0104166796875, 0.0104166796875), (\n 0.858823529411765, 0, 0), (0.862745098039216, 0.0184151953125, \n 0.0184151953125), (0.866666666666667, 0.0368303515625, \n 0.0368303515625), (0.870588235294118, 0.055245703125, \n 0.055245703125), (0.874509803921569, 0.073660546875, 0.073660546875\n ), (0.87843137254902, 0.09207578125, 0.09207578125), (\n 0.882352941176471, 0.110491015625, 0.110491015625), (\n 0.886274509803922, 0.12890625, 0.12890625), (0.890196078431373, \n 0.147321484375, 0.147321484375), (0.894117647058824, 0.16573671875,\n 0.16573671875), (0.898039215686275, 0.184151953125, 0.184151953125),\n (0.901960784313726, 0.202566796875, 0.202566796875), (\n 0.905882352941176, 0.22098203125, 0.22098203125), (\n 0.909803921568627, 0.239397265625, 0.239397265625), (\n 0.913725490196078, 0.2578125, 0.2578125), (0.917647058823529, \n 0.276227734375, 0.276227734375), (0.92156862745098, 0.29464296875, \n 0.29464296875), (0.925490196078431, 0.313058203125, 0.313058203125),\n (0.929411764705882, 0.331473046875, 0.331473046875), (\n 0.933333333333333, 0.34988828125, 0.34988828125), (\n 0.937254901960784, 0.368303515625, 0.368303515625), (\n 0.941176470588235, 0.38671875, 0.38671875), (0.945098039215686, \n 0.4051328125, 0.4051328125), (0.949019607843137, 0.42355078125, \n 0.42355078125), (0.952941176470588, 0.44196484375, 0.44196484375),\n (0.956862745098039, 0.46037890625, 0.46037890625), (\n 0.96078431372549, 0.47879296875, 0.47879296875), (0.964705882352941,\n 0.4972109375, 0.4972109375), (0.968627450980392, 0.515625, 0.515625\n ), (0.972549019607843, 0.5340390625, 0.5340390625), (\n 0.976470588235294, 0.55245703125, 0.55245703125), (\n 0.980392156862745, 0.57087109375, 0.57087109375), (\n 0.984313725490196, 0.58928515625, 0.58928515625), (\n 0.988235294117647, 0.60769921875, 0.60769921875), (\n 0.992156862745098, 0.6261171875, 0.6261171875), (0.996078431372549,\n 0.64453125, 0.64453125), (1, 0.64453125, 0.64453125)), 'blue': ((0,\n 1, 1), (0.00392156862745098, 0.80569140625, 0.80569140625), (\n 0.00784313725490196, 0.7964296875, 0.7964296875), (\n 0.0117647058823529, 0.7871640625, 0.7871640625), (\n 0.0156862745098039, 0.77790234375, 0.77790234375), (\n 0.0196078431372549, 0.76863671875, 0.76863671875), (\n 0.0235294117647059, 0.759375, 0.759375), (0.0274509803921569, \n 0.75011328125, 0.75011328125), (0.0313725490196078, 0.74084765625, \n 0.74084765625), (0.0352941176470588, 0.7315859375, 0.7315859375), (\n 0.0392156862745098, 0.7223203125, 0.7223203125), (\n 0.0431372549019608, 0.71305859375, 0.71305859375), (\n 0.0470588235294118, 0.70379296875, 0.70379296875), (\n 0.0509803921568627, 0.69453125, 0.69453125), (0.0549019607843137, \n 0.68526953125, 0.68526953125), (0.0588235294117647, 0.67600390625, \n 0.67600390625), (0.0627450980392157, 0.6667421875, 0.6667421875), (\n 0.0666666666666667, 0.6574765625, 0.6574765625), (\n 0.0705882352941176, 0.64821484375, 0.64821484375), (\n 0.0745098039215686, 0.63894921875, 0.63894921875), (\n 0.0784313725490196, 0.6296875, 0.6296875), (0.0823529411764706, \n 0.62042578125, 0.62042578125), (0.0862745098039216, 0.61116015625, \n 0.61116015625), (0.0901960784313725, 0.6018984375, 0.6018984375), (\n 0.0941176470588235, 
0.5926328125, 0.5926328125), (\n 0.0980392156862745, 0.58337109375, 0.58337109375), (\n 0.101960784313725, 0.57410546875, 0.57410546875), (\n 0.105882352941176, 0.56484375, 0.56484375), (0.109803921568627, \n 0.55558203125, 0.55558203125), (0.113725490196078, 0.54631640625, \n 0.54631640625), (0.117647058823529, 0.5370546875, 0.5370546875), (\n 0.12156862745098, 0.5277890625, 0.5277890625), (0.125490196078431, \n 0.51852734375, 0.51852734375), (0.129411764705882, 0.50926171875, \n 0.50926171875), (0.133333333333333, 0.5, 0.5), (0.137254901960784, \n 0.50901953125, 0.50901953125), (0.141176470588235, 0.5180390625, \n 0.5180390625), (0.145098039215686, 0.52705859375, 0.52705859375), (\n 0.149019607843137, 0.536078125, 0.536078125), (0.152941176470588, \n 0.54509765625, 0.54509765625), (0.156862745098039, 0.55412109375, \n 0.55412109375), (0.16078431372549, 0.563140625, 0.563140625), (\n 0.164705882352941, 0.57216015625, 0.57216015625), (\n 0.168627450980392, 0.5811796875, 0.5811796875), (0.172549019607843,\n 0.59019921875, 0.59019921875), (0.176470588235294, 0.59921875, \n 0.59921875), (0.180392156862745, 0.60823828125, 0.60823828125), (\n 0.184313725490196, 0.6172578125, 0.6172578125), (0.188235294117647,\n 0.62627734375, 0.62627734375), (0.192156862745098, 0.635296875, \n 0.635296875), (0.196078431372549, 0.64431640625, 0.64431640625), (\n 0.2, 0.65333984375, 0.65333984375), (0.203921568627451, 0.662359375,\n 0.662359375), (0.207843137254902, 0.67137890625, 0.67137890625), (\n 0.211764705882353, 0.6803984375, 0.6803984375), (0.215686274509804,\n 0.68941796875, 0.68941796875), (0.219607843137255, 0.6984375, \n 0.6984375), (0.223529411764706, 0.70745703125, 0.70745703125), (\n 0.227450980392157, 0.7164765625, 0.7164765625), (0.231372549019608,\n 0.72549609375, 0.72549609375), (0.235294117647059, 0.734515625, \n 0.734515625), (0.23921568627451, 0.74353515625, 0.74353515625), (\n 0.243137254901961, 0.75255859375, 0.75255859375), (\n 0.247058823529412, 0.761578125, 0.761578125), (0.250980392156863, \n 0.77059765625, 0.77059765625), (0.254901960784314, 0.7796171875, \n 0.7796171875), (0.258823529411765, 0.78863671875, 0.78863671875), (\n 0.262745098039216, 0.79765625, 0.79765625), (0.266666666666667, \n 0.80667578125, 0.80667578125), (0.270588235294118, 0.8156953125, \n 0.8156953125), (0.274509803921569, 0.82471484375, 0.82471484375), (\n 0.27843137254902, 0.833734375, 0.833734375), (0.282352941176471, \n 0.84275390625, 0.84275390625), (0.286274509803922, 0.85177734375, \n 0.85177734375), (0.290196078431373, 0.860796875, 0.860796875), (\n 0.294117647058824, 0.86981640625, 0.86981640625), (\n 0.298039215686275, 0.8788359375, 0.8788359375), (0.301960784313725,\n 0.88785546875, 0.88785546875), (0.305882352941176, 0.896875, \n 0.896875), (0.309803921568627, 0.90589453125, 0.90589453125), (\n 0.313725490196078, 0.9149140625, 0.9149140625), (0.317647058823529,\n 0.92393359375, 0.92393359375), (0.32156862745098, 0.932953125, \n 0.932953125), (0.325490196078431, 0.94197265625, 0.94197265625), (\n 0.329411764705882, 0.95099609375, 0.95099609375), (\n 0.333333333333333, 0.960015625, 0.960015625), (0.337254901960784, \n 0.96903515625, 0.96903515625), (0.341176470588235, 0.9780546875, \n 0.9780546875), (0.345098039215686, 0.98707421875, 0.98707421875), (\n 0.349019607843137, 0.99609375, 0.99609375), (0.352941176470588, \n 0.9737734375, 0.9737734375), (0.356862745098039, 0.95144921875, \n 0.95144921875), (0.36078431372549, 0.92912890625, 0.92912890625), (\n 0.364705882352941, 0.90680859375, 0.90680859375), 
(\n 0.368627450980392, 0.88448828125, 0.88448828125), (\n 0.372549019607843, 0.8621640625, 0.8621640625), (0.376470588235294,\n 0.83984375, 0.83984375), (0.380392156862745, 0.8175234375, \n 0.8175234375), (0.384313725490196, 0.79519921875, 0.79519921875), (\n 0.388235294117647, 0.77287890625, 0.77287890625), (\n 0.392156862745098, 0.75055859375, 0.75055859375), (\n 0.396078431372549, 0.72823828125, 0.72823828125), (0.4, \n 0.7059140625, 0.7059140625), (0.403921568627451, 0.68359375, \n 0.68359375), (0.407843137254902, 0.6612734375, 0.6612734375), (\n 0.411764705882353, 0.63894921875, 0.63894921875), (\n 0.415686274509804, 0.61662890625, 0.61662890625), (\n 0.419607843137255, 0.59430859375, 0.59430859375), (\n 0.423529411764706, 0.57198828125, 0.57198828125), (\n 0.427450980392157, 0.5496640625, 0.5496640625), (0.431372549019608,\n 0.52734375, 0.52734375), (0.435294117647059, 0.5050234375, \n 0.5050234375), (0.43921568627451, 0.48269921875, 0.48269921875), (\n 0.443137254901961, 0.46037890625, 0.46037890625), (\n 0.447058823529412, 0.43805859375, 0.43805859375), (\n 0.450980392156863, 0.41573828125, 0.41573828125), (\n 0.454901960784314, 0.3934140625, 0.3934140625), (0.458823529411765,\n 0.37109375, 0.37109375), (0.462745098039216, 0.348772265625, \n 0.348772265625), (0.466666666666667, 0.32645078125, 0.32645078125),\n (0.470588235294118, 0.304129296875, 0.304129296875), (\n 0.474509803921569, 0.281808203125, 0.281808203125), (\n 0.47843137254902, 0.25948671875, 0.25948671875), (0.482352941176471,\n 0.237165234375, 0.237165234375), (0.486274509803922, 0.21484375, \n 0.21484375), (0.490196078431373, 0.233370703125, 0.233370703125), (\n 0.494117647058824, 0.251897265625, 0.251897265625), (\n 0.498039215686275, 0.27042421875, 0.27042421875), (\n 0.501960784313725, 0.28895078125, 0.28895078125), (\n 0.505882352941176, 0.307477734375, 0.307477734375), (\n 0.509803921568627, 0.326004296875, 0.326004296875), (\n 0.513725490196078, 0.34453125, 0.34453125), (0.517647058823529, \n 0.363058203125, 0.363058203125), (0.52156862745098, 0.381584765625,\n 0.381584765625), (0.525490196078431, 0.40011328125, 0.40011328125),\n (0.529411764705882, 0.41863671875, 0.41863671875), (\n 0.533333333333333, 0.4371640625, 0.4371640625), (0.537254901960784,\n 0.45569140625, 0.45569140625), (0.541176470588235, 0.47421875, \n 0.47421875), (0.545098039215686, 0.49274609375, 0.49274609375), (\n 0.549019607843137, 0.5112734375, 0.5112734375), (0.552941176470588,\n 0.52980078125, 0.52980078125), (0.556862745098039, 0.54832421875, \n 0.54832421875), (0.56078431372549, 0.5668515625, 0.5668515625), (\n 0.564705882352941, 0.58537890625, 0.58537890625), (\n 0.568627450980392, 0.60390625, 0.60390625), (0.572549019607843, \n 0.62243359375, 0.62243359375), (0.576470588235294, 0.6409609375, \n 0.6409609375), (0.580392156862745, 0.65948828125, 0.65948828125), (\n 0.584313725490196, 0.67801171875, 0.67801171875), (\n 0.588235294117647, 0.6965390625, 0.6965390625), (0.592156862745098,\n 0.71506640625, 0.71506640625), (0.596078431372549, 0.73359375, \n 0.73359375), (0.6, 0.75212109375, 0.75212109375), (\n 0.603921568627451, 0.7706484375, 0.7706484375), (0.607843137254902,\n 0.78917578125, 0.78917578125), (0.611764705882353, 0.80769921875, \n 0.80769921875), (0.615686274509804, 0.8262265625, 0.8262265625), (\n 0.619607843137255, 0.84475390625, 0.84475390625), (\n 0.623529411764706, 0.86328125, 0.86328125), (0.627450980392157, \n 0.84889453125, 0.84889453125), (0.631372549019608, 0.83450390625, \n 0.83450390625), (0.635294117647059, 
0.8201171875, 0.8201171875), (\n 0.63921568627451, 0.80573046875, 0.80573046875), (0.643137254901961,\n 0.79133984375, 0.79133984375), (0.647058823529412, 0.776953125, \n 0.776953125), (0.650980392156863, 0.76256640625, 0.76256640625), (\n 0.654901960784314, 0.74817578125, 0.74817578125), (\n 0.658823529411765, 0.7337890625, 0.7337890625), (0.662745098039216,\n 0.71940234375, 0.71940234375), (0.666666666666667, 0.70501171875, \n 0.70501171875), (0.670588235294118, 0.690625, 0.690625), (\n 0.674509803921569, 0.67623828125, 0.67623828125), (0.67843137254902,\n 0.66184765625, 0.66184765625), (0.682352941176471, 0.6474609375, \n 0.6474609375), (0.686274509803922, 0.63307421875, 0.63307421875), (\n 0.690196078431373, 0.61868359375, 0.61868359375), (\n 0.694117647058824, 0.604296875, 0.604296875), (0.698039215686274, \n 0.58991015625, 0.58991015625), (0.701960784313725, 0.57551953125, \n 0.57551953125), (0.705882352941177, 0.5611328125, 0.5611328125), (\n 0.709803921568627, 0.54674609375, 0.54674609375), (\n 0.713725490196078, 0.53235546875, 0.53235546875), (\n 0.717647058823529, 0.51796875, 0.51796875), (0.72156862745098, \n 0.50358203125, 0.50358203125), (0.725490196078431, 0.48919140625, \n 0.48919140625), (0.729411764705882, 0.4748046875, 0.4748046875), (\n 0.733333333333333, 0.46041796875, 0.46041796875), (\n 0.737254901960784, 0.44602734375, 0.44602734375), (\n 0.741176470588235, 0.431640625, 0.431640625), (0.745098039215686, \n 0.41725390625, 0.41725390625), (0.749019607843137, 0.40286328125, \n 0.40286328125), (0.752941176470588, 0.3884765625, 0.3884765625), (\n 0.756862745098039, 0.374088671875, 0.374088671875), (\n 0.76078431372549, 0.359700390625, 0.359700390625), (\n 0.764705882352941, 0.3453125, 0.3453125), (0.768627450980392, \n 0.330924609375, 0.330924609375), (0.772549019607843, 0.316536328125,\n 0.316536328125), (0.776470588235294, 0.3021484375, 0.3021484375), (\n 0.780392156862745, 0.287760546875, 0.287760546875), (\n 0.784313725490196, 0.273372265625, 0.273372265625), (\n 0.788235294117647, 0.258984375, 0.258984375), (0.792156862745098, \n 0.244596484375, 0.244596484375), (0.796078431372549, 0.230208203125,\n 0.230208203125), (0.8, 0.2158203125, 0.2158203125), (\n 0.803921568627451, 0.201432421875, 0.201432421875), (\n 0.807843137254902, 0.187044140625, 0.187044140625), (\n 0.811764705882353, 0.17265625, 0.17265625), (0.815686274509804, \n 0.158268359375, 0.158268359375), (0.819607843137255, 0.143880078125,\n 0.143880078125), (0.823529411764706, 0.1294921875, 0.1294921875), (\n 0.827450980392157, 0.115104296875, 0.115104296875), (\n 0.831372549019608, 0.100716015625, 0.100716015625), (\n 0.835294117647059, 0.086328125, 0.086328125), (0.83921568627451, \n 0.071940234375, 0.071940234375), (0.843137254901961, 0.057551953125,\n 0.057551953125), (0.847058823529412, 0.0431640625, 0.0431640625), (\n 0.850980392156863, 0.028776015625, 0.028776015625), (\n 0.854901960784314, 0.01438796875, 0.01438796875), (\n 0.858823529411765, 0, 0), (0.862745098039216, 0, 0), (\n 0.866666666666667, 0, 0), (0.870588235294118, 0, 0), (\n 0.874509803921569, 0, 0), (0.87843137254902, 0, 0), (\n 0.882352941176471, 0, 0), (0.886274509803922, 0, 0), (\n 0.890196078431373, 0, 0), (0.894117647058824, 0, 0), (\n 0.898039215686275, 0, 0), (0.901960784313726, 0, 0), (\n 0.905882352941176, 0, 0), (0.909803921568627, 0, 0), (\n 0.913725490196078, 0, 0), (0.917647058823529, 0, 0), (\n 0.92156862745098, 0, 0), (0.925490196078431, 0, 0), (\n 0.929411764705882, 0, 0), (0.933333333333333, 0, 0), (\n 0.937254901960784, 
0, 0), (0.941176470588235, 0, 0), (\n 0.945098039215686, 0, 0), (0.949019607843137, 0, 0), (\n 0.952941176470588, 0, 0), (0.956862745098039, 0, 0), (\n 0.96078431372549, 0, 0), (0.964705882352941, 0, 0), (\n 0.968627450980392, 0, 0), (0.972549019607843, 0, 0), (\n 0.976470588235294, 0, 0), (0.980392156862745, 0, 0), (\n 0.984313725490196, 0, 0), (0.988235294117647, 0, 0), (\n 0.992156862745098, 0, 0), (0.996078431372549, 0, 0), (1, 0, 0))}\n califa = mcol.LinearSegmentedColormap('califa', cdict)\n vcalifa = mcol.LinearSegmentedColormap('vcalifa', vcdict)\n if option == 'v':\n return vcalifa\n else:\n return califa\n\n\ndef A_l(R_v, lw):\n lw = lw / 10000\n x = 1 / lw\n if x > 1.1:\n y = x - 1.82\n a_x = (1.0 + 0.17699 * y - 0.50447 * y ** 2 - 0.02427 * y ** 3 + \n 0.72085 * y ** 4 + 0.01979 * y ** 5 - 0.7753 * y ** 6 + 0.32999 *\n y ** 7)\n b_x = (1.41338 * y + 2.28305 * y ** 2 + 1.07233 * y ** 3 - 5.38434 *\n y ** 4 - 0.62251 * y ** 5 + 5.3026 * y ** 6 - 2.09002 * y ** 7)\n else:\n a_x = 0.574 * x ** 1.61\n b_x = -0.527 * x ** 1.61\n A_l_ = a_x + b_x / R_v\n return A_l_\n",
"<import token>\n\n\ndef color_map_califa_old(option='json'):\n if option == 'json':\n cmap_cal_dic = json.load(open('code/cmap_cal_json.txt'))\n elif option == 'pickle':\n with open('cmap_cal_pickle.txt', 'rb') as handle:\n cmap_cal_dic = pickle.loads(handle.read())\n cmap_cal = mcol.LinearSegmentedColormap('cmap_CALIFA', cmap_cal_dic)\n return cmap_cal\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\ndef Gr_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_grazy = np.linspace(x_min, -0.2, 100)\n ax.plot(x_set_grazy, grazy(x_set_grazy), label='Stasinska+03', **kwargs)\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\ndef SII_LINERS_curve_plot(ax=None, x_min=-0.3, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.01, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_OI_curve_plot(ax=None, x_min=-3.5, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.35)\n ax.plot(x_set_line, O3O1_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_curve_plot(ax=None, x_min=-2.0, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.08, 100)\n ax.plot(x_set_line, espinosa(x_set_line), label=label, **kwargs)\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\ndef kewley(logNIIHa):\n val = 0.61 / (logNIIHa - 0.47) + 1.19\n return val\n\n\ndef grazy(logNIIHa):\n x = logNIIHa\n val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409 * x\n ) - 31.093\n return val\n\n\ndef AGNline(logSIIHa):\n val = 0.72 / (logSIIHa - 0.32) + 1.3\n return val\n\n\ndef LINSy2line(logSIIHa):\n val = 1.89 * logSIIHa + 0.76\n return val\n\n\ndef AGNline2(logOIHa):\n val = 
0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\ndef LINSy2line2(logOIHa):\n val = 1.18 * logOIHa + 1.3\n return val\n\n\ndef espinosa(logNIIHa):\n val = 0.12579066 / (logNIIHa - 0.00302777) + 0.56846872\n return val\n\n\ndef O3S2_line_c(x):\n val = 0.04074804 / (x + 0.01253238) + 0.58154113\n return val\n\n\ndef O3O1_line_c(x):\n val = 0.05612915 / (x + 0.39641533) + 0.60969495\n return val\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\ndef Gr_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_grazy = np.linspace(x_min, -0.2, 100)\n ax.plot(x_set_grazy, grazy(x_set_grazy), label='Stasinska+03', **kwargs)\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\ndef SII_LINERS_curve_plot(ax=None, x_min=-0.3, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.02, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n\n\ndef Es_curve_plot(ax=None, x_min=-2.0, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.00302, 100)\n ax.plot(x_set_line, espinosa(x_set_line), label=label, **kwargs)\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\ndef kewley(logNIIHa):\n val = 0.61 / (logNIIHa - 0.47) + 1.19\n return val\n\n\ndef grazy(logNIIHa):\n x = logNIIHa\n val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409 * x\n ) - 31.093\n return val\n\n\ndef A_l(R_v, lw):\n lw = lw / 10000\n x = 1 / lw\n if x > 1.1:\n y = x - 1.82\n a_x = (1.0 + 0.17699 * y - 0.50447 * y ** 2 - 0.02427 * y ** 3 + \n 0.72085 * y ** 4 + 0.01979 * y ** 5 - 0.7753 * y ** 6 + 0.32999 *\n y ** 7)\n b_x = (1.41338 * y + 2.28305 * y ** 2 + 1.07233 * y ** 3 - 5.38434 *\n y ** 4 - 0.62251 * y ** 5 + 5.3026 * y ** 6 - 2.09002 * y ** 7)\n else:\n a_x = 0.574 * x ** 1.61\n b_x = 
-0.527 * x ** 1.61\n A_l_ = a_x + b_x / R_v\n return A_l_\n\n\ndef color_map_califa(option='v'):\n cdict = {'red': ((0.0, 0, 0), (0.00392156862745098, 0, 0), (\n 0.00784313725490196, 0, 0), (0.0117647058823529, 0, 0), (\n 0.0156862745098039, 0, 0), (0.0196078431372549, 0, 0), (\n 0.0235294117647059, 0, 0), (0.0274509803921569, 0, 0), (\n 0.0313725490196078, 0, 0), (0.0352941176470588, 0, 0), (\n 0.0392156862745098, 0, 0), (0.0431372549019608, 0, 0), (\n 0.0470588235294118, 0, 0), (0.0509803921568627, 0, 0), (\n 0.0549019607843137, 0, 0), (0.0588235294117647, 0, 0), (\n 0.0627450980392157, 0, 0), (0.0666666666666667, 0, 0), (\n 0.0705882352941176, 0, 0), (0.0745098039215686, 0, 0), (\n 0.0784313725490196, 0, 0), (0.0823529411764706, 0, 0), (\n 0.0862745098039216, 0, 0), (0.0901960784313725, 0, 0), (\n 0.0941176470588235, 0, 0), (0.0980392156862745, 0, 0), (\n 0.101960784313725, 0, 0), (0.105882352941176, 0, 0), (\n 0.109803921568627, 0, 0), (0.113725490196078, 0, 0), (\n 0.117647058823529, 0, 0), (0.12156862745098, 0, 0), (\n 0.125490196078431, 0, 0), (0.129411764705882, 0, 0), (\n 0.133333333333333, 0, 0), (0.137254901960784, 0, 0), (\n 0.141176470588235, 0, 0), (0.145098039215686, 0, 0), (\n 0.149019607843137, 0, 0), (0.152941176470588, 0, 0), (\n 0.156862745098039, 0, 0), (0.16078431372549, 0, 0), (\n 0.164705882352941, 0, 0), (0.168627450980392, 0, 0), (\n 0.172549019607843, 0, 0), (0.176470588235294, 0, 0), (\n 0.180392156862745, 0, 0), (0.184313725490196, 0, 0), (\n 0.188235294117647, 0, 0), (0.192156862745098, 0, 0), (\n 0.196078431372549, 0.019921875, 0.019921875), (0.2, 0.03984375, \n 0.03984375), (0.203921568627451, 0.059765625, 0.059765625), (\n 0.207843137254902, 0.0796875, 0.0796875), (0.211764705882353, \n 0.099609375, 0.099609375), (0.215686274509804, 0.11953125, \n 0.11953125), (0.219607843137255, 0.139453125, 0.139453125), (\n 0.223529411764706, 0.159375, 0.159375), (0.227450980392157, \n 0.179296875, 0.179296875), (0.231372549019608, 0.19921875, \n 0.19921875), (0.235294117647059, 0.219140625, 0.219140625), (\n 0.23921568627451, 0.2390625, 0.2390625), (0.243137254901961, \n 0.258984375, 0.258984375), (0.247058823529412, 0.27890625, \n 0.27890625), (0.250980392156863, 0.298828125, 0.298828125), (\n 0.254901960784314, 0.31875, 0.31875), (0.258823529411765, \n 0.338671875, 0.338671875), (0.262745098039216, 0.35859375, \n 0.35859375), (0.266666666666667, 0.378515625, 0.378515625), (\n 0.270588235294118, 0.3984375, 0.3984375), (0.274509803921569, \n 0.418359375, 0.418359375), (0.27843137254902, 0.43828125, \n 0.43828125), (0.282352941176471, 0.458203125, 0.458203125), (\n 0.286274509803922, 0.478125, 0.478125), (0.290196078431373, \n 0.498046875, 0.498046875), (0.294117647058824, 0.51796875, \n 0.51796875), (0.298039215686275, 0.537890625, 0.537890625), (\n 0.301960784313725, 0.5578125, 0.5578125), (0.305882352941176, \n 0.577734375, 0.577734375), (0.309803921568627, 0.59765625, \n 0.59765625), (0.313725490196078, 0.617578125, 0.617578125), (\n 0.317647058823529, 0.6375, 0.6375), (0.32156862745098, 0.657421875,\n 0.657421875), (0.325490196078431, 0.67734375, 0.67734375), (\n 0.329411764705882, 0.697265625, 0.697265625), (0.333333333333333, \n 0.7171875, 0.7171875), (0.337254901960784, 0.737109375, 0.737109375\n ), (0.341176470588235, 0.75703125, 0.75703125), (0.345098039215686,\n 0.776953125, 0.776953125), (0.349019607843137, 0.796875, 0.796875),\n (0.352941176470588, 0.816796875, 0.816796875), (0.356862745098039, \n 0.83671875, 0.83671875), (0.36078431372549, 0.856640625, \n 
0.856640625), (0.364705882352941, 0.8765625, 0.8765625), (\n 0.368627450980392, 0.896484375, 0.896484375), (0.372549019607843, \n 0.91640625, 0.91640625), (0.376470588235294, 0.936328125, \n 0.936328125), (0.380392156862745, 0.95625, 0.95625), (\n 0.384313725490196, 0.976171875, 0.976171875), (0.388235294117647, \n 0.99609375, 0.99609375), (0.392156862745098, 0.99609375, 0.99609375\n ), (0.396078431372549, 0.99609375, 0.99609375), (0.4, 0.99609375, \n 0.99609375), (0.403921568627451, 0.99609375, 0.99609375), (\n 0.407843137254902, 0.99609375, 0.99609375), (0.411764705882353, \n 0.99609375, 0.99609375), (0.415686274509804, 0.99609375, 0.99609375\n ), (0.419607843137255, 0.99609375, 0.99609375), (0.423529411764706,\n 0.99609375, 0.99609375), (0.427450980392157, 0.99609375, 0.99609375\n ), (0.431372549019608, 0.99609375, 0.99609375), (0.435294117647059,\n 0.99609375, 0.99609375), (0.43921568627451, 0.99609375, 0.99609375),\n (0.443137254901961, 0.99609375, 0.99609375), (0.447058823529412, \n 0.99609375, 0.99609375), (0.450980392156863, 0.99609375, 0.99609375\n ), (0.454901960784314, 0.99609375, 0.99609375), (0.458823529411765,\n 0.99609375, 0.99609375), (0.462745098039216, 0.99609375, 0.99609375\n ), (0.466666666666667, 0.99609375, 0.99609375), (0.470588235294118,\n 0.99609375, 0.99609375), (0.474509803921569, 0.99609375, 0.99609375\n ), (0.47843137254902, 0.99609375, 0.99609375), (0.482352941176471, \n 0.99609375, 0.99609375), (0.486274509803922, 0.99609375, 0.99609375\n ), (0.490196078431373, 0.99609375, 0.99609375), (0.494117647058824,\n 0.99609375, 0.99609375), (0.498039215686275, 0.99609375, 0.99609375\n ), (0.501960784313725, 0.99609375, 0.99609375), (0.505882352941176,\n 0.99609375, 0.99609375), (0.509803921568627, 0.99609375, 0.99609375\n ), (0.513725490196078, 0.99609375, 0.99609375), (0.517647058823529,\n 0.99609375, 0.99609375), (0.52156862745098, 0.99609375, 0.99609375),\n (0.525490196078431, 0.99609375, 0.99609375), (0.529411764705882, \n 0.99609375, 0.99609375), (0.533333333333333, 0.99609375, 0.99609375\n ), (0.537254901960784, 0.99609375, 0.99609375), (0.541176470588235,\n 0.99609375, 0.99609375), (0.545098039215686, 0.99609375, 0.99609375\n ), (0.549019607843137, 0.99609375, 0.99609375), (0.552941176470588,\n 0.99609375, 0.99609375), (0.556862745098039, 0.99609375, 0.99609375\n ), (0.56078431372549, 0.99609375, 0.99609375), (0.564705882352941, \n 0.99609375, 0.99609375), (0.568627450980392, 0.99609375, 0.99609375\n ), (0.572549019607843, 0.99609375, 0.99609375), (0.576470588235294,\n 0.99609375, 0.99609375), (0.580392156862745, 0.99609375, 0.99609375\n ), (0.584313725490196, 0.99609375, 0.99609375), (0.588235294117647,\n 0.98046875, 0.98046875), (0.592156862745098, 0.96484375, 0.96484375\n ), (0.596078431372549, 0.94921875, 0.94921875), (0.6, 0.93359375, \n 0.93359375), (0.603921568627451, 0.91796875, 0.91796875), (\n 0.607843137254902, 0.90234375, 0.90234375), (0.611764705882353, \n 0.88671875, 0.88671875), (0.615686274509804, 0.87109375, 0.87109375\n ), (0.619607843137255, 0.85546875, 0.85546875), (0.623529411764706,\n 0.83984375, 0.83984375), (0.627450980392157, 0.82421875, 0.82421875\n ), (0.631372549019608, 0.80859375, 0.80859375), (0.635294117647059,\n 0.79296875, 0.79296875), (0.63921568627451, 0.77734375, 0.77734375),\n (0.643137254901961, 0.76171875, 0.76171875), (0.647058823529412, \n 0.74609375, 0.74609375), (0.650980392156863, 0.73046875, 0.73046875\n ), (0.654901960784314, 0.71484375, 0.71484375), (0.658823529411765,\n 0.69921875, 0.69921875), (0.662745098039216, 
0.68359375, 0.68359375\n ), (0.666666666666667, 0.66796875, 0.66796875), (0.670588235294118,\n 0.65234375, 0.65234375), (0.674509803921569, 0.63671875, 0.63671875\n ), (0.67843137254902, 0.62109375, 0.62109375), (0.682352941176471, \n 0.60546875, 0.60546875), (0.686274509803922, 0.58984375, 0.58984375\n ), (0.690196078431373, 0.57421875, 0.57421875), (0.694117647058824,\n 0.55859375, 0.55859375), (0.698039215686274, 0.54296875, 0.54296875\n ), (0.701960784313725, 0.52734375, 0.52734375), (0.705882352941177,\n 0.51171875, 0.51171875), (0.709803921568627, 0.49609375, 0.49609375\n ), (0.713725490196078, 0.48046875, 0.48046875), (0.717647058823529,\n 0.46484375, 0.46484375), (0.72156862745098, 0.44921875, 0.44921875),\n (0.725490196078431, 0.43359375, 0.43359375), (0.729411764705882, \n 0.41796875, 0.41796875), (0.733333333333333, 0.40234375, 0.40234375\n ), (0.737254901960784, 0.38671875, 0.38671875), (0.741176470588235,\n 0.37109375, 0.37109375), (0.745098039215686, 0.35546875, 0.35546875\n ), (0.749019607843137, 0.33984375, 0.33984375), (0.752941176470588,\n 0.32421875, 0.32421875), (0.756862745098039, 0.30859375, 0.30859375\n ), (0.76078431372549, 0.29296875, 0.29296875), (0.764705882352941, \n 0.27734375, 0.27734375), (0.768627450980392, 0.26171875, 0.26171875\n ), (0.772549019607843, 0.24609375, 0.24609375), (0.776470588235294,\n 0.23046875, 0.23046875), (0.780392156862745, 0.21484375, 0.21484375\n ), (0.784313725490196, 0.22663359375, 0.22663359375), (\n 0.788235294117647, 0.2384234375, 0.2384234375), (0.792156862745098,\n 0.250212890625, 0.250212890625), (0.796078431372549, 0.262002734375,\n 0.262002734375), (0.8, 0.273792578125, 0.273792578125), (\n 0.803921568627451, 0.285582421875, 0.285582421875), (\n 0.807843137254902, 0.297372265625, 0.297372265625), (\n 0.811764705882353, 0.309162109375, 0.309162109375), (\n 0.815686274509804, 0.3209515625, 0.3209515625), (0.819607843137255,\n 0.33274140625, 0.33274140625), (0.823529411764706, 0.34453125, \n 0.34453125), (0.827450980392157, 0.35632109375, 0.35632109375), (\n 0.831372549019608, 0.3681109375, 0.3681109375), (0.835294117647059,\n 0.379900390625, 0.379900390625), (0.83921568627451, 0.39169140625, \n 0.39169140625), (0.843137254901961, 0.40348046875, 0.40348046875),\n (0.847058823529412, 0.41526953125, 0.41526953125), (\n 0.850980392156863, 0.42705859375, 0.42705859375), (\n 0.854901960784314, 0.43884765625, 0.43884765625), (\n 0.858823529411765, 0.450640625, 0.450640625), (0.862745098039216, \n 0.4624296875, 0.4624296875), (0.866666666666667, 0.47421875, \n 0.47421875), (0.870588235294118, 0.4860078125, 0.4860078125), (\n 0.874509803921569, 0.497796875, 0.497796875), (0.87843137254902, \n 0.50958984375, 0.50958984375), (0.882352941176471, 0.52137890625, \n 0.52137890625), (0.886274509803922, 0.53316796875, 0.53316796875),\n (0.890196078431373, 0.54495703125, 0.54495703125), (\n 0.894117647058824, 0.55674609375, 0.55674609375), (\n 0.898039215686275, 0.56853515625, 0.56853515625), (\n 0.901960784313726, 0.580328125, 0.580328125), (0.905882352941176, \n 0.5921171875, 0.5921171875), (0.909803921568627, 0.60390625, \n 0.60390625), (0.913725490196078, 0.6156953125, 0.6156953125), (\n 0.917647058823529, 0.627484375, 0.627484375), (0.92156862745098, \n 0.63927734375, 0.63927734375), (0.925490196078431, 0.65106640625, \n 0.65106640625), (0.929411764705882, 0.66285546875, 0.66285546875),\n (0.933333333333333, 0.67464453125, 0.67464453125), (\n 0.937254901960784, 0.68643359375, 0.68643359375), (\n 0.941176470588235, 0.69822265625, 
0.69822265625), (\n 0.945098039215686, 0.710015625, 0.710015625), (0.949019607843137, \n 0.7218046875, 0.7218046875), (0.952941176470588, 0.73359375, \n 0.73359375), (0.956862745098039, 0.7453828125, 0.7453828125), (\n 0.96078431372549, 0.757171875, 0.757171875), (0.964705882352941, \n 0.76896484375, 0.76896484375), (0.968627450980392, 0.78075390625, \n 0.78075390625), (0.972549019607843, 0.79254296875, 0.79254296875),\n (0.976470588235294, 0.80433203125, 0.80433203125), (\n 0.980392156862745, 0.81612109375, 0.81612109375), (\n 0.984313725490196, 0.82791015625, 0.82791015625), (\n 0.988235294117647, 0.839703125, 0.839703125), (0.992156862745098, \n 0.8514921875, 0.8514921875), (0.996078431372549, 0.86328125, \n 0.86328125), (1.0, 0.86328125, 0.86328125)), 'green': ((0.0, \n 0.02984375, 0.02984375), (0.00392156862745098, 0.02984375, \n 0.02984375), (0.00784313725490196, 0.044765625, 0.044765625), (\n 0.0117647058823529, 0.0596875, 0.0596875), (0.0156862745098039, \n 0.074609375, 0.074609375), (0.0196078431372549, 0.08953125, \n 0.08953125), (0.0235294117647059, 0.104453125, 0.104453125), (\n 0.0274509803921569, 0.119375, 0.119375), (0.0313725490196078, \n 0.134296875, 0.134296875), (0.0352941176470588, 0.14921875, \n 0.14921875), (0.0392156862745098, 0.164140625, 0.164140625), (\n 0.0431372549019608, 0.1790625, 0.1790625), (0.0470588235294118, \n 0.193984375, 0.193984375), (0.0509803921568627, 0.20890625, \n 0.20890625), (0.0549019607843137, 0.223828125, 0.223828125), (\n 0.0588235294117647, 0.23875, 0.23875), (0.0627450980392157, \n 0.253671875, 0.253671875), (0.0666666666666667, 0.26859375, \n 0.26859375), (0.0705882352941176, 0.283515625, 0.283515625), (\n 0.0745098039215686, 0.2984375, 0.2984375), (0.0784313725490196, \n 0.313359375, 0.313359375), (0.0823529411764706, 0.32828125, \n 0.32828125), (0.0862745098039216, 0.343203125, 0.343203125), (\n 0.0901960784313725, 0.358125, 0.358125), (0.0941176470588235, \n 0.373046875, 0.373046875), (0.0980392156862745, 0.38796875, \n 0.38796875), (0.101960784313725, 0.402890625, 0.402890625), (\n 0.105882352941176, 0.4178125, 0.4178125), (0.109803921568627, \n 0.432734375, 0.432734375), (0.113725490196078, 0.44765625, \n 0.44765625), (0.117647058823529, 0.462578125, 0.462578125), (\n 0.12156862745098, 0.4775, 0.4775), (0.125490196078431, 0.492421875,\n 0.492421875), (0.129411764705882, 0.50734375, 0.50734375), (\n 0.133333333333333, 0.522265625, 0.522265625), (0.137254901960784, \n 0.5371875, 0.5371875), (0.141176470588235, 0.552109375, 0.552109375\n ), (0.145098039215686, 0.56703125, 0.56703125), (0.149019607843137,\n 0.581953125, 0.581953125), (0.152941176470588, 0.596875, 0.596875),\n (0.156862745098039, 0.611796875, 0.611796875), (0.16078431372549, \n 0.62671875, 0.62671875), (0.164705882352941, 0.641640625, \n 0.641640625), (0.168627450980392, 0.6565625, 0.6565625), (\n 0.172549019607843, 0.671484375, 0.671484375), (0.176470588235294, \n 0.68640625, 0.68640625), (0.180392156862745, 0.701328125, \n 0.701328125), (0.184313725490196, 0.71625, 0.71625), (\n 0.188235294117647, 0.731171875, 0.731171875), (0.192156862745098, \n 0.74609375, 0.74609375), (0.196078431372549, 0.731171875, \n 0.731171875), (0.2, 0.71625, 0.71625), (0.203921568627451, \n 0.701328125, 0.701328125), (0.207843137254902, 0.68640625, \n 0.68640625), (0.211764705882353, 0.671484375, 0.671484375), (\n 0.215686274509804, 0.6565625, 0.6565625), (0.219607843137255, \n 0.641640625, 0.641640625), (0.223529411764706, 0.62671875, \n 0.62671875), (0.227450980392157, 0.611796875, 
0.611796875), (\n 0.231372549019608, 0.596875, 0.596875), (0.235294117647059, \n 0.581953125, 0.581953125), (0.23921568627451, 0.56703125, \n 0.56703125), (0.243137254901961, 0.552109375, 0.552109375), (\n 0.247058823529412, 0.5371875, 0.5371875), (0.250980392156863, \n 0.522265625, 0.522265625), (0.254901960784314, 0.50734375, \n 0.50734375), (0.258823529411765, 0.492421875, 0.492421875), (\n 0.262745098039216, 0.4775, 0.4775), (0.266666666666667, 0.462578125,\n 0.462578125), (0.270588235294118, 0.44765625, 0.44765625), (\n 0.274509803921569, 0.432734375, 0.432734375), (0.27843137254902, \n 0.4178125, 0.4178125), (0.282352941176471, 0.402890625, 0.402890625\n ), (0.286274509803922, 0.38796875, 0.38796875), (0.290196078431373,\n 0.373046875, 0.373046875), (0.294117647058824, 0.358125, 0.358125),\n (0.298039215686275, 0.343203125, 0.343203125), (0.301960784313725, \n 0.32828125, 0.32828125), (0.305882352941176, 0.313359375, \n 0.313359375), (0.309803921568627, 0.2984375, 0.2984375), (\n 0.313725490196078, 0.283515625, 0.283515625), (0.317647058823529, \n 0.26859375, 0.26859375), (0.32156862745098, 0.253671875, \n 0.253671875), (0.325490196078431, 0.23875, 0.23875), (\n 0.329411764705882, 0.223828125, 0.223828125), (0.333333333333333, \n 0.20890625, 0.20890625), (0.337254901960784, 0.193984375, \n 0.193984375), (0.341176470588235, 0.1790625, 0.1790625), (\n 0.345098039215686, 0.164140625, 0.164140625), (0.349019607843137, \n 0.14921875, 0.14921875), (0.352941176470588, 0.134296875, \n 0.134296875), (0.356862745098039, 0.119375, 0.119375), (\n 0.36078431372549, 0.104453125, 0.104453125), (0.364705882352941, \n 0.08953125, 0.08953125), (0.368627450980392, 0.074609375, \n 0.074609375), (0.372549019607843, 0.0596875, 0.0596875), (\n 0.376470588235294, 0.044765625, 0.044765625), (0.380392156862745, \n 0.0298437890625, 0.0298437890625), (0.384313725490196, 0.014921875,\n 0.014921875), (0.388235294117647, 0, 0), (0.392156862745098, \n 0.012890625, 0.012890625), (0.396078431372549, 0.02578125, \n 0.02578125), (0.4, 0.038671875, 0.038671875), (0.403921568627451, \n 0.0515625, 0.0515625), (0.407843137254902, 0.064453125, 0.064453125\n ), (0.411764705882353, 0.07734375, 0.07734375), (0.415686274509804,\n 0.090234375, 0.090234375), (0.419607843137255, 0.103125, 0.103125),\n (0.423529411764706, 0.116015625, 0.116015625), (0.427450980392157, \n 0.12890625, 0.12890625), (0.431372549019608, 0.141796875, \n 0.141796875), (0.435294117647059, 0.1546875, 0.1546875), (\n 0.43921568627451, 0.167578125, 0.167578125), (0.443137254901961, \n 0.18046875, 0.18046875), (0.447058823529412, 0.193359375, \n 0.193359375), (0.450980392156863, 0.20625, 0.20625), (\n 0.454901960784314, 0.219140625, 0.219140625), (0.458823529411765, \n 0.23203125, 0.23203125), (0.462745098039216, 0.244921875, \n 0.244921875), (0.466666666666667, 0.2578125, 0.2578125), (\n 0.470588235294118, 0.270703125, 0.270703125), (0.474509803921569, \n 0.28359375, 0.28359375), (0.47843137254902, 0.296484375, \n 0.296484375), (0.482352941176471, 0.309375, 0.309375), (\n 0.486274509803922, 0.322265625, 0.322265625), (0.490196078431373, \n 0.33515625, 0.33515625), (0.494117647058824, 0.348046875, \n 0.348046875), (0.498039215686275, 0.3609375, 0.3609375), (\n 0.501960784313725, 0.373828125, 0.373828125), (0.505882352941176, \n 0.38671875, 0.38671875), (0.509803921568627, 0.399609375, \n 0.399609375), (0.513725490196078, 0.4125, 0.4125), (\n 0.517647058823529, 0.425390625, 0.425390625), (0.52156862745098, \n 0.43828125, 0.43828125), (0.525490196078431, 
0.451171875, \n 0.451171875), (0.529411764705882, 0.4640625, 0.4640625), (\n 0.533333333333333, 0.476953125, 0.476953125), (0.537254901960784, \n 0.48984375, 0.48984375), (0.541176470588235, 0.502734375, \n 0.502734375), (0.545098039215686, 0.515625, 0.515625), (\n 0.549019607843137, 0.528515625, 0.528515625), (0.552941176470588, \n 0.54140625, 0.54140625), (0.556862745098039, 0.554296875, \n 0.554296875), (0.56078431372549, 0.5671875, 0.5671875), (\n 0.564705882352941, 0.580078125, 0.580078125), (0.568627450980392, \n 0.59296875, 0.59296875), (0.572549019607843, 0.605859375, \n 0.605859375), (0.576470588235294, 0.61875, 0.61875), (\n 0.580392156862745, 0.631640625, 0.631640625), (0.584313725490196, \n 0.64453125, 0.64453125), (0.588235294117647, 0.6359375, 0.6359375),\n (0.592156862745098, 0.62734375, 0.62734375), (0.596078431372549, \n 0.61875, 0.61875), (0.6, 0.61015625, 0.61015625), (\n 0.603921568627451, 0.6015625, 0.6015625), (0.607843137254902, \n 0.59296875, 0.59296875), (0.611764705882353, 0.584375, 0.584375), (\n 0.615686274509804, 0.57578125, 0.57578125), (0.619607843137255, \n 0.5671875, 0.5671875), (0.623529411764706, 0.55859375, 0.55859375),\n (0.627450980392157, 0.55, 0.55), (0.631372549019608, 0.54140625, \n 0.54140625), (0.635294117647059, 0.5328125, 0.5328125), (\n 0.63921568627451, 0.52421875, 0.52421875), (0.643137254901961, \n 0.515625, 0.515625), (0.647058823529412, 0.50703125, 0.50703125), (\n 0.650980392156863, 0.4984375, 0.4984375), (0.654901960784314, \n 0.48984375, 0.48984375), (0.658823529411765, 0.48125, 0.48125), (\n 0.662745098039216, 0.47265625, 0.47265625), (0.666666666666667, \n 0.4640625, 0.4640625), (0.670588235294118, 0.45546875, 0.45546875),\n (0.674509803921569, 0.446875, 0.446875), (0.67843137254902, \n 0.43828125, 0.43828125), (0.682352941176471, 0.4296875, 0.4296875),\n (0.686274509803922, 0.42109375, 0.42109375), (0.690196078431373, \n 0.4125, 0.4125), (0.694117647058824, 0.40390625, 0.40390625), (\n 0.698039215686274, 0.3953125, 0.3953125), (0.701960784313725, \n 0.38671875, 0.38671875), (0.705882352941177, 0.378125, 0.378125), (\n 0.709803921568627, 0.36953125, 0.36953125), (0.713725490196078, \n 0.3609375, 0.3609375), (0.717647058823529, 0.35234375, 0.35234375),\n (0.72156862745098, 0.34375, 0.34375), (0.725490196078431, \n 0.33515625, 0.33515625), (0.729411764705882, 0.3265625, 0.3265625),\n (0.733333333333333, 0.31796875, 0.31796875), (0.737254901960784, \n 0.309375, 0.309375), (0.741176470588235, 0.30078125, 0.30078125), (\n 0.745098039215686, 0.2921875, 0.2921875), (0.749019607843137, \n 0.28359375, 0.28359375), (0.752941176470588, 0.275, 0.275), (\n 0.756862745098039, 0.26640625, 0.26640625), (0.76078431372549, \n 0.2578125, 0.2578125), (0.764705882352941, 0.24921875, 0.24921875),\n (0.768627450980392, 0.240625, 0.240625), (0.772549019607843, \n 0.23203125, 0.23203125), (0.776470588235294, 0.2234375, 0.2234375),\n (0.780392156862745, 0.21484375, 0.21484375), (0.784313725490196, \n 0.222301171875, 0.222301171875), (0.788235294117647, 0.22975859375,\n 0.22975859375), (0.792156862745098, 0.237216015625, 0.237216015625),\n (0.796078431372549, 0.2446734375, 0.2446734375), (0.8, \n 0.252130859375, 0.252130859375), (0.803921568627451, 0.259587890625,\n 0.259587890625), (0.807843137254902, 0.2670453125, 0.2670453125), (\n 0.811764705882353, 0.274502734375, 0.274502734375), (\n 0.815686274509804, 0.28196015625, 0.28196015625), (\n 0.819607843137255, 0.289417578125, 0.289417578125), (\n 0.823529411764706, 0.296875, 0.296875), (0.827450980392157, \n 
0.304332421875, 0.304332421875), (0.831372549019608, 0.31178984375,\n 0.31178984375), (0.835294117647059, 0.319247265625, 0.319247265625),\n (0.83921568627451, 0.3267046875, 0.3267046875), (0.843137254901961,\n 0.334162109375, 0.334162109375), (0.847058823529412, 0.34161953125,\n 0.34161953125), (0.850980392156863, 0.3490765625, 0.3490765625), (\n 0.854901960784314, 0.356533984375, 0.356533984375), (\n 0.858823529411765, 0.36399140625, 0.36399140625), (\n 0.862745098039216, 0.371448828125, 0.371448828125), (\n 0.866666666666667, 0.37890625, 0.37890625), (0.870588235294118, \n 0.386363671875, 0.386363671875), (0.874509803921569, 0.3938203125, \n 0.3938203125), (0.87843137254902, 0.40127734375, 0.40127734375), (\n 0.882352941176471, 0.408734375, 0.408734375), (0.886274509803922, \n 0.41619140625, 0.41619140625), (0.890196078431373, 0.42365234375, \n 0.42365234375), (0.894117647058824, 0.431109375, 0.431109375), (\n 0.898039215686275, 0.43856640625, 0.43856640625), (\n 0.901960784313726, 0.4460234375, 0.4460234375), (0.905882352941176,\n 0.45348046875, 0.45348046875), (0.909803921568627, 0.4609375, \n 0.4609375), (0.913725490196078, 0.46839453125, 0.46839453125), (\n 0.917647058823529, 0.4758515625, 0.4758515625), (0.92156862745098, \n 0.48330859375, 0.48330859375), (0.925490196078431, 0.490765625, \n 0.490765625), (0.929411764705882, 0.49822265625, 0.49822265625), (\n 0.933333333333333, 0.50568359375, 0.50568359375), (\n 0.937254901960784, 0.513140625, 0.513140625), (0.941176470588235, \n 0.52059765625, 0.52059765625), (0.945098039215686, 0.5280546875, \n 0.5280546875), (0.949019607843137, 0.53551171875, 0.53551171875), (\n 0.952941176470588, 0.54296875, 0.54296875), (0.956862745098039, \n 0.55042578125, 0.55042578125), (0.96078431372549, 0.5578828125, \n 0.5578828125), (0.964705882352941, 0.56533984375, 0.56533984375), (\n 0.968627450980392, 0.572796875, 0.572796875), (0.972549019607843, \n 0.58025390625, 0.58025390625), (0.976470588235294, 0.58771484375, \n 0.58771484375), (0.980392156862745, 0.595171875, 0.595171875), (\n 0.984313725490196, 0.60262890625, 0.60262890625), (\n 0.988235294117647, 0.6100859375, 0.6100859375), (0.992156862745098,\n 0.61754296875, 0.61754296875), (0.996078431372549, 0.625, 0.625), (\n 1.0, 0.625, 0.625)), 'blue': ((0.0, 0.51984375, 0.51984375), (\n 0.00392156862745098, 0.51984375, 0.51984375), (0.00784313725490196,\n 0.529765625, 0.529765625), (0.0117647058823529, 0.5396875, \n 0.5396875), (0.0156862745098039, 0.549609375, 0.549609375), (\n 0.0196078431372549, 0.55953125, 0.55953125), (0.0235294117647059, \n 0.569453125, 0.569453125), (0.0274509803921569, 0.579375, 0.579375),\n (0.0313725490196078, 0.589296875, 0.589296875), (0.0352941176470588,\n 0.59921875, 0.59921875), (0.0392156862745098, 0.609140625, \n 0.609140625), (0.0431372549019608, 0.6190625, 0.6190625), (\n 0.0470588235294118, 0.628984375, 0.628984375), (0.0509803921568627,\n 0.63890625, 0.63890625), (0.0549019607843137, 0.648828125, \n 0.648828125), (0.0588235294117647, 0.65875, 0.65875), (\n 0.0627450980392157, 0.668671875, 0.668671875), (0.0666666666666667,\n 0.67859375, 0.67859375), (0.0705882352941176, 0.688515625, \n 0.688515625), (0.0745098039215686, 0.6984375, 0.6984375), (\n 0.0784313725490196, 0.708359375, 0.708359375), (0.0823529411764706,\n 0.71828125, 0.71828125), (0.0862745098039216, 0.728203125, \n 0.728203125), (0.0901960784313725, 0.738125, 0.738125), (\n 0.0941176470588235, 0.748046875, 0.748046875), (0.0980392156862745,\n 0.75796875, 0.75796875), (0.101960784313725, 0.767890625, 
\n 0.767890625), (0.105882352941176, 0.7778125, 0.7778125), (\n 0.109803921568627, 0.787734375, 0.787734375), (0.113725490196078, \n 0.79765625, 0.79765625), (0.117647058823529, 0.807578125, \n 0.807578125), (0.12156862745098, 0.8175, 0.8175), (\n 0.125490196078431, 0.827421875, 0.827421875), (0.129411764705882, \n 0.83734375, 0.83734375), (0.133333333333333, 0.847265625, \n 0.847265625), (0.137254901960784, 0.8571875, 0.8571875), (\n 0.141176470588235, 0.867109375, 0.867109375), (0.145098039215686, \n 0.87703125, 0.87703125), (0.149019607843137, 0.886953125, \n 0.886953125), (0.152941176470588, 0.896875, 0.896875), (\n 0.156862745098039, 0.906796875, 0.906796875), (0.16078431372549, \n 0.91671875, 0.91671875), (0.164705882352941, 0.926640625, \n 0.926640625), (0.168627450980392, 0.9365625, 0.9365625), (\n 0.172549019607843, 0.946484375, 0.946484375), (0.176470588235294, \n 0.95640625, 0.95640625), (0.180392156862745, 0.966328125, \n 0.966328125), (0.184313725490196, 0.97625, 0.97625), (\n 0.188235294117647, 0.986171875, 0.986171875), (0.192156862745098, \n 0.99609375, 0.99609375), (0.196078431372549, 0.976171875, \n 0.976171875), (0.2, 0.95625, 0.95625), (0.203921568627451, \n 0.936328125, 0.936328125), (0.207843137254902, 0.91640625, \n 0.91640625), (0.211764705882353, 0.896484375, 0.896484375), (\n 0.215686274509804, 0.8765625, 0.8765625), (0.219607843137255, \n 0.856640625, 0.856640625), (0.223529411764706, 0.83671875, \n 0.83671875), (0.227450980392157, 0.816796875, 0.816796875), (\n 0.231372549019608, 0.796875, 0.796875), (0.235294117647059, \n 0.776953125, 0.776953125), (0.23921568627451, 0.75703125, \n 0.75703125), (0.243137254901961, 0.737109375, 0.737109375), (\n 0.247058823529412, 0.7171875, 0.7171875), (0.250980392156863, \n 0.697265625, 0.697265625), (0.254901960784314, 0.67734375, \n 0.67734375), (0.258823529411765, 0.657421875, 0.657421875), (\n 0.262745098039216, 0.6375, 0.6375), (0.266666666666667, 0.617578125,\n 0.617578125), (0.270588235294118, 0.59765625, 0.59765625), (\n 0.274509803921569, 0.577734375, 0.577734375), (0.27843137254902, \n 0.5578125, 0.5578125), (0.282352941176471, 0.537890625, 0.537890625\n ), (0.286274509803922, 0.51796875, 0.51796875), (0.290196078431373,\n 0.498046875, 0.498046875), (0.294117647058824, 0.478125, 0.478125),\n (0.298039215686275, 0.458203125, 0.458203125), (0.301960784313725, \n 0.43828125, 0.43828125), (0.305882352941176, 0.418359375, \n 0.418359375), (0.309803921568627, 0.3984375, 0.3984375), (\n 0.313725490196078, 0.378515625, 0.378515625), (0.317647058823529, \n 0.35859375, 0.35859375), (0.32156862745098, 0.338671875, \n 0.338671875), (0.325490196078431, 0.31875, 0.31875), (\n 0.329411764705882, 0.298828125, 0.298828125), (0.333333333333333, \n 0.27890625, 0.27890625), (0.337254901960784, 0.258984375, \n 0.258984375), (0.341176470588235, 0.2390625, 0.2390625), (\n 0.345098039215686, 0.219140625, 0.219140625), (0.349019607843137, \n 0.19921875, 0.19921875), (0.352941176470588, 0.179296875, \n 0.179296875), (0.356862745098039, 0.159375, 0.159375), (\n 0.36078431372549, 0.139453125, 0.139453125), (0.364705882352941, \n 0.11953125, 0.11953125), (0.368627450980392, 0.099609375, \n 0.099609375), (0.372549019607843, 0.0796875, 0.0796875), (\n 0.376470588235294, 0.059765625, 0.059765625), (0.380392156862745, \n 0.03984375, 0.03984375), (0.384313725490196, 0.019921875, \n 0.019921875), (0.388235294117647, 0, 0), (0.392156862745098, 0, 0),\n (0.396078431372549, 0, 0), (0.4, 0, 0), (0.403921568627451, 0, 0),\n (0.407843137254902, 0, 0), 
(0.411764705882353, 0, 0), (\n 0.415686274509804, 0, 0), (0.419607843137255, 0, 0), (\n 0.423529411764706, 0, 0), (0.427450980392157, 0, 0), (\n 0.431372549019608, 0, 0), (0.435294117647059, 0, 0), (\n 0.43921568627451, 0, 0), (0.443137254901961, 0, 0), (\n 0.447058823529412, 0, 0), (0.450980392156863, 0, 0), (\n 0.454901960784314, 0, 0), (0.458823529411765, 0, 0), (\n 0.462745098039216, 0, 0), (0.466666666666667, 0, 0), (\n 0.470588235294118, 0, 0), (0.474509803921569, 0, 0), (\n 0.47843137254902, 0, 0), (0.482352941176471, 0, 0), (\n 0.486274509803922, 0, 0), (0.490196078431373, 0, 0), (\n 0.494117647058824, 0, 0), (0.498039215686275, 0, 0), (\n 0.501960784313725, 0, 0), (0.505882352941176, 0, 0), (\n 0.509803921568627, 0, 0), (0.513725490196078, 0, 0), (\n 0.517647058823529, 0, 0), (0.52156862745098, 0, 0), (\n 0.525490196078431, 0, 0), (0.529411764705882, 0, 0), (\n 0.533333333333333, 0, 0), (0.537254901960784, 0, 0), (\n 0.541176470588235, 0, 0), (0.545098039215686, 0, 0), (\n 0.549019607843137, 0, 0), (0.552941176470588, 0, 0), (\n 0.556862745098039, 0, 0), (0.56078431372549, 0, 0), (\n 0.564705882352941, 0, 0), (0.568627450980392, 0, 0), (\n 0.572549019607843, 0, 0), (0.576470588235294, 0, 0), (\n 0.580392156862745, 0, 0), (0.584313725490196, 0, 0), (\n 0.588235294117647, 0.004296875, 0.004296875), (0.592156862745098, \n 0.00859375, 0.00859375), (0.596078431372549, 0.012890625, \n 0.012890625), (0.6, 0.0171875, 0.0171875), (0.603921568627451, \n 0.021484375, 0.021484375), (0.607843137254902, 0.02578125, \n 0.02578125), (0.611764705882353, 0.030078125, 0.030078125), (\n 0.615686274509804, 0.034375, 0.034375), (0.619607843137255, \n 0.038671875, 0.038671875), (0.623529411764706, 0.04296875, \n 0.04296875), (0.627450980392157, 0.047265625, 0.047265625), (\n 0.631372549019608, 0.0515625, 0.0515625), (0.635294117647059, \n 0.055859375, 0.055859375), (0.63921568627451, 0.06015625, \n 0.06015625), (0.643137254901961, 0.064453125, 0.064453125), (\n 0.647058823529412, 0.06875, 0.06875), (0.650980392156863, \n 0.073046875, 0.073046875), (0.654901960784314, 0.07734375, \n 0.07734375), (0.658823529411765, 0.081640625, 0.081640625), (\n 0.662745098039216, 0.0859375, 0.0859375), (0.666666666666667, \n 0.090234375, 0.090234375), (0.670588235294118, 0.09453125, \n 0.09453125), (0.674509803921569, 0.098828125, 0.098828125), (\n 0.67843137254902, 0.103125, 0.103125), (0.682352941176471, \n 0.107421875, 0.107421875), (0.686274509803922, 0.11171875, \n 0.11171875), (0.690196078431373, 0.116015625, 0.116015625), (\n 0.694117647058824, 0.1203125, 0.1203125), (0.698039215686274, \n 0.124609375, 0.124609375), (0.701960784313725, 0.12890625, \n 0.12890625), (0.705882352941177, 0.133203125, 0.133203125), (\n 0.709803921568627, 0.1375, 0.1375), (0.713725490196078, 0.141796875,\n 0.141796875), (0.717647058823529, 0.14609375, 0.14609375), (\n 0.72156862745098, 0.150390625, 0.150390625), (0.725490196078431, \n 0.1546875, 0.1546875), (0.729411764705882, 0.158984375, 0.158984375\n ), (0.733333333333333, 0.16328125, 0.16328125), (0.737254901960784,\n 0.167578125, 0.167578125), (0.741176470588235, 0.171875, 0.171875),\n (0.745098039215686, 0.176171875, 0.176171875), (0.749019607843137, \n 0.18046875, 0.18046875), (0.752941176470588, 0.184765625, \n 0.184765625), (0.756862745098039, 0.1890625, 0.1890625), (\n 0.76078431372549, 0.193359375, 0.193359375), (0.764705882352941, \n 0.19765625, 0.19765625), (0.768627450980392, 0.201953125, \n 0.201953125), (0.772549019607843, 0.20625, 0.20625), (\n 0.776470588235294, 
0.210546875, 0.210546875), (0.780392156862745, \n 0.21484375, 0.21484375), (0.784313725490196, 0.22663359375, \n 0.22663359375), (0.788235294117647, 0.2384234375, 0.2384234375), (\n 0.792156862745098, 0.250212890625, 0.250212890625), (\n 0.796078431372549, 0.262002734375, 0.262002734375), (0.8, \n 0.273792578125, 0.273792578125), (0.803921568627451, 0.285582421875,\n 0.285582421875), (0.807843137254902, 0.297372265625, 0.297372265625\n ), (0.811764705882353, 0.309162109375, 0.309162109375), (\n 0.815686274509804, 0.3209515625, 0.3209515625), (0.819607843137255,\n 0.33274140625, 0.33274140625), (0.823529411764706, 0.34453125, \n 0.34453125), (0.827450980392157, 0.35632109375, 0.35632109375), (\n 0.831372549019608, 0.3681109375, 0.3681109375), (0.835294117647059,\n 0.379900390625, 0.379900390625), (0.83921568627451, 0.39169140625, \n 0.39169140625), (0.843137254901961, 0.40348046875, 0.40348046875),\n (0.847058823529412, 0.41526953125, 0.41526953125), (\n 0.850980392156863, 0.42705859375, 0.42705859375), (\n 0.854901960784314, 0.43884765625, 0.43884765625), (\n 0.858823529411765, 0.450640625, 0.450640625), (0.862745098039216, \n 0.4624296875, 0.4624296875), (0.866666666666667, 0.47421875, \n 0.47421875), (0.870588235294118, 0.4860078125, 0.4860078125), (\n 0.874509803921569, 0.497796875, 0.497796875), (0.87843137254902, \n 0.50958984375, 0.50958984375), (0.882352941176471, 0.52137890625, \n 0.52137890625), (0.886274509803922, 0.53316796875, 0.53316796875),\n (0.890196078431373, 0.54495703125, 0.54495703125), (\n 0.894117647058824, 0.55674609375, 0.55674609375), (\n 0.898039215686275, 0.56853515625, 0.56853515625), (\n 0.901960784313726, 0.580328125, 0.580328125), (0.905882352941176, \n 0.5921171875, 0.5921171875), (0.909803921568627, 0.60390625, \n 0.60390625), (0.913725490196078, 0.6156953125, 0.6156953125), (\n 0.917647058823529, 0.627484375, 0.627484375), (0.92156862745098, \n 0.63927734375, 0.63927734375), (0.925490196078431, 0.65106640625, \n 0.65106640625), (0.929411764705882, 0.66285546875, 0.66285546875),\n (0.933333333333333, 0.67464453125, 0.67464453125), (\n 0.937254901960784, 0.68643359375, 0.68643359375), (\n 0.941176470588235, 0.69822265625, 0.69822265625), (\n 0.945098039215686, 0.710015625, 0.710015625), (0.949019607843137, \n 0.7218046875, 0.7218046875), (0.952941176470588, 0.73359375, \n 0.73359375), (0.956862745098039, 0.7453828125, 0.7453828125), (\n 0.96078431372549, 0.757171875, 0.757171875), (0.964705882352941, \n 0.76896484375, 0.76896484375), (0.968627450980392, 0.78075390625, \n 0.78075390625), (0.972549019607843, 0.79254296875, 0.79254296875),\n (0.976470588235294, 0.80433203125, 0.80433203125), (\n 0.980392156862745, 0.81612109375, 0.81612109375), (\n 0.984313725490196, 0.82791015625, 0.82791015625), (\n 0.988235294117647, 0.839703125, 0.839703125), (0.992156862745098, \n 0.8514921875, 0.8514921875), (0.996078431372549, 0.86328125, \n 0.86328125), (1.0, 0.86328125, 0.86328125))}\n vcdict = {'red': ((0, 1, 1), (0.00392156862745098, 0.54508984375, \n 0.54508984375), (0.00784313725490196, 0.5285703125, 0.5285703125),\n (0.0117647058823529, 0.5120546875, 0.5120546875), (\n 0.0156862745098039, 0.49553515625, 0.49553515625), (\n 0.0196078431372549, 0.47901953125, 0.47901953125), (\n 0.0235294117647059, 0.4625, 0.4625), (0.0274509803921569, \n 0.44598046875, 0.44598046875), (0.0313725490196078, 0.42946484375, \n 0.42946484375), (0.0352941176470588, 0.4129453125, 0.4129453125), (\n 0.0392156862745098, 0.3964296875, 0.3964296875), (\n 0.0431372549019608, 0.379910546875, 
0.379910546875), (\n 0.0470588235294118, 0.36339296875, 0.36339296875), (\n 0.0509803921568627, 0.346875, 0.346875), (0.0549019607843137, \n 0.33035703125, 0.33035703125), (0.0588235294117647, 0.313839453125,\n 0.313839453125), (0.0627450980392157, 0.297321484375, \n 0.297321484375), (0.0666666666666667, 0.280803515625, \n 0.280803515625), (0.0705882352941176, 0.2642859375, 0.2642859375),\n (0.0745098039215686, 0.24776796875, 0.24776796875), (\n 0.0784313725490196, 0.23125, 0.23125), (0.0823529411764706, \n 0.21473203125, 0.21473203125), (0.0862745098039216, 0.198214453125,\n 0.198214453125), (0.0901960784313725, 0.181696484375, \n 0.181696484375), (0.0941176470588235, 0.165178515625, \n 0.165178515625), (0.0980392156862745, 0.148660546875, \n 0.148660546875), (0.101960784313725, 0.13214296875, 0.13214296875),\n (0.105882352941176, 0.115625, 0.115625), (0.109803921568627, \n 0.09910703125, 0.09910703125), (0.113725490196078, 0.082589453125, \n 0.082589453125), (0.117647058823529, 0.066071484375, 0.066071484375\n ), (0.12156862745098, 0.049553515625, 0.049553515625), (\n 0.125490196078431, 0.0330357421875, 0.0330357421875), (\n 0.129411764705882, 0.016517890625, 0.016517890625), (\n 0.133333333333333, 0, 0), (0.137254901960784, 0, 0), (\n 0.141176470588235, 0, 0), (0.145098039215686, 0, 0), (\n 0.149019607843137, 0, 0), (0.152941176470588, 0, 0), (\n 0.156862745098039, 0, 0), (0.16078431372549, 0, 0), (\n 0.164705882352941, 0, 0), (0.168627450980392, 0, 0), (\n 0.172549019607843, 0, 0), (0.176470588235294, 0, 0), (\n 0.180392156862745, 0, 0), (0.184313725490196, 0, 0), (\n 0.188235294117647, 0, 0), (0.192156862745098, 0, 0), (\n 0.196078431372549, 0, 0), (0.2, 0, 0), (0.203921568627451, 0, 0), (\n 0.207843137254902, 0, 0), (0.211764705882353, 0, 0), (\n 0.215686274509804, 0, 0), (0.219607843137255, 0, 0), (\n 0.223529411764706, 0, 0), (0.227450980392157, 0, 0), (\n 0.231372549019608, 0, 0), (0.235294117647059, 0, 0), (\n 0.23921568627451, 0, 0), (0.243137254901961, 0, 0), (\n 0.247058823529412, 0, 0), (0.250980392156863, 0, 0), (\n 0.254901960784314, 0, 0), (0.258823529411765, 0, 0), (\n 0.262745098039216, 0, 0), (0.266666666666667, 0, 0), (\n 0.270588235294118, 0, 0), (0.274509803921569, 0, 0), (\n 0.27843137254902, 0, 0), (0.282352941176471, 0, 0), (\n 0.286274509803922, 0, 0), (0.290196078431373, 0, 0), (\n 0.294117647058824, 0, 0), (0.298039215686275, 0, 0), (\n 0.301960784313725, 0, 0), (0.305882352941176, 0, 0), (\n 0.309803921568627, 0, 0), (0.313725490196078, 0, 0), (\n 0.317647058823529, 0, 0), (0.32156862745098, 0, 0), (\n 0.325490196078431, 0, 0), (0.329411764705882, 0, 0), (\n 0.333333333333333, 0, 0), (0.337254901960784, 0, 0), (\n 0.341176470588235, 0, 0), (0.345098039215686, 0, 0), (\n 0.349019607843137, 0, 0), (0.352941176470588, 0.0061383984375, \n 0.0061383984375), (0.356862745098039, 0.012276796875, \n 0.012276796875), (0.36078431372549, 0.0184151953125, \n 0.0184151953125), (0.364705882352941, 0.0245535546875, \n 0.0245535546875), (0.368627450980392, 0.030691953125, \n 0.030691953125), (0.372549019607843, 0.0368303515625, \n 0.0368303515625), (0.376470588235294, 0.04296875, 0.04296875), (\n 0.380392156862745, 0.04910703125, 0.04910703125), (\n 0.384313725490196, 0.055245703125, 0.055245703125), (\n 0.388235294117647, 0.061383984375, 0.061383984375), (\n 0.392156862745098, 0.067522265625, 0.067522265625), (\n 0.396078431372549, 0.073660546875, 0.073660546875), (0.4, \n 0.07979921875, 0.07979921875), (0.403921568627451, 0.0859375, \n 0.0859375), (0.407843137254902, 
0.09207578125, 0.09207578125), (\n 0.411764705882353, 0.098214453125, 0.098214453125), (\n 0.415686274509804, 0.104352734375, 0.104352734375), (\n 0.419607843137255, 0.110491015625, 0.110491015625), (\n 0.423529411764706, 0.116629296875, 0.116629296875), (\n 0.427450980392157, 0.12276796875, 0.12276796875), (\n 0.431372549019608, 0.12890625, 0.12890625), (0.435294117647059, \n 0.13504453125, 0.13504453125), (0.43921568627451, 0.141183203125, \n 0.141183203125), (0.443137254901961, 0.147321484375, 0.147321484375\n ), (0.447058823529412, 0.153459765625, 0.153459765625), (\n 0.450980392156863, 0.159598046875, 0.159598046875), (\n 0.454901960784314, 0.16573671875, 0.16573671875), (\n 0.458823529411765, 0.171875, 0.171875), (0.462745098039216, \n 0.17801328125, 0.17801328125), (0.466666666666667, 0.184151953125, \n 0.184151953125), (0.470588235294118, 0.190290234375, 0.190290234375\n ), (0.474509803921569, 0.196428515625, 0.196428515625), (\n 0.47843137254902, 0.202566796875, 0.202566796875), (\n 0.482352941176471, 0.20870546875, 0.20870546875), (\n 0.486274509803922, 0.21484375, 0.21484375), (0.490196078431373, \n 0.233370703125, 0.233370703125), (0.494117647058824, 0.251897265625,\n 0.251897265625), (0.498039215686275, 0.27042421875, 0.27042421875),\n (0.501960784313725, 0.28895078125, 0.28895078125), (\n 0.505882352941176, 0.307477734375, 0.307477734375), (\n 0.509803921568627, 0.326004296875, 0.326004296875), (\n 0.513725490196078, 0.34453125, 0.34453125), (0.517647058823529, \n 0.363058203125, 0.363058203125), (0.52156862745098, 0.381584765625,\n 0.381584765625), (0.525490196078431, 0.40011328125, 0.40011328125),\n (0.529411764705882, 0.41863671875, 0.41863671875), (\n 0.533333333333333, 0.4371640625, 0.4371640625), (0.537254901960784,\n 0.45569140625, 0.45569140625), (0.541176470588235, 0.47421875, \n 0.47421875), (0.545098039215686, 0.49274609375, 0.49274609375), (\n 0.549019607843137, 0.5112734375, 0.5112734375), (0.552941176470588,\n 0.52980078125, 0.52980078125), (0.556862745098039, 0.54832421875, \n 0.54832421875), (0.56078431372549, 0.5668515625, 0.5668515625), (\n 0.564705882352941, 0.58537890625, 0.58537890625), (\n 0.568627450980392, 0.60390625, 0.60390625), (0.572549019607843, \n 0.62243359375, 0.62243359375), (0.576470588235294, 0.6409609375, \n 0.6409609375), (0.580392156862745, 0.65948828125, 0.65948828125), (\n 0.584313725490196, 0.67801171875, 0.67801171875), (\n 0.588235294117647, 0.6965390625, 0.6965390625), (0.592156862745098,\n 0.71506640625, 0.71506640625), (0.596078431372549, 0.73359375, \n 0.73359375), (0.6, 0.75212109375, 0.75212109375), (\n 0.603921568627451, 0.7706484375, 0.7706484375), (0.607843137254902,\n 0.78917578125, 0.78917578125), (0.611764705882353, 0.80769921875, \n 0.80769921875), (0.615686274509804, 0.8262265625, 0.8262265625), (\n 0.619607843137255, 0.84475390625, 0.84475390625), (\n 0.623529411764706, 0.86328125, 0.86328125), (0.627450980392157, \n 0.86549609375, 0.86549609375), (0.631372549019608, 0.86770703125, \n 0.86770703125), (0.635294117647059, 0.869921875, 0.869921875), (\n 0.63921568627451, 0.87213671875, 0.87213671875), (0.643137254901961,\n 0.87434765625, 0.87434765625), (0.647058823529412, 0.8765625, \n 0.8765625), (0.650980392156863, 0.87877734375, 0.87877734375), (\n 0.654901960784314, 0.88098828125, 0.88098828125), (\n 0.658823529411765, 0.883203125, 0.883203125), (0.662745098039216, \n 0.88541796875, 0.88541796875), (0.666666666666667, 0.88762890625, \n 0.88762890625), (0.670588235294118, 0.88984375, 0.88984375), (\n 
0.674509803921569, 0.89205859375, 0.89205859375), (0.67843137254902,\n 0.89426953125, 0.89426953125), (0.682352941176471, 0.896484375, \n 0.896484375), (0.686274509803922, 0.89869921875, 0.89869921875), (\n 0.690196078431373, 0.90091015625, 0.90091015625), (\n 0.694117647058824, 0.903125, 0.903125), (0.698039215686274, \n 0.90533984375, 0.90533984375), (0.701960784313725, 0.90755078125, \n 0.90755078125), (0.705882352941177, 0.909765625, 0.909765625), (\n 0.709803921568627, 0.91198046875, 0.91198046875), (\n 0.713725490196078, 0.91419140625, 0.91419140625), (\n 0.717647058823529, 0.91640625, 0.91640625), (0.72156862745098, \n 0.91862109375, 0.91862109375), (0.725490196078431, 0.92083203125, \n 0.92083203125), (0.729411764705882, 0.923046875, 0.923046875), (\n 0.733333333333333, 0.92526171875, 0.92526171875), (\n 0.737254901960784, 0.92747265625, 0.92747265625), (\n 0.741176470588235, 0.9296875, 0.9296875), (0.745098039215686, \n 0.93190234375, 0.93190234375), (0.749019607843137, 0.93411328125, \n 0.93411328125), (0.752941176470588, 0.936328125, 0.936328125), (\n 0.756862745098039, 0.93854296875, 0.93854296875), (0.76078431372549,\n 0.94075390625, 0.94075390625), (0.764705882352941, 0.94296875, \n 0.94296875), (0.768627450980392, 0.94518359375, 0.94518359375), (\n 0.772549019607843, 0.94739453125, 0.94739453125), (\n 0.776470588235294, 0.949609375, 0.949609375), (0.780392156862745, \n 0.95182421875, 0.95182421875), (0.784313725490196, 0.95403515625, \n 0.95403515625), (0.788235294117647, 0.95625, 0.95625), (\n 0.792156862745098, 0.95846484375, 0.95846484375), (\n 0.796078431372549, 0.96067578125, 0.96067578125), (0.8, 0.962890625,\n 0.962890625), (0.803921568627451, 0.96510546875, 0.96510546875), (\n 0.807843137254902, 0.96731640625, 0.96731640625), (\n 0.811764705882353, 0.96953125, 0.96953125), (0.815686274509804, \n 0.97174609375, 0.97174609375), (0.819607843137255, 0.97395703125, \n 0.97395703125), (0.823529411764706, 0.976171875, 0.976171875), (\n 0.827450980392157, 0.97838671875, 0.97838671875), (\n 0.831372549019608, 0.98059765625, 0.98059765625), (\n 0.835294117647059, 0.9828125, 0.9828125), (0.83921568627451, \n 0.98502734375, 0.98502734375), (0.843137254901961, 0.98723828125, \n 0.98723828125), (0.847058823529412, 0.989453125, 0.989453125), (\n 0.850980392156863, 0.99166796875, 0.99166796875), (\n 0.854901960784314, 0.99387890625, 0.99387890625), (\n 0.858823529411765, 0.99609375, 0.99609375), (0.862745098039216, \n 0.99609375, 0.99609375), (0.866666666666667, 0.99609375, 0.99609375\n ), (0.870588235294118, 0.99609375, 0.99609375), (0.874509803921569,\n 0.99609375, 0.99609375), (0.87843137254902, 0.99609375, 0.99609375),\n (0.882352941176471, 0.99609375, 0.99609375), (0.886274509803922, \n 0.99609375, 0.99609375), (0.890196078431373, 0.99609375, 0.99609375\n ), (0.894117647058824, 0.99609375, 0.99609375), (0.898039215686275,\n 0.99609375, 0.99609375), (0.901960784313726, 0.99609375, 0.99609375\n ), (0.905882352941176, 0.99609375, 0.99609375), (0.909803921568627,\n 0.99609375, 0.99609375), (0.913725490196078, 0.99609375, 0.99609375\n ), (0.917647058823529, 0.99609375, 0.99609375), (0.92156862745098, \n 0.99609375, 0.99609375), (0.925490196078431, 0.99609375, 0.99609375\n ), (0.929411764705882, 0.99609375, 0.99609375), (0.933333333333333,\n 0.99609375, 0.99609375), (0.937254901960784, 0.99609375, 0.99609375\n ), (0.941176470588235, 0.99609375, 0.99609375), (0.945098039215686,\n 0.99609375, 0.99609375), (0.949019607843137, 0.99609375, 0.99609375\n ), (0.952941176470588, 0.99609375, 
0.99609375), (0.956862745098039,\n 0.99609375, 0.99609375), (0.96078431372549, 0.99609375, 0.99609375),\n (0.964705882352941, 0.99609375, 0.99609375), (0.968627450980392, \n 0.99609375, 0.99609375), (0.972549019607843, 0.99609375, 0.99609375\n ), (0.976470588235294, 0.99609375, 0.99609375), (0.980392156862745,\n 0.99609375, 0.99609375), (0.984313725490196, 0.99609375, 0.99609375\n ), (0.988235294117647, 0.99609375, 0.99609375), (0.992156862745098,\n 0.99609375, 0.99609375), (0.996078431372549, 0.99609375, 0.99609375\n ), (1, 0.99609375, 0.99609375)), 'green': ((0, 1, 1), (\n 0.00392156862745098, 0, 0), (0.00784313725490196, 0, 0), (\n 0.0117647058823529, 0, 0), (0.0156862745098039, 0, 0), (\n 0.0196078431372549, 0, 0), (0.0235294117647059, 0, 0), (\n 0.0274509803921569, 0, 0), (0.0313725490196078, 0, 0), (\n 0.0352941176470588, 0, 0), (0.0392156862745098, 0, 0), (\n 0.0431372549019608, 0, 0), (0.0470588235294118, 0, 0), (\n 0.0509803921568627, 0, 0), (0.0549019607843137, 0, 0), (\n 0.0588235294117647, 0, 0), (0.0627450980392157, 0, 0), (\n 0.0666666666666667, 0, 0), (0.0705882352941176, 0, 0), (\n 0.0745098039215686, 0, 0), (0.0784313725490196, 0, 0), (\n 0.0823529411764706, 0, 0), (0.0862745098039216, 0, 0), (\n 0.0901960784313725, 0, 0), (0.0941176470588235, 0, 0), (\n 0.0980392156862745, 0, 0), (0.101960784313725, 0, 0), (\n 0.105882352941176, 0, 0), (0.109803921568627, 0, 0), (\n 0.113725490196078, 0, 0), (0.117647058823529, 0, 0), (\n 0.12156862745098, 0, 0), (0.125490196078431, 0, 0), (\n 0.129411764705882, 0, 0), (0.133333333333333, 0, 0), (\n 0.137254901960784, 0.0135653515625, 0.0135653515625), (\n 0.141176470588235, 0.0271306640625, 0.0271306640625), (\n 0.145098039215686, 0.04069609375, 0.04069609375), (\n 0.149019607843137, 0.054261328125, 0.054261328125), (\n 0.152941176470588, 0.0678265625, 0.0678265625), (0.156862745098039,\n 0.0813921875, 0.0813921875), (0.16078431372549, 0.094957421875, \n 0.094957421875), (0.164705882352941, 0.10852265625, 0.10852265625),\n (0.168627450980392, 0.122087890625, 0.122087890625), (\n 0.172549019607843, 0.135653515625, 0.135653515625), (\n 0.176470588235294, 0.14921875, 0.14921875), (0.180392156862745, \n 0.162783984375, 0.162783984375), (0.184313725490196, 0.176349609375,\n 0.176349609375), (0.188235294117647, 0.18991484375, 0.18991484375),\n (0.192156862745098, 0.203480078125, 0.203480078125), (\n 0.196078431372549, 0.2170453125, 0.2170453125), (0.2, 0.2306109375,\n 0.2306109375), (0.203921568627451, 0.244176171875, 0.244176171875),\n (0.207843137254902, 0.25774140625, 0.25774140625), (\n 0.211764705882353, 0.27130703125, 0.27130703125), (\n 0.215686274509804, 0.284872265625, 0.284872265625), (\n 0.219607843137255, 0.2984375, 0.2984375), (0.223529411764706, \n 0.312002734375, 0.312002734375), (0.227450980392157, 0.325568359375,\n 0.325568359375), (0.231372549019608, 0.33913359375, 0.33913359375),\n (0.235294117647059, 0.352698828125, 0.352698828125), (\n 0.23921568627451, 0.3662640625, 0.3662640625), (0.243137254901961, \n 0.3798296875, 0.3798296875), (0.247058823529412, 0.39339453125, \n 0.39339453125), (0.250980392156863, 0.4069609375, 0.4069609375), (\n 0.254901960784314, 0.42052734375, 0.42052734375), (\n 0.258823529411765, 0.43408984375, 0.43408984375), (\n 0.262745098039216, 0.44765625, 0.44765625), (0.266666666666667, \n 0.46122265625, 0.46122265625), (0.270588235294118, 0.47478515625, \n 0.47478515625), (0.274509803921569, 0.4883515625, 0.4883515625), (\n 0.27843137254902, 0.50191796875, 0.50191796875), (0.282352941176471,\n 
0.515484375, 0.515484375), (0.286274509803922, 0.529046875, \n 0.529046875), (0.290196078431373, 0.54261328125, 0.54261328125), (\n 0.294117647058824, 0.5561796875, 0.5561796875), (0.298039215686275,\n 0.56974609375, 0.56974609375), (0.301960784313725, 0.58330859375, \n 0.58330859375), (0.305882352941176, 0.596875, 0.596875), (\n 0.309803921568627, 0.61044140625, 0.61044140625), (\n 0.313725490196078, 0.62400390625, 0.62400390625), (\n 0.317647058823529, 0.6375703125, 0.6375703125), (0.32156862745098, \n 0.65113671875, 0.65113671875), (0.325490196078431, 0.664703125, \n 0.664703125), (0.329411764705882, 0.678265625, 0.678265625), (\n 0.333333333333333, 0.69183203125, 0.69183203125), (\n 0.337254901960784, 0.7053984375, 0.7053984375), (0.341176470588235,\n 0.71896484375, 0.71896484375), (0.345098039215686, 0.73252734375, \n 0.73252734375), (0.349019607843137, 0.74609375, 0.74609375), (\n 0.352941176470588, 0.7309140625, 0.7309140625), (0.356862745098039,\n 0.71573828125, 0.71573828125), (0.36078431372549, 0.70055859375, \n 0.70055859375), (0.364705882352941, 0.68537890625, 0.68537890625),\n (0.368627450980392, 0.67019921875, 0.67019921875), (\n 0.372549019607843, 0.6550234375, 0.6550234375), (0.376470588235294,\n 0.63984375, 0.63984375), (0.380392156862745, 0.6246640625, \n 0.6246640625), (0.384313725490196, 0.60948828125, 0.60948828125), (\n 0.388235294117647, 0.59430859375, 0.59430859375), (\n 0.392156862745098, 0.57912890625, 0.57912890625), (\n 0.396078431372549, 0.56394921875, 0.56394921875), (0.4, \n 0.5487734375, 0.5487734375), (0.403921568627451, 0.53359375, \n 0.53359375), (0.407843137254902, 0.5184140625, 0.5184140625), (\n 0.411764705882353, 0.50323828125, 0.50323828125), (\n 0.415686274509804, 0.48805859375, 0.48805859375), (\n 0.419607843137255, 0.47287890625, 0.47287890625), (\n 0.423529411764706, 0.45769921875, 0.45769921875), (\n 0.427450980392157, 0.4425234375, 0.4425234375), (0.431372549019608,\n 0.42734375, 0.42734375), (0.435294117647059, 0.4121640625, \n 0.4121640625), (0.43921568627451, 0.39698828125, 0.39698828125), (\n 0.443137254901961, 0.381808203125, 0.381808203125), (\n 0.447058823529412, 0.366629296875, 0.366629296875), (\n 0.450980392156863, 0.35145078125, 0.35145078125), (\n 0.454901960784314, 0.336272265625, 0.336272265625), (\n 0.458823529411765, 0.32109375, 0.32109375), (0.462745098039216, \n 0.305915234375, 0.305915234375), (0.466666666666667, 0.29073671875,\n 0.29073671875), (0.470588235294118, 0.2755578125, 0.2755578125), (\n 0.474509803921569, 0.260379296875, 0.260379296875), (\n 0.47843137254902, 0.24520078125, 0.24520078125), (0.482352941176471,\n 0.230022265625, 0.230022265625), (0.486274509803922, 0.21484375, \n 0.21484375), (0.490196078431373, 0.2265625, 0.2265625), (\n 0.494117647058824, 0.23828125, 0.23828125), (0.498039215686275, \n 0.25, 0.25), (0.501960784313725, 0.26171875, 0.26171875), (\n 0.505882352941176, 0.2734375, 0.2734375), (0.509803921568627, \n 0.28515625, 0.28515625), (0.513725490196078, 0.296875, 0.296875), (\n 0.517647058823529, 0.30859375, 0.30859375), (0.52156862745098, \n 0.3203125, 0.3203125), (0.525490196078431, 0.33203125, 0.33203125),\n (0.529411764705882, 0.34375, 0.34375), (0.533333333333333, \n 0.35546875, 0.35546875), (0.537254901960784, 0.3671875, 0.3671875),\n (0.541176470588235, 0.37890625, 0.37890625), (0.545098039215686, \n 0.390625, 0.390625), (0.549019607843137, 0.40234375, 0.40234375), (\n 0.552941176470588, 0.4140625, 0.4140625), (0.556862745098039, \n 0.42578125, 0.42578125), (0.56078431372549, 0.4375, 
0.4375), (\n 0.564705882352941, 0.44921875, 0.44921875), (0.568627450980392, \n 0.4609375, 0.4609375), (0.572549019607843, 0.47265625, 0.47265625),\n (0.576470588235294, 0.484375, 0.484375), (0.580392156862745, \n 0.49609375, 0.49609375), (0.584313725490196, 0.5078125, 0.5078125),\n (0.588235294117647, 0.51953125, 0.51953125), (0.592156862745098, \n 0.53125, 0.53125), (0.596078431372549, 0.54296875, 0.54296875), (\n 0.6, 0.5546875, 0.5546875), (0.603921568627451, 0.56640625, \n 0.56640625), (0.607843137254902, 0.578125, 0.578125), (\n 0.611764705882353, 0.58984375, 0.58984375), (0.615686274509804, \n 0.6015625, 0.6015625), (0.619607843137255, 0.61328125, 0.61328125),\n (0.623529411764706, 0.625, 0.625), (0.627450980392157, \n 0.61458203125, 0.61458203125), (0.631372549019608, 0.60416796875, \n 0.60416796875), (0.635294117647059, 0.59375, 0.59375), (\n 0.63921568627451, 0.58333203125, 0.58333203125), (0.643137254901961,\n 0.57291796875, 0.57291796875), (0.647058823529412, 0.5625, 0.5625),\n (0.650980392156863, 0.55208203125, 0.55208203125), (\n 0.654901960784314, 0.54166796875, 0.54166796875), (\n 0.658823529411765, 0.53125, 0.53125), (0.662745098039216, \n 0.52083203125, 0.52083203125), (0.666666666666667, 0.51041796875, \n 0.51041796875), (0.670588235294118, 0.5, 0.5), (0.674509803921569, \n 0.48958203125, 0.48958203125), (0.67843137254902, 0.47916796875, \n 0.47916796875), (0.682352941176471, 0.46875, 0.46875), (\n 0.686274509803922, 0.45833203125, 0.45833203125), (\n 0.690196078431373, 0.44791796875, 0.44791796875), (\n 0.694117647058824, 0.4375, 0.4375), (0.698039215686274, \n 0.42708203125, 0.42708203125), (0.701960784313725, 0.41666796875, \n 0.41666796875), (0.705882352941177, 0.40625, 0.40625), (\n 0.709803921568627, 0.39583203125, 0.39583203125), (\n 0.713725490196078, 0.385416796875, 0.385416796875), (\n 0.717647058823529, 0.375, 0.375), (0.72156862745098, 0.364583203125,\n 0.364583203125), (0.725490196078431, 0.354166796875, 0.354166796875\n ), (0.729411764705882, 0.34375, 0.34375), (0.733333333333333, \n 0.333333203125, 0.333333203125), (0.737254901960784, 0.322916796875,\n 0.322916796875), (0.741176470588235, 0.3125, 0.3125), (\n 0.745098039215686, 0.302083203125, 0.302083203125), (\n 0.749019607843137, 0.291666796875, 0.291666796875), (\n 0.752941176470588, 0.28125, 0.28125), (0.756862745098039, \n 0.270833203125, 0.270833203125), (0.76078431372549, 0.260416796875,\n 0.260416796875), (0.764705882352941, 0.25, 0.25), (\n 0.768627450980392, 0.239583203125, 0.239583203125), (\n 0.772549019607843, 0.229166796875, 0.229166796875), (\n 0.776470588235294, 0.21875, 0.21875), (0.780392156862745, \n 0.208333203125, 0.208333203125), (0.784313725490196, 0.197916796875,\n 0.197916796875), (0.788235294117647, 0.1875, 0.1875), (\n 0.792156862745098, 0.177083203125, 0.177083203125), (\n 0.796078431372549, 0.166666796875, 0.166666796875), (0.8, 0.15625, \n 0.15625), (0.803921568627451, 0.145833203125, 0.145833203125), (\n 0.807843137254902, 0.135416796875, 0.135416796875), (\n 0.811764705882353, 0.125, 0.125), (0.815686274509804, \n 0.114583203125, 0.114583203125), (0.819607843137255, 0.104166796875,\n 0.104166796875), (0.823529411764706, 0.09375, 0.09375), (\n 0.827450980392157, 0.083333203125, 0.083333203125), (\n 0.831372549019608, 0.072916796875, 0.072916796875), (\n 0.835294117647059, 0.0625, 0.0625), (0.83921568627451, \n 0.052083203125, 0.052083203125), (0.843137254901961, 0.041666796875,\n 0.041666796875), (0.847058823529412, 0.03125, 0.03125), (\n 0.850980392156863, 0.0208333203125, 
0.0208333203125), (\n 0.854901960784314, 0.0104166796875, 0.0104166796875), (\n 0.858823529411765, 0, 0), (0.862745098039216, 0.0184151953125, \n 0.0184151953125), (0.866666666666667, 0.0368303515625, \n 0.0368303515625), (0.870588235294118, 0.055245703125, \n 0.055245703125), (0.874509803921569, 0.073660546875, 0.073660546875\n ), (0.87843137254902, 0.09207578125, 0.09207578125), (\n 0.882352941176471, 0.110491015625, 0.110491015625), (\n 0.886274509803922, 0.12890625, 0.12890625), (0.890196078431373, \n 0.147321484375, 0.147321484375), (0.894117647058824, 0.16573671875,\n 0.16573671875), (0.898039215686275, 0.184151953125, 0.184151953125),\n (0.901960784313726, 0.202566796875, 0.202566796875), (\n 0.905882352941176, 0.22098203125, 0.22098203125), (\n 0.909803921568627, 0.239397265625, 0.239397265625), (\n 0.913725490196078, 0.2578125, 0.2578125), (0.917647058823529, \n 0.276227734375, 0.276227734375), (0.92156862745098, 0.29464296875, \n 0.29464296875), (0.925490196078431, 0.313058203125, 0.313058203125),\n (0.929411764705882, 0.331473046875, 0.331473046875), (\n 0.933333333333333, 0.34988828125, 0.34988828125), (\n 0.937254901960784, 0.368303515625, 0.368303515625), (\n 0.941176470588235, 0.38671875, 0.38671875), (0.945098039215686, \n 0.4051328125, 0.4051328125), (0.949019607843137, 0.42355078125, \n 0.42355078125), (0.952941176470588, 0.44196484375, 0.44196484375),\n (0.956862745098039, 0.46037890625, 0.46037890625), (\n 0.96078431372549, 0.47879296875, 0.47879296875), (0.964705882352941,\n 0.4972109375, 0.4972109375), (0.968627450980392, 0.515625, 0.515625\n ), (0.972549019607843, 0.5340390625, 0.5340390625), (\n 0.976470588235294, 0.55245703125, 0.55245703125), (\n 0.980392156862745, 0.57087109375, 0.57087109375), (\n 0.984313725490196, 0.58928515625, 0.58928515625), (\n 0.988235294117647, 0.60769921875, 0.60769921875), (\n 0.992156862745098, 0.6261171875, 0.6261171875), (0.996078431372549,\n 0.64453125, 0.64453125), (1, 0.64453125, 0.64453125)), 'blue': ((0,\n 1, 1), (0.00392156862745098, 0.80569140625, 0.80569140625), (\n 0.00784313725490196, 0.7964296875, 0.7964296875), (\n 0.0117647058823529, 0.7871640625, 0.7871640625), (\n 0.0156862745098039, 0.77790234375, 0.77790234375), (\n 0.0196078431372549, 0.76863671875, 0.76863671875), (\n 0.0235294117647059, 0.759375, 0.759375), (0.0274509803921569, \n 0.75011328125, 0.75011328125), (0.0313725490196078, 0.74084765625, \n 0.74084765625), (0.0352941176470588, 0.7315859375, 0.7315859375), (\n 0.0392156862745098, 0.7223203125, 0.7223203125), (\n 0.0431372549019608, 0.71305859375, 0.71305859375), (\n 0.0470588235294118, 0.70379296875, 0.70379296875), (\n 0.0509803921568627, 0.69453125, 0.69453125), (0.0549019607843137, \n 0.68526953125, 0.68526953125), (0.0588235294117647, 0.67600390625, \n 0.67600390625), (0.0627450980392157, 0.6667421875, 0.6667421875), (\n 0.0666666666666667, 0.6574765625, 0.6574765625), (\n 0.0705882352941176, 0.64821484375, 0.64821484375), (\n 0.0745098039215686, 0.63894921875, 0.63894921875), (\n 0.0784313725490196, 0.6296875, 0.6296875), (0.0823529411764706, \n 0.62042578125, 0.62042578125), (0.0862745098039216, 0.61116015625, \n 0.61116015625), (0.0901960784313725, 0.6018984375, 0.6018984375), (\n 0.0941176470588235, 0.5926328125, 0.5926328125), (\n 0.0980392156862745, 0.58337109375, 0.58337109375), (\n 0.101960784313725, 0.57410546875, 0.57410546875), (\n 0.105882352941176, 0.56484375, 0.56484375), (0.109803921568627, \n 0.55558203125, 0.55558203125), (0.113725490196078, 0.54631640625, \n 0.54631640625), 
(0.117647058823529, 0.5370546875, 0.5370546875), (\n 0.12156862745098, 0.5277890625, 0.5277890625), (0.125490196078431, \n 0.51852734375, 0.51852734375), (0.129411764705882, 0.50926171875, \n 0.50926171875), (0.133333333333333, 0.5, 0.5), (0.137254901960784, \n 0.50901953125, 0.50901953125), (0.141176470588235, 0.5180390625, \n 0.5180390625), (0.145098039215686, 0.52705859375, 0.52705859375), (\n 0.149019607843137, 0.536078125, 0.536078125), (0.152941176470588, \n 0.54509765625, 0.54509765625), (0.156862745098039, 0.55412109375, \n 0.55412109375), (0.16078431372549, 0.563140625, 0.563140625), (\n 0.164705882352941, 0.57216015625, 0.57216015625), (\n 0.168627450980392, 0.5811796875, 0.5811796875), (0.172549019607843,\n 0.59019921875, 0.59019921875), (0.176470588235294, 0.59921875, \n 0.59921875), (0.180392156862745, 0.60823828125, 0.60823828125), (\n 0.184313725490196, 0.6172578125, 0.6172578125), (0.188235294117647,\n 0.62627734375, 0.62627734375), (0.192156862745098, 0.635296875, \n 0.635296875), (0.196078431372549, 0.64431640625, 0.64431640625), (\n 0.2, 0.65333984375, 0.65333984375), (0.203921568627451, 0.662359375,\n 0.662359375), (0.207843137254902, 0.67137890625, 0.67137890625), (\n 0.211764705882353, 0.6803984375, 0.6803984375), (0.215686274509804,\n 0.68941796875, 0.68941796875), (0.219607843137255, 0.6984375, \n 0.6984375), (0.223529411764706, 0.70745703125, 0.70745703125), (\n 0.227450980392157, 0.7164765625, 0.7164765625), (0.231372549019608,\n 0.72549609375, 0.72549609375), (0.235294117647059, 0.734515625, \n 0.734515625), (0.23921568627451, 0.74353515625, 0.74353515625), (\n 0.243137254901961, 0.75255859375, 0.75255859375), (\n 0.247058823529412, 0.761578125, 0.761578125), (0.250980392156863, \n 0.77059765625, 0.77059765625), (0.254901960784314, 0.7796171875, \n 0.7796171875), (0.258823529411765, 0.78863671875, 0.78863671875), (\n 0.262745098039216, 0.79765625, 0.79765625), (0.266666666666667, \n 0.80667578125, 0.80667578125), (0.270588235294118, 0.8156953125, \n 0.8156953125), (0.274509803921569, 0.82471484375, 0.82471484375), (\n 0.27843137254902, 0.833734375, 0.833734375), (0.282352941176471, \n 0.84275390625, 0.84275390625), (0.286274509803922, 0.85177734375, \n 0.85177734375), (0.290196078431373, 0.860796875, 0.860796875), (\n 0.294117647058824, 0.86981640625, 0.86981640625), (\n 0.298039215686275, 0.8788359375, 0.8788359375), (0.301960784313725,\n 0.88785546875, 0.88785546875), (0.305882352941176, 0.896875, \n 0.896875), (0.309803921568627, 0.90589453125, 0.90589453125), (\n 0.313725490196078, 0.9149140625, 0.9149140625), (0.317647058823529,\n 0.92393359375, 0.92393359375), (0.32156862745098, 0.932953125, \n 0.932953125), (0.325490196078431, 0.94197265625, 0.94197265625), (\n 0.329411764705882, 0.95099609375, 0.95099609375), (\n 0.333333333333333, 0.960015625, 0.960015625), (0.337254901960784, \n 0.96903515625, 0.96903515625), (0.341176470588235, 0.9780546875, \n 0.9780546875), (0.345098039215686, 0.98707421875, 0.98707421875), (\n 0.349019607843137, 0.99609375, 0.99609375), (0.352941176470588, \n 0.9737734375, 0.9737734375), (0.356862745098039, 0.95144921875, \n 0.95144921875), (0.36078431372549, 0.92912890625, 0.92912890625), (\n 0.364705882352941, 0.90680859375, 0.90680859375), (\n 0.368627450980392, 0.88448828125, 0.88448828125), (\n 0.372549019607843, 0.8621640625, 0.8621640625), (0.376470588235294,\n 0.83984375, 0.83984375), (0.380392156862745, 0.8175234375, \n 0.8175234375), (0.384313725490196, 0.79519921875, 0.79519921875), (\n 0.388235294117647, 0.77287890625, 
0.77287890625), (\n 0.392156862745098, 0.75055859375, 0.75055859375), (\n 0.396078431372549, 0.72823828125, 0.72823828125), (0.4, \n 0.7059140625, 0.7059140625), (0.403921568627451, 0.68359375, \n 0.68359375), (0.407843137254902, 0.6612734375, 0.6612734375), (\n 0.411764705882353, 0.63894921875, 0.63894921875), (\n 0.415686274509804, 0.61662890625, 0.61662890625), (\n 0.419607843137255, 0.59430859375, 0.59430859375), (\n 0.423529411764706, 0.57198828125, 0.57198828125), (\n 0.427450980392157, 0.5496640625, 0.5496640625), (0.431372549019608,\n 0.52734375, 0.52734375), (0.435294117647059, 0.5050234375, \n 0.5050234375), (0.43921568627451, 0.48269921875, 0.48269921875), (\n 0.443137254901961, 0.46037890625, 0.46037890625), (\n 0.447058823529412, 0.43805859375, 0.43805859375), (\n 0.450980392156863, 0.41573828125, 0.41573828125), (\n 0.454901960784314, 0.3934140625, 0.3934140625), (0.458823529411765,\n 0.37109375, 0.37109375), (0.462745098039216, 0.348772265625, \n 0.348772265625), (0.466666666666667, 0.32645078125, 0.32645078125),\n (0.470588235294118, 0.304129296875, 0.304129296875), (\n 0.474509803921569, 0.281808203125, 0.281808203125), (\n 0.47843137254902, 0.25948671875, 0.25948671875), (0.482352941176471,\n 0.237165234375, 0.237165234375), (0.486274509803922, 0.21484375, \n 0.21484375), (0.490196078431373, 0.233370703125, 0.233370703125), (\n 0.494117647058824, 0.251897265625, 0.251897265625), (\n 0.498039215686275, 0.27042421875, 0.27042421875), (\n 0.501960784313725, 0.28895078125, 0.28895078125), (\n 0.505882352941176, 0.307477734375, 0.307477734375), (\n 0.509803921568627, 0.326004296875, 0.326004296875), (\n 0.513725490196078, 0.34453125, 0.34453125), (0.517647058823529, \n 0.363058203125, 0.363058203125), (0.52156862745098, 0.381584765625,\n 0.381584765625), (0.525490196078431, 0.40011328125, 0.40011328125),\n (0.529411764705882, 0.41863671875, 0.41863671875), (\n 0.533333333333333, 0.4371640625, 0.4371640625), (0.537254901960784,\n 0.45569140625, 0.45569140625), (0.541176470588235, 0.47421875, \n 0.47421875), (0.545098039215686, 0.49274609375, 0.49274609375), (\n 0.549019607843137, 0.5112734375, 0.5112734375), (0.552941176470588,\n 0.52980078125, 0.52980078125), (0.556862745098039, 0.54832421875, \n 0.54832421875), (0.56078431372549, 0.5668515625, 0.5668515625), (\n 0.564705882352941, 0.58537890625, 0.58537890625), (\n 0.568627450980392, 0.60390625, 0.60390625), (0.572549019607843, \n 0.62243359375, 0.62243359375), (0.576470588235294, 0.6409609375, \n 0.6409609375), (0.580392156862745, 0.65948828125, 0.65948828125), (\n 0.584313725490196, 0.67801171875, 0.67801171875), (\n 0.588235294117647, 0.6965390625, 0.6965390625), (0.592156862745098,\n 0.71506640625, 0.71506640625), (0.596078431372549, 0.73359375, \n 0.73359375), (0.6, 0.75212109375, 0.75212109375), (\n 0.603921568627451, 0.7706484375, 0.7706484375), (0.607843137254902,\n 0.78917578125, 0.78917578125), (0.611764705882353, 0.80769921875, \n 0.80769921875), (0.615686274509804, 0.8262265625, 0.8262265625), (\n 0.619607843137255, 0.84475390625, 0.84475390625), (\n 0.623529411764706, 0.86328125, 0.86328125), (0.627450980392157, \n 0.84889453125, 0.84889453125), (0.631372549019608, 0.83450390625, \n 0.83450390625), (0.635294117647059, 0.8201171875, 0.8201171875), (\n 0.63921568627451, 0.80573046875, 0.80573046875), (0.643137254901961,\n 0.79133984375, 0.79133984375), (0.647058823529412, 0.776953125, \n 0.776953125), (0.650980392156863, 0.76256640625, 0.76256640625), (\n 0.654901960784314, 0.74817578125, 0.74817578125), (\n 
0.658823529411765, 0.7337890625, 0.7337890625), (0.662745098039216,\n 0.71940234375, 0.71940234375), (0.666666666666667, 0.70501171875, \n 0.70501171875), (0.670588235294118, 0.690625, 0.690625), (\n 0.674509803921569, 0.67623828125, 0.67623828125), (0.67843137254902,\n 0.66184765625, 0.66184765625), (0.682352941176471, 0.6474609375, \n 0.6474609375), (0.686274509803922, 0.63307421875, 0.63307421875), (\n 0.690196078431373, 0.61868359375, 0.61868359375), (\n 0.694117647058824, 0.604296875, 0.604296875), (0.698039215686274, \n 0.58991015625, 0.58991015625), (0.701960784313725, 0.57551953125, \n 0.57551953125), (0.705882352941177, 0.5611328125, 0.5611328125), (\n 0.709803921568627, 0.54674609375, 0.54674609375), (\n 0.713725490196078, 0.53235546875, 0.53235546875), (\n 0.717647058823529, 0.51796875, 0.51796875), (0.72156862745098, \n 0.50358203125, 0.50358203125), (0.725490196078431, 0.48919140625, \n 0.48919140625), (0.729411764705882, 0.4748046875, 0.4748046875), (\n 0.733333333333333, 0.46041796875, 0.46041796875), (\n 0.737254901960784, 0.44602734375, 0.44602734375), (\n 0.741176470588235, 0.431640625, 0.431640625), (0.745098039215686, \n 0.41725390625, 0.41725390625), (0.749019607843137, 0.40286328125, \n 0.40286328125), (0.752941176470588, 0.3884765625, 0.3884765625), (\n 0.756862745098039, 0.374088671875, 0.374088671875), (\n 0.76078431372549, 0.359700390625, 0.359700390625), (\n 0.764705882352941, 0.3453125, 0.3453125), (0.768627450980392, \n 0.330924609375, 0.330924609375), (0.772549019607843, 0.316536328125,\n 0.316536328125), (0.776470588235294, 0.3021484375, 0.3021484375), (\n 0.780392156862745, 0.287760546875, 0.287760546875), (\n 0.784313725490196, 0.273372265625, 0.273372265625), (\n 0.788235294117647, 0.258984375, 0.258984375), (0.792156862745098, \n 0.244596484375, 0.244596484375), (0.796078431372549, 0.230208203125,\n 0.230208203125), (0.8, 0.2158203125, 0.2158203125), (\n 0.803921568627451, 0.201432421875, 0.201432421875), (\n 0.807843137254902, 0.187044140625, 0.187044140625), (\n 0.811764705882353, 0.17265625, 0.17265625), (0.815686274509804, \n 0.158268359375, 0.158268359375), (0.819607843137255, 0.143880078125,\n 0.143880078125), (0.823529411764706, 0.1294921875, 0.1294921875), (\n 0.827450980392157, 0.115104296875, 0.115104296875), (\n 0.831372549019608, 0.100716015625, 0.100716015625), (\n 0.835294117647059, 0.086328125, 0.086328125), (0.83921568627451, \n 0.071940234375, 0.071940234375), (0.843137254901961, 0.057551953125,\n 0.057551953125), (0.847058823529412, 0.0431640625, 0.0431640625), (\n 0.850980392156863, 0.028776015625, 0.028776015625), (\n 0.854901960784314, 0.01438796875, 0.01438796875), (\n 0.858823529411765, 0, 0), (0.862745098039216, 0, 0), (\n 0.866666666666667, 0, 0), (0.870588235294118, 0, 0), (\n 0.874509803921569, 0, 0), (0.87843137254902, 0, 0), (\n 0.882352941176471, 0, 0), (0.886274509803922, 0, 0), (\n 0.890196078431373, 0, 0), (0.894117647058824, 0, 0), (\n 0.898039215686275, 0, 0), (0.901960784313726, 0, 0), (\n 0.905882352941176, 0, 0), (0.909803921568627, 0, 0), (\n 0.913725490196078, 0, 0), (0.917647058823529, 0, 0), (\n 0.92156862745098, 0, 0), (0.925490196078431, 0, 0), (\n 0.929411764705882, 0, 0), (0.933333333333333, 0, 0), (\n 0.937254901960784, 0, 0), (0.941176470588235, 0, 0), (\n 0.945098039215686, 0, 0), (0.949019607843137, 0, 0), (\n 0.952941176470588, 0, 0), (0.956862745098039, 0, 0), (\n 0.96078431372549, 0, 0), (0.964705882352941, 0, 0), (\n 0.968627450980392, 0, 0), (0.972549019607843, 0, 0), (\n 0.976470588235294, 0, 0), 
(0.980392156862745, 0, 0), (\n 0.984313725490196, 0, 0), (0.988235294117647, 0, 0), (\n 0.992156862745098, 0, 0), (0.996078431372549, 0, 0), (1, 0, 0))}\n califa = mcol.LinearSegmentedColormap('califa', cdict)\n vcalifa = mcol.LinearSegmentedColormap('vcalifa', vcdict)\n if option == 'v':\n return vcalifa\n else:\n return califa\n\n\ndef A_l(R_v, lw):\n lw = lw / 10000\n x = 1 / lw\n if x > 1.1:\n y = x - 1.82\n a_x = (1.0 + 0.17699 * y - 0.50447 * y ** 2 - 0.02427 * y ** 3 + \n 0.72085 * y ** 4 + 0.01979 * y ** 5 - 0.7753 * y ** 6 + 0.32999 *\n y ** 7)\n b_x = (1.41338 * y + 2.28305 * y ** 2 + 1.07233 * y ** 3 - 5.38434 *\n y ** 4 - 0.62251 * y ** 5 + 5.3026 * y ** 6 - 2.09002 * y ** 7)\n else:\n a_x = 0.574 * x ** 1.61\n b_x = -0.527 * x ** 1.61\n A_l_ = a_x + b_x / R_v\n return A_l_\n",
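Nota ilustrativa (no forma parte del fichero almacenado): el bloque anterior termina con un colormap segmentado tipo CALIFA — las tuplas de cdict/vcdict son "segmentdata" de matplotlib, es decir, triples (posición, valor_izq, valor_der) por canal — y con un helper A_l(R_v, lw) que evalúa una curva de extinción tipo Cardelli, A(λ)/A(V). El siguiente boceto en Python es solo un ejemplo de uso bajo supuestos explícitos: se reescribe el helper para que corra de forma autónoma, se asume R_v = 3.1 y longitudes de onda en Angstroms (como sugiere la conversión lw / 10000 del propio código).

# Boceto mínimo de uso; los valores de R_v y las longitudes de onda son supuestos ilustrativos.
def A_l(R_v, lw):
    # Misma curva que en el código almacenado: A(lambda)/A(V) = a(x) + b(x)/R_v,
    # con lw en Angstroms convertido a micras y x = 1/lambda (micras^-1).
    lw = lw / 10000
    x = 1 / lw
    if x > 1.1:
        # Rama óptica/NIR (polinomios en y = x - 1.82).
        y = x - 1.82
        a_x = (1.0 + 0.17699 * y - 0.50447 * y ** 2 - 0.02427 * y ** 3 +
               0.72085 * y ** 4 + 0.01979 * y ** 5 - 0.7753 * y ** 6 +
               0.32999 * y ** 7)
        b_x = (1.41338 * y + 2.28305 * y ** 2 + 1.07233 * y ** 3 -
               5.38434 * y ** 4 - 0.62251 * y ** 5 + 5.3026 * y ** 6 -
               2.09002 * y ** 7)
    else:
        # Rama infrarroja (ley de potencias).
        a_x = 0.574 * x ** 1.61
        b_x = -0.527 * x ** 1.61
    return a_x + b_x / R_v

R_v = 3.1                       # valor supuesto, típico de la Vía Láctea
A_Ha = A_l(R_v, 6562.8)         # A(Halpha)/A(V), aprox. 0.82 para R_v = 3.1
A_Hb = A_l(R_v, 4861.3)         # A(Hbeta)/A(V),  aprox. 1.16 para R_v = 3.1
print(A_Ha, A_Hb)

El mismo patrón de segmentdata del bloque anterior se pasa tal cual a mcol.LinearSegmentedColormap(nombre, cdict), que es la llamada que el propio código usa para construir 'califa' y 'vcalifa'.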
"<import token>\n\n\ndef color_map_califa_old(option='json'):\n if option == 'json':\n cmap_cal_dic = json.load(open('code/cmap_cal_json.txt'))\n elif option == 'pickle':\n with open('cmap_cal_pickle.txt', 'rb') as handle:\n cmap_cal_dic = pickle.loads(handle.read())\n cmap_cal = mcol.LinearSegmentedColormap('cmap_CALIFA', cmap_cal_dic)\n return cmap_cal\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\ndef Gr_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_grazy = np.linspace(x_min, -0.2, 100)\n ax.plot(x_set_grazy, grazy(x_set_grazy), label='Stasinska+03', **kwargs)\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\ndef SII_LINERS_curve_plot(ax=None, x_min=-0.3, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.01, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_OI_curve_plot(ax=None, x_min=-3.5, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.35)\n ax.plot(x_set_line, O3O1_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_curve_plot(ax=None, x_min=-2.0, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.08, 100)\n ax.plot(x_set_line, espinosa(x_set_line), label=label, **kwargs)\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\ndef kewley(logNIIHa):\n val = 0.61 / (logNIIHa - 0.47) + 1.19\n return val\n\n\ndef grazy(logNIIHa):\n x = logNIIHa\n val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409 * x\n ) - 31.093\n return val\n\n\ndef AGNline(logSIIHa):\n val = 0.72 / (logSIIHa - 0.32) + 1.3\n return val\n\n\ndef LINSy2line(logSIIHa):\n val = 1.89 * logSIIHa + 0.76\n return val\n\n\ndef AGNline2(logOIHa):\n val = 
0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\ndef LINSy2line2(logOIHa):\n val = 1.18 * logOIHa + 1.3\n return val\n\n\ndef espinosa(logNIIHa):\n val = 0.12579066 / (logNIIHa - 0.00302777) + 0.56846872\n return val\n\n\ndef O3S2_line_c(x):\n val = 0.04074804 / (x + 0.01253238) + 0.58154113\n return val\n\n\ndef O3O1_line_c(x):\n val = 0.05612915 / (x + 0.39641533) + 0.60969495\n return val\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\ndef Gr_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_grazy = np.linspace(x_min, -0.2, 100)\n ax.plot(x_set_grazy, grazy(x_set_grazy), label='Stasinska+03', **kwargs)\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\ndef SII_LINERS_curve_plot(ax=None, x_min=-0.3, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.02, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\ndef kewley(logNIIHa):\n val = 0.61 / (logNIIHa - 0.47) + 1.19\n return val\n\n\ndef grazy(logNIIHa):\n x = logNIIHa\n val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409 * x\n ) - 31.093\n return val\n\n\ndef A_l(R_v, lw):\n lw = lw / 10000\n x = 1 / lw\n if x > 1.1:\n y = x - 1.82\n a_x = (1.0 + 0.17699 * y - 0.50447 * y ** 2 - 0.02427 * y ** 3 + \n 0.72085 * y ** 4 + 0.01979 * y ** 5 - 0.7753 * y ** 6 + 0.32999 *\n y ** 7)\n b_x = (1.41338 * y + 2.28305 * y ** 2 + 1.07233 * y ** 3 - 5.38434 *\n y ** 4 - 0.62251 * y ** 5 + 5.3026 * y ** 6 - 2.09002 * y ** 7)\n else:\n a_x = 0.574 * x ** 1.61\n b_x = -0.527 * x ** 1.61\n A_l_ = a_x + b_x / R_v\n return A_l_\n\n\ndef color_map_califa(option='v'):\n cdict = {'red': ((0.0, 0, 0), (0.00392156862745098, 0, 0), (\n 0.00784313725490196, 0, 0), (0.0117647058823529, 0, 0), (\n 0.0156862745098039, 0, 0), (0.0196078431372549, 
0, 0), (\n 0.0235294117647059, 0, 0), (0.0274509803921569, 0, 0), (\n 0.0313725490196078, 0, 0), (0.0352941176470588, 0, 0), (\n 0.0392156862745098, 0, 0), (0.0431372549019608, 0, 0), (\n 0.0470588235294118, 0, 0), (0.0509803921568627, 0, 0), (\n 0.0549019607843137, 0, 0), (0.0588235294117647, 0, 0), (\n 0.0627450980392157, 0, 0), (0.0666666666666667, 0, 0), (\n 0.0705882352941176, 0, 0), (0.0745098039215686, 0, 0), (\n 0.0784313725490196, 0, 0), (0.0823529411764706, 0, 0), (\n 0.0862745098039216, 0, 0), (0.0901960784313725, 0, 0), (\n 0.0941176470588235, 0, 0), (0.0980392156862745, 0, 0), (\n 0.101960784313725, 0, 0), (0.105882352941176, 0, 0), (\n 0.109803921568627, 0, 0), (0.113725490196078, 0, 0), (\n 0.117647058823529, 0, 0), (0.12156862745098, 0, 0), (\n 0.125490196078431, 0, 0), (0.129411764705882, 0, 0), (\n 0.133333333333333, 0, 0), (0.137254901960784, 0, 0), (\n 0.141176470588235, 0, 0), (0.145098039215686, 0, 0), (\n 0.149019607843137, 0, 0), (0.152941176470588, 0, 0), (\n 0.156862745098039, 0, 0), (0.16078431372549, 0, 0), (\n 0.164705882352941, 0, 0), (0.168627450980392, 0, 0), (\n 0.172549019607843, 0, 0), (0.176470588235294, 0, 0), (\n 0.180392156862745, 0, 0), (0.184313725490196, 0, 0), (\n 0.188235294117647, 0, 0), (0.192156862745098, 0, 0), (\n 0.196078431372549, 0.019921875, 0.019921875), (0.2, 0.03984375, \n 0.03984375), (0.203921568627451, 0.059765625, 0.059765625), (\n 0.207843137254902, 0.0796875, 0.0796875), (0.211764705882353, \n 0.099609375, 0.099609375), (0.215686274509804, 0.11953125, \n 0.11953125), (0.219607843137255, 0.139453125, 0.139453125), (\n 0.223529411764706, 0.159375, 0.159375), (0.227450980392157, \n 0.179296875, 0.179296875), (0.231372549019608, 0.19921875, \n 0.19921875), (0.235294117647059, 0.219140625, 0.219140625), (\n 0.23921568627451, 0.2390625, 0.2390625), (0.243137254901961, \n 0.258984375, 0.258984375), (0.247058823529412, 0.27890625, \n 0.27890625), (0.250980392156863, 0.298828125, 0.298828125), (\n 0.254901960784314, 0.31875, 0.31875), (0.258823529411765, \n 0.338671875, 0.338671875), (0.262745098039216, 0.35859375, \n 0.35859375), (0.266666666666667, 0.378515625, 0.378515625), (\n 0.270588235294118, 0.3984375, 0.3984375), (0.274509803921569, \n 0.418359375, 0.418359375), (0.27843137254902, 0.43828125, \n 0.43828125), (0.282352941176471, 0.458203125, 0.458203125), (\n 0.286274509803922, 0.478125, 0.478125), (0.290196078431373, \n 0.498046875, 0.498046875), (0.294117647058824, 0.51796875, \n 0.51796875), (0.298039215686275, 0.537890625, 0.537890625), (\n 0.301960784313725, 0.5578125, 0.5578125), (0.305882352941176, \n 0.577734375, 0.577734375), (0.309803921568627, 0.59765625, \n 0.59765625), (0.313725490196078, 0.617578125, 0.617578125), (\n 0.317647058823529, 0.6375, 0.6375), (0.32156862745098, 0.657421875,\n 0.657421875), (0.325490196078431, 0.67734375, 0.67734375), (\n 0.329411764705882, 0.697265625, 0.697265625), (0.333333333333333, \n 0.7171875, 0.7171875), (0.337254901960784, 0.737109375, 0.737109375\n ), (0.341176470588235, 0.75703125, 0.75703125), (0.345098039215686,\n 0.776953125, 0.776953125), (0.349019607843137, 0.796875, 0.796875),\n (0.352941176470588, 0.816796875, 0.816796875), (0.356862745098039, \n 0.83671875, 0.83671875), (0.36078431372549, 0.856640625, \n 0.856640625), (0.364705882352941, 0.8765625, 0.8765625), (\n 0.368627450980392, 0.896484375, 0.896484375), (0.372549019607843, \n 0.91640625, 0.91640625), (0.376470588235294, 0.936328125, \n 0.936328125), (0.380392156862745, 0.95625, 0.95625), (\n 0.384313725490196, 
0.976171875, 0.976171875), (0.388235294117647, \n 0.99609375, 0.99609375), (0.392156862745098, 0.99609375, 0.99609375\n ), (0.396078431372549, 0.99609375, 0.99609375), (0.4, 0.99609375, \n 0.99609375), (0.403921568627451, 0.99609375, 0.99609375), (\n 0.407843137254902, 0.99609375, 0.99609375), (0.411764705882353, \n 0.99609375, 0.99609375), (0.415686274509804, 0.99609375, 0.99609375\n ), (0.419607843137255, 0.99609375, 0.99609375), (0.423529411764706,\n 0.99609375, 0.99609375), (0.427450980392157, 0.99609375, 0.99609375\n ), (0.431372549019608, 0.99609375, 0.99609375), (0.435294117647059,\n 0.99609375, 0.99609375), (0.43921568627451, 0.99609375, 0.99609375),\n (0.443137254901961, 0.99609375, 0.99609375), (0.447058823529412, \n 0.99609375, 0.99609375), (0.450980392156863, 0.99609375, 0.99609375\n ), (0.454901960784314, 0.99609375, 0.99609375), (0.458823529411765,\n 0.99609375, 0.99609375), (0.462745098039216, 0.99609375, 0.99609375\n ), (0.466666666666667, 0.99609375, 0.99609375), (0.470588235294118,\n 0.99609375, 0.99609375), (0.474509803921569, 0.99609375, 0.99609375\n ), (0.47843137254902, 0.99609375, 0.99609375), (0.482352941176471, \n 0.99609375, 0.99609375), (0.486274509803922, 0.99609375, 0.99609375\n ), (0.490196078431373, 0.99609375, 0.99609375), (0.494117647058824,\n 0.99609375, 0.99609375), (0.498039215686275, 0.99609375, 0.99609375\n ), (0.501960784313725, 0.99609375, 0.99609375), (0.505882352941176,\n 0.99609375, 0.99609375), (0.509803921568627, 0.99609375, 0.99609375\n ), (0.513725490196078, 0.99609375, 0.99609375), (0.517647058823529,\n 0.99609375, 0.99609375), (0.52156862745098, 0.99609375, 0.99609375),\n (0.525490196078431, 0.99609375, 0.99609375), (0.529411764705882, \n 0.99609375, 0.99609375), (0.533333333333333, 0.99609375, 0.99609375\n ), (0.537254901960784, 0.99609375, 0.99609375), (0.541176470588235,\n 0.99609375, 0.99609375), (0.545098039215686, 0.99609375, 0.99609375\n ), (0.549019607843137, 0.99609375, 0.99609375), (0.552941176470588,\n 0.99609375, 0.99609375), (0.556862745098039, 0.99609375, 0.99609375\n ), (0.56078431372549, 0.99609375, 0.99609375), (0.564705882352941, \n 0.99609375, 0.99609375), (0.568627450980392, 0.99609375, 0.99609375\n ), (0.572549019607843, 0.99609375, 0.99609375), (0.576470588235294,\n 0.99609375, 0.99609375), (0.580392156862745, 0.99609375, 0.99609375\n ), (0.584313725490196, 0.99609375, 0.99609375), (0.588235294117647,\n 0.98046875, 0.98046875), (0.592156862745098, 0.96484375, 0.96484375\n ), (0.596078431372549, 0.94921875, 0.94921875), (0.6, 0.93359375, \n 0.93359375), (0.603921568627451, 0.91796875, 0.91796875), (\n 0.607843137254902, 0.90234375, 0.90234375), (0.611764705882353, \n 0.88671875, 0.88671875), (0.615686274509804, 0.87109375, 0.87109375\n ), (0.619607843137255, 0.85546875, 0.85546875), (0.623529411764706,\n 0.83984375, 0.83984375), (0.627450980392157, 0.82421875, 0.82421875\n ), (0.631372549019608, 0.80859375, 0.80859375), (0.635294117647059,\n 0.79296875, 0.79296875), (0.63921568627451, 0.77734375, 0.77734375),\n (0.643137254901961, 0.76171875, 0.76171875), (0.647058823529412, \n 0.74609375, 0.74609375), (0.650980392156863, 0.73046875, 0.73046875\n ), (0.654901960784314, 0.71484375, 0.71484375), (0.658823529411765,\n 0.69921875, 0.69921875), (0.662745098039216, 0.68359375, 0.68359375\n ), (0.666666666666667, 0.66796875, 0.66796875), (0.670588235294118,\n 0.65234375, 0.65234375), (0.674509803921569, 0.63671875, 0.63671875\n ), (0.67843137254902, 0.62109375, 0.62109375), (0.682352941176471, \n 0.60546875, 0.60546875), 
(0.686274509803922, 0.58984375, 0.58984375\n ), (0.690196078431373, 0.57421875, 0.57421875), (0.694117647058824,\n 0.55859375, 0.55859375), (0.698039215686274, 0.54296875, 0.54296875\n ), (0.701960784313725, 0.52734375, 0.52734375), (0.705882352941177,\n 0.51171875, 0.51171875), (0.709803921568627, 0.49609375, 0.49609375\n ), (0.713725490196078, 0.48046875, 0.48046875), (0.717647058823529,\n 0.46484375, 0.46484375), (0.72156862745098, 0.44921875, 0.44921875),\n (0.725490196078431, 0.43359375, 0.43359375), (0.729411764705882, \n 0.41796875, 0.41796875), (0.733333333333333, 0.40234375, 0.40234375\n ), (0.737254901960784, 0.38671875, 0.38671875), (0.741176470588235,\n 0.37109375, 0.37109375), (0.745098039215686, 0.35546875, 0.35546875\n ), (0.749019607843137, 0.33984375, 0.33984375), (0.752941176470588,\n 0.32421875, 0.32421875), (0.756862745098039, 0.30859375, 0.30859375\n ), (0.76078431372549, 0.29296875, 0.29296875), (0.764705882352941, \n 0.27734375, 0.27734375), (0.768627450980392, 0.26171875, 0.26171875\n ), (0.772549019607843, 0.24609375, 0.24609375), (0.776470588235294,\n 0.23046875, 0.23046875), (0.780392156862745, 0.21484375, 0.21484375\n ), (0.784313725490196, 0.22663359375, 0.22663359375), (\n 0.788235294117647, 0.2384234375, 0.2384234375), (0.792156862745098,\n 0.250212890625, 0.250212890625), (0.796078431372549, 0.262002734375,\n 0.262002734375), (0.8, 0.273792578125, 0.273792578125), (\n 0.803921568627451, 0.285582421875, 0.285582421875), (\n 0.807843137254902, 0.297372265625, 0.297372265625), (\n 0.811764705882353, 0.309162109375, 0.309162109375), (\n 0.815686274509804, 0.3209515625, 0.3209515625), (0.819607843137255,\n 0.33274140625, 0.33274140625), (0.823529411764706, 0.34453125, \n 0.34453125), (0.827450980392157, 0.35632109375, 0.35632109375), (\n 0.831372549019608, 0.3681109375, 0.3681109375), (0.835294117647059,\n 0.379900390625, 0.379900390625), (0.83921568627451, 0.39169140625, \n 0.39169140625), (0.843137254901961, 0.40348046875, 0.40348046875),\n (0.847058823529412, 0.41526953125, 0.41526953125), (\n 0.850980392156863, 0.42705859375, 0.42705859375), (\n 0.854901960784314, 0.43884765625, 0.43884765625), (\n 0.858823529411765, 0.450640625, 0.450640625), (0.862745098039216, \n 0.4624296875, 0.4624296875), (0.866666666666667, 0.47421875, \n 0.47421875), (0.870588235294118, 0.4860078125, 0.4860078125), (\n 0.874509803921569, 0.497796875, 0.497796875), (0.87843137254902, \n 0.50958984375, 0.50958984375), (0.882352941176471, 0.52137890625, \n 0.52137890625), (0.886274509803922, 0.53316796875, 0.53316796875),\n (0.890196078431373, 0.54495703125, 0.54495703125), (\n 0.894117647058824, 0.55674609375, 0.55674609375), (\n 0.898039215686275, 0.56853515625, 0.56853515625), (\n 0.901960784313726, 0.580328125, 0.580328125), (0.905882352941176, \n 0.5921171875, 0.5921171875), (0.909803921568627, 0.60390625, \n 0.60390625), (0.913725490196078, 0.6156953125, 0.6156953125), (\n 0.917647058823529, 0.627484375, 0.627484375), (0.92156862745098, \n 0.63927734375, 0.63927734375), (0.925490196078431, 0.65106640625, \n 0.65106640625), (0.929411764705882, 0.66285546875, 0.66285546875),\n (0.933333333333333, 0.67464453125, 0.67464453125), (\n 0.937254901960784, 0.68643359375, 0.68643359375), (\n 0.941176470588235, 0.69822265625, 0.69822265625), (\n 0.945098039215686, 0.710015625, 0.710015625), (0.949019607843137, \n 0.7218046875, 0.7218046875), (0.952941176470588, 0.73359375, \n 0.73359375), (0.956862745098039, 0.7453828125, 0.7453828125), (\n 0.96078431372549, 0.757171875, 0.757171875), 
(0.964705882352941, \n 0.76896484375, 0.76896484375), (0.968627450980392, 0.78075390625, \n 0.78075390625), (0.972549019607843, 0.79254296875, 0.79254296875),\n (0.976470588235294, 0.80433203125, 0.80433203125), (\n 0.980392156862745, 0.81612109375, 0.81612109375), (\n 0.984313725490196, 0.82791015625, 0.82791015625), (\n 0.988235294117647, 0.839703125, 0.839703125), (0.992156862745098, \n 0.8514921875, 0.8514921875), (0.996078431372549, 0.86328125, \n 0.86328125), (1.0, 0.86328125, 0.86328125)), 'green': ((0.0, \n 0.02984375, 0.02984375), (0.00392156862745098, 0.02984375, \n 0.02984375), (0.00784313725490196, 0.044765625, 0.044765625), (\n 0.0117647058823529, 0.0596875, 0.0596875), (0.0156862745098039, \n 0.074609375, 0.074609375), (0.0196078431372549, 0.08953125, \n 0.08953125), (0.0235294117647059, 0.104453125, 0.104453125), (\n 0.0274509803921569, 0.119375, 0.119375), (0.0313725490196078, \n 0.134296875, 0.134296875), (0.0352941176470588, 0.14921875, \n 0.14921875), (0.0392156862745098, 0.164140625, 0.164140625), (\n 0.0431372549019608, 0.1790625, 0.1790625), (0.0470588235294118, \n 0.193984375, 0.193984375), (0.0509803921568627, 0.20890625, \n 0.20890625), (0.0549019607843137, 0.223828125, 0.223828125), (\n 0.0588235294117647, 0.23875, 0.23875), (0.0627450980392157, \n 0.253671875, 0.253671875), (0.0666666666666667, 0.26859375, \n 0.26859375), (0.0705882352941176, 0.283515625, 0.283515625), (\n 0.0745098039215686, 0.2984375, 0.2984375), (0.0784313725490196, \n 0.313359375, 0.313359375), (0.0823529411764706, 0.32828125, \n 0.32828125), (0.0862745098039216, 0.343203125, 0.343203125), (\n 0.0901960784313725, 0.358125, 0.358125), (0.0941176470588235, \n 0.373046875, 0.373046875), (0.0980392156862745, 0.38796875, \n 0.38796875), (0.101960784313725, 0.402890625, 0.402890625), (\n 0.105882352941176, 0.4178125, 0.4178125), (0.109803921568627, \n 0.432734375, 0.432734375), (0.113725490196078, 0.44765625, \n 0.44765625), (0.117647058823529, 0.462578125, 0.462578125), (\n 0.12156862745098, 0.4775, 0.4775), (0.125490196078431, 0.492421875,\n 0.492421875), (0.129411764705882, 0.50734375, 0.50734375), (\n 0.133333333333333, 0.522265625, 0.522265625), (0.137254901960784, \n 0.5371875, 0.5371875), (0.141176470588235, 0.552109375, 0.552109375\n ), (0.145098039215686, 0.56703125, 0.56703125), (0.149019607843137,\n 0.581953125, 0.581953125), (0.152941176470588, 0.596875, 0.596875),\n (0.156862745098039, 0.611796875, 0.611796875), (0.16078431372549, \n 0.62671875, 0.62671875), (0.164705882352941, 0.641640625, \n 0.641640625), (0.168627450980392, 0.6565625, 0.6565625), (\n 0.172549019607843, 0.671484375, 0.671484375), (0.176470588235294, \n 0.68640625, 0.68640625), (0.180392156862745, 0.701328125, \n 0.701328125), (0.184313725490196, 0.71625, 0.71625), (\n 0.188235294117647, 0.731171875, 0.731171875), (0.192156862745098, \n 0.74609375, 0.74609375), (0.196078431372549, 0.731171875, \n 0.731171875), (0.2, 0.71625, 0.71625), (0.203921568627451, \n 0.701328125, 0.701328125), (0.207843137254902, 0.68640625, \n 0.68640625), (0.211764705882353, 0.671484375, 0.671484375), (\n 0.215686274509804, 0.6565625, 0.6565625), (0.219607843137255, \n 0.641640625, 0.641640625), (0.223529411764706, 0.62671875, \n 0.62671875), (0.227450980392157, 0.611796875, 0.611796875), (\n 0.231372549019608, 0.596875, 0.596875), (0.235294117647059, \n 0.581953125, 0.581953125), (0.23921568627451, 0.56703125, \n 0.56703125), (0.243137254901961, 0.552109375, 0.552109375), (\n 0.247058823529412, 0.5371875, 0.5371875), (0.250980392156863, \n 
0.522265625, 0.522265625), (0.254901960784314, 0.50734375, \n 0.50734375), (0.258823529411765, 0.492421875, 0.492421875), (\n 0.262745098039216, 0.4775, 0.4775), (0.266666666666667, 0.462578125,\n 0.462578125), (0.270588235294118, 0.44765625, 0.44765625), (\n 0.274509803921569, 0.432734375, 0.432734375), (0.27843137254902, \n 0.4178125, 0.4178125), (0.282352941176471, 0.402890625, 0.402890625\n ), (0.286274509803922, 0.38796875, 0.38796875), (0.290196078431373,\n 0.373046875, 0.373046875), (0.294117647058824, 0.358125, 0.358125),\n (0.298039215686275, 0.343203125, 0.343203125), (0.301960784313725, \n 0.32828125, 0.32828125), (0.305882352941176, 0.313359375, \n 0.313359375), (0.309803921568627, 0.2984375, 0.2984375), (\n 0.313725490196078, 0.283515625, 0.283515625), (0.317647058823529, \n 0.26859375, 0.26859375), (0.32156862745098, 0.253671875, \n 0.253671875), (0.325490196078431, 0.23875, 0.23875), (\n 0.329411764705882, 0.223828125, 0.223828125), (0.333333333333333, \n 0.20890625, 0.20890625), (0.337254901960784, 0.193984375, \n 0.193984375), (0.341176470588235, 0.1790625, 0.1790625), (\n 0.345098039215686, 0.164140625, 0.164140625), (0.349019607843137, \n 0.14921875, 0.14921875), (0.352941176470588, 0.134296875, \n 0.134296875), (0.356862745098039, 0.119375, 0.119375), (\n 0.36078431372549, 0.104453125, 0.104453125), (0.364705882352941, \n 0.08953125, 0.08953125), (0.368627450980392, 0.074609375, \n 0.074609375), (0.372549019607843, 0.0596875, 0.0596875), (\n 0.376470588235294, 0.044765625, 0.044765625), (0.380392156862745, \n 0.0298437890625, 0.0298437890625), (0.384313725490196, 0.014921875,\n 0.014921875), (0.388235294117647, 0, 0), (0.392156862745098, \n 0.012890625, 0.012890625), (0.396078431372549, 0.02578125, \n 0.02578125), (0.4, 0.038671875, 0.038671875), (0.403921568627451, \n 0.0515625, 0.0515625), (0.407843137254902, 0.064453125, 0.064453125\n ), (0.411764705882353, 0.07734375, 0.07734375), (0.415686274509804,\n 0.090234375, 0.090234375), (0.419607843137255, 0.103125, 0.103125),\n (0.423529411764706, 0.116015625, 0.116015625), (0.427450980392157, \n 0.12890625, 0.12890625), (0.431372549019608, 0.141796875, \n 0.141796875), (0.435294117647059, 0.1546875, 0.1546875), (\n 0.43921568627451, 0.167578125, 0.167578125), (0.443137254901961, \n 0.18046875, 0.18046875), (0.447058823529412, 0.193359375, \n 0.193359375), (0.450980392156863, 0.20625, 0.20625), (\n 0.454901960784314, 0.219140625, 0.219140625), (0.458823529411765, \n 0.23203125, 0.23203125), (0.462745098039216, 0.244921875, \n 0.244921875), (0.466666666666667, 0.2578125, 0.2578125), (\n 0.470588235294118, 0.270703125, 0.270703125), (0.474509803921569, \n 0.28359375, 0.28359375), (0.47843137254902, 0.296484375, \n 0.296484375), (0.482352941176471, 0.309375, 0.309375), (\n 0.486274509803922, 0.322265625, 0.322265625), (0.490196078431373, \n 0.33515625, 0.33515625), (0.494117647058824, 0.348046875, \n 0.348046875), (0.498039215686275, 0.3609375, 0.3609375), (\n 0.501960784313725, 0.373828125, 0.373828125), (0.505882352941176, \n 0.38671875, 0.38671875), (0.509803921568627, 0.399609375, \n 0.399609375), (0.513725490196078, 0.4125, 0.4125), (\n 0.517647058823529, 0.425390625, 0.425390625), (0.52156862745098, \n 0.43828125, 0.43828125), (0.525490196078431, 0.451171875, \n 0.451171875), (0.529411764705882, 0.4640625, 0.4640625), (\n 0.533333333333333, 0.476953125, 0.476953125), (0.537254901960784, \n 0.48984375, 0.48984375), (0.541176470588235, 0.502734375, \n 0.502734375), (0.545098039215686, 0.515625, 0.515625), (\n 
0.549019607843137, 0.528515625, 0.528515625), (0.552941176470588, \n 0.54140625, 0.54140625), (0.556862745098039, 0.554296875, \n 0.554296875), (0.56078431372549, 0.5671875, 0.5671875), (\n 0.564705882352941, 0.580078125, 0.580078125), (0.568627450980392, \n 0.59296875, 0.59296875), (0.572549019607843, 0.605859375, \n 0.605859375), (0.576470588235294, 0.61875, 0.61875), (\n 0.580392156862745, 0.631640625, 0.631640625), (0.584313725490196, \n 0.64453125, 0.64453125), (0.588235294117647, 0.6359375, 0.6359375),\n (0.592156862745098, 0.62734375, 0.62734375), (0.596078431372549, \n 0.61875, 0.61875), (0.6, 0.61015625, 0.61015625), (\n 0.603921568627451, 0.6015625, 0.6015625), (0.607843137254902, \n 0.59296875, 0.59296875), (0.611764705882353, 0.584375, 0.584375), (\n 0.615686274509804, 0.57578125, 0.57578125), (0.619607843137255, \n 0.5671875, 0.5671875), (0.623529411764706, 0.55859375, 0.55859375),\n (0.627450980392157, 0.55, 0.55), (0.631372549019608, 0.54140625, \n 0.54140625), (0.635294117647059, 0.5328125, 0.5328125), (\n 0.63921568627451, 0.52421875, 0.52421875), (0.643137254901961, \n 0.515625, 0.515625), (0.647058823529412, 0.50703125, 0.50703125), (\n 0.650980392156863, 0.4984375, 0.4984375), (0.654901960784314, \n 0.48984375, 0.48984375), (0.658823529411765, 0.48125, 0.48125), (\n 0.662745098039216, 0.47265625, 0.47265625), (0.666666666666667, \n 0.4640625, 0.4640625), (0.670588235294118, 0.45546875, 0.45546875),\n (0.674509803921569, 0.446875, 0.446875), (0.67843137254902, \n 0.43828125, 0.43828125), (0.682352941176471, 0.4296875, 0.4296875),\n (0.686274509803922, 0.42109375, 0.42109375), (0.690196078431373, \n 0.4125, 0.4125), (0.694117647058824, 0.40390625, 0.40390625), (\n 0.698039215686274, 0.3953125, 0.3953125), (0.701960784313725, \n 0.38671875, 0.38671875), (0.705882352941177, 0.378125, 0.378125), (\n 0.709803921568627, 0.36953125, 0.36953125), (0.713725490196078, \n 0.3609375, 0.3609375), (0.717647058823529, 0.35234375, 0.35234375),\n (0.72156862745098, 0.34375, 0.34375), (0.725490196078431, \n 0.33515625, 0.33515625), (0.729411764705882, 0.3265625, 0.3265625),\n (0.733333333333333, 0.31796875, 0.31796875), (0.737254901960784, \n 0.309375, 0.309375), (0.741176470588235, 0.30078125, 0.30078125), (\n 0.745098039215686, 0.2921875, 0.2921875), (0.749019607843137, \n 0.28359375, 0.28359375), (0.752941176470588, 0.275, 0.275), (\n 0.756862745098039, 0.26640625, 0.26640625), (0.76078431372549, \n 0.2578125, 0.2578125), (0.764705882352941, 0.24921875, 0.24921875),\n (0.768627450980392, 0.240625, 0.240625), (0.772549019607843, \n 0.23203125, 0.23203125), (0.776470588235294, 0.2234375, 0.2234375),\n (0.780392156862745, 0.21484375, 0.21484375), (0.784313725490196, \n 0.222301171875, 0.222301171875), (0.788235294117647, 0.22975859375,\n 0.22975859375), (0.792156862745098, 0.237216015625, 0.237216015625),\n (0.796078431372549, 0.2446734375, 0.2446734375), (0.8, \n 0.252130859375, 0.252130859375), (0.803921568627451, 0.259587890625,\n 0.259587890625), (0.807843137254902, 0.2670453125, 0.2670453125), (\n 0.811764705882353, 0.274502734375, 0.274502734375), (\n 0.815686274509804, 0.28196015625, 0.28196015625), (\n 0.819607843137255, 0.289417578125, 0.289417578125), (\n 0.823529411764706, 0.296875, 0.296875), (0.827450980392157, \n 0.304332421875, 0.304332421875), (0.831372549019608, 0.31178984375,\n 0.31178984375), (0.835294117647059, 0.319247265625, 0.319247265625),\n (0.83921568627451, 0.3267046875, 0.3267046875), (0.843137254901961,\n 0.334162109375, 0.334162109375), (0.847058823529412, 
0.34161953125,\n 0.34161953125), (0.850980392156863, 0.3490765625, 0.3490765625), (\n 0.854901960784314, 0.356533984375, 0.356533984375), (\n 0.858823529411765, 0.36399140625, 0.36399140625), (\n 0.862745098039216, 0.371448828125, 0.371448828125), (\n 0.866666666666667, 0.37890625, 0.37890625), (0.870588235294118, \n 0.386363671875, 0.386363671875), (0.874509803921569, 0.3938203125, \n 0.3938203125), (0.87843137254902, 0.40127734375, 0.40127734375), (\n 0.882352941176471, 0.408734375, 0.408734375), (0.886274509803922, \n 0.41619140625, 0.41619140625), (0.890196078431373, 0.42365234375, \n 0.42365234375), (0.894117647058824, 0.431109375, 0.431109375), (\n 0.898039215686275, 0.43856640625, 0.43856640625), (\n 0.901960784313726, 0.4460234375, 0.4460234375), (0.905882352941176,\n 0.45348046875, 0.45348046875), (0.909803921568627, 0.4609375, \n 0.4609375), (0.913725490196078, 0.46839453125, 0.46839453125), (\n 0.917647058823529, 0.4758515625, 0.4758515625), (0.92156862745098, \n 0.48330859375, 0.48330859375), (0.925490196078431, 0.490765625, \n 0.490765625), (0.929411764705882, 0.49822265625, 0.49822265625), (\n 0.933333333333333, 0.50568359375, 0.50568359375), (\n 0.937254901960784, 0.513140625, 0.513140625), (0.941176470588235, \n 0.52059765625, 0.52059765625), (0.945098039215686, 0.5280546875, \n 0.5280546875), (0.949019607843137, 0.53551171875, 0.53551171875), (\n 0.952941176470588, 0.54296875, 0.54296875), (0.956862745098039, \n 0.55042578125, 0.55042578125), (0.96078431372549, 0.5578828125, \n 0.5578828125), (0.964705882352941, 0.56533984375, 0.56533984375), (\n 0.968627450980392, 0.572796875, 0.572796875), (0.972549019607843, \n 0.58025390625, 0.58025390625), (0.976470588235294, 0.58771484375, \n 0.58771484375), (0.980392156862745, 0.595171875, 0.595171875), (\n 0.984313725490196, 0.60262890625, 0.60262890625), (\n 0.988235294117647, 0.6100859375, 0.6100859375), (0.992156862745098,\n 0.61754296875, 0.61754296875), (0.996078431372549, 0.625, 0.625), (\n 1.0, 0.625, 0.625)), 'blue': ((0.0, 0.51984375, 0.51984375), (\n 0.00392156862745098, 0.51984375, 0.51984375), (0.00784313725490196,\n 0.529765625, 0.529765625), (0.0117647058823529, 0.5396875, \n 0.5396875), (0.0156862745098039, 0.549609375, 0.549609375), (\n 0.0196078431372549, 0.55953125, 0.55953125), (0.0235294117647059, \n 0.569453125, 0.569453125), (0.0274509803921569, 0.579375, 0.579375),\n (0.0313725490196078, 0.589296875, 0.589296875), (0.0352941176470588,\n 0.59921875, 0.59921875), (0.0392156862745098, 0.609140625, \n 0.609140625), (0.0431372549019608, 0.6190625, 0.6190625), (\n 0.0470588235294118, 0.628984375, 0.628984375), (0.0509803921568627,\n 0.63890625, 0.63890625), (0.0549019607843137, 0.648828125, \n 0.648828125), (0.0588235294117647, 0.65875, 0.65875), (\n 0.0627450980392157, 0.668671875, 0.668671875), (0.0666666666666667,\n 0.67859375, 0.67859375), (0.0705882352941176, 0.688515625, \n 0.688515625), (0.0745098039215686, 0.6984375, 0.6984375), (\n 0.0784313725490196, 0.708359375, 0.708359375), (0.0823529411764706,\n 0.71828125, 0.71828125), (0.0862745098039216, 0.728203125, \n 0.728203125), (0.0901960784313725, 0.738125, 0.738125), (\n 0.0941176470588235, 0.748046875, 0.748046875), (0.0980392156862745,\n 0.75796875, 0.75796875), (0.101960784313725, 0.767890625, \n 0.767890625), (0.105882352941176, 0.7778125, 0.7778125), (\n 0.109803921568627, 0.787734375, 0.787734375), (0.113725490196078, \n 0.79765625, 0.79765625), (0.117647058823529, 0.807578125, \n 0.807578125), (0.12156862745098, 0.8175, 0.8175), (\n 
0.125490196078431, 0.827421875, 0.827421875), (0.129411764705882, \n 0.83734375, 0.83734375), (0.133333333333333, 0.847265625, \n 0.847265625), (0.137254901960784, 0.8571875, 0.8571875), (\n 0.141176470588235, 0.867109375, 0.867109375), (0.145098039215686, \n 0.87703125, 0.87703125), (0.149019607843137, 0.886953125, \n 0.886953125), (0.152941176470588, 0.896875, 0.896875), (\n 0.156862745098039, 0.906796875, 0.906796875), (0.16078431372549, \n 0.91671875, 0.91671875), (0.164705882352941, 0.926640625, \n 0.926640625), (0.168627450980392, 0.9365625, 0.9365625), (\n 0.172549019607843, 0.946484375, 0.946484375), (0.176470588235294, \n 0.95640625, 0.95640625), (0.180392156862745, 0.966328125, \n 0.966328125), (0.184313725490196, 0.97625, 0.97625), (\n 0.188235294117647, 0.986171875, 0.986171875), (0.192156862745098, \n 0.99609375, 0.99609375), (0.196078431372549, 0.976171875, \n 0.976171875), (0.2, 0.95625, 0.95625), (0.203921568627451, \n 0.936328125, 0.936328125), (0.207843137254902, 0.91640625, \n 0.91640625), (0.211764705882353, 0.896484375, 0.896484375), (\n 0.215686274509804, 0.8765625, 0.8765625), (0.219607843137255, \n 0.856640625, 0.856640625), (0.223529411764706, 0.83671875, \n 0.83671875), (0.227450980392157, 0.816796875, 0.816796875), (\n 0.231372549019608, 0.796875, 0.796875), (0.235294117647059, \n 0.776953125, 0.776953125), (0.23921568627451, 0.75703125, \n 0.75703125), (0.243137254901961, 0.737109375, 0.737109375), (\n 0.247058823529412, 0.7171875, 0.7171875), (0.250980392156863, \n 0.697265625, 0.697265625), (0.254901960784314, 0.67734375, \n 0.67734375), (0.258823529411765, 0.657421875, 0.657421875), (\n 0.262745098039216, 0.6375, 0.6375), (0.266666666666667, 0.617578125,\n 0.617578125), (0.270588235294118, 0.59765625, 0.59765625), (\n 0.274509803921569, 0.577734375, 0.577734375), (0.27843137254902, \n 0.5578125, 0.5578125), (0.282352941176471, 0.537890625, 0.537890625\n ), (0.286274509803922, 0.51796875, 0.51796875), (0.290196078431373,\n 0.498046875, 0.498046875), (0.294117647058824, 0.478125, 0.478125),\n (0.298039215686275, 0.458203125, 0.458203125), (0.301960784313725, \n 0.43828125, 0.43828125), (0.305882352941176, 0.418359375, \n 0.418359375), (0.309803921568627, 0.3984375, 0.3984375), (\n 0.313725490196078, 0.378515625, 0.378515625), (0.317647058823529, \n 0.35859375, 0.35859375), (0.32156862745098, 0.338671875, \n 0.338671875), (0.325490196078431, 0.31875, 0.31875), (\n 0.329411764705882, 0.298828125, 0.298828125), (0.333333333333333, \n 0.27890625, 0.27890625), (0.337254901960784, 0.258984375, \n 0.258984375), (0.341176470588235, 0.2390625, 0.2390625), (\n 0.345098039215686, 0.219140625, 0.219140625), (0.349019607843137, \n 0.19921875, 0.19921875), (0.352941176470588, 0.179296875, \n 0.179296875), (0.356862745098039, 0.159375, 0.159375), (\n 0.36078431372549, 0.139453125, 0.139453125), (0.364705882352941, \n 0.11953125, 0.11953125), (0.368627450980392, 0.099609375, \n 0.099609375), (0.372549019607843, 0.0796875, 0.0796875), (\n 0.376470588235294, 0.059765625, 0.059765625), (0.380392156862745, \n 0.03984375, 0.03984375), (0.384313725490196, 0.019921875, \n 0.019921875), (0.388235294117647, 0, 0), (0.392156862745098, 0, 0),\n (0.396078431372549, 0, 0), (0.4, 0, 0), (0.403921568627451, 0, 0),\n (0.407843137254902, 0, 0), (0.411764705882353, 0, 0), (\n 0.415686274509804, 0, 0), (0.419607843137255, 0, 0), (\n 0.423529411764706, 0, 0), (0.427450980392157, 0, 0), (\n 0.431372549019608, 0, 0), (0.435294117647059, 0, 0), (\n 0.43921568627451, 0, 0), (0.443137254901961, 0, 0), 
(\n 0.447058823529412, 0, 0), (0.450980392156863, 0, 0), (\n 0.454901960784314, 0, 0), (0.458823529411765, 0, 0), (\n 0.462745098039216, 0, 0), (0.466666666666667, 0, 0), (\n 0.470588235294118, 0, 0), (0.474509803921569, 0, 0), (\n 0.47843137254902, 0, 0), (0.482352941176471, 0, 0), (\n 0.486274509803922, 0, 0), (0.490196078431373, 0, 0), (\n 0.494117647058824, 0, 0), (0.498039215686275, 0, 0), (\n 0.501960784313725, 0, 0), (0.505882352941176, 0, 0), (\n 0.509803921568627, 0, 0), (0.513725490196078, 0, 0), (\n 0.517647058823529, 0, 0), (0.52156862745098, 0, 0), (\n 0.525490196078431, 0, 0), (0.529411764705882, 0, 0), (\n 0.533333333333333, 0, 0), (0.537254901960784, 0, 0), (\n 0.541176470588235, 0, 0), (0.545098039215686, 0, 0), (\n 0.549019607843137, 0, 0), (0.552941176470588, 0, 0), (\n 0.556862745098039, 0, 0), (0.56078431372549, 0, 0), (\n 0.564705882352941, 0, 0), (0.568627450980392, 0, 0), (\n 0.572549019607843, 0, 0), (0.576470588235294, 0, 0), (\n 0.580392156862745, 0, 0), (0.584313725490196, 0, 0), (\n 0.588235294117647, 0.004296875, 0.004296875), (0.592156862745098, \n 0.00859375, 0.00859375), (0.596078431372549, 0.012890625, \n 0.012890625), (0.6, 0.0171875, 0.0171875), (0.603921568627451, \n 0.021484375, 0.021484375), (0.607843137254902, 0.02578125, \n 0.02578125), (0.611764705882353, 0.030078125, 0.030078125), (\n 0.615686274509804, 0.034375, 0.034375), (0.619607843137255, \n 0.038671875, 0.038671875), (0.623529411764706, 0.04296875, \n 0.04296875), (0.627450980392157, 0.047265625, 0.047265625), (\n 0.631372549019608, 0.0515625, 0.0515625), (0.635294117647059, \n 0.055859375, 0.055859375), (0.63921568627451, 0.06015625, \n 0.06015625), (0.643137254901961, 0.064453125, 0.064453125), (\n 0.647058823529412, 0.06875, 0.06875), (0.650980392156863, \n 0.073046875, 0.073046875), (0.654901960784314, 0.07734375, \n 0.07734375), (0.658823529411765, 0.081640625, 0.081640625), (\n 0.662745098039216, 0.0859375, 0.0859375), (0.666666666666667, \n 0.090234375, 0.090234375), (0.670588235294118, 0.09453125, \n 0.09453125), (0.674509803921569, 0.098828125, 0.098828125), (\n 0.67843137254902, 0.103125, 0.103125), (0.682352941176471, \n 0.107421875, 0.107421875), (0.686274509803922, 0.11171875, \n 0.11171875), (0.690196078431373, 0.116015625, 0.116015625), (\n 0.694117647058824, 0.1203125, 0.1203125), (0.698039215686274, \n 0.124609375, 0.124609375), (0.701960784313725, 0.12890625, \n 0.12890625), (0.705882352941177, 0.133203125, 0.133203125), (\n 0.709803921568627, 0.1375, 0.1375), (0.713725490196078, 0.141796875,\n 0.141796875), (0.717647058823529, 0.14609375, 0.14609375), (\n 0.72156862745098, 0.150390625, 0.150390625), (0.725490196078431, \n 0.1546875, 0.1546875), (0.729411764705882, 0.158984375, 0.158984375\n ), (0.733333333333333, 0.16328125, 0.16328125), (0.737254901960784,\n 0.167578125, 0.167578125), (0.741176470588235, 0.171875, 0.171875),\n (0.745098039215686, 0.176171875, 0.176171875), (0.749019607843137, \n 0.18046875, 0.18046875), (0.752941176470588, 0.184765625, \n 0.184765625), (0.756862745098039, 0.1890625, 0.1890625), (\n 0.76078431372549, 0.193359375, 0.193359375), (0.764705882352941, \n 0.19765625, 0.19765625), (0.768627450980392, 0.201953125, \n 0.201953125), (0.772549019607843, 0.20625, 0.20625), (\n 0.776470588235294, 0.210546875, 0.210546875), (0.780392156862745, \n 0.21484375, 0.21484375), (0.784313725490196, 0.22663359375, \n 0.22663359375), (0.788235294117647, 0.2384234375, 0.2384234375), (\n 0.792156862745098, 0.250212890625, 0.250212890625), (\n 0.796078431372549, 
0.262002734375, 0.262002734375), (0.8, \n 0.273792578125, 0.273792578125), (0.803921568627451, 0.285582421875,\n 0.285582421875), (0.807843137254902, 0.297372265625, 0.297372265625\n ), (0.811764705882353, 0.309162109375, 0.309162109375), (\n 0.815686274509804, 0.3209515625, 0.3209515625), (0.819607843137255,\n 0.33274140625, 0.33274140625), (0.823529411764706, 0.34453125, \n 0.34453125), (0.827450980392157, 0.35632109375, 0.35632109375), (\n 0.831372549019608, 0.3681109375, 0.3681109375), (0.835294117647059,\n 0.379900390625, 0.379900390625), (0.83921568627451, 0.39169140625, \n 0.39169140625), (0.843137254901961, 0.40348046875, 0.40348046875),\n (0.847058823529412, 0.41526953125, 0.41526953125), (\n 0.850980392156863, 0.42705859375, 0.42705859375), (\n 0.854901960784314, 0.43884765625, 0.43884765625), (\n 0.858823529411765, 0.450640625, 0.450640625), (0.862745098039216, \n 0.4624296875, 0.4624296875), (0.866666666666667, 0.47421875, \n 0.47421875), (0.870588235294118, 0.4860078125, 0.4860078125), (\n 0.874509803921569, 0.497796875, 0.497796875), (0.87843137254902, \n 0.50958984375, 0.50958984375), (0.882352941176471, 0.52137890625, \n 0.52137890625), (0.886274509803922, 0.53316796875, 0.53316796875),\n (0.890196078431373, 0.54495703125, 0.54495703125), (\n 0.894117647058824, 0.55674609375, 0.55674609375), (\n 0.898039215686275, 0.56853515625, 0.56853515625), (\n 0.901960784313726, 0.580328125, 0.580328125), (0.905882352941176, \n 0.5921171875, 0.5921171875), (0.909803921568627, 0.60390625, \n 0.60390625), (0.913725490196078, 0.6156953125, 0.6156953125), (\n 0.917647058823529, 0.627484375, 0.627484375), (0.92156862745098, \n 0.63927734375, 0.63927734375), (0.925490196078431, 0.65106640625, \n 0.65106640625), (0.929411764705882, 0.66285546875, 0.66285546875),\n (0.933333333333333, 0.67464453125, 0.67464453125), (\n 0.937254901960784, 0.68643359375, 0.68643359375), (\n 0.941176470588235, 0.69822265625, 0.69822265625), (\n 0.945098039215686, 0.710015625, 0.710015625), (0.949019607843137, \n 0.7218046875, 0.7218046875), (0.952941176470588, 0.73359375, \n 0.73359375), (0.956862745098039, 0.7453828125, 0.7453828125), (\n 0.96078431372549, 0.757171875, 0.757171875), (0.964705882352941, \n 0.76896484375, 0.76896484375), (0.968627450980392, 0.78075390625, \n 0.78075390625), (0.972549019607843, 0.79254296875, 0.79254296875),\n (0.976470588235294, 0.80433203125, 0.80433203125), (\n 0.980392156862745, 0.81612109375, 0.81612109375), (\n 0.984313725490196, 0.82791015625, 0.82791015625), (\n 0.988235294117647, 0.839703125, 0.839703125), (0.992156862745098, \n 0.8514921875, 0.8514921875), (0.996078431372549, 0.86328125, \n 0.86328125), (1.0, 0.86328125, 0.86328125))}\n vcdict = {'red': ((0, 1, 1), (0.00392156862745098, 0.54508984375, \n 0.54508984375), (0.00784313725490196, 0.5285703125, 0.5285703125),\n (0.0117647058823529, 0.5120546875, 0.5120546875), (\n 0.0156862745098039, 0.49553515625, 0.49553515625), (\n 0.0196078431372549, 0.47901953125, 0.47901953125), (\n 0.0235294117647059, 0.4625, 0.4625), (0.0274509803921569, \n 0.44598046875, 0.44598046875), (0.0313725490196078, 0.42946484375, \n 0.42946484375), (0.0352941176470588, 0.4129453125, 0.4129453125), (\n 0.0392156862745098, 0.3964296875, 0.3964296875), (\n 0.0431372549019608, 0.379910546875, 0.379910546875), (\n 0.0470588235294118, 0.36339296875, 0.36339296875), (\n 0.0509803921568627, 0.346875, 0.346875), (0.0549019607843137, \n 0.33035703125, 0.33035703125), (0.0588235294117647, 0.313839453125,\n 0.313839453125), (0.0627450980392157, 
0.297321484375, \n 0.297321484375), (0.0666666666666667, 0.280803515625, \n 0.280803515625), (0.0705882352941176, 0.2642859375, 0.2642859375),\n (0.0745098039215686, 0.24776796875, 0.24776796875), (\n 0.0784313725490196, 0.23125, 0.23125), (0.0823529411764706, \n 0.21473203125, 0.21473203125), (0.0862745098039216, 0.198214453125,\n 0.198214453125), (0.0901960784313725, 0.181696484375, \n 0.181696484375), (0.0941176470588235, 0.165178515625, \n 0.165178515625), (0.0980392156862745, 0.148660546875, \n 0.148660546875), (0.101960784313725, 0.13214296875, 0.13214296875),\n (0.105882352941176, 0.115625, 0.115625), (0.109803921568627, \n 0.09910703125, 0.09910703125), (0.113725490196078, 0.082589453125, \n 0.082589453125), (0.117647058823529, 0.066071484375, 0.066071484375\n ), (0.12156862745098, 0.049553515625, 0.049553515625), (\n 0.125490196078431, 0.0330357421875, 0.0330357421875), (\n 0.129411764705882, 0.016517890625, 0.016517890625), (\n 0.133333333333333, 0, 0), (0.137254901960784, 0, 0), (\n 0.141176470588235, 0, 0), (0.145098039215686, 0, 0), (\n 0.149019607843137, 0, 0), (0.152941176470588, 0, 0), (\n 0.156862745098039, 0, 0), (0.16078431372549, 0, 0), (\n 0.164705882352941, 0, 0), (0.168627450980392, 0, 0), (\n 0.172549019607843, 0, 0), (0.176470588235294, 0, 0), (\n 0.180392156862745, 0, 0), (0.184313725490196, 0, 0), (\n 0.188235294117647, 0, 0), (0.192156862745098, 0, 0), (\n 0.196078431372549, 0, 0), (0.2, 0, 0), (0.203921568627451, 0, 0), (\n 0.207843137254902, 0, 0), (0.211764705882353, 0, 0), (\n 0.215686274509804, 0, 0), (0.219607843137255, 0, 0), (\n 0.223529411764706, 0, 0), (0.227450980392157, 0, 0), (\n 0.231372549019608, 0, 0), (0.235294117647059, 0, 0), (\n 0.23921568627451, 0, 0), (0.243137254901961, 0, 0), (\n 0.247058823529412, 0, 0), (0.250980392156863, 0, 0), (\n 0.254901960784314, 0, 0), (0.258823529411765, 0, 0), (\n 0.262745098039216, 0, 0), (0.266666666666667, 0, 0), (\n 0.270588235294118, 0, 0), (0.274509803921569, 0, 0), (\n 0.27843137254902, 0, 0), (0.282352941176471, 0, 0), (\n 0.286274509803922, 0, 0), (0.290196078431373, 0, 0), (\n 0.294117647058824, 0, 0), (0.298039215686275, 0, 0), (\n 0.301960784313725, 0, 0), (0.305882352941176, 0, 0), (\n 0.309803921568627, 0, 0), (0.313725490196078, 0, 0), (\n 0.317647058823529, 0, 0), (0.32156862745098, 0, 0), (\n 0.325490196078431, 0, 0), (0.329411764705882, 0, 0), (\n 0.333333333333333, 0, 0), (0.337254901960784, 0, 0), (\n 0.341176470588235, 0, 0), (0.345098039215686, 0, 0), (\n 0.349019607843137, 0, 0), (0.352941176470588, 0.0061383984375, \n 0.0061383984375), (0.356862745098039, 0.012276796875, \n 0.012276796875), (0.36078431372549, 0.0184151953125, \n 0.0184151953125), (0.364705882352941, 0.0245535546875, \n 0.0245535546875), (0.368627450980392, 0.030691953125, \n 0.030691953125), (0.372549019607843, 0.0368303515625, \n 0.0368303515625), (0.376470588235294, 0.04296875, 0.04296875), (\n 0.380392156862745, 0.04910703125, 0.04910703125), (\n 0.384313725490196, 0.055245703125, 0.055245703125), (\n 0.388235294117647, 0.061383984375, 0.061383984375), (\n 0.392156862745098, 0.067522265625, 0.067522265625), (\n 0.396078431372549, 0.073660546875, 0.073660546875), (0.4, \n 0.07979921875, 0.07979921875), (0.403921568627451, 0.0859375, \n 0.0859375), (0.407843137254902, 0.09207578125, 0.09207578125), (\n 0.411764705882353, 0.098214453125, 0.098214453125), (\n 0.415686274509804, 0.104352734375, 0.104352734375), (\n 0.419607843137255, 0.110491015625, 0.110491015625), (\n 0.423529411764706, 0.116629296875, 0.116629296875), 
(\n 0.427450980392157, 0.12276796875, 0.12276796875), (\n 0.431372549019608, 0.12890625, 0.12890625), (0.435294117647059, \n 0.13504453125, 0.13504453125), (0.43921568627451, 0.141183203125, \n 0.141183203125), (0.443137254901961, 0.147321484375, 0.147321484375\n ), (0.447058823529412, 0.153459765625, 0.153459765625), (\n 0.450980392156863, 0.159598046875, 0.159598046875), (\n 0.454901960784314, 0.16573671875, 0.16573671875), (\n 0.458823529411765, 0.171875, 0.171875), (0.462745098039216, \n 0.17801328125, 0.17801328125), (0.466666666666667, 0.184151953125, \n 0.184151953125), (0.470588235294118, 0.190290234375, 0.190290234375\n ), (0.474509803921569, 0.196428515625, 0.196428515625), (\n 0.47843137254902, 0.202566796875, 0.202566796875), (\n 0.482352941176471, 0.20870546875, 0.20870546875), (\n 0.486274509803922, 0.21484375, 0.21484375), (0.490196078431373, \n 0.233370703125, 0.233370703125), (0.494117647058824, 0.251897265625,\n 0.251897265625), (0.498039215686275, 0.27042421875, 0.27042421875),\n (0.501960784313725, 0.28895078125, 0.28895078125), (\n 0.505882352941176, 0.307477734375, 0.307477734375), (\n 0.509803921568627, 0.326004296875, 0.326004296875), (\n 0.513725490196078, 0.34453125, 0.34453125), (0.517647058823529, \n 0.363058203125, 0.363058203125), (0.52156862745098, 0.381584765625,\n 0.381584765625), (0.525490196078431, 0.40011328125, 0.40011328125),\n (0.529411764705882, 0.41863671875, 0.41863671875), (\n 0.533333333333333, 0.4371640625, 0.4371640625), (0.537254901960784,\n 0.45569140625, 0.45569140625), (0.541176470588235, 0.47421875, \n 0.47421875), (0.545098039215686, 0.49274609375, 0.49274609375), (\n 0.549019607843137, 0.5112734375, 0.5112734375), (0.552941176470588,\n 0.52980078125, 0.52980078125), (0.556862745098039, 0.54832421875, \n 0.54832421875), (0.56078431372549, 0.5668515625, 0.5668515625), (\n 0.564705882352941, 0.58537890625, 0.58537890625), (\n 0.568627450980392, 0.60390625, 0.60390625), (0.572549019607843, \n 0.62243359375, 0.62243359375), (0.576470588235294, 0.6409609375, \n 0.6409609375), (0.580392156862745, 0.65948828125, 0.65948828125), (\n 0.584313725490196, 0.67801171875, 0.67801171875), (\n 0.588235294117647, 0.6965390625, 0.6965390625), (0.592156862745098,\n 0.71506640625, 0.71506640625), (0.596078431372549, 0.73359375, \n 0.73359375), (0.6, 0.75212109375, 0.75212109375), (\n 0.603921568627451, 0.7706484375, 0.7706484375), (0.607843137254902,\n 0.78917578125, 0.78917578125), (0.611764705882353, 0.80769921875, \n 0.80769921875), (0.615686274509804, 0.8262265625, 0.8262265625), (\n 0.619607843137255, 0.84475390625, 0.84475390625), (\n 0.623529411764706, 0.86328125, 0.86328125), (0.627450980392157, \n 0.86549609375, 0.86549609375), (0.631372549019608, 0.86770703125, \n 0.86770703125), (0.635294117647059, 0.869921875, 0.869921875), (\n 0.63921568627451, 0.87213671875, 0.87213671875), (0.643137254901961,\n 0.87434765625, 0.87434765625), (0.647058823529412, 0.8765625, \n 0.8765625), (0.650980392156863, 0.87877734375, 0.87877734375), (\n 0.654901960784314, 0.88098828125, 0.88098828125), (\n 0.658823529411765, 0.883203125, 0.883203125), (0.662745098039216, \n 0.88541796875, 0.88541796875), (0.666666666666667, 0.88762890625, \n 0.88762890625), (0.670588235294118, 0.88984375, 0.88984375), (\n 0.674509803921569, 0.89205859375, 0.89205859375), (0.67843137254902,\n 0.89426953125, 0.89426953125), (0.682352941176471, 0.896484375, \n 0.896484375), (0.686274509803922, 0.89869921875, 0.89869921875), (\n 0.690196078431373, 0.90091015625, 0.90091015625), (\n 
0.694117647058824, 0.903125, 0.903125), (0.698039215686274, \n 0.90533984375, 0.90533984375), (0.701960784313725, 0.90755078125, \n 0.90755078125), (0.705882352941177, 0.909765625, 0.909765625), (\n 0.709803921568627, 0.91198046875, 0.91198046875), (\n 0.713725490196078, 0.91419140625, 0.91419140625), (\n 0.717647058823529, 0.91640625, 0.91640625), (0.72156862745098, \n 0.91862109375, 0.91862109375), (0.725490196078431, 0.92083203125, \n 0.92083203125), (0.729411764705882, 0.923046875, 0.923046875), (\n 0.733333333333333, 0.92526171875, 0.92526171875), (\n 0.737254901960784, 0.92747265625, 0.92747265625), (\n 0.741176470588235, 0.9296875, 0.9296875), (0.745098039215686, \n 0.93190234375, 0.93190234375), (0.749019607843137, 0.93411328125, \n 0.93411328125), (0.752941176470588, 0.936328125, 0.936328125), (\n 0.756862745098039, 0.93854296875, 0.93854296875), (0.76078431372549,\n 0.94075390625, 0.94075390625), (0.764705882352941, 0.94296875, \n 0.94296875), (0.768627450980392, 0.94518359375, 0.94518359375), (\n 0.772549019607843, 0.94739453125, 0.94739453125), (\n 0.776470588235294, 0.949609375, 0.949609375), (0.780392156862745, \n 0.95182421875, 0.95182421875), (0.784313725490196, 0.95403515625, \n 0.95403515625), (0.788235294117647, 0.95625, 0.95625), (\n 0.792156862745098, 0.95846484375, 0.95846484375), (\n 0.796078431372549, 0.96067578125, 0.96067578125), (0.8, 0.962890625,\n 0.962890625), (0.803921568627451, 0.96510546875, 0.96510546875), (\n 0.807843137254902, 0.96731640625, 0.96731640625), (\n 0.811764705882353, 0.96953125, 0.96953125), (0.815686274509804, \n 0.97174609375, 0.97174609375), (0.819607843137255, 0.97395703125, \n 0.97395703125), (0.823529411764706, 0.976171875, 0.976171875), (\n 0.827450980392157, 0.97838671875, 0.97838671875), (\n 0.831372549019608, 0.98059765625, 0.98059765625), (\n 0.835294117647059, 0.9828125, 0.9828125), (0.83921568627451, \n 0.98502734375, 0.98502734375), (0.843137254901961, 0.98723828125, \n 0.98723828125), (0.847058823529412, 0.989453125, 0.989453125), (\n 0.850980392156863, 0.99166796875, 0.99166796875), (\n 0.854901960784314, 0.99387890625, 0.99387890625), (\n 0.858823529411765, 0.99609375, 0.99609375), (0.862745098039216, \n 0.99609375, 0.99609375), (0.866666666666667, 0.99609375, 0.99609375\n ), (0.870588235294118, 0.99609375, 0.99609375), (0.874509803921569,\n 0.99609375, 0.99609375), (0.87843137254902, 0.99609375, 0.99609375),\n (0.882352941176471, 0.99609375, 0.99609375), (0.886274509803922, \n 0.99609375, 0.99609375), (0.890196078431373, 0.99609375, 0.99609375\n ), (0.894117647058824, 0.99609375, 0.99609375), (0.898039215686275,\n 0.99609375, 0.99609375), (0.901960784313726, 0.99609375, 0.99609375\n ), (0.905882352941176, 0.99609375, 0.99609375), (0.909803921568627,\n 0.99609375, 0.99609375), (0.913725490196078, 0.99609375, 0.99609375\n ), (0.917647058823529, 0.99609375, 0.99609375), (0.92156862745098, \n 0.99609375, 0.99609375), (0.925490196078431, 0.99609375, 0.99609375\n ), (0.929411764705882, 0.99609375, 0.99609375), (0.933333333333333,\n 0.99609375, 0.99609375), (0.937254901960784, 0.99609375, 0.99609375\n ), (0.941176470588235, 0.99609375, 0.99609375), (0.945098039215686,\n 0.99609375, 0.99609375), (0.949019607843137, 0.99609375, 0.99609375\n ), (0.952941176470588, 0.99609375, 0.99609375), (0.956862745098039,\n 0.99609375, 0.99609375), (0.96078431372549, 0.99609375, 0.99609375),\n (0.964705882352941, 0.99609375, 0.99609375), (0.968627450980392, \n 0.99609375, 0.99609375), (0.972549019607843, 0.99609375, 0.99609375\n ), 
(0.976470588235294, 0.99609375, 0.99609375), (0.980392156862745,\n 0.99609375, 0.99609375), (0.984313725490196, 0.99609375, 0.99609375\n ), (0.988235294117647, 0.99609375, 0.99609375), (0.992156862745098,\n 0.99609375, 0.99609375), (0.996078431372549, 0.99609375, 0.99609375\n ), (1, 0.99609375, 0.99609375)), 'green': ((0, 1, 1), (\n 0.00392156862745098, 0, 0), (0.00784313725490196, 0, 0), (\n 0.0117647058823529, 0, 0), (0.0156862745098039, 0, 0), (\n 0.0196078431372549, 0, 0), (0.0235294117647059, 0, 0), (\n 0.0274509803921569, 0, 0), (0.0313725490196078, 0, 0), (\n 0.0352941176470588, 0, 0), (0.0392156862745098, 0, 0), (\n 0.0431372549019608, 0, 0), (0.0470588235294118, 0, 0), (\n 0.0509803921568627, 0, 0), (0.0549019607843137, 0, 0), (\n 0.0588235294117647, 0, 0), (0.0627450980392157, 0, 0), (\n 0.0666666666666667, 0, 0), (0.0705882352941176, 0, 0), (\n 0.0745098039215686, 0, 0), (0.0784313725490196, 0, 0), (\n 0.0823529411764706, 0, 0), (0.0862745098039216, 0, 0), (\n 0.0901960784313725, 0, 0), (0.0941176470588235, 0, 0), (\n 0.0980392156862745, 0, 0), (0.101960784313725, 0, 0), (\n 0.105882352941176, 0, 0), (0.109803921568627, 0, 0), (\n 0.113725490196078, 0, 0), (0.117647058823529, 0, 0), (\n 0.12156862745098, 0, 0), (0.125490196078431, 0, 0), (\n 0.129411764705882, 0, 0), (0.133333333333333, 0, 0), (\n 0.137254901960784, 0.0135653515625, 0.0135653515625), (\n 0.141176470588235, 0.0271306640625, 0.0271306640625), (\n 0.145098039215686, 0.04069609375, 0.04069609375), (\n 0.149019607843137, 0.054261328125, 0.054261328125), (\n 0.152941176470588, 0.0678265625, 0.0678265625), (0.156862745098039,\n 0.0813921875, 0.0813921875), (0.16078431372549, 0.094957421875, \n 0.094957421875), (0.164705882352941, 0.10852265625, 0.10852265625),\n (0.168627450980392, 0.122087890625, 0.122087890625), (\n 0.172549019607843, 0.135653515625, 0.135653515625), (\n 0.176470588235294, 0.14921875, 0.14921875), (0.180392156862745, \n 0.162783984375, 0.162783984375), (0.184313725490196, 0.176349609375,\n 0.176349609375), (0.188235294117647, 0.18991484375, 0.18991484375),\n (0.192156862745098, 0.203480078125, 0.203480078125), (\n 0.196078431372549, 0.2170453125, 0.2170453125), (0.2, 0.2306109375,\n 0.2306109375), (0.203921568627451, 0.244176171875, 0.244176171875),\n (0.207843137254902, 0.25774140625, 0.25774140625), (\n 0.211764705882353, 0.27130703125, 0.27130703125), (\n 0.215686274509804, 0.284872265625, 0.284872265625), (\n 0.219607843137255, 0.2984375, 0.2984375), (0.223529411764706, \n 0.312002734375, 0.312002734375), (0.227450980392157, 0.325568359375,\n 0.325568359375), (0.231372549019608, 0.33913359375, 0.33913359375),\n (0.235294117647059, 0.352698828125, 0.352698828125), (\n 0.23921568627451, 0.3662640625, 0.3662640625), (0.243137254901961, \n 0.3798296875, 0.3798296875), (0.247058823529412, 0.39339453125, \n 0.39339453125), (0.250980392156863, 0.4069609375, 0.4069609375), (\n 0.254901960784314, 0.42052734375, 0.42052734375), (\n 0.258823529411765, 0.43408984375, 0.43408984375), (\n 0.262745098039216, 0.44765625, 0.44765625), (0.266666666666667, \n 0.46122265625, 0.46122265625), (0.270588235294118, 0.47478515625, \n 0.47478515625), (0.274509803921569, 0.4883515625, 0.4883515625), (\n 0.27843137254902, 0.50191796875, 0.50191796875), (0.282352941176471,\n 0.515484375, 0.515484375), (0.286274509803922, 0.529046875, \n 0.529046875), (0.290196078431373, 0.54261328125, 0.54261328125), (\n 0.294117647058824, 0.5561796875, 0.5561796875), (0.298039215686275,\n 0.56974609375, 0.56974609375), (0.301960784313725, 
0.58330859375, \n 0.58330859375), (0.305882352941176, 0.596875, 0.596875), (\n 0.309803921568627, 0.61044140625, 0.61044140625), (\n 0.313725490196078, 0.62400390625, 0.62400390625), (\n 0.317647058823529, 0.6375703125, 0.6375703125), (0.32156862745098, \n 0.65113671875, 0.65113671875), (0.325490196078431, 0.664703125, \n 0.664703125), (0.329411764705882, 0.678265625, 0.678265625), (\n 0.333333333333333, 0.69183203125, 0.69183203125), (\n 0.337254901960784, 0.7053984375, 0.7053984375), (0.341176470588235,\n 0.71896484375, 0.71896484375), (0.345098039215686, 0.73252734375, \n 0.73252734375), (0.349019607843137, 0.74609375, 0.74609375), (\n 0.352941176470588, 0.7309140625, 0.7309140625), (0.356862745098039,\n 0.71573828125, 0.71573828125), (0.36078431372549, 0.70055859375, \n 0.70055859375), (0.364705882352941, 0.68537890625, 0.68537890625),\n (0.368627450980392, 0.67019921875, 0.67019921875), (\n 0.372549019607843, 0.6550234375, 0.6550234375), (0.376470588235294,\n 0.63984375, 0.63984375), (0.380392156862745, 0.6246640625, \n 0.6246640625), (0.384313725490196, 0.60948828125, 0.60948828125), (\n 0.388235294117647, 0.59430859375, 0.59430859375), (\n 0.392156862745098, 0.57912890625, 0.57912890625), (\n 0.396078431372549, 0.56394921875, 0.56394921875), (0.4, \n 0.5487734375, 0.5487734375), (0.403921568627451, 0.53359375, \n 0.53359375), (0.407843137254902, 0.5184140625, 0.5184140625), (\n 0.411764705882353, 0.50323828125, 0.50323828125), (\n 0.415686274509804, 0.48805859375, 0.48805859375), (\n 0.419607843137255, 0.47287890625, 0.47287890625), (\n 0.423529411764706, 0.45769921875, 0.45769921875), (\n 0.427450980392157, 0.4425234375, 0.4425234375), (0.431372549019608,\n 0.42734375, 0.42734375), (0.435294117647059, 0.4121640625, \n 0.4121640625), (0.43921568627451, 0.39698828125, 0.39698828125), (\n 0.443137254901961, 0.381808203125, 0.381808203125), (\n 0.447058823529412, 0.366629296875, 0.366629296875), (\n 0.450980392156863, 0.35145078125, 0.35145078125), (\n 0.454901960784314, 0.336272265625, 0.336272265625), (\n 0.458823529411765, 0.32109375, 0.32109375), (0.462745098039216, \n 0.305915234375, 0.305915234375), (0.466666666666667, 0.29073671875,\n 0.29073671875), (0.470588235294118, 0.2755578125, 0.2755578125), (\n 0.474509803921569, 0.260379296875, 0.260379296875), (\n 0.47843137254902, 0.24520078125, 0.24520078125), (0.482352941176471,\n 0.230022265625, 0.230022265625), (0.486274509803922, 0.21484375, \n 0.21484375), (0.490196078431373, 0.2265625, 0.2265625), (\n 0.494117647058824, 0.23828125, 0.23828125), (0.498039215686275, \n 0.25, 0.25), (0.501960784313725, 0.26171875, 0.26171875), (\n 0.505882352941176, 0.2734375, 0.2734375), (0.509803921568627, \n 0.28515625, 0.28515625), (0.513725490196078, 0.296875, 0.296875), (\n 0.517647058823529, 0.30859375, 0.30859375), (0.52156862745098, \n 0.3203125, 0.3203125), (0.525490196078431, 0.33203125, 0.33203125),\n (0.529411764705882, 0.34375, 0.34375), (0.533333333333333, \n 0.35546875, 0.35546875), (0.537254901960784, 0.3671875, 0.3671875),\n (0.541176470588235, 0.37890625, 0.37890625), (0.545098039215686, \n 0.390625, 0.390625), (0.549019607843137, 0.40234375, 0.40234375), (\n 0.552941176470588, 0.4140625, 0.4140625), (0.556862745098039, \n 0.42578125, 0.42578125), (0.56078431372549, 0.4375, 0.4375), (\n 0.564705882352941, 0.44921875, 0.44921875), (0.568627450980392, \n 0.4609375, 0.4609375), (0.572549019607843, 0.47265625, 0.47265625),\n (0.576470588235294, 0.484375, 0.484375), (0.580392156862745, \n 0.49609375, 0.49609375), (0.584313725490196, 
0.5078125, 0.5078125),\n (0.588235294117647, 0.51953125, 0.51953125), (0.592156862745098, \n 0.53125, 0.53125), (0.596078431372549, 0.54296875, 0.54296875), (\n 0.6, 0.5546875, 0.5546875), (0.603921568627451, 0.56640625, \n 0.56640625), (0.607843137254902, 0.578125, 0.578125), (\n 0.611764705882353, 0.58984375, 0.58984375), (0.615686274509804, \n 0.6015625, 0.6015625), (0.619607843137255, 0.61328125, 0.61328125),\n (0.623529411764706, 0.625, 0.625), (0.627450980392157, \n 0.61458203125, 0.61458203125), (0.631372549019608, 0.60416796875, \n 0.60416796875), (0.635294117647059, 0.59375, 0.59375), (\n 0.63921568627451, 0.58333203125, 0.58333203125), (0.643137254901961,\n 0.57291796875, 0.57291796875), (0.647058823529412, 0.5625, 0.5625),\n (0.650980392156863, 0.55208203125, 0.55208203125), (\n 0.654901960784314, 0.54166796875, 0.54166796875), (\n 0.658823529411765, 0.53125, 0.53125), (0.662745098039216, \n 0.52083203125, 0.52083203125), (0.666666666666667, 0.51041796875, \n 0.51041796875), (0.670588235294118, 0.5, 0.5), (0.674509803921569, \n 0.48958203125, 0.48958203125), (0.67843137254902, 0.47916796875, \n 0.47916796875), (0.682352941176471, 0.46875, 0.46875), (\n 0.686274509803922, 0.45833203125, 0.45833203125), (\n 0.690196078431373, 0.44791796875, 0.44791796875), (\n 0.694117647058824, 0.4375, 0.4375), (0.698039215686274, \n 0.42708203125, 0.42708203125), (0.701960784313725, 0.41666796875, \n 0.41666796875), (0.705882352941177, 0.40625, 0.40625), (\n 0.709803921568627, 0.39583203125, 0.39583203125), (\n 0.713725490196078, 0.385416796875, 0.385416796875), (\n 0.717647058823529, 0.375, 0.375), (0.72156862745098, 0.364583203125,\n 0.364583203125), (0.725490196078431, 0.354166796875, 0.354166796875\n ), (0.729411764705882, 0.34375, 0.34375), (0.733333333333333, \n 0.333333203125, 0.333333203125), (0.737254901960784, 0.322916796875,\n 0.322916796875), (0.741176470588235, 0.3125, 0.3125), (\n 0.745098039215686, 0.302083203125, 0.302083203125), (\n 0.749019607843137, 0.291666796875, 0.291666796875), (\n 0.752941176470588, 0.28125, 0.28125), (0.756862745098039, \n 0.270833203125, 0.270833203125), (0.76078431372549, 0.260416796875,\n 0.260416796875), (0.764705882352941, 0.25, 0.25), (\n 0.768627450980392, 0.239583203125, 0.239583203125), (\n 0.772549019607843, 0.229166796875, 0.229166796875), (\n 0.776470588235294, 0.21875, 0.21875), (0.780392156862745, \n 0.208333203125, 0.208333203125), (0.784313725490196, 0.197916796875,\n 0.197916796875), (0.788235294117647, 0.1875, 0.1875), (\n 0.792156862745098, 0.177083203125, 0.177083203125), (\n 0.796078431372549, 0.166666796875, 0.166666796875), (0.8, 0.15625, \n 0.15625), (0.803921568627451, 0.145833203125, 0.145833203125), (\n 0.807843137254902, 0.135416796875, 0.135416796875), (\n 0.811764705882353, 0.125, 0.125), (0.815686274509804, \n 0.114583203125, 0.114583203125), (0.819607843137255, 0.104166796875,\n 0.104166796875), (0.823529411764706, 0.09375, 0.09375), (\n 0.827450980392157, 0.083333203125, 0.083333203125), (\n 0.831372549019608, 0.072916796875, 0.072916796875), (\n 0.835294117647059, 0.0625, 0.0625), (0.83921568627451, \n 0.052083203125, 0.052083203125), (0.843137254901961, 0.041666796875,\n 0.041666796875), (0.847058823529412, 0.03125, 0.03125), (\n 0.850980392156863, 0.0208333203125, 0.0208333203125), (\n 0.854901960784314, 0.0104166796875, 0.0104166796875), (\n 0.858823529411765, 0, 0), (0.862745098039216, 0.0184151953125, \n 0.0184151953125), (0.866666666666667, 0.0368303515625, \n 0.0368303515625), (0.870588235294118, 0.055245703125, 
\n 0.055245703125), (0.874509803921569, 0.073660546875, 0.073660546875\n ), (0.87843137254902, 0.09207578125, 0.09207578125), (\n 0.882352941176471, 0.110491015625, 0.110491015625), (\n 0.886274509803922, 0.12890625, 0.12890625), (0.890196078431373, \n 0.147321484375, 0.147321484375), (0.894117647058824, 0.16573671875,\n 0.16573671875), (0.898039215686275, 0.184151953125, 0.184151953125),\n (0.901960784313726, 0.202566796875, 0.202566796875), (\n 0.905882352941176, 0.22098203125, 0.22098203125), (\n 0.909803921568627, 0.239397265625, 0.239397265625), (\n 0.913725490196078, 0.2578125, 0.2578125), (0.917647058823529, \n 0.276227734375, 0.276227734375), (0.92156862745098, 0.29464296875, \n 0.29464296875), (0.925490196078431, 0.313058203125, 0.313058203125),\n (0.929411764705882, 0.331473046875, 0.331473046875), (\n 0.933333333333333, 0.34988828125, 0.34988828125), (\n 0.937254901960784, 0.368303515625, 0.368303515625), (\n 0.941176470588235, 0.38671875, 0.38671875), (0.945098039215686, \n 0.4051328125, 0.4051328125), (0.949019607843137, 0.42355078125, \n 0.42355078125), (0.952941176470588, 0.44196484375, 0.44196484375),\n (0.956862745098039, 0.46037890625, 0.46037890625), (\n 0.96078431372549, 0.47879296875, 0.47879296875), (0.964705882352941,\n 0.4972109375, 0.4972109375), (0.968627450980392, 0.515625, 0.515625\n ), (0.972549019607843, 0.5340390625, 0.5340390625), (\n 0.976470588235294, 0.55245703125, 0.55245703125), (\n 0.980392156862745, 0.57087109375, 0.57087109375), (\n 0.984313725490196, 0.58928515625, 0.58928515625), (\n 0.988235294117647, 0.60769921875, 0.60769921875), (\n 0.992156862745098, 0.6261171875, 0.6261171875), (0.996078431372549,\n 0.64453125, 0.64453125), (1, 0.64453125, 0.64453125)), 'blue': ((0,\n 1, 1), (0.00392156862745098, 0.80569140625, 0.80569140625), (\n 0.00784313725490196, 0.7964296875, 0.7964296875), (\n 0.0117647058823529, 0.7871640625, 0.7871640625), (\n 0.0156862745098039, 0.77790234375, 0.77790234375), (\n 0.0196078431372549, 0.76863671875, 0.76863671875), (\n 0.0235294117647059, 0.759375, 0.759375), (0.0274509803921569, \n 0.75011328125, 0.75011328125), (0.0313725490196078, 0.74084765625, \n 0.74084765625), (0.0352941176470588, 0.7315859375, 0.7315859375), (\n 0.0392156862745098, 0.7223203125, 0.7223203125), (\n 0.0431372549019608, 0.71305859375, 0.71305859375), (\n 0.0470588235294118, 0.70379296875, 0.70379296875), (\n 0.0509803921568627, 0.69453125, 0.69453125), (0.0549019607843137, \n 0.68526953125, 0.68526953125), (0.0588235294117647, 0.67600390625, \n 0.67600390625), (0.0627450980392157, 0.6667421875, 0.6667421875), (\n 0.0666666666666667, 0.6574765625, 0.6574765625), (\n 0.0705882352941176, 0.64821484375, 0.64821484375), (\n 0.0745098039215686, 0.63894921875, 0.63894921875), (\n 0.0784313725490196, 0.6296875, 0.6296875), (0.0823529411764706, \n 0.62042578125, 0.62042578125), (0.0862745098039216, 0.61116015625, \n 0.61116015625), (0.0901960784313725, 0.6018984375, 0.6018984375), (\n 0.0941176470588235, 0.5926328125, 0.5926328125), (\n 0.0980392156862745, 0.58337109375, 0.58337109375), (\n 0.101960784313725, 0.57410546875, 0.57410546875), (\n 0.105882352941176, 0.56484375, 0.56484375), (0.109803921568627, \n 0.55558203125, 0.55558203125), (0.113725490196078, 0.54631640625, \n 0.54631640625), (0.117647058823529, 0.5370546875, 0.5370546875), (\n 0.12156862745098, 0.5277890625, 0.5277890625), (0.125490196078431, \n 0.51852734375, 0.51852734375), (0.129411764705882, 0.50926171875, \n 0.50926171875), (0.133333333333333, 0.5, 0.5), (0.137254901960784, \n 
0.50901953125, 0.50901953125), (0.141176470588235, 0.5180390625, \n 0.5180390625), (0.145098039215686, 0.52705859375, 0.52705859375), (\n 0.149019607843137, 0.536078125, 0.536078125), (0.152941176470588, \n 0.54509765625, 0.54509765625), (0.156862745098039, 0.55412109375, \n 0.55412109375), (0.16078431372549, 0.563140625, 0.563140625), (\n 0.164705882352941, 0.57216015625, 0.57216015625), (\n 0.168627450980392, 0.5811796875, 0.5811796875), (0.172549019607843,\n 0.59019921875, 0.59019921875), (0.176470588235294, 0.59921875, \n 0.59921875), (0.180392156862745, 0.60823828125, 0.60823828125), (\n 0.184313725490196, 0.6172578125, 0.6172578125), (0.188235294117647,\n 0.62627734375, 0.62627734375), (0.192156862745098, 0.635296875, \n 0.635296875), (0.196078431372549, 0.64431640625, 0.64431640625), (\n 0.2, 0.65333984375, 0.65333984375), (0.203921568627451, 0.662359375,\n 0.662359375), (0.207843137254902, 0.67137890625, 0.67137890625), (\n 0.211764705882353, 0.6803984375, 0.6803984375), (0.215686274509804,\n 0.68941796875, 0.68941796875), (0.219607843137255, 0.6984375, \n 0.6984375), (0.223529411764706, 0.70745703125, 0.70745703125), (\n 0.227450980392157, 0.7164765625, 0.7164765625), (0.231372549019608,\n 0.72549609375, 0.72549609375), (0.235294117647059, 0.734515625, \n 0.734515625), (0.23921568627451, 0.74353515625, 0.74353515625), (\n 0.243137254901961, 0.75255859375, 0.75255859375), (\n 0.247058823529412, 0.761578125, 0.761578125), (0.250980392156863, \n 0.77059765625, 0.77059765625), (0.254901960784314, 0.7796171875, \n 0.7796171875), (0.258823529411765, 0.78863671875, 0.78863671875), (\n 0.262745098039216, 0.79765625, 0.79765625), (0.266666666666667, \n 0.80667578125, 0.80667578125), (0.270588235294118, 0.8156953125, \n 0.8156953125), (0.274509803921569, 0.82471484375, 0.82471484375), (\n 0.27843137254902, 0.833734375, 0.833734375), (0.282352941176471, \n 0.84275390625, 0.84275390625), (0.286274509803922, 0.85177734375, \n 0.85177734375), (0.290196078431373, 0.860796875, 0.860796875), (\n 0.294117647058824, 0.86981640625, 0.86981640625), (\n 0.298039215686275, 0.8788359375, 0.8788359375), (0.301960784313725,\n 0.88785546875, 0.88785546875), (0.305882352941176, 0.896875, \n 0.896875), (0.309803921568627, 0.90589453125, 0.90589453125), (\n 0.313725490196078, 0.9149140625, 0.9149140625), (0.317647058823529,\n 0.92393359375, 0.92393359375), (0.32156862745098, 0.932953125, \n 0.932953125), (0.325490196078431, 0.94197265625, 0.94197265625), (\n 0.329411764705882, 0.95099609375, 0.95099609375), (\n 0.333333333333333, 0.960015625, 0.960015625), (0.337254901960784, \n 0.96903515625, 0.96903515625), (0.341176470588235, 0.9780546875, \n 0.9780546875), (0.345098039215686, 0.98707421875, 0.98707421875), (\n 0.349019607843137, 0.99609375, 0.99609375), (0.352941176470588, \n 0.9737734375, 0.9737734375), (0.356862745098039, 0.95144921875, \n 0.95144921875), (0.36078431372549, 0.92912890625, 0.92912890625), (\n 0.364705882352941, 0.90680859375, 0.90680859375), (\n 0.368627450980392, 0.88448828125, 0.88448828125), (\n 0.372549019607843, 0.8621640625, 0.8621640625), (0.376470588235294,\n 0.83984375, 0.83984375), (0.380392156862745, 0.8175234375, \n 0.8175234375), (0.384313725490196, 0.79519921875, 0.79519921875), (\n 0.388235294117647, 0.77287890625, 0.77287890625), (\n 0.392156862745098, 0.75055859375, 0.75055859375), (\n 0.396078431372549, 0.72823828125, 0.72823828125), (0.4, \n 0.7059140625, 0.7059140625), (0.403921568627451, 0.68359375, \n 0.68359375), (0.407843137254902, 0.6612734375, 0.6612734375), (\n 
0.411764705882353, 0.63894921875, 0.63894921875), (\n 0.415686274509804, 0.61662890625, 0.61662890625), (\n 0.419607843137255, 0.59430859375, 0.59430859375), (\n 0.423529411764706, 0.57198828125, 0.57198828125), (\n 0.427450980392157, 0.5496640625, 0.5496640625), (0.431372549019608,\n 0.52734375, 0.52734375), (0.435294117647059, 0.5050234375, \n 0.5050234375), (0.43921568627451, 0.48269921875, 0.48269921875), (\n 0.443137254901961, 0.46037890625, 0.46037890625), (\n 0.447058823529412, 0.43805859375, 0.43805859375), (\n 0.450980392156863, 0.41573828125, 0.41573828125), (\n 0.454901960784314, 0.3934140625, 0.3934140625), (0.458823529411765,\n 0.37109375, 0.37109375), (0.462745098039216, 0.348772265625, \n 0.348772265625), (0.466666666666667, 0.32645078125, 0.32645078125),\n (0.470588235294118, 0.304129296875, 0.304129296875), (\n 0.474509803921569, 0.281808203125, 0.281808203125), (\n 0.47843137254902, 0.25948671875, 0.25948671875), (0.482352941176471,\n 0.237165234375, 0.237165234375), (0.486274509803922, 0.21484375, \n 0.21484375), (0.490196078431373, 0.233370703125, 0.233370703125), (\n 0.494117647058824, 0.251897265625, 0.251897265625), (\n 0.498039215686275, 0.27042421875, 0.27042421875), (\n 0.501960784313725, 0.28895078125, 0.28895078125), (\n 0.505882352941176, 0.307477734375, 0.307477734375), (\n 0.509803921568627, 0.326004296875, 0.326004296875), (\n 0.513725490196078, 0.34453125, 0.34453125), (0.517647058823529, \n 0.363058203125, 0.363058203125), (0.52156862745098, 0.381584765625,\n 0.381584765625), (0.525490196078431, 0.40011328125, 0.40011328125),\n (0.529411764705882, 0.41863671875, 0.41863671875), (\n 0.533333333333333, 0.4371640625, 0.4371640625), (0.537254901960784,\n 0.45569140625, 0.45569140625), (0.541176470588235, 0.47421875, \n 0.47421875), (0.545098039215686, 0.49274609375, 0.49274609375), (\n 0.549019607843137, 0.5112734375, 0.5112734375), (0.552941176470588,\n 0.52980078125, 0.52980078125), (0.556862745098039, 0.54832421875, \n 0.54832421875), (0.56078431372549, 0.5668515625, 0.5668515625), (\n 0.564705882352941, 0.58537890625, 0.58537890625), (\n 0.568627450980392, 0.60390625, 0.60390625), (0.572549019607843, \n 0.62243359375, 0.62243359375), (0.576470588235294, 0.6409609375, \n 0.6409609375), (0.580392156862745, 0.65948828125, 0.65948828125), (\n 0.584313725490196, 0.67801171875, 0.67801171875), (\n 0.588235294117647, 0.6965390625, 0.6965390625), (0.592156862745098,\n 0.71506640625, 0.71506640625), (0.596078431372549, 0.73359375, \n 0.73359375), (0.6, 0.75212109375, 0.75212109375), (\n 0.603921568627451, 0.7706484375, 0.7706484375), (0.607843137254902,\n 0.78917578125, 0.78917578125), (0.611764705882353, 0.80769921875, \n 0.80769921875), (0.615686274509804, 0.8262265625, 0.8262265625), (\n 0.619607843137255, 0.84475390625, 0.84475390625), (\n 0.623529411764706, 0.86328125, 0.86328125), (0.627450980392157, \n 0.84889453125, 0.84889453125), (0.631372549019608, 0.83450390625, \n 0.83450390625), (0.635294117647059, 0.8201171875, 0.8201171875), (\n 0.63921568627451, 0.80573046875, 0.80573046875), (0.643137254901961,\n 0.79133984375, 0.79133984375), (0.647058823529412, 0.776953125, \n 0.776953125), (0.650980392156863, 0.76256640625, 0.76256640625), (\n 0.654901960784314, 0.74817578125, 0.74817578125), (\n 0.658823529411765, 0.7337890625, 0.7337890625), (0.662745098039216,\n 0.71940234375, 0.71940234375), (0.666666666666667, 0.70501171875, \n 0.70501171875), (0.670588235294118, 0.690625, 0.690625), (\n 0.674509803921569, 0.67623828125, 0.67623828125), 
(0.67843137254902,\n 0.66184765625, 0.66184765625), (0.682352941176471, 0.6474609375, \n 0.6474609375), (0.686274509803922, 0.63307421875, 0.63307421875), (\n 0.690196078431373, 0.61868359375, 0.61868359375), (\n 0.694117647058824, 0.604296875, 0.604296875), (0.698039215686274, \n 0.58991015625, 0.58991015625), (0.701960784313725, 0.57551953125, \n 0.57551953125), (0.705882352941177, 0.5611328125, 0.5611328125), (\n 0.709803921568627, 0.54674609375, 0.54674609375), (\n 0.713725490196078, 0.53235546875, 0.53235546875), (\n 0.717647058823529, 0.51796875, 0.51796875), (0.72156862745098, \n 0.50358203125, 0.50358203125), (0.725490196078431, 0.48919140625, \n 0.48919140625), (0.729411764705882, 0.4748046875, 0.4748046875), (\n 0.733333333333333, 0.46041796875, 0.46041796875), (\n 0.737254901960784, 0.44602734375, 0.44602734375), (\n 0.741176470588235, 0.431640625, 0.431640625), (0.745098039215686, \n 0.41725390625, 0.41725390625), (0.749019607843137, 0.40286328125, \n 0.40286328125), (0.752941176470588, 0.3884765625, 0.3884765625), (\n 0.756862745098039, 0.374088671875, 0.374088671875), (\n 0.76078431372549, 0.359700390625, 0.359700390625), (\n 0.764705882352941, 0.3453125, 0.3453125), (0.768627450980392, \n 0.330924609375, 0.330924609375), (0.772549019607843, 0.316536328125,\n 0.316536328125), (0.776470588235294, 0.3021484375, 0.3021484375), (\n 0.780392156862745, 0.287760546875, 0.287760546875), (\n 0.784313725490196, 0.273372265625, 0.273372265625), (\n 0.788235294117647, 0.258984375, 0.258984375), (0.792156862745098, \n 0.244596484375, 0.244596484375), (0.796078431372549, 0.230208203125,\n 0.230208203125), (0.8, 0.2158203125, 0.2158203125), (\n 0.803921568627451, 0.201432421875, 0.201432421875), (\n 0.807843137254902, 0.187044140625, 0.187044140625), (\n 0.811764705882353, 0.17265625, 0.17265625), (0.815686274509804, \n 0.158268359375, 0.158268359375), (0.819607843137255, 0.143880078125,\n 0.143880078125), (0.823529411764706, 0.1294921875, 0.1294921875), (\n 0.827450980392157, 0.115104296875, 0.115104296875), (\n 0.831372549019608, 0.100716015625, 0.100716015625), (\n 0.835294117647059, 0.086328125, 0.086328125), (0.83921568627451, \n 0.071940234375, 0.071940234375), (0.843137254901961, 0.057551953125,\n 0.057551953125), (0.847058823529412, 0.0431640625, 0.0431640625), (\n 0.850980392156863, 0.028776015625, 0.028776015625), (\n 0.854901960784314, 0.01438796875, 0.01438796875), (\n 0.858823529411765, 0, 0), (0.862745098039216, 0, 0), (\n 0.866666666666667, 0, 0), (0.870588235294118, 0, 0), (\n 0.874509803921569, 0, 0), (0.87843137254902, 0, 0), (\n 0.882352941176471, 0, 0), (0.886274509803922, 0, 0), (\n 0.890196078431373, 0, 0), (0.894117647058824, 0, 0), (\n 0.898039215686275, 0, 0), (0.901960784313726, 0, 0), (\n 0.905882352941176, 0, 0), (0.909803921568627, 0, 0), (\n 0.913725490196078, 0, 0), (0.917647058823529, 0, 0), (\n 0.92156862745098, 0, 0), (0.925490196078431, 0, 0), (\n 0.929411764705882, 0, 0), (0.933333333333333, 0, 0), (\n 0.937254901960784, 0, 0), (0.941176470588235, 0, 0), (\n 0.945098039215686, 0, 0), (0.949019607843137, 0, 0), (\n 0.952941176470588, 0, 0), (0.956862745098039, 0, 0), (\n 0.96078431372549, 0, 0), (0.964705882352941, 0, 0), (\n 0.968627450980392, 0, 0), (0.972549019607843, 0, 0), (\n 0.976470588235294, 0, 0), (0.980392156862745, 0, 0), (\n 0.984313725490196, 0, 0), (0.988235294117647, 0, 0), (\n 0.992156862745098, 0, 0), (0.996078431372549, 0, 0), (1, 0, 0))}\n califa = mcol.LinearSegmentedColormap('califa', cdict)\n vcalifa = 
mcol.LinearSegmentedColormap('vcalifa', vcdict)\n if option == 'v':\n return vcalifa\n else:\n return califa\n\n\ndef A_l(R_v, lw):\n lw = lw / 10000\n x = 1 / lw\n if x > 1.1:\n y = x - 1.82\n a_x = (1.0 + 0.17699 * y - 0.50447 * y ** 2 - 0.02427 * y ** 3 + \n 0.72085 * y ** 4 + 0.01979 * y ** 5 - 0.7753 * y ** 6 + 0.32999 *\n y ** 7)\n b_x = (1.41338 * y + 2.28305 * y ** 2 + 1.07233 * y ** 3 - 5.38434 *\n y ** 4 - 0.62251 * y ** 5 + 5.3026 * y ** 6 - 2.09002 * y ** 7)\n else:\n a_x = 0.574 * x ** 1.61\n b_x = -0.527 * x ** 1.61\n A_l_ = a_x + b_x / R_v\n return A_l_\n",
"<import token>\n\n\ndef color_map_califa_old(option='json'):\n if option == 'json':\n cmap_cal_dic = json.load(open('code/cmap_cal_json.txt'))\n elif option == 'pickle':\n with open('cmap_cal_pickle.txt', 'rb') as handle:\n cmap_cal_dic = pickle.loads(handle.read())\n cmap_cal = mcol.LinearSegmentedColormap('cmap_CALIFA', cmap_cal_dic)\n return cmap_cal\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\ndef Gr_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_grazy = np.linspace(x_min, -0.2, 100)\n ax.plot(x_set_grazy, grazy(x_set_grazy), label='Stasinska+03', **kwargs)\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\ndef SII_LINERS_curve_plot(ax=None, x_min=-0.3, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.01, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_OI_curve_plot(ax=None, x_min=-3.5, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.35)\n ax.plot(x_set_line, O3O1_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_curve_plot(ax=None, x_min=-2.0, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.08, 100)\n ax.plot(x_set_line, espinosa(x_set_line), label=label, **kwargs)\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\ndef kewley(logNIIHa):\n val = 0.61 / (logNIIHa - 0.47) + 1.19\n return val\n\n\ndef grazy(logNIIHa):\n x = logNIIHa\n val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409 * x\n ) - 31.093\n return val\n\n\ndef AGNline(logSIIHa):\n val = 0.72 / (logSIIHa - 0.32) + 1.3\n return val\n\n\ndef LINSy2line(logSIIHa):\n val = 1.89 * logSIIHa + 0.76\n return val\n\n\ndef AGNline2(logOIHa):\n val = 
0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\ndef LINSy2line2(logOIHa):\n val = 1.18 * logOIHa + 1.3\n return val\n\n\ndef espinosa(logNIIHa):\n val = 0.12579066 / (logNIIHa - 0.00302777) + 0.56846872\n return val\n\n\ndef O3S2_line_c(x):\n val = 0.04074804 / (x + 0.01253238) + 0.58154113\n return val\n\n\ndef O3O1_line_c(x):\n val = 0.05612915 / (x + 0.39641533) + 0.60969495\n return val\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\ndef SII_LINERS_curve_plot(ax=None, x_min=-0.3, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.02, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\ndef kewley(logNIIHa):\n val = 0.61 / (logNIIHa - 0.47) + 1.19\n return val\n\n\ndef grazy(logNIIHa):\n x = logNIIHa\n val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409 * x\n ) - 31.093\n return val\n\n\ndef A_l(R_v, lw):\n lw = lw / 10000\n x = 1 / lw\n if x > 1.1:\n y = x - 1.82\n a_x = (1.0 + 0.17699 * y - 0.50447 * y ** 2 - 0.02427 * y ** 3 + \n 0.72085 * y ** 4 + 0.01979 * y ** 5 - 0.7753 * y ** 6 + 0.32999 *\n y ** 7)\n b_x = (1.41338 * y + 2.28305 * y ** 2 + 1.07233 * y ** 3 - 5.38434 *\n y ** 4 - 0.62251 * y ** 5 + 5.3026 * y ** 6 - 2.09002 * y ** 7)\n else:\n a_x = 0.574 * x ** 1.61\n b_x = -0.527 * x ** 1.61\n A_l_ = a_x + b_x / R_v\n return A_l_\n\n\ndef color_map_califa(option='v'):\n cdict = {'red': ((0.0, 0, 0), (0.00392156862745098, 0, 0), (\n 0.00784313725490196, 0, 0), (0.0117647058823529, 0, 0), (\n 0.0156862745098039, 0, 0), (0.0196078431372549, 0, 0), (\n 0.0235294117647059, 0, 0), (0.0274509803921569, 0, 0), (\n 0.0313725490196078, 0, 0), (0.0352941176470588, 0, 0), (\n 0.0392156862745098, 0, 0), (0.0431372549019608, 0, 0), (\n 0.0470588235294118, 0, 0), (0.0509803921568627, 0, 
0), (\n 0.0549019607843137, 0, 0), (0.0588235294117647, 0, 0), (\n 0.0627450980392157, 0, 0), (0.0666666666666667, 0, 0), (\n 0.0705882352941176, 0, 0), (0.0745098039215686, 0, 0), (\n 0.0784313725490196, 0, 0), (0.0823529411764706, 0, 0), (\n 0.0862745098039216, 0, 0), (0.0901960784313725, 0, 0), (\n 0.0941176470588235, 0, 0), (0.0980392156862745, 0, 0), (\n 0.101960784313725, 0, 0), (0.105882352941176, 0, 0), (\n 0.109803921568627, 0, 0), (0.113725490196078, 0, 0), (\n 0.117647058823529, 0, 0), (0.12156862745098, 0, 0), (\n 0.125490196078431, 0, 0), (0.129411764705882, 0, 0), (\n 0.133333333333333, 0, 0), (0.137254901960784, 0, 0), (\n 0.141176470588235, 0, 0), (0.145098039215686, 0, 0), (\n 0.149019607843137, 0, 0), (0.152941176470588, 0, 0), (\n 0.156862745098039, 0, 0), (0.16078431372549, 0, 0), (\n 0.164705882352941, 0, 0), (0.168627450980392, 0, 0), (\n 0.172549019607843, 0, 0), (0.176470588235294, 0, 0), (\n 0.180392156862745, 0, 0), (0.184313725490196, 0, 0), (\n 0.188235294117647, 0, 0), (0.192156862745098, 0, 0), (\n 0.196078431372549, 0.019921875, 0.019921875), (0.2, 0.03984375, \n 0.03984375), (0.203921568627451, 0.059765625, 0.059765625), (\n 0.207843137254902, 0.0796875, 0.0796875), (0.211764705882353, \n 0.099609375, 0.099609375), (0.215686274509804, 0.11953125, \n 0.11953125), (0.219607843137255, 0.139453125, 0.139453125), (\n 0.223529411764706, 0.159375, 0.159375), (0.227450980392157, \n 0.179296875, 0.179296875), (0.231372549019608, 0.19921875, \n 0.19921875), (0.235294117647059, 0.219140625, 0.219140625), (\n 0.23921568627451, 0.2390625, 0.2390625), (0.243137254901961, \n 0.258984375, 0.258984375), (0.247058823529412, 0.27890625, \n 0.27890625), (0.250980392156863, 0.298828125, 0.298828125), (\n 0.254901960784314, 0.31875, 0.31875), (0.258823529411765, \n 0.338671875, 0.338671875), (0.262745098039216, 0.35859375, \n 0.35859375), (0.266666666666667, 0.378515625, 0.378515625), (\n 0.270588235294118, 0.3984375, 0.3984375), (0.274509803921569, \n 0.418359375, 0.418359375), (0.27843137254902, 0.43828125, \n 0.43828125), (0.282352941176471, 0.458203125, 0.458203125), (\n 0.286274509803922, 0.478125, 0.478125), (0.290196078431373, \n 0.498046875, 0.498046875), (0.294117647058824, 0.51796875, \n 0.51796875), (0.298039215686275, 0.537890625, 0.537890625), (\n 0.301960784313725, 0.5578125, 0.5578125), (0.305882352941176, \n 0.577734375, 0.577734375), (0.309803921568627, 0.59765625, \n 0.59765625), (0.313725490196078, 0.617578125, 0.617578125), (\n 0.317647058823529, 0.6375, 0.6375), (0.32156862745098, 0.657421875,\n 0.657421875), (0.325490196078431, 0.67734375, 0.67734375), (\n 0.329411764705882, 0.697265625, 0.697265625), (0.333333333333333, \n 0.7171875, 0.7171875), (0.337254901960784, 0.737109375, 0.737109375\n ), (0.341176470588235, 0.75703125, 0.75703125), (0.345098039215686,\n 0.776953125, 0.776953125), (0.349019607843137, 0.796875, 0.796875),\n (0.352941176470588, 0.816796875, 0.816796875), (0.356862745098039, \n 0.83671875, 0.83671875), (0.36078431372549, 0.856640625, \n 0.856640625), (0.364705882352941, 0.8765625, 0.8765625), (\n 0.368627450980392, 0.896484375, 0.896484375), (0.372549019607843, \n 0.91640625, 0.91640625), (0.376470588235294, 0.936328125, \n 0.936328125), (0.380392156862745, 0.95625, 0.95625), (\n 0.384313725490196, 0.976171875, 0.976171875), (0.388235294117647, \n 0.99609375, 0.99609375), (0.392156862745098, 0.99609375, 0.99609375\n ), (0.396078431372549, 0.99609375, 0.99609375), (0.4, 0.99609375, \n 0.99609375), (0.403921568627451, 0.99609375, 
0.99609375), (\n 0.407843137254902, 0.99609375, 0.99609375), (0.411764705882353, \n 0.99609375, 0.99609375), (0.415686274509804, 0.99609375, 0.99609375\n ), (0.419607843137255, 0.99609375, 0.99609375), (0.423529411764706,\n 0.99609375, 0.99609375), (0.427450980392157, 0.99609375, 0.99609375\n ), (0.431372549019608, 0.99609375, 0.99609375), (0.435294117647059,\n 0.99609375, 0.99609375), (0.43921568627451, 0.99609375, 0.99609375),\n (0.443137254901961, 0.99609375, 0.99609375), (0.447058823529412, \n 0.99609375, 0.99609375), (0.450980392156863, 0.99609375, 0.99609375\n ), (0.454901960784314, 0.99609375, 0.99609375), (0.458823529411765,\n 0.99609375, 0.99609375), (0.462745098039216, 0.99609375, 0.99609375\n ), (0.466666666666667, 0.99609375, 0.99609375), (0.470588235294118,\n 0.99609375, 0.99609375), (0.474509803921569, 0.99609375, 0.99609375\n ), (0.47843137254902, 0.99609375, 0.99609375), (0.482352941176471, \n 0.99609375, 0.99609375), (0.486274509803922, 0.99609375, 0.99609375\n ), (0.490196078431373, 0.99609375, 0.99609375), (0.494117647058824,\n 0.99609375, 0.99609375), (0.498039215686275, 0.99609375, 0.99609375\n ), (0.501960784313725, 0.99609375, 0.99609375), (0.505882352941176,\n 0.99609375, 0.99609375), (0.509803921568627, 0.99609375, 0.99609375\n ), (0.513725490196078, 0.99609375, 0.99609375), (0.517647058823529,\n 0.99609375, 0.99609375), (0.52156862745098, 0.99609375, 0.99609375),\n (0.525490196078431, 0.99609375, 0.99609375), (0.529411764705882, \n 0.99609375, 0.99609375), (0.533333333333333, 0.99609375, 0.99609375\n ), (0.537254901960784, 0.99609375, 0.99609375), (0.541176470588235,\n 0.99609375, 0.99609375), (0.545098039215686, 0.99609375, 0.99609375\n ), (0.549019607843137, 0.99609375, 0.99609375), (0.552941176470588,\n 0.99609375, 0.99609375), (0.556862745098039, 0.99609375, 0.99609375\n ), (0.56078431372549, 0.99609375, 0.99609375), (0.564705882352941, \n 0.99609375, 0.99609375), (0.568627450980392, 0.99609375, 0.99609375\n ), (0.572549019607843, 0.99609375, 0.99609375), (0.576470588235294,\n 0.99609375, 0.99609375), (0.580392156862745, 0.99609375, 0.99609375\n ), (0.584313725490196, 0.99609375, 0.99609375), (0.588235294117647,\n 0.98046875, 0.98046875), (0.592156862745098, 0.96484375, 0.96484375\n ), (0.596078431372549, 0.94921875, 0.94921875), (0.6, 0.93359375, \n 0.93359375), (0.603921568627451, 0.91796875, 0.91796875), (\n 0.607843137254902, 0.90234375, 0.90234375), (0.611764705882353, \n 0.88671875, 0.88671875), (0.615686274509804, 0.87109375, 0.87109375\n ), (0.619607843137255, 0.85546875, 0.85546875), (0.623529411764706,\n 0.83984375, 0.83984375), (0.627450980392157, 0.82421875, 0.82421875\n ), (0.631372549019608, 0.80859375, 0.80859375), (0.635294117647059,\n 0.79296875, 0.79296875), (0.63921568627451, 0.77734375, 0.77734375),\n (0.643137254901961, 0.76171875, 0.76171875), (0.647058823529412, \n 0.74609375, 0.74609375), (0.650980392156863, 0.73046875, 0.73046875\n ), (0.654901960784314, 0.71484375, 0.71484375), (0.658823529411765,\n 0.69921875, 0.69921875), (0.662745098039216, 0.68359375, 0.68359375\n ), (0.666666666666667, 0.66796875, 0.66796875), (0.670588235294118,\n 0.65234375, 0.65234375), (0.674509803921569, 0.63671875, 0.63671875\n ), (0.67843137254902, 0.62109375, 0.62109375), (0.682352941176471, \n 0.60546875, 0.60546875), (0.686274509803922, 0.58984375, 0.58984375\n ), (0.690196078431373, 0.57421875, 0.57421875), (0.694117647058824,\n 0.55859375, 0.55859375), (0.698039215686274, 0.54296875, 0.54296875\n ), (0.701960784313725, 0.52734375, 0.52734375), 
(0.705882352941177,\n 0.51171875, 0.51171875), (0.709803921568627, 0.49609375, 0.49609375\n ), (0.713725490196078, 0.48046875, 0.48046875), (0.717647058823529,\n 0.46484375, 0.46484375), (0.72156862745098, 0.44921875, 0.44921875),\n (0.725490196078431, 0.43359375, 0.43359375), (0.729411764705882, \n 0.41796875, 0.41796875), (0.733333333333333, 0.40234375, 0.40234375\n ), (0.737254901960784, 0.38671875, 0.38671875), (0.741176470588235,\n 0.37109375, 0.37109375), (0.745098039215686, 0.35546875, 0.35546875\n ), (0.749019607843137, 0.33984375, 0.33984375), (0.752941176470588,\n 0.32421875, 0.32421875), (0.756862745098039, 0.30859375, 0.30859375\n ), (0.76078431372549, 0.29296875, 0.29296875), (0.764705882352941, \n 0.27734375, 0.27734375), (0.768627450980392, 0.26171875, 0.26171875\n ), (0.772549019607843, 0.24609375, 0.24609375), (0.776470588235294,\n 0.23046875, 0.23046875), (0.780392156862745, 0.21484375, 0.21484375\n ), (0.784313725490196, 0.22663359375, 0.22663359375), (\n 0.788235294117647, 0.2384234375, 0.2384234375), (0.792156862745098,\n 0.250212890625, 0.250212890625), (0.796078431372549, 0.262002734375,\n 0.262002734375), (0.8, 0.273792578125, 0.273792578125), (\n 0.803921568627451, 0.285582421875, 0.285582421875), (\n 0.807843137254902, 0.297372265625, 0.297372265625), (\n 0.811764705882353, 0.309162109375, 0.309162109375), (\n 0.815686274509804, 0.3209515625, 0.3209515625), (0.819607843137255,\n 0.33274140625, 0.33274140625), (0.823529411764706, 0.34453125, \n 0.34453125), (0.827450980392157, 0.35632109375, 0.35632109375), (\n 0.831372549019608, 0.3681109375, 0.3681109375), (0.835294117647059,\n 0.379900390625, 0.379900390625), (0.83921568627451, 0.39169140625, \n 0.39169140625), (0.843137254901961, 0.40348046875, 0.40348046875),\n (0.847058823529412, 0.41526953125, 0.41526953125), (\n 0.850980392156863, 0.42705859375, 0.42705859375), (\n 0.854901960784314, 0.43884765625, 0.43884765625), (\n 0.858823529411765, 0.450640625, 0.450640625), (0.862745098039216, \n 0.4624296875, 0.4624296875), (0.866666666666667, 0.47421875, \n 0.47421875), (0.870588235294118, 0.4860078125, 0.4860078125), (\n 0.874509803921569, 0.497796875, 0.497796875), (0.87843137254902, \n 0.50958984375, 0.50958984375), (0.882352941176471, 0.52137890625, \n 0.52137890625), (0.886274509803922, 0.53316796875, 0.53316796875),\n (0.890196078431373, 0.54495703125, 0.54495703125), (\n 0.894117647058824, 0.55674609375, 0.55674609375), (\n 0.898039215686275, 0.56853515625, 0.56853515625), (\n 0.901960784313726, 0.580328125, 0.580328125), (0.905882352941176, \n 0.5921171875, 0.5921171875), (0.909803921568627, 0.60390625, \n 0.60390625), (0.913725490196078, 0.6156953125, 0.6156953125), (\n 0.917647058823529, 0.627484375, 0.627484375), (0.92156862745098, \n 0.63927734375, 0.63927734375), (0.925490196078431, 0.65106640625, \n 0.65106640625), (0.929411764705882, 0.66285546875, 0.66285546875),\n (0.933333333333333, 0.67464453125, 0.67464453125), (\n 0.937254901960784, 0.68643359375, 0.68643359375), (\n 0.941176470588235, 0.69822265625, 0.69822265625), (\n 0.945098039215686, 0.710015625, 0.710015625), (0.949019607843137, \n 0.7218046875, 0.7218046875), (0.952941176470588, 0.73359375, \n 0.73359375), (0.956862745098039, 0.7453828125, 0.7453828125), (\n 0.96078431372549, 0.757171875, 0.757171875), (0.964705882352941, \n 0.76896484375, 0.76896484375), (0.968627450980392, 0.78075390625, \n 0.78075390625), (0.972549019607843, 0.79254296875, 0.79254296875),\n (0.976470588235294, 0.80433203125, 0.80433203125), (\n 0.980392156862745, 
0.81612109375, 0.81612109375), (\n 0.984313725490196, 0.82791015625, 0.82791015625), (\n 0.988235294117647, 0.839703125, 0.839703125), (0.992156862745098, \n 0.8514921875, 0.8514921875), (0.996078431372549, 0.86328125, \n 0.86328125), (1.0, 0.86328125, 0.86328125)), 'green': ((0.0, \n 0.02984375, 0.02984375), (0.00392156862745098, 0.02984375, \n 0.02984375), (0.00784313725490196, 0.044765625, 0.044765625), (\n 0.0117647058823529, 0.0596875, 0.0596875), (0.0156862745098039, \n 0.074609375, 0.074609375), (0.0196078431372549, 0.08953125, \n 0.08953125), (0.0235294117647059, 0.104453125, 0.104453125), (\n 0.0274509803921569, 0.119375, 0.119375), (0.0313725490196078, \n 0.134296875, 0.134296875), (0.0352941176470588, 0.14921875, \n 0.14921875), (0.0392156862745098, 0.164140625, 0.164140625), (\n 0.0431372549019608, 0.1790625, 0.1790625), (0.0470588235294118, \n 0.193984375, 0.193984375), (0.0509803921568627, 0.20890625, \n 0.20890625), (0.0549019607843137, 0.223828125, 0.223828125), (\n 0.0588235294117647, 0.23875, 0.23875), (0.0627450980392157, \n 0.253671875, 0.253671875), (0.0666666666666667, 0.26859375, \n 0.26859375), (0.0705882352941176, 0.283515625, 0.283515625), (\n 0.0745098039215686, 0.2984375, 0.2984375), (0.0784313725490196, \n 0.313359375, 0.313359375), (0.0823529411764706, 0.32828125, \n 0.32828125), (0.0862745098039216, 0.343203125, 0.343203125), (\n 0.0901960784313725, 0.358125, 0.358125), (0.0941176470588235, \n 0.373046875, 0.373046875), (0.0980392156862745, 0.38796875, \n 0.38796875), (0.101960784313725, 0.402890625, 0.402890625), (\n 0.105882352941176, 0.4178125, 0.4178125), (0.109803921568627, \n 0.432734375, 0.432734375), (0.113725490196078, 0.44765625, \n 0.44765625), (0.117647058823529, 0.462578125, 0.462578125), (\n 0.12156862745098, 0.4775, 0.4775), (0.125490196078431, 0.492421875,\n 0.492421875), (0.129411764705882, 0.50734375, 0.50734375), (\n 0.133333333333333, 0.522265625, 0.522265625), (0.137254901960784, \n 0.5371875, 0.5371875), (0.141176470588235, 0.552109375, 0.552109375\n ), (0.145098039215686, 0.56703125, 0.56703125), (0.149019607843137,\n 0.581953125, 0.581953125), (0.152941176470588, 0.596875, 0.596875),\n (0.156862745098039, 0.611796875, 0.611796875), (0.16078431372549, \n 0.62671875, 0.62671875), (0.164705882352941, 0.641640625, \n 0.641640625), (0.168627450980392, 0.6565625, 0.6565625), (\n 0.172549019607843, 0.671484375, 0.671484375), (0.176470588235294, \n 0.68640625, 0.68640625), (0.180392156862745, 0.701328125, \n 0.701328125), (0.184313725490196, 0.71625, 0.71625), (\n 0.188235294117647, 0.731171875, 0.731171875), (0.192156862745098, \n 0.74609375, 0.74609375), (0.196078431372549, 0.731171875, \n 0.731171875), (0.2, 0.71625, 0.71625), (0.203921568627451, \n 0.701328125, 0.701328125), (0.207843137254902, 0.68640625, \n 0.68640625), (0.211764705882353, 0.671484375, 0.671484375), (\n 0.215686274509804, 0.6565625, 0.6565625), (0.219607843137255, \n 0.641640625, 0.641640625), (0.223529411764706, 0.62671875, \n 0.62671875), (0.227450980392157, 0.611796875, 0.611796875), (\n 0.231372549019608, 0.596875, 0.596875), (0.235294117647059, \n 0.581953125, 0.581953125), (0.23921568627451, 0.56703125, \n 0.56703125), (0.243137254901961, 0.552109375, 0.552109375), (\n 0.247058823529412, 0.5371875, 0.5371875), (0.250980392156863, \n 0.522265625, 0.522265625), (0.254901960784314, 0.50734375, \n 0.50734375), (0.258823529411765, 0.492421875, 0.492421875), (\n 0.262745098039216, 0.4775, 0.4775), (0.266666666666667, 0.462578125,\n 0.462578125), (0.270588235294118, 
0.44765625, 0.44765625), (\n 0.274509803921569, 0.432734375, 0.432734375), (0.27843137254902, \n 0.4178125, 0.4178125), (0.282352941176471, 0.402890625, 0.402890625\n ), (0.286274509803922, 0.38796875, 0.38796875), (0.290196078431373,\n 0.373046875, 0.373046875), (0.294117647058824, 0.358125, 0.358125),\n (0.298039215686275, 0.343203125, 0.343203125), (0.301960784313725, \n 0.32828125, 0.32828125), (0.305882352941176, 0.313359375, \n 0.313359375), (0.309803921568627, 0.2984375, 0.2984375), (\n 0.313725490196078, 0.283515625, 0.283515625), (0.317647058823529, \n 0.26859375, 0.26859375), (0.32156862745098, 0.253671875, \n 0.253671875), (0.325490196078431, 0.23875, 0.23875), (\n 0.329411764705882, 0.223828125, 0.223828125), (0.333333333333333, \n 0.20890625, 0.20890625), (0.337254901960784, 0.193984375, \n 0.193984375), (0.341176470588235, 0.1790625, 0.1790625), (\n 0.345098039215686, 0.164140625, 0.164140625), (0.349019607843137, \n 0.14921875, 0.14921875), (0.352941176470588, 0.134296875, \n 0.134296875), (0.356862745098039, 0.119375, 0.119375), (\n 0.36078431372549, 0.104453125, 0.104453125), (0.364705882352941, \n 0.08953125, 0.08953125), (0.368627450980392, 0.074609375, \n 0.074609375), (0.372549019607843, 0.0596875, 0.0596875), (\n 0.376470588235294, 0.044765625, 0.044765625), (0.380392156862745, \n 0.0298437890625, 0.0298437890625), (0.384313725490196, 0.014921875,\n 0.014921875), (0.388235294117647, 0, 0), (0.392156862745098, \n 0.012890625, 0.012890625), (0.396078431372549, 0.02578125, \n 0.02578125), (0.4, 0.038671875, 0.038671875), (0.403921568627451, \n 0.0515625, 0.0515625), (0.407843137254902, 0.064453125, 0.064453125\n ), (0.411764705882353, 0.07734375, 0.07734375), (0.415686274509804,\n 0.090234375, 0.090234375), (0.419607843137255, 0.103125, 0.103125),\n (0.423529411764706, 0.116015625, 0.116015625), (0.427450980392157, \n 0.12890625, 0.12890625), (0.431372549019608, 0.141796875, \n 0.141796875), (0.435294117647059, 0.1546875, 0.1546875), (\n 0.43921568627451, 0.167578125, 0.167578125), (0.443137254901961, \n 0.18046875, 0.18046875), (0.447058823529412, 0.193359375, \n 0.193359375), (0.450980392156863, 0.20625, 0.20625), (\n 0.454901960784314, 0.219140625, 0.219140625), (0.458823529411765, \n 0.23203125, 0.23203125), (0.462745098039216, 0.244921875, \n 0.244921875), (0.466666666666667, 0.2578125, 0.2578125), (\n 0.470588235294118, 0.270703125, 0.270703125), (0.474509803921569, \n 0.28359375, 0.28359375), (0.47843137254902, 0.296484375, \n 0.296484375), (0.482352941176471, 0.309375, 0.309375), (\n 0.486274509803922, 0.322265625, 0.322265625), (0.490196078431373, \n 0.33515625, 0.33515625), (0.494117647058824, 0.348046875, \n 0.348046875), (0.498039215686275, 0.3609375, 0.3609375), (\n 0.501960784313725, 0.373828125, 0.373828125), (0.505882352941176, \n 0.38671875, 0.38671875), (0.509803921568627, 0.399609375, \n 0.399609375), (0.513725490196078, 0.4125, 0.4125), (\n 0.517647058823529, 0.425390625, 0.425390625), (0.52156862745098, \n 0.43828125, 0.43828125), (0.525490196078431, 0.451171875, \n 0.451171875), (0.529411764705882, 0.4640625, 0.4640625), (\n 0.533333333333333, 0.476953125, 0.476953125), (0.537254901960784, \n 0.48984375, 0.48984375), (0.541176470588235, 0.502734375, \n 0.502734375), (0.545098039215686, 0.515625, 0.515625), (\n 0.549019607843137, 0.528515625, 0.528515625), (0.552941176470588, \n 0.54140625, 0.54140625), (0.556862745098039, 0.554296875, \n 0.554296875), (0.56078431372549, 0.5671875, 0.5671875), (\n 0.564705882352941, 0.580078125, 0.580078125), 
(0.568627450980392, \n 0.59296875, 0.59296875), (0.572549019607843, 0.605859375, \n 0.605859375), (0.576470588235294, 0.61875, 0.61875), (\n 0.580392156862745, 0.631640625, 0.631640625), (0.584313725490196, \n 0.64453125, 0.64453125), (0.588235294117647, 0.6359375, 0.6359375),\n (0.592156862745098, 0.62734375, 0.62734375), (0.596078431372549, \n 0.61875, 0.61875), (0.6, 0.61015625, 0.61015625), (\n 0.603921568627451, 0.6015625, 0.6015625), (0.607843137254902, \n 0.59296875, 0.59296875), (0.611764705882353, 0.584375, 0.584375), (\n 0.615686274509804, 0.57578125, 0.57578125), (0.619607843137255, \n 0.5671875, 0.5671875), (0.623529411764706, 0.55859375, 0.55859375),\n (0.627450980392157, 0.55, 0.55), (0.631372549019608, 0.54140625, \n 0.54140625), (0.635294117647059, 0.5328125, 0.5328125), (\n 0.63921568627451, 0.52421875, 0.52421875), (0.643137254901961, \n 0.515625, 0.515625), (0.647058823529412, 0.50703125, 0.50703125), (\n 0.650980392156863, 0.4984375, 0.4984375), (0.654901960784314, \n 0.48984375, 0.48984375), (0.658823529411765, 0.48125, 0.48125), (\n 0.662745098039216, 0.47265625, 0.47265625), (0.666666666666667, \n 0.4640625, 0.4640625), (0.670588235294118, 0.45546875, 0.45546875),\n (0.674509803921569, 0.446875, 0.446875), (0.67843137254902, \n 0.43828125, 0.43828125), (0.682352941176471, 0.4296875, 0.4296875),\n (0.686274509803922, 0.42109375, 0.42109375), (0.690196078431373, \n 0.4125, 0.4125), (0.694117647058824, 0.40390625, 0.40390625), (\n 0.698039215686274, 0.3953125, 0.3953125), (0.701960784313725, \n 0.38671875, 0.38671875), (0.705882352941177, 0.378125, 0.378125), (\n 0.709803921568627, 0.36953125, 0.36953125), (0.713725490196078, \n 0.3609375, 0.3609375), (0.717647058823529, 0.35234375, 0.35234375),\n (0.72156862745098, 0.34375, 0.34375), (0.725490196078431, \n 0.33515625, 0.33515625), (0.729411764705882, 0.3265625, 0.3265625),\n (0.733333333333333, 0.31796875, 0.31796875), (0.737254901960784, \n 0.309375, 0.309375), (0.741176470588235, 0.30078125, 0.30078125), (\n 0.745098039215686, 0.2921875, 0.2921875), (0.749019607843137, \n 0.28359375, 0.28359375), (0.752941176470588, 0.275, 0.275), (\n 0.756862745098039, 0.26640625, 0.26640625), (0.76078431372549, \n 0.2578125, 0.2578125), (0.764705882352941, 0.24921875, 0.24921875),\n (0.768627450980392, 0.240625, 0.240625), (0.772549019607843, \n 0.23203125, 0.23203125), (0.776470588235294, 0.2234375, 0.2234375),\n (0.780392156862745, 0.21484375, 0.21484375), (0.784313725490196, \n 0.222301171875, 0.222301171875), (0.788235294117647, 0.22975859375,\n 0.22975859375), (0.792156862745098, 0.237216015625, 0.237216015625),\n (0.796078431372549, 0.2446734375, 0.2446734375), (0.8, \n 0.252130859375, 0.252130859375), (0.803921568627451, 0.259587890625,\n 0.259587890625), (0.807843137254902, 0.2670453125, 0.2670453125), (\n 0.811764705882353, 0.274502734375, 0.274502734375), (\n 0.815686274509804, 0.28196015625, 0.28196015625), (\n 0.819607843137255, 0.289417578125, 0.289417578125), (\n 0.823529411764706, 0.296875, 0.296875), (0.827450980392157, \n 0.304332421875, 0.304332421875), (0.831372549019608, 0.31178984375,\n 0.31178984375), (0.835294117647059, 0.319247265625, 0.319247265625),\n (0.83921568627451, 0.3267046875, 0.3267046875), (0.843137254901961,\n 0.334162109375, 0.334162109375), (0.847058823529412, 0.34161953125,\n 0.34161953125), (0.850980392156863, 0.3490765625, 0.3490765625), (\n 0.854901960784314, 0.356533984375, 0.356533984375), (\n 0.858823529411765, 0.36399140625, 0.36399140625), (\n 0.862745098039216, 0.371448828125, 
0.371448828125), (\n 0.866666666666667, 0.37890625, 0.37890625), (0.870588235294118, \n 0.386363671875, 0.386363671875), (0.874509803921569, 0.3938203125, \n 0.3938203125), (0.87843137254902, 0.40127734375, 0.40127734375), (\n 0.882352941176471, 0.408734375, 0.408734375), (0.886274509803922, \n 0.41619140625, 0.41619140625), (0.890196078431373, 0.42365234375, \n 0.42365234375), (0.894117647058824, 0.431109375, 0.431109375), (\n 0.898039215686275, 0.43856640625, 0.43856640625), (\n 0.901960784313726, 0.4460234375, 0.4460234375), (0.905882352941176,\n 0.45348046875, 0.45348046875), (0.909803921568627, 0.4609375, \n 0.4609375), (0.913725490196078, 0.46839453125, 0.46839453125), (\n 0.917647058823529, 0.4758515625, 0.4758515625), (0.92156862745098, \n 0.48330859375, 0.48330859375), (0.925490196078431, 0.490765625, \n 0.490765625), (0.929411764705882, 0.49822265625, 0.49822265625), (\n 0.933333333333333, 0.50568359375, 0.50568359375), (\n 0.937254901960784, 0.513140625, 0.513140625), (0.941176470588235, \n 0.52059765625, 0.52059765625), (0.945098039215686, 0.5280546875, \n 0.5280546875), (0.949019607843137, 0.53551171875, 0.53551171875), (\n 0.952941176470588, 0.54296875, 0.54296875), (0.956862745098039, \n 0.55042578125, 0.55042578125), (0.96078431372549, 0.5578828125, \n 0.5578828125), (0.964705882352941, 0.56533984375, 0.56533984375), (\n 0.968627450980392, 0.572796875, 0.572796875), (0.972549019607843, \n 0.58025390625, 0.58025390625), (0.976470588235294, 0.58771484375, \n 0.58771484375), (0.980392156862745, 0.595171875, 0.595171875), (\n 0.984313725490196, 0.60262890625, 0.60262890625), (\n 0.988235294117647, 0.6100859375, 0.6100859375), (0.992156862745098,\n 0.61754296875, 0.61754296875), (0.996078431372549, 0.625, 0.625), (\n 1.0, 0.625, 0.625)), 'blue': ((0.0, 0.51984375, 0.51984375), (\n 0.00392156862745098, 0.51984375, 0.51984375), (0.00784313725490196,\n 0.529765625, 0.529765625), (0.0117647058823529, 0.5396875, \n 0.5396875), (0.0156862745098039, 0.549609375, 0.549609375), (\n 0.0196078431372549, 0.55953125, 0.55953125), (0.0235294117647059, \n 0.569453125, 0.569453125), (0.0274509803921569, 0.579375, 0.579375),\n (0.0313725490196078, 0.589296875, 0.589296875), (0.0352941176470588,\n 0.59921875, 0.59921875), (0.0392156862745098, 0.609140625, \n 0.609140625), (0.0431372549019608, 0.6190625, 0.6190625), (\n 0.0470588235294118, 0.628984375, 0.628984375), (0.0509803921568627,\n 0.63890625, 0.63890625), (0.0549019607843137, 0.648828125, \n 0.648828125), (0.0588235294117647, 0.65875, 0.65875), (\n 0.0627450980392157, 0.668671875, 0.668671875), (0.0666666666666667,\n 0.67859375, 0.67859375), (0.0705882352941176, 0.688515625, \n 0.688515625), (0.0745098039215686, 0.6984375, 0.6984375), (\n 0.0784313725490196, 0.708359375, 0.708359375), (0.0823529411764706,\n 0.71828125, 0.71828125), (0.0862745098039216, 0.728203125, \n 0.728203125), (0.0901960784313725, 0.738125, 0.738125), (\n 0.0941176470588235, 0.748046875, 0.748046875), (0.0980392156862745,\n 0.75796875, 0.75796875), (0.101960784313725, 0.767890625, \n 0.767890625), (0.105882352941176, 0.7778125, 0.7778125), (\n 0.109803921568627, 0.787734375, 0.787734375), (0.113725490196078, \n 0.79765625, 0.79765625), (0.117647058823529, 0.807578125, \n 0.807578125), (0.12156862745098, 0.8175, 0.8175), (\n 0.125490196078431, 0.827421875, 0.827421875), (0.129411764705882, \n 0.83734375, 0.83734375), (0.133333333333333, 0.847265625, \n 0.847265625), (0.137254901960784, 0.8571875, 0.8571875), (\n 0.141176470588235, 0.867109375, 0.867109375), 
(0.145098039215686, \n 0.87703125, 0.87703125), (0.149019607843137, 0.886953125, \n 0.886953125), (0.152941176470588, 0.896875, 0.896875), (\n 0.156862745098039, 0.906796875, 0.906796875), (0.16078431372549, \n 0.91671875, 0.91671875), (0.164705882352941, 0.926640625, \n 0.926640625), (0.168627450980392, 0.9365625, 0.9365625), (\n 0.172549019607843, 0.946484375, 0.946484375), (0.176470588235294, \n 0.95640625, 0.95640625), (0.180392156862745, 0.966328125, \n 0.966328125), (0.184313725490196, 0.97625, 0.97625), (\n 0.188235294117647, 0.986171875, 0.986171875), (0.192156862745098, \n 0.99609375, 0.99609375), (0.196078431372549, 0.976171875, \n 0.976171875), (0.2, 0.95625, 0.95625), (0.203921568627451, \n 0.936328125, 0.936328125), (0.207843137254902, 0.91640625, \n 0.91640625), (0.211764705882353, 0.896484375, 0.896484375), (\n 0.215686274509804, 0.8765625, 0.8765625), (0.219607843137255, \n 0.856640625, 0.856640625), (0.223529411764706, 0.83671875, \n 0.83671875), (0.227450980392157, 0.816796875, 0.816796875), (\n 0.231372549019608, 0.796875, 0.796875), (0.235294117647059, \n 0.776953125, 0.776953125), (0.23921568627451, 0.75703125, \n 0.75703125), (0.243137254901961, 0.737109375, 0.737109375), (\n 0.247058823529412, 0.7171875, 0.7171875), (0.250980392156863, \n 0.697265625, 0.697265625), (0.254901960784314, 0.67734375, \n 0.67734375), (0.258823529411765, 0.657421875, 0.657421875), (\n 0.262745098039216, 0.6375, 0.6375), (0.266666666666667, 0.617578125,\n 0.617578125), (0.270588235294118, 0.59765625, 0.59765625), (\n 0.274509803921569, 0.577734375, 0.577734375), (0.27843137254902, \n 0.5578125, 0.5578125), (0.282352941176471, 0.537890625, 0.537890625\n ), (0.286274509803922, 0.51796875, 0.51796875), (0.290196078431373,\n 0.498046875, 0.498046875), (0.294117647058824, 0.478125, 0.478125),\n (0.298039215686275, 0.458203125, 0.458203125), (0.301960784313725, \n 0.43828125, 0.43828125), (0.305882352941176, 0.418359375, \n 0.418359375), (0.309803921568627, 0.3984375, 0.3984375), (\n 0.313725490196078, 0.378515625, 0.378515625), (0.317647058823529, \n 0.35859375, 0.35859375), (0.32156862745098, 0.338671875, \n 0.338671875), (0.325490196078431, 0.31875, 0.31875), (\n 0.329411764705882, 0.298828125, 0.298828125), (0.333333333333333, \n 0.27890625, 0.27890625), (0.337254901960784, 0.258984375, \n 0.258984375), (0.341176470588235, 0.2390625, 0.2390625), (\n 0.345098039215686, 0.219140625, 0.219140625), (0.349019607843137, \n 0.19921875, 0.19921875), (0.352941176470588, 0.179296875, \n 0.179296875), (0.356862745098039, 0.159375, 0.159375), (\n 0.36078431372549, 0.139453125, 0.139453125), (0.364705882352941, \n 0.11953125, 0.11953125), (0.368627450980392, 0.099609375, \n 0.099609375), (0.372549019607843, 0.0796875, 0.0796875), (\n 0.376470588235294, 0.059765625, 0.059765625), (0.380392156862745, \n 0.03984375, 0.03984375), (0.384313725490196, 0.019921875, \n 0.019921875), (0.388235294117647, 0, 0), (0.392156862745098, 0, 0),\n (0.396078431372549, 0, 0), (0.4, 0, 0), (0.403921568627451, 0, 0),\n (0.407843137254902, 0, 0), (0.411764705882353, 0, 0), (\n 0.415686274509804, 0, 0), (0.419607843137255, 0, 0), (\n 0.423529411764706, 0, 0), (0.427450980392157, 0, 0), (\n 0.431372549019608, 0, 0), (0.435294117647059, 0, 0), (\n 0.43921568627451, 0, 0), (0.443137254901961, 0, 0), (\n 0.447058823529412, 0, 0), (0.450980392156863, 0, 0), (\n 0.454901960784314, 0, 0), (0.458823529411765, 0, 0), (\n 0.462745098039216, 0, 0), (0.466666666666667, 0, 0), (\n 0.470588235294118, 0, 0), (0.474509803921569, 0, 0), (\n 
0.47843137254902, 0, 0), (0.482352941176471, 0, 0), (\n 0.486274509803922, 0, 0), (0.490196078431373, 0, 0), (\n 0.494117647058824, 0, 0), (0.498039215686275, 0, 0), (\n 0.501960784313725, 0, 0), (0.505882352941176, 0, 0), (\n 0.509803921568627, 0, 0), (0.513725490196078, 0, 0), (\n 0.517647058823529, 0, 0), (0.52156862745098, 0, 0), (\n 0.525490196078431, 0, 0), (0.529411764705882, 0, 0), (\n 0.533333333333333, 0, 0), (0.537254901960784, 0, 0), (\n 0.541176470588235, 0, 0), (0.545098039215686, 0, 0), (\n 0.549019607843137, 0, 0), (0.552941176470588, 0, 0), (\n 0.556862745098039, 0, 0), (0.56078431372549, 0, 0), (\n 0.564705882352941, 0, 0), (0.568627450980392, 0, 0), (\n 0.572549019607843, 0, 0), (0.576470588235294, 0, 0), (\n 0.580392156862745, 0, 0), (0.584313725490196, 0, 0), (\n 0.588235294117647, 0.004296875, 0.004296875), (0.592156862745098, \n 0.00859375, 0.00859375), (0.596078431372549, 0.012890625, \n 0.012890625), (0.6, 0.0171875, 0.0171875), (0.603921568627451, \n 0.021484375, 0.021484375), (0.607843137254902, 0.02578125, \n 0.02578125), (0.611764705882353, 0.030078125, 0.030078125), (\n 0.615686274509804, 0.034375, 0.034375), (0.619607843137255, \n 0.038671875, 0.038671875), (0.623529411764706, 0.04296875, \n 0.04296875), (0.627450980392157, 0.047265625, 0.047265625), (\n 0.631372549019608, 0.0515625, 0.0515625), (0.635294117647059, \n 0.055859375, 0.055859375), (0.63921568627451, 0.06015625, \n 0.06015625), (0.643137254901961, 0.064453125, 0.064453125), (\n 0.647058823529412, 0.06875, 0.06875), (0.650980392156863, \n 0.073046875, 0.073046875), (0.654901960784314, 0.07734375, \n 0.07734375), (0.658823529411765, 0.081640625, 0.081640625), (\n 0.662745098039216, 0.0859375, 0.0859375), (0.666666666666667, \n 0.090234375, 0.090234375), (0.670588235294118, 0.09453125, \n 0.09453125), (0.674509803921569, 0.098828125, 0.098828125), (\n 0.67843137254902, 0.103125, 0.103125), (0.682352941176471, \n 0.107421875, 0.107421875), (0.686274509803922, 0.11171875, \n 0.11171875), (0.690196078431373, 0.116015625, 0.116015625), (\n 0.694117647058824, 0.1203125, 0.1203125), (0.698039215686274, \n 0.124609375, 0.124609375), (0.701960784313725, 0.12890625, \n 0.12890625), (0.705882352941177, 0.133203125, 0.133203125), (\n 0.709803921568627, 0.1375, 0.1375), (0.713725490196078, 0.141796875,\n 0.141796875), (0.717647058823529, 0.14609375, 0.14609375), (\n 0.72156862745098, 0.150390625, 0.150390625), (0.725490196078431, \n 0.1546875, 0.1546875), (0.729411764705882, 0.158984375, 0.158984375\n ), (0.733333333333333, 0.16328125, 0.16328125), (0.737254901960784,\n 0.167578125, 0.167578125), (0.741176470588235, 0.171875, 0.171875),\n (0.745098039215686, 0.176171875, 0.176171875), (0.749019607843137, \n 0.18046875, 0.18046875), (0.752941176470588, 0.184765625, \n 0.184765625), (0.756862745098039, 0.1890625, 0.1890625), (\n 0.76078431372549, 0.193359375, 0.193359375), (0.764705882352941, \n 0.19765625, 0.19765625), (0.768627450980392, 0.201953125, \n 0.201953125), (0.772549019607843, 0.20625, 0.20625), (\n 0.776470588235294, 0.210546875, 0.210546875), (0.780392156862745, \n 0.21484375, 0.21484375), (0.784313725490196, 0.22663359375, \n 0.22663359375), (0.788235294117647, 0.2384234375, 0.2384234375), (\n 0.792156862745098, 0.250212890625, 0.250212890625), (\n 0.796078431372549, 0.262002734375, 0.262002734375), (0.8, \n 0.273792578125, 0.273792578125), (0.803921568627451, 0.285582421875,\n 0.285582421875), (0.807843137254902, 0.297372265625, 0.297372265625\n ), (0.811764705882353, 0.309162109375, 
0.309162109375), (\n 0.815686274509804, 0.3209515625, 0.3209515625), (0.819607843137255,\n 0.33274140625, 0.33274140625), (0.823529411764706, 0.34453125, \n 0.34453125), (0.827450980392157, 0.35632109375, 0.35632109375), (\n 0.831372549019608, 0.3681109375, 0.3681109375), (0.835294117647059,\n 0.379900390625, 0.379900390625), (0.83921568627451, 0.39169140625, \n 0.39169140625), (0.843137254901961, 0.40348046875, 0.40348046875),\n (0.847058823529412, 0.41526953125, 0.41526953125), (\n 0.850980392156863, 0.42705859375, 0.42705859375), (\n 0.854901960784314, 0.43884765625, 0.43884765625), (\n 0.858823529411765, 0.450640625, 0.450640625), (0.862745098039216, \n 0.4624296875, 0.4624296875), (0.866666666666667, 0.47421875, \n 0.47421875), (0.870588235294118, 0.4860078125, 0.4860078125), (\n 0.874509803921569, 0.497796875, 0.497796875), (0.87843137254902, \n 0.50958984375, 0.50958984375), (0.882352941176471, 0.52137890625, \n 0.52137890625), (0.886274509803922, 0.53316796875, 0.53316796875),\n (0.890196078431373, 0.54495703125, 0.54495703125), (\n 0.894117647058824, 0.55674609375, 0.55674609375), (\n 0.898039215686275, 0.56853515625, 0.56853515625), (\n 0.901960784313726, 0.580328125, 0.580328125), (0.905882352941176, \n 0.5921171875, 0.5921171875), (0.909803921568627, 0.60390625, \n 0.60390625), (0.913725490196078, 0.6156953125, 0.6156953125), (\n 0.917647058823529, 0.627484375, 0.627484375), (0.92156862745098, \n 0.63927734375, 0.63927734375), (0.925490196078431, 0.65106640625, \n 0.65106640625), (0.929411764705882, 0.66285546875, 0.66285546875),\n (0.933333333333333, 0.67464453125, 0.67464453125), (\n 0.937254901960784, 0.68643359375, 0.68643359375), (\n 0.941176470588235, 0.69822265625, 0.69822265625), (\n 0.945098039215686, 0.710015625, 0.710015625), (0.949019607843137, \n 0.7218046875, 0.7218046875), (0.952941176470588, 0.73359375, \n 0.73359375), (0.956862745098039, 0.7453828125, 0.7453828125), (\n 0.96078431372549, 0.757171875, 0.757171875), (0.964705882352941, \n 0.76896484375, 0.76896484375), (0.968627450980392, 0.78075390625, \n 0.78075390625), (0.972549019607843, 0.79254296875, 0.79254296875),\n (0.976470588235294, 0.80433203125, 0.80433203125), (\n 0.980392156862745, 0.81612109375, 0.81612109375), (\n 0.984313725490196, 0.82791015625, 0.82791015625), (\n 0.988235294117647, 0.839703125, 0.839703125), (0.992156862745098, \n 0.8514921875, 0.8514921875), (0.996078431372549, 0.86328125, \n 0.86328125), (1.0, 0.86328125, 0.86328125))}\n vcdict = {'red': ((0, 1, 1), (0.00392156862745098, 0.54508984375, \n 0.54508984375), (0.00784313725490196, 0.5285703125, 0.5285703125),\n (0.0117647058823529, 0.5120546875, 0.5120546875), (\n 0.0156862745098039, 0.49553515625, 0.49553515625), (\n 0.0196078431372549, 0.47901953125, 0.47901953125), (\n 0.0235294117647059, 0.4625, 0.4625), (0.0274509803921569, \n 0.44598046875, 0.44598046875), (0.0313725490196078, 0.42946484375, \n 0.42946484375), (0.0352941176470588, 0.4129453125, 0.4129453125), (\n 0.0392156862745098, 0.3964296875, 0.3964296875), (\n 0.0431372549019608, 0.379910546875, 0.379910546875), (\n 0.0470588235294118, 0.36339296875, 0.36339296875), (\n 0.0509803921568627, 0.346875, 0.346875), (0.0549019607843137, \n 0.33035703125, 0.33035703125), (0.0588235294117647, 0.313839453125,\n 0.313839453125), (0.0627450980392157, 0.297321484375, \n 0.297321484375), (0.0666666666666667, 0.280803515625, \n 0.280803515625), (0.0705882352941176, 0.2642859375, 0.2642859375),\n (0.0745098039215686, 0.24776796875, 0.24776796875), (\n 0.0784313725490196, 0.23125, 
0.23125), (0.0823529411764706, \n 0.21473203125, 0.21473203125), (0.0862745098039216, 0.198214453125,\n 0.198214453125), (0.0901960784313725, 0.181696484375, \n 0.181696484375), (0.0941176470588235, 0.165178515625, \n 0.165178515625), (0.0980392156862745, 0.148660546875, \n 0.148660546875), (0.101960784313725, 0.13214296875, 0.13214296875),\n (0.105882352941176, 0.115625, 0.115625), (0.109803921568627, \n 0.09910703125, 0.09910703125), (0.113725490196078, 0.082589453125, \n 0.082589453125), (0.117647058823529, 0.066071484375, 0.066071484375\n ), (0.12156862745098, 0.049553515625, 0.049553515625), (\n 0.125490196078431, 0.0330357421875, 0.0330357421875), (\n 0.129411764705882, 0.016517890625, 0.016517890625), (\n 0.133333333333333, 0, 0), (0.137254901960784, 0, 0), (\n 0.141176470588235, 0, 0), (0.145098039215686, 0, 0), (\n 0.149019607843137, 0, 0), (0.152941176470588, 0, 0), (\n 0.156862745098039, 0, 0), (0.16078431372549, 0, 0), (\n 0.164705882352941, 0, 0), (0.168627450980392, 0, 0), (\n 0.172549019607843, 0, 0), (0.176470588235294, 0, 0), (\n 0.180392156862745, 0, 0), (0.184313725490196, 0, 0), (\n 0.188235294117647, 0, 0), (0.192156862745098, 0, 0), (\n 0.196078431372549, 0, 0), (0.2, 0, 0), (0.203921568627451, 0, 0), (\n 0.207843137254902, 0, 0), (0.211764705882353, 0, 0), (\n 0.215686274509804, 0, 0), (0.219607843137255, 0, 0), (\n 0.223529411764706, 0, 0), (0.227450980392157, 0, 0), (\n 0.231372549019608, 0, 0), (0.235294117647059, 0, 0), (\n 0.23921568627451, 0, 0), (0.243137254901961, 0, 0), (\n 0.247058823529412, 0, 0), (0.250980392156863, 0, 0), (\n 0.254901960784314, 0, 0), (0.258823529411765, 0, 0), (\n 0.262745098039216, 0, 0), (0.266666666666667, 0, 0), (\n 0.270588235294118, 0, 0), (0.274509803921569, 0, 0), (\n 0.27843137254902, 0, 0), (0.282352941176471, 0, 0), (\n 0.286274509803922, 0, 0), (0.290196078431373, 0, 0), (\n 0.294117647058824, 0, 0), (0.298039215686275, 0, 0), (\n 0.301960784313725, 0, 0), (0.305882352941176, 0, 0), (\n 0.309803921568627, 0, 0), (0.313725490196078, 0, 0), (\n 0.317647058823529, 0, 0), (0.32156862745098, 0, 0), (\n 0.325490196078431, 0, 0), (0.329411764705882, 0, 0), (\n 0.333333333333333, 0, 0), (0.337254901960784, 0, 0), (\n 0.341176470588235, 0, 0), (0.345098039215686, 0, 0), (\n 0.349019607843137, 0, 0), (0.352941176470588, 0.0061383984375, \n 0.0061383984375), (0.356862745098039, 0.012276796875, \n 0.012276796875), (0.36078431372549, 0.0184151953125, \n 0.0184151953125), (0.364705882352941, 0.0245535546875, \n 0.0245535546875), (0.368627450980392, 0.030691953125, \n 0.030691953125), (0.372549019607843, 0.0368303515625, \n 0.0368303515625), (0.376470588235294, 0.04296875, 0.04296875), (\n 0.380392156862745, 0.04910703125, 0.04910703125), (\n 0.384313725490196, 0.055245703125, 0.055245703125), (\n 0.388235294117647, 0.061383984375, 0.061383984375), (\n 0.392156862745098, 0.067522265625, 0.067522265625), (\n 0.396078431372549, 0.073660546875, 0.073660546875), (0.4, \n 0.07979921875, 0.07979921875), (0.403921568627451, 0.0859375, \n 0.0859375), (0.407843137254902, 0.09207578125, 0.09207578125), (\n 0.411764705882353, 0.098214453125, 0.098214453125), (\n 0.415686274509804, 0.104352734375, 0.104352734375), (\n 0.419607843137255, 0.110491015625, 0.110491015625), (\n 0.423529411764706, 0.116629296875, 0.116629296875), (\n 0.427450980392157, 0.12276796875, 0.12276796875), (\n 0.431372549019608, 0.12890625, 0.12890625), (0.435294117647059, \n 0.13504453125, 0.13504453125), (0.43921568627451, 0.141183203125, \n 0.141183203125), (0.443137254901961, 
0.147321484375, 0.147321484375\n ), (0.447058823529412, 0.153459765625, 0.153459765625), (\n 0.450980392156863, 0.159598046875, 0.159598046875), (\n 0.454901960784314, 0.16573671875, 0.16573671875), (\n 0.458823529411765, 0.171875, 0.171875), (0.462745098039216, \n 0.17801328125, 0.17801328125), (0.466666666666667, 0.184151953125, \n 0.184151953125), (0.470588235294118, 0.190290234375, 0.190290234375\n ), (0.474509803921569, 0.196428515625, 0.196428515625), (\n 0.47843137254902, 0.202566796875, 0.202566796875), (\n 0.482352941176471, 0.20870546875, 0.20870546875), (\n 0.486274509803922, 0.21484375, 0.21484375), (0.490196078431373, \n 0.233370703125, 0.233370703125), (0.494117647058824, 0.251897265625,\n 0.251897265625), (0.498039215686275, 0.27042421875, 0.27042421875),\n (0.501960784313725, 0.28895078125, 0.28895078125), (\n 0.505882352941176, 0.307477734375, 0.307477734375), (\n 0.509803921568627, 0.326004296875, 0.326004296875), (\n 0.513725490196078, 0.34453125, 0.34453125), (0.517647058823529, \n 0.363058203125, 0.363058203125), (0.52156862745098, 0.381584765625,\n 0.381584765625), (0.525490196078431, 0.40011328125, 0.40011328125),\n (0.529411764705882, 0.41863671875, 0.41863671875), (\n 0.533333333333333, 0.4371640625, 0.4371640625), (0.537254901960784,\n 0.45569140625, 0.45569140625), (0.541176470588235, 0.47421875, \n 0.47421875), (0.545098039215686, 0.49274609375, 0.49274609375), (\n 0.549019607843137, 0.5112734375, 0.5112734375), (0.552941176470588,\n 0.52980078125, 0.52980078125), (0.556862745098039, 0.54832421875, \n 0.54832421875), (0.56078431372549, 0.5668515625, 0.5668515625), (\n 0.564705882352941, 0.58537890625, 0.58537890625), (\n 0.568627450980392, 0.60390625, 0.60390625), (0.572549019607843, \n 0.62243359375, 0.62243359375), (0.576470588235294, 0.6409609375, \n 0.6409609375), (0.580392156862745, 0.65948828125, 0.65948828125), (\n 0.584313725490196, 0.67801171875, 0.67801171875), (\n 0.588235294117647, 0.6965390625, 0.6965390625), (0.592156862745098,\n 0.71506640625, 0.71506640625), (0.596078431372549, 0.73359375, \n 0.73359375), (0.6, 0.75212109375, 0.75212109375), (\n 0.603921568627451, 0.7706484375, 0.7706484375), (0.607843137254902,\n 0.78917578125, 0.78917578125), (0.611764705882353, 0.80769921875, \n 0.80769921875), (0.615686274509804, 0.8262265625, 0.8262265625), (\n 0.619607843137255, 0.84475390625, 0.84475390625), (\n 0.623529411764706, 0.86328125, 0.86328125), (0.627450980392157, \n 0.86549609375, 0.86549609375), (0.631372549019608, 0.86770703125, \n 0.86770703125), (0.635294117647059, 0.869921875, 0.869921875), (\n 0.63921568627451, 0.87213671875, 0.87213671875), (0.643137254901961,\n 0.87434765625, 0.87434765625), (0.647058823529412, 0.8765625, \n 0.8765625), (0.650980392156863, 0.87877734375, 0.87877734375), (\n 0.654901960784314, 0.88098828125, 0.88098828125), (\n 0.658823529411765, 0.883203125, 0.883203125), (0.662745098039216, \n 0.88541796875, 0.88541796875), (0.666666666666667, 0.88762890625, \n 0.88762890625), (0.670588235294118, 0.88984375, 0.88984375), (\n 0.674509803921569, 0.89205859375, 0.89205859375), (0.67843137254902,\n 0.89426953125, 0.89426953125), (0.682352941176471, 0.896484375, \n 0.896484375), (0.686274509803922, 0.89869921875, 0.89869921875), (\n 0.690196078431373, 0.90091015625, 0.90091015625), (\n 0.694117647058824, 0.903125, 0.903125), (0.698039215686274, \n 0.90533984375, 0.90533984375), (0.701960784313725, 0.90755078125, \n 0.90755078125), (0.705882352941177, 0.909765625, 0.909765625), (\n 0.709803921568627, 0.91198046875, 
0.91198046875), (\n 0.713725490196078, 0.91419140625, 0.91419140625), (\n 0.717647058823529, 0.91640625, 0.91640625), (0.72156862745098, \n 0.91862109375, 0.91862109375), (0.725490196078431, 0.92083203125, \n 0.92083203125), (0.729411764705882, 0.923046875, 0.923046875), (\n 0.733333333333333, 0.92526171875, 0.92526171875), (\n 0.737254901960784, 0.92747265625, 0.92747265625), (\n 0.741176470588235, 0.9296875, 0.9296875), (0.745098039215686, \n 0.93190234375, 0.93190234375), (0.749019607843137, 0.93411328125, \n 0.93411328125), (0.752941176470588, 0.936328125, 0.936328125), (\n 0.756862745098039, 0.93854296875, 0.93854296875), (0.76078431372549,\n 0.94075390625, 0.94075390625), (0.764705882352941, 0.94296875, \n 0.94296875), (0.768627450980392, 0.94518359375, 0.94518359375), (\n 0.772549019607843, 0.94739453125, 0.94739453125), (\n 0.776470588235294, 0.949609375, 0.949609375), (0.780392156862745, \n 0.95182421875, 0.95182421875), (0.784313725490196, 0.95403515625, \n 0.95403515625), (0.788235294117647, 0.95625, 0.95625), (\n 0.792156862745098, 0.95846484375, 0.95846484375), (\n 0.796078431372549, 0.96067578125, 0.96067578125), (0.8, 0.962890625,\n 0.962890625), (0.803921568627451, 0.96510546875, 0.96510546875), (\n 0.807843137254902, 0.96731640625, 0.96731640625), (\n 0.811764705882353, 0.96953125, 0.96953125), (0.815686274509804, \n 0.97174609375, 0.97174609375), (0.819607843137255, 0.97395703125, \n 0.97395703125), (0.823529411764706, 0.976171875, 0.976171875), (\n 0.827450980392157, 0.97838671875, 0.97838671875), (\n 0.831372549019608, 0.98059765625, 0.98059765625), (\n 0.835294117647059, 0.9828125, 0.9828125), (0.83921568627451, \n 0.98502734375, 0.98502734375), (0.843137254901961, 0.98723828125, \n 0.98723828125), (0.847058823529412, 0.989453125, 0.989453125), (\n 0.850980392156863, 0.99166796875, 0.99166796875), (\n 0.854901960784314, 0.99387890625, 0.99387890625), (\n 0.858823529411765, 0.99609375, 0.99609375), (0.862745098039216, \n 0.99609375, 0.99609375), (0.866666666666667, 0.99609375, 0.99609375\n ), (0.870588235294118, 0.99609375, 0.99609375), (0.874509803921569,\n 0.99609375, 0.99609375), (0.87843137254902, 0.99609375, 0.99609375),\n (0.882352941176471, 0.99609375, 0.99609375), (0.886274509803922, \n 0.99609375, 0.99609375), (0.890196078431373, 0.99609375, 0.99609375\n ), (0.894117647058824, 0.99609375, 0.99609375), (0.898039215686275,\n 0.99609375, 0.99609375), (0.901960784313726, 0.99609375, 0.99609375\n ), (0.905882352941176, 0.99609375, 0.99609375), (0.909803921568627,\n 0.99609375, 0.99609375), (0.913725490196078, 0.99609375, 0.99609375\n ), (0.917647058823529, 0.99609375, 0.99609375), (0.92156862745098, \n 0.99609375, 0.99609375), (0.925490196078431, 0.99609375, 0.99609375\n ), (0.929411764705882, 0.99609375, 0.99609375), (0.933333333333333,\n 0.99609375, 0.99609375), (0.937254901960784, 0.99609375, 0.99609375\n ), (0.941176470588235, 0.99609375, 0.99609375), (0.945098039215686,\n 0.99609375, 0.99609375), (0.949019607843137, 0.99609375, 0.99609375\n ), (0.952941176470588, 0.99609375, 0.99609375), (0.956862745098039,\n 0.99609375, 0.99609375), (0.96078431372549, 0.99609375, 0.99609375),\n (0.964705882352941, 0.99609375, 0.99609375), (0.968627450980392, \n 0.99609375, 0.99609375), (0.972549019607843, 0.99609375, 0.99609375\n ), (0.976470588235294, 0.99609375, 0.99609375), (0.980392156862745,\n 0.99609375, 0.99609375), (0.984313725490196, 0.99609375, 0.99609375\n ), (0.988235294117647, 0.99609375, 0.99609375), (0.992156862745098,\n 0.99609375, 0.99609375), 
(0.996078431372549, 0.99609375, 0.99609375\n ), (1, 0.99609375, 0.99609375)), 'green': ((0, 1, 1), (\n 0.00392156862745098, 0, 0), (0.00784313725490196, 0, 0), (\n 0.0117647058823529, 0, 0), (0.0156862745098039, 0, 0), (\n 0.0196078431372549, 0, 0), (0.0235294117647059, 0, 0), (\n 0.0274509803921569, 0, 0), (0.0313725490196078, 0, 0), (\n 0.0352941176470588, 0, 0), (0.0392156862745098, 0, 0), (\n 0.0431372549019608, 0, 0), (0.0470588235294118, 0, 0), (\n 0.0509803921568627, 0, 0), (0.0549019607843137, 0, 0), (\n 0.0588235294117647, 0, 0), (0.0627450980392157, 0, 0), (\n 0.0666666666666667, 0, 0), (0.0705882352941176, 0, 0), (\n 0.0745098039215686, 0, 0), (0.0784313725490196, 0, 0), (\n 0.0823529411764706, 0, 0), (0.0862745098039216, 0, 0), (\n 0.0901960784313725, 0, 0), (0.0941176470588235, 0, 0), (\n 0.0980392156862745, 0, 0), (0.101960784313725, 0, 0), (\n 0.105882352941176, 0, 0), (0.109803921568627, 0, 0), (\n 0.113725490196078, 0, 0), (0.117647058823529, 0, 0), (\n 0.12156862745098, 0, 0), (0.125490196078431, 0, 0), (\n 0.129411764705882, 0, 0), (0.133333333333333, 0, 0), (\n 0.137254901960784, 0.0135653515625, 0.0135653515625), (\n 0.141176470588235, 0.0271306640625, 0.0271306640625), (\n 0.145098039215686, 0.04069609375, 0.04069609375), (\n 0.149019607843137, 0.054261328125, 0.054261328125), (\n 0.152941176470588, 0.0678265625, 0.0678265625), (0.156862745098039,\n 0.0813921875, 0.0813921875), (0.16078431372549, 0.094957421875, \n 0.094957421875), (0.164705882352941, 0.10852265625, 0.10852265625),\n (0.168627450980392, 0.122087890625, 0.122087890625), (\n 0.172549019607843, 0.135653515625, 0.135653515625), (\n 0.176470588235294, 0.14921875, 0.14921875), (0.180392156862745, \n 0.162783984375, 0.162783984375), (0.184313725490196, 0.176349609375,\n 0.176349609375), (0.188235294117647, 0.18991484375, 0.18991484375),\n (0.192156862745098, 0.203480078125, 0.203480078125), (\n 0.196078431372549, 0.2170453125, 0.2170453125), (0.2, 0.2306109375,\n 0.2306109375), (0.203921568627451, 0.244176171875, 0.244176171875),\n (0.207843137254902, 0.25774140625, 0.25774140625), (\n 0.211764705882353, 0.27130703125, 0.27130703125), (\n 0.215686274509804, 0.284872265625, 0.284872265625), (\n 0.219607843137255, 0.2984375, 0.2984375), (0.223529411764706, \n 0.312002734375, 0.312002734375), (0.227450980392157, 0.325568359375,\n 0.325568359375), (0.231372549019608, 0.33913359375, 0.33913359375),\n (0.235294117647059, 0.352698828125, 0.352698828125), (\n 0.23921568627451, 0.3662640625, 0.3662640625), (0.243137254901961, \n 0.3798296875, 0.3798296875), (0.247058823529412, 0.39339453125, \n 0.39339453125), (0.250980392156863, 0.4069609375, 0.4069609375), (\n 0.254901960784314, 0.42052734375, 0.42052734375), (\n 0.258823529411765, 0.43408984375, 0.43408984375), (\n 0.262745098039216, 0.44765625, 0.44765625), (0.266666666666667, \n 0.46122265625, 0.46122265625), (0.270588235294118, 0.47478515625, \n 0.47478515625), (0.274509803921569, 0.4883515625, 0.4883515625), (\n 0.27843137254902, 0.50191796875, 0.50191796875), (0.282352941176471,\n 0.515484375, 0.515484375), (0.286274509803922, 0.529046875, \n 0.529046875), (0.290196078431373, 0.54261328125, 0.54261328125), (\n 0.294117647058824, 0.5561796875, 0.5561796875), (0.298039215686275,\n 0.56974609375, 0.56974609375), (0.301960784313725, 0.58330859375, \n 0.58330859375), (0.305882352941176, 0.596875, 0.596875), (\n 0.309803921568627, 0.61044140625, 0.61044140625), (\n 0.313725490196078, 0.62400390625, 0.62400390625), (\n 0.317647058823529, 0.6375703125, 0.6375703125), 
(0.32156862745098, \n 0.65113671875, 0.65113671875), (0.325490196078431, 0.664703125, \n 0.664703125), (0.329411764705882, 0.678265625, 0.678265625), (\n 0.333333333333333, 0.69183203125, 0.69183203125), (\n 0.337254901960784, 0.7053984375, 0.7053984375), (0.341176470588235,\n 0.71896484375, 0.71896484375), (0.345098039215686, 0.73252734375, \n 0.73252734375), (0.349019607843137, 0.74609375, 0.74609375), (\n 0.352941176470588, 0.7309140625, 0.7309140625), (0.356862745098039,\n 0.71573828125, 0.71573828125), (0.36078431372549, 0.70055859375, \n 0.70055859375), (0.364705882352941, 0.68537890625, 0.68537890625),\n (0.368627450980392, 0.67019921875, 0.67019921875), (\n 0.372549019607843, 0.6550234375, 0.6550234375), (0.376470588235294,\n 0.63984375, 0.63984375), (0.380392156862745, 0.6246640625, \n 0.6246640625), (0.384313725490196, 0.60948828125, 0.60948828125), (\n 0.388235294117647, 0.59430859375, 0.59430859375), (\n 0.392156862745098, 0.57912890625, 0.57912890625), (\n 0.396078431372549, 0.56394921875, 0.56394921875), (0.4, \n 0.5487734375, 0.5487734375), (0.403921568627451, 0.53359375, \n 0.53359375), (0.407843137254902, 0.5184140625, 0.5184140625), (\n 0.411764705882353, 0.50323828125, 0.50323828125), (\n 0.415686274509804, 0.48805859375, 0.48805859375), (\n 0.419607843137255, 0.47287890625, 0.47287890625), (\n 0.423529411764706, 0.45769921875, 0.45769921875), (\n 0.427450980392157, 0.4425234375, 0.4425234375), (0.431372549019608,\n 0.42734375, 0.42734375), (0.435294117647059, 0.4121640625, \n 0.4121640625), (0.43921568627451, 0.39698828125, 0.39698828125), (\n 0.443137254901961, 0.381808203125, 0.381808203125), (\n 0.447058823529412, 0.366629296875, 0.366629296875), (\n 0.450980392156863, 0.35145078125, 0.35145078125), (\n 0.454901960784314, 0.336272265625, 0.336272265625), (\n 0.458823529411765, 0.32109375, 0.32109375), (0.462745098039216, \n 0.305915234375, 0.305915234375), (0.466666666666667, 0.29073671875,\n 0.29073671875), (0.470588235294118, 0.2755578125, 0.2755578125), (\n 0.474509803921569, 0.260379296875, 0.260379296875), (\n 0.47843137254902, 0.24520078125, 0.24520078125), (0.482352941176471,\n 0.230022265625, 0.230022265625), (0.486274509803922, 0.21484375, \n 0.21484375), (0.490196078431373, 0.2265625, 0.2265625), (\n 0.494117647058824, 0.23828125, 0.23828125), (0.498039215686275, \n 0.25, 0.25), (0.501960784313725, 0.26171875, 0.26171875), (\n 0.505882352941176, 0.2734375, 0.2734375), (0.509803921568627, \n 0.28515625, 0.28515625), (0.513725490196078, 0.296875, 0.296875), (\n 0.517647058823529, 0.30859375, 0.30859375), (0.52156862745098, \n 0.3203125, 0.3203125), (0.525490196078431, 0.33203125, 0.33203125),\n (0.529411764705882, 0.34375, 0.34375), (0.533333333333333, \n 0.35546875, 0.35546875), (0.537254901960784, 0.3671875, 0.3671875),\n (0.541176470588235, 0.37890625, 0.37890625), (0.545098039215686, \n 0.390625, 0.390625), (0.549019607843137, 0.40234375, 0.40234375), (\n 0.552941176470588, 0.4140625, 0.4140625), (0.556862745098039, \n 0.42578125, 0.42578125), (0.56078431372549, 0.4375, 0.4375), (\n 0.564705882352941, 0.44921875, 0.44921875), (0.568627450980392, \n 0.4609375, 0.4609375), (0.572549019607843, 0.47265625, 0.47265625),\n (0.576470588235294, 0.484375, 0.484375), (0.580392156862745, \n 0.49609375, 0.49609375), (0.584313725490196, 0.5078125, 0.5078125),\n (0.588235294117647, 0.51953125, 0.51953125), (0.592156862745098, \n 0.53125, 0.53125), (0.596078431372549, 0.54296875, 0.54296875), (\n 0.6, 0.5546875, 0.5546875), (0.603921568627451, 0.56640625, \n 
0.56640625), (0.607843137254902, 0.578125, 0.578125), (\n 0.611764705882353, 0.58984375, 0.58984375), (0.615686274509804, \n 0.6015625, 0.6015625), (0.619607843137255, 0.61328125, 0.61328125),\n (0.623529411764706, 0.625, 0.625), (0.627450980392157, \n 0.61458203125, 0.61458203125), (0.631372549019608, 0.60416796875, \n 0.60416796875), (0.635294117647059, 0.59375, 0.59375), (\n 0.63921568627451, 0.58333203125, 0.58333203125), (0.643137254901961,\n 0.57291796875, 0.57291796875), (0.647058823529412, 0.5625, 0.5625),\n (0.650980392156863, 0.55208203125, 0.55208203125), (\n 0.654901960784314, 0.54166796875, 0.54166796875), (\n 0.658823529411765, 0.53125, 0.53125), (0.662745098039216, \n 0.52083203125, 0.52083203125), (0.666666666666667, 0.51041796875, \n 0.51041796875), (0.670588235294118, 0.5, 0.5), (0.674509803921569, \n 0.48958203125, 0.48958203125), (0.67843137254902, 0.47916796875, \n 0.47916796875), (0.682352941176471, 0.46875, 0.46875), (\n 0.686274509803922, 0.45833203125, 0.45833203125), (\n 0.690196078431373, 0.44791796875, 0.44791796875), (\n 0.694117647058824, 0.4375, 0.4375), (0.698039215686274, \n 0.42708203125, 0.42708203125), (0.701960784313725, 0.41666796875, \n 0.41666796875), (0.705882352941177, 0.40625, 0.40625), (\n 0.709803921568627, 0.39583203125, 0.39583203125), (\n 0.713725490196078, 0.385416796875, 0.385416796875), (\n 0.717647058823529, 0.375, 0.375), (0.72156862745098, 0.364583203125,\n 0.364583203125), (0.725490196078431, 0.354166796875, 0.354166796875\n ), (0.729411764705882, 0.34375, 0.34375), (0.733333333333333, \n 0.333333203125, 0.333333203125), (0.737254901960784, 0.322916796875,\n 0.322916796875), (0.741176470588235, 0.3125, 0.3125), (\n 0.745098039215686, 0.302083203125, 0.302083203125), (\n 0.749019607843137, 0.291666796875, 0.291666796875), (\n 0.752941176470588, 0.28125, 0.28125), (0.756862745098039, \n 0.270833203125, 0.270833203125), (0.76078431372549, 0.260416796875,\n 0.260416796875), (0.764705882352941, 0.25, 0.25), (\n 0.768627450980392, 0.239583203125, 0.239583203125), (\n 0.772549019607843, 0.229166796875, 0.229166796875), (\n 0.776470588235294, 0.21875, 0.21875), (0.780392156862745, \n 0.208333203125, 0.208333203125), (0.784313725490196, 0.197916796875,\n 0.197916796875), (0.788235294117647, 0.1875, 0.1875), (\n 0.792156862745098, 0.177083203125, 0.177083203125), (\n 0.796078431372549, 0.166666796875, 0.166666796875), (0.8, 0.15625, \n 0.15625), (0.803921568627451, 0.145833203125, 0.145833203125), (\n 0.807843137254902, 0.135416796875, 0.135416796875), (\n 0.811764705882353, 0.125, 0.125), (0.815686274509804, \n 0.114583203125, 0.114583203125), (0.819607843137255, 0.104166796875,\n 0.104166796875), (0.823529411764706, 0.09375, 0.09375), (\n 0.827450980392157, 0.083333203125, 0.083333203125), (\n 0.831372549019608, 0.072916796875, 0.072916796875), (\n 0.835294117647059, 0.0625, 0.0625), (0.83921568627451, \n 0.052083203125, 0.052083203125), (0.843137254901961, 0.041666796875,\n 0.041666796875), (0.847058823529412, 0.03125, 0.03125), (\n 0.850980392156863, 0.0208333203125, 0.0208333203125), (\n 0.854901960784314, 0.0104166796875, 0.0104166796875), (\n 0.858823529411765, 0, 0), (0.862745098039216, 0.0184151953125, \n 0.0184151953125), (0.866666666666667, 0.0368303515625, \n 0.0368303515625), (0.870588235294118, 0.055245703125, \n 0.055245703125), (0.874509803921569, 0.073660546875, 0.073660546875\n ), (0.87843137254902, 0.09207578125, 0.09207578125), (\n 0.882352941176471, 0.110491015625, 0.110491015625), (\n 0.886274509803922, 0.12890625, 
0.12890625), (0.890196078431373, \n 0.147321484375, 0.147321484375), (0.894117647058824, 0.16573671875,\n 0.16573671875), (0.898039215686275, 0.184151953125, 0.184151953125),\n (0.901960784313726, 0.202566796875, 0.202566796875), (\n 0.905882352941176, 0.22098203125, 0.22098203125), (\n 0.909803921568627, 0.239397265625, 0.239397265625), (\n 0.913725490196078, 0.2578125, 0.2578125), (0.917647058823529, \n 0.276227734375, 0.276227734375), (0.92156862745098, 0.29464296875, \n 0.29464296875), (0.925490196078431, 0.313058203125, 0.313058203125),\n (0.929411764705882, 0.331473046875, 0.331473046875), (\n 0.933333333333333, 0.34988828125, 0.34988828125), (\n 0.937254901960784, 0.368303515625, 0.368303515625), (\n 0.941176470588235, 0.38671875, 0.38671875), (0.945098039215686, \n 0.4051328125, 0.4051328125), (0.949019607843137, 0.42355078125, \n 0.42355078125), (0.952941176470588, 0.44196484375, 0.44196484375),\n (0.956862745098039, 0.46037890625, 0.46037890625), (\n 0.96078431372549, 0.47879296875, 0.47879296875), (0.964705882352941,\n 0.4972109375, 0.4972109375), (0.968627450980392, 0.515625, 0.515625\n ), (0.972549019607843, 0.5340390625, 0.5340390625), (\n 0.976470588235294, 0.55245703125, 0.55245703125), (\n 0.980392156862745, 0.57087109375, 0.57087109375), (\n 0.984313725490196, 0.58928515625, 0.58928515625), (\n 0.988235294117647, 0.60769921875, 0.60769921875), (\n 0.992156862745098, 0.6261171875, 0.6261171875), (0.996078431372549,\n 0.64453125, 0.64453125), (1, 0.64453125, 0.64453125)), 'blue': ((0,\n 1, 1), (0.00392156862745098, 0.80569140625, 0.80569140625), (\n 0.00784313725490196, 0.7964296875, 0.7964296875), (\n 0.0117647058823529, 0.7871640625, 0.7871640625), (\n 0.0156862745098039, 0.77790234375, 0.77790234375), (\n 0.0196078431372549, 0.76863671875, 0.76863671875), (\n 0.0235294117647059, 0.759375, 0.759375), (0.0274509803921569, \n 0.75011328125, 0.75011328125), (0.0313725490196078, 0.74084765625, \n 0.74084765625), (0.0352941176470588, 0.7315859375, 0.7315859375), (\n 0.0392156862745098, 0.7223203125, 0.7223203125), (\n 0.0431372549019608, 0.71305859375, 0.71305859375), (\n 0.0470588235294118, 0.70379296875, 0.70379296875), (\n 0.0509803921568627, 0.69453125, 0.69453125), (0.0549019607843137, \n 0.68526953125, 0.68526953125), (0.0588235294117647, 0.67600390625, \n 0.67600390625), (0.0627450980392157, 0.6667421875, 0.6667421875), (\n 0.0666666666666667, 0.6574765625, 0.6574765625), (\n 0.0705882352941176, 0.64821484375, 0.64821484375), (\n 0.0745098039215686, 0.63894921875, 0.63894921875), (\n 0.0784313725490196, 0.6296875, 0.6296875), (0.0823529411764706, \n 0.62042578125, 0.62042578125), (0.0862745098039216, 0.61116015625, \n 0.61116015625), (0.0901960784313725, 0.6018984375, 0.6018984375), (\n 0.0941176470588235, 0.5926328125, 0.5926328125), (\n 0.0980392156862745, 0.58337109375, 0.58337109375), (\n 0.101960784313725, 0.57410546875, 0.57410546875), (\n 0.105882352941176, 0.56484375, 0.56484375), (0.109803921568627, \n 0.55558203125, 0.55558203125), (0.113725490196078, 0.54631640625, \n 0.54631640625), (0.117647058823529, 0.5370546875, 0.5370546875), (\n 0.12156862745098, 0.5277890625, 0.5277890625), (0.125490196078431, \n 0.51852734375, 0.51852734375), (0.129411764705882, 0.50926171875, \n 0.50926171875), (0.133333333333333, 0.5, 0.5), (0.137254901960784, \n 0.50901953125, 0.50901953125), (0.141176470588235, 0.5180390625, \n 0.5180390625), (0.145098039215686, 0.52705859375, 0.52705859375), (\n 0.149019607843137, 0.536078125, 0.536078125), (0.152941176470588, \n 0.54509765625, 
0.54509765625), (0.156862745098039, 0.55412109375, \n 0.55412109375), (0.16078431372549, 0.563140625, 0.563140625), (\n 0.164705882352941, 0.57216015625, 0.57216015625), (\n 0.168627450980392, 0.5811796875, 0.5811796875), (0.172549019607843,\n 0.59019921875, 0.59019921875), (0.176470588235294, 0.59921875, \n 0.59921875), (0.180392156862745, 0.60823828125, 0.60823828125), (\n 0.184313725490196, 0.6172578125, 0.6172578125), (0.188235294117647,\n 0.62627734375, 0.62627734375), (0.192156862745098, 0.635296875, \n 0.635296875), (0.196078431372549, 0.64431640625, 0.64431640625), (\n 0.2, 0.65333984375, 0.65333984375), (0.203921568627451, 0.662359375,\n 0.662359375), (0.207843137254902, 0.67137890625, 0.67137890625), (\n 0.211764705882353, 0.6803984375, 0.6803984375), (0.215686274509804,\n 0.68941796875, 0.68941796875), (0.219607843137255, 0.6984375, \n 0.6984375), (0.223529411764706, 0.70745703125, 0.70745703125), (\n 0.227450980392157, 0.7164765625, 0.7164765625), (0.231372549019608,\n 0.72549609375, 0.72549609375), (0.235294117647059, 0.734515625, \n 0.734515625), (0.23921568627451, 0.74353515625, 0.74353515625), (\n 0.243137254901961, 0.75255859375, 0.75255859375), (\n 0.247058823529412, 0.761578125, 0.761578125), (0.250980392156863, \n 0.77059765625, 0.77059765625), (0.254901960784314, 0.7796171875, \n 0.7796171875), (0.258823529411765, 0.78863671875, 0.78863671875), (\n 0.262745098039216, 0.79765625, 0.79765625), (0.266666666666667, \n 0.80667578125, 0.80667578125), (0.270588235294118, 0.8156953125, \n 0.8156953125), (0.274509803921569, 0.82471484375, 0.82471484375), (\n 0.27843137254902, 0.833734375, 0.833734375), (0.282352941176471, \n 0.84275390625, 0.84275390625), (0.286274509803922, 0.85177734375, \n 0.85177734375), (0.290196078431373, 0.860796875, 0.860796875), (\n 0.294117647058824, 0.86981640625, 0.86981640625), (\n 0.298039215686275, 0.8788359375, 0.8788359375), (0.301960784313725,\n 0.88785546875, 0.88785546875), (0.305882352941176, 0.896875, \n 0.896875), (0.309803921568627, 0.90589453125, 0.90589453125), (\n 0.313725490196078, 0.9149140625, 0.9149140625), (0.317647058823529,\n 0.92393359375, 0.92393359375), (0.32156862745098, 0.932953125, \n 0.932953125), (0.325490196078431, 0.94197265625, 0.94197265625), (\n 0.329411764705882, 0.95099609375, 0.95099609375), (\n 0.333333333333333, 0.960015625, 0.960015625), (0.337254901960784, \n 0.96903515625, 0.96903515625), (0.341176470588235, 0.9780546875, \n 0.9780546875), (0.345098039215686, 0.98707421875, 0.98707421875), (\n 0.349019607843137, 0.99609375, 0.99609375), (0.352941176470588, \n 0.9737734375, 0.9737734375), (0.356862745098039, 0.95144921875, \n 0.95144921875), (0.36078431372549, 0.92912890625, 0.92912890625), (\n 0.364705882352941, 0.90680859375, 0.90680859375), (\n 0.368627450980392, 0.88448828125, 0.88448828125), (\n 0.372549019607843, 0.8621640625, 0.8621640625), (0.376470588235294,\n 0.83984375, 0.83984375), (0.380392156862745, 0.8175234375, \n 0.8175234375), (0.384313725490196, 0.79519921875, 0.79519921875), (\n 0.388235294117647, 0.77287890625, 0.77287890625), (\n 0.392156862745098, 0.75055859375, 0.75055859375), (\n 0.396078431372549, 0.72823828125, 0.72823828125), (0.4, \n 0.7059140625, 0.7059140625), (0.403921568627451, 0.68359375, \n 0.68359375), (0.407843137254902, 0.6612734375, 0.6612734375), (\n 0.411764705882353, 0.63894921875, 0.63894921875), (\n 0.415686274509804, 0.61662890625, 0.61662890625), (\n 0.419607843137255, 0.59430859375, 0.59430859375), (\n 0.423529411764706, 0.57198828125, 0.57198828125), (\n 
0.427450980392157, 0.5496640625, 0.5496640625), (0.431372549019608,\n 0.52734375, 0.52734375), (0.435294117647059, 0.5050234375, \n 0.5050234375), (0.43921568627451, 0.48269921875, 0.48269921875), (\n 0.443137254901961, 0.46037890625, 0.46037890625), (\n 0.447058823529412, 0.43805859375, 0.43805859375), (\n 0.450980392156863, 0.41573828125, 0.41573828125), (\n 0.454901960784314, 0.3934140625, 0.3934140625), (0.458823529411765,\n 0.37109375, 0.37109375), (0.462745098039216, 0.348772265625, \n 0.348772265625), (0.466666666666667, 0.32645078125, 0.32645078125),\n (0.470588235294118, 0.304129296875, 0.304129296875), (\n 0.474509803921569, 0.281808203125, 0.281808203125), (\n 0.47843137254902, 0.25948671875, 0.25948671875), (0.482352941176471,\n 0.237165234375, 0.237165234375), (0.486274509803922, 0.21484375, \n 0.21484375), (0.490196078431373, 0.233370703125, 0.233370703125), (\n 0.494117647058824, 0.251897265625, 0.251897265625), (\n 0.498039215686275, 0.27042421875, 0.27042421875), (\n 0.501960784313725, 0.28895078125, 0.28895078125), (\n 0.505882352941176, 0.307477734375, 0.307477734375), (\n 0.509803921568627, 0.326004296875, 0.326004296875), (\n 0.513725490196078, 0.34453125, 0.34453125), (0.517647058823529, \n 0.363058203125, 0.363058203125), (0.52156862745098, 0.381584765625,\n 0.381584765625), (0.525490196078431, 0.40011328125, 0.40011328125),\n (0.529411764705882, 0.41863671875, 0.41863671875), (\n 0.533333333333333, 0.4371640625, 0.4371640625), (0.537254901960784,\n 0.45569140625, 0.45569140625), (0.541176470588235, 0.47421875, \n 0.47421875), (0.545098039215686, 0.49274609375, 0.49274609375), (\n 0.549019607843137, 0.5112734375, 0.5112734375), (0.552941176470588,\n 0.52980078125, 0.52980078125), (0.556862745098039, 0.54832421875, \n 0.54832421875), (0.56078431372549, 0.5668515625, 0.5668515625), (\n 0.564705882352941, 0.58537890625, 0.58537890625), (\n 0.568627450980392, 0.60390625, 0.60390625), (0.572549019607843, \n 0.62243359375, 0.62243359375), (0.576470588235294, 0.6409609375, \n 0.6409609375), (0.580392156862745, 0.65948828125, 0.65948828125), (\n 0.584313725490196, 0.67801171875, 0.67801171875), (\n 0.588235294117647, 0.6965390625, 0.6965390625), (0.592156862745098,\n 0.71506640625, 0.71506640625), (0.596078431372549, 0.73359375, \n 0.73359375), (0.6, 0.75212109375, 0.75212109375), (\n 0.603921568627451, 0.7706484375, 0.7706484375), (0.607843137254902,\n 0.78917578125, 0.78917578125), (0.611764705882353, 0.80769921875, \n 0.80769921875), (0.615686274509804, 0.8262265625, 0.8262265625), (\n 0.619607843137255, 0.84475390625, 0.84475390625), (\n 0.623529411764706, 0.86328125, 0.86328125), (0.627450980392157, \n 0.84889453125, 0.84889453125), (0.631372549019608, 0.83450390625, \n 0.83450390625), (0.635294117647059, 0.8201171875, 0.8201171875), (\n 0.63921568627451, 0.80573046875, 0.80573046875), (0.643137254901961,\n 0.79133984375, 0.79133984375), (0.647058823529412, 0.776953125, \n 0.776953125), (0.650980392156863, 0.76256640625, 0.76256640625), (\n 0.654901960784314, 0.74817578125, 0.74817578125), (\n 0.658823529411765, 0.7337890625, 0.7337890625), (0.662745098039216,\n 0.71940234375, 0.71940234375), (0.666666666666667, 0.70501171875, \n 0.70501171875), (0.670588235294118, 0.690625, 0.690625), (\n 0.674509803921569, 0.67623828125, 0.67623828125), (0.67843137254902,\n 0.66184765625, 0.66184765625), (0.682352941176471, 0.6474609375, \n 0.6474609375), (0.686274509803922, 0.63307421875, 0.63307421875), (\n 0.690196078431373, 0.61868359375, 0.61868359375), (\n 0.694117647058824, 
0.604296875, 0.604296875), (0.698039215686274, \n 0.58991015625, 0.58991015625), (0.701960784313725, 0.57551953125, \n 0.57551953125), (0.705882352941177, 0.5611328125, 0.5611328125), (\n 0.709803921568627, 0.54674609375, 0.54674609375), (\n 0.713725490196078, 0.53235546875, 0.53235546875), (\n 0.717647058823529, 0.51796875, 0.51796875), (0.72156862745098, \n 0.50358203125, 0.50358203125), (0.725490196078431, 0.48919140625, \n 0.48919140625), (0.729411764705882, 0.4748046875, 0.4748046875), (\n 0.733333333333333, 0.46041796875, 0.46041796875), (\n 0.737254901960784, 0.44602734375, 0.44602734375), (\n 0.741176470588235, 0.431640625, 0.431640625), (0.745098039215686, \n 0.41725390625, 0.41725390625), (0.749019607843137, 0.40286328125, \n 0.40286328125), (0.752941176470588, 0.3884765625, 0.3884765625), (\n 0.756862745098039, 0.374088671875, 0.374088671875), (\n 0.76078431372549, 0.359700390625, 0.359700390625), (\n 0.764705882352941, 0.3453125, 0.3453125), (0.768627450980392, \n 0.330924609375, 0.330924609375), (0.772549019607843, 0.316536328125,\n 0.316536328125), (0.776470588235294, 0.3021484375, 0.3021484375), (\n 0.780392156862745, 0.287760546875, 0.287760546875), (\n 0.784313725490196, 0.273372265625, 0.273372265625), (\n 0.788235294117647, 0.258984375, 0.258984375), (0.792156862745098, \n 0.244596484375, 0.244596484375), (0.796078431372549, 0.230208203125,\n 0.230208203125), (0.8, 0.2158203125, 0.2158203125), (\n 0.803921568627451, 0.201432421875, 0.201432421875), (\n 0.807843137254902, 0.187044140625, 0.187044140625), (\n 0.811764705882353, 0.17265625, 0.17265625), (0.815686274509804, \n 0.158268359375, 0.158268359375), (0.819607843137255, 0.143880078125,\n 0.143880078125), (0.823529411764706, 0.1294921875, 0.1294921875), (\n 0.827450980392157, 0.115104296875, 0.115104296875), (\n 0.831372549019608, 0.100716015625, 0.100716015625), (\n 0.835294117647059, 0.086328125, 0.086328125), (0.83921568627451, \n 0.071940234375, 0.071940234375), (0.843137254901961, 0.057551953125,\n 0.057551953125), (0.847058823529412, 0.0431640625, 0.0431640625), (\n 0.850980392156863, 0.028776015625, 0.028776015625), (\n 0.854901960784314, 0.01438796875, 0.01438796875), (\n 0.858823529411765, 0, 0), (0.862745098039216, 0, 0), (\n 0.866666666666667, 0, 0), (0.870588235294118, 0, 0), (\n 0.874509803921569, 0, 0), (0.87843137254902, 0, 0), (\n 0.882352941176471, 0, 0), (0.886274509803922, 0, 0), (\n 0.890196078431373, 0, 0), (0.894117647058824, 0, 0), (\n 0.898039215686275, 0, 0), (0.901960784313726, 0, 0), (\n 0.905882352941176, 0, 0), (0.909803921568627, 0, 0), (\n 0.913725490196078, 0, 0), (0.917647058823529, 0, 0), (\n 0.92156862745098, 0, 0), (0.925490196078431, 0, 0), (\n 0.929411764705882, 0, 0), (0.933333333333333, 0, 0), (\n 0.937254901960784, 0, 0), (0.941176470588235, 0, 0), (\n 0.945098039215686, 0, 0), (0.949019607843137, 0, 0), (\n 0.952941176470588, 0, 0), (0.956862745098039, 0, 0), (\n 0.96078431372549, 0, 0), (0.964705882352941, 0, 0), (\n 0.968627450980392, 0, 0), (0.972549019607843, 0, 0), (\n 0.976470588235294, 0, 0), (0.980392156862745, 0, 0), (\n 0.984313725490196, 0, 0), (0.988235294117647, 0, 0), (\n 0.992156862745098, 0, 0), (0.996078431372549, 0, 0), (1, 0, 0))}\n califa = mcol.LinearSegmentedColormap('califa', cdict)\n vcalifa = mcol.LinearSegmentedColormap('vcalifa', vcdict)\n if option == 'v':\n return vcalifa\n else:\n return califa\n\n\ndef A_l(R_v, lw):\n lw = lw / 10000\n x = 1 / lw\n if x > 1.1:\n y = x - 1.82\n a_x = (1.0 + 0.17699 * y - 0.50447 * y ** 2 - 0.02427 * y ** 3 + \n 
0.72085 * y ** 4 + 0.01979 * y ** 5 - 0.7753 * y ** 6 + 0.32999 *\n y ** 7)\n b_x = (1.41338 * y + 2.28305 * y ** 2 + 1.07233 * y ** 3 - 5.38434 *\n y ** 4 - 0.62251 * y ** 5 + 5.3026 * y ** 6 - 2.09002 * y ** 7)\n else:\n a_x = 0.574 * x ** 1.61\n b_x = -0.527 * x ** 1.61\n A_l_ = a_x + b_x / R_v\n return A_l_\n",
"<import token>\n\n\ndef color_map_califa_old(option='json'):\n if option == 'json':\n cmap_cal_dic = json.load(open('code/cmap_cal_json.txt'))\n elif option == 'pickle':\n with open('cmap_cal_pickle.txt', 'rb') as handle:\n cmap_cal_dic = pickle.loads(handle.read())\n cmap_cal = mcol.LinearSegmentedColormap('cmap_CALIFA', cmap_cal_dic)\n return cmap_cal\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\ndef Gr_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_grazy = np.linspace(x_min, -0.2, 100)\n ax.plot(x_set_grazy, grazy(x_set_grazy), label='Stasinska+03', **kwargs)\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\ndef SII_LINERS_curve_plot(ax=None, x_min=-0.3, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.01, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_OI_curve_plot(ax=None, x_min=-3.5, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.35)\n ax.plot(x_set_line, O3O1_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_curve_plot(ax=None, x_min=-2.0, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.08, 100)\n ax.plot(x_set_line, espinosa(x_set_line), label=label, **kwargs)\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\ndef kewley(logNIIHa):\n val = 0.61 / (logNIIHa - 0.47) + 1.19\n return val\n\n\ndef grazy(logNIIHa):\n x = logNIIHa\n val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409 * x\n ) - 31.093\n return val\n\n\ndef AGNline(logSIIHa):\n val = 0.72 / (logSIIHa - 0.32) + 1.3\n return val\n\n\ndef LINSy2line(logSIIHa):\n val = 1.89 * logSIIHa + 0.76\n return val\n\n\ndef AGNline2(logOIHa):\n val = 
0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\ndef LINSy2line2(logOIHa):\n val = 1.18 * logOIHa + 1.3\n return val\n\n\ndef espinosa(logNIIHa):\n val = 0.12579066 / (logNIIHa - 0.00302777) + 0.56846872\n return val\n\n\ndef O3S2_line_c(x):\n val = 0.04074804 / (x + 0.01253238) + 0.58154113\n return val\n\n\ndef O3O1_line_c(x):\n val = 0.05612915 / (x + 0.39641533) + 0.60969495\n return val\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.02, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\ndef kewley(logNIIHa):\n val = 0.61 / (logNIIHa - 0.47) + 1.19\n return val\n\n\ndef grazy(logNIIHa):\n x = logNIIHa\n val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409 * x\n ) - 31.093\n return val\n\n\ndef A_l(R_v, lw):\n lw = lw / 10000\n x = 1 / lw\n if x > 1.1:\n y = x - 1.82\n a_x = (1.0 + 0.17699 * y - 0.50447 * y ** 2 - 0.02427 * y ** 3 + \n 0.72085 * y ** 4 + 0.01979 * y ** 5 - 0.7753 * y ** 6 + 0.32999 *\n y ** 7)\n b_x = (1.41338 * y + 2.28305 * y ** 2 + 1.07233 * y ** 3 - 5.38434 *\n y ** 4 - 0.62251 * y ** 5 + 5.3026 * y ** 6 - 2.09002 * y ** 7)\n else:\n a_x = 0.574 * x ** 1.61\n b_x = -0.527 * x ** 1.61\n A_l_ = a_x + b_x / R_v\n return A_l_\n\n\ndef color_map_califa(option='v'):\n cdict = {'red': ((0.0, 0, 0), (0.00392156862745098, 0, 0), (\n 0.00784313725490196, 0, 0), (0.0117647058823529, 0, 0), (\n 0.0156862745098039, 0, 0), (0.0196078431372549, 0, 0), (\n 0.0235294117647059, 0, 0), (0.0274509803921569, 0, 0), (\n 0.0313725490196078, 0, 0), (0.0352941176470588, 0, 0), (\n 0.0392156862745098, 0, 0), (0.0431372549019608, 0, 0), (\n 0.0470588235294118, 0, 0), (0.0509803921568627, 0, 0), (\n 0.0549019607843137, 0, 0), (0.0588235294117647, 0, 0), (\n 0.0627450980392157, 0, 0), (0.0666666666666667, 0, 0), (\n 0.0705882352941176, 0, 0), (0.0745098039215686, 0, 0), (\n 0.0784313725490196, 0, 0), (0.0823529411764706, 0, 0), (\n 
0.0862745098039216, 0, 0), (0.0901960784313725, 0, 0), (\n 0.0941176470588235, 0, 0), (0.0980392156862745, 0, 0), (\n 0.101960784313725, 0, 0), (0.105882352941176, 0, 0), (\n 0.109803921568627, 0, 0), (0.113725490196078, 0, 0), (\n 0.117647058823529, 0, 0), (0.12156862745098, 0, 0), (\n 0.125490196078431, 0, 0), (0.129411764705882, 0, 0), (\n 0.133333333333333, 0, 0), (0.137254901960784, 0, 0), (\n 0.141176470588235, 0, 0), (0.145098039215686, 0, 0), (\n 0.149019607843137, 0, 0), (0.152941176470588, 0, 0), (\n 0.156862745098039, 0, 0), (0.16078431372549, 0, 0), (\n 0.164705882352941, 0, 0), (0.168627450980392, 0, 0), (\n 0.172549019607843, 0, 0), (0.176470588235294, 0, 0), (\n 0.180392156862745, 0, 0), (0.184313725490196, 0, 0), (\n 0.188235294117647, 0, 0), (0.192156862745098, 0, 0), (\n 0.196078431372549, 0.019921875, 0.019921875), (0.2, 0.03984375, \n 0.03984375), (0.203921568627451, 0.059765625, 0.059765625), (\n 0.207843137254902, 0.0796875, 0.0796875), (0.211764705882353, \n 0.099609375, 0.099609375), (0.215686274509804, 0.11953125, \n 0.11953125), (0.219607843137255, 0.139453125, 0.139453125), (\n 0.223529411764706, 0.159375, 0.159375), (0.227450980392157, \n 0.179296875, 0.179296875), (0.231372549019608, 0.19921875, \n 0.19921875), (0.235294117647059, 0.219140625, 0.219140625), (\n 0.23921568627451, 0.2390625, 0.2390625), (0.243137254901961, \n 0.258984375, 0.258984375), (0.247058823529412, 0.27890625, \n 0.27890625), (0.250980392156863, 0.298828125, 0.298828125), (\n 0.254901960784314, 0.31875, 0.31875), (0.258823529411765, \n 0.338671875, 0.338671875), (0.262745098039216, 0.35859375, \n 0.35859375), (0.266666666666667, 0.378515625, 0.378515625), (\n 0.270588235294118, 0.3984375, 0.3984375), (0.274509803921569, \n 0.418359375, 0.418359375), (0.27843137254902, 0.43828125, \n 0.43828125), (0.282352941176471, 0.458203125, 0.458203125), (\n 0.286274509803922, 0.478125, 0.478125), (0.290196078431373, \n 0.498046875, 0.498046875), (0.294117647058824, 0.51796875, \n 0.51796875), (0.298039215686275, 0.537890625, 0.537890625), (\n 0.301960784313725, 0.5578125, 0.5578125), (0.305882352941176, \n 0.577734375, 0.577734375), (0.309803921568627, 0.59765625, \n 0.59765625), (0.313725490196078, 0.617578125, 0.617578125), (\n 0.317647058823529, 0.6375, 0.6375), (0.32156862745098, 0.657421875,\n 0.657421875), (0.325490196078431, 0.67734375, 0.67734375), (\n 0.329411764705882, 0.697265625, 0.697265625), (0.333333333333333, \n 0.7171875, 0.7171875), (0.337254901960784, 0.737109375, 0.737109375\n ), (0.341176470588235, 0.75703125, 0.75703125), (0.345098039215686,\n 0.776953125, 0.776953125), (0.349019607843137, 0.796875, 0.796875),\n (0.352941176470588, 0.816796875, 0.816796875), (0.356862745098039, \n 0.83671875, 0.83671875), (0.36078431372549, 0.856640625, \n 0.856640625), (0.364705882352941, 0.8765625, 0.8765625), (\n 0.368627450980392, 0.896484375, 0.896484375), (0.372549019607843, \n 0.91640625, 0.91640625), (0.376470588235294, 0.936328125, \n 0.936328125), (0.380392156862745, 0.95625, 0.95625), (\n 0.384313725490196, 0.976171875, 0.976171875), (0.388235294117647, \n 0.99609375, 0.99609375), (0.392156862745098, 0.99609375, 0.99609375\n ), (0.396078431372549, 0.99609375, 0.99609375), (0.4, 0.99609375, \n 0.99609375), (0.403921568627451, 0.99609375, 0.99609375), (\n 0.407843137254902, 0.99609375, 0.99609375), (0.411764705882353, \n 0.99609375, 0.99609375), (0.415686274509804, 0.99609375, 0.99609375\n ), (0.419607843137255, 0.99609375, 0.99609375), (0.423529411764706,\n 0.99609375, 0.99609375), 
(0.427450980392157, 0.99609375, 0.99609375\n ), (0.431372549019608, 0.99609375, 0.99609375), (0.435294117647059,\n 0.99609375, 0.99609375), (0.43921568627451, 0.99609375, 0.99609375),\n (0.443137254901961, 0.99609375, 0.99609375), (0.447058823529412, \n 0.99609375, 0.99609375), (0.450980392156863, 0.99609375, 0.99609375\n ), (0.454901960784314, 0.99609375, 0.99609375), (0.458823529411765,\n 0.99609375, 0.99609375), (0.462745098039216, 0.99609375, 0.99609375\n ), (0.466666666666667, 0.99609375, 0.99609375), (0.470588235294118,\n 0.99609375, 0.99609375), (0.474509803921569, 0.99609375, 0.99609375\n ), (0.47843137254902, 0.99609375, 0.99609375), (0.482352941176471, \n 0.99609375, 0.99609375), (0.486274509803922, 0.99609375, 0.99609375\n ), (0.490196078431373, 0.99609375, 0.99609375), (0.494117647058824,\n 0.99609375, 0.99609375), (0.498039215686275, 0.99609375, 0.99609375\n ), (0.501960784313725, 0.99609375, 0.99609375), (0.505882352941176,\n 0.99609375, 0.99609375), (0.509803921568627, 0.99609375, 0.99609375\n ), (0.513725490196078, 0.99609375, 0.99609375), (0.517647058823529,\n 0.99609375, 0.99609375), (0.52156862745098, 0.99609375, 0.99609375),\n (0.525490196078431, 0.99609375, 0.99609375), (0.529411764705882, \n 0.99609375, 0.99609375), (0.533333333333333, 0.99609375, 0.99609375\n ), (0.537254901960784, 0.99609375, 0.99609375), (0.541176470588235,\n 0.99609375, 0.99609375), (0.545098039215686, 0.99609375, 0.99609375\n ), (0.549019607843137, 0.99609375, 0.99609375), (0.552941176470588,\n 0.99609375, 0.99609375), (0.556862745098039, 0.99609375, 0.99609375\n ), (0.56078431372549, 0.99609375, 0.99609375), (0.564705882352941, \n 0.99609375, 0.99609375), (0.568627450980392, 0.99609375, 0.99609375\n ), (0.572549019607843, 0.99609375, 0.99609375), (0.576470588235294,\n 0.99609375, 0.99609375), (0.580392156862745, 0.99609375, 0.99609375\n ), (0.584313725490196, 0.99609375, 0.99609375), (0.588235294117647,\n 0.98046875, 0.98046875), (0.592156862745098, 0.96484375, 0.96484375\n ), (0.596078431372549, 0.94921875, 0.94921875), (0.6, 0.93359375, \n 0.93359375), (0.603921568627451, 0.91796875, 0.91796875), (\n 0.607843137254902, 0.90234375, 0.90234375), (0.611764705882353, \n 0.88671875, 0.88671875), (0.615686274509804, 0.87109375, 0.87109375\n ), (0.619607843137255, 0.85546875, 0.85546875), (0.623529411764706,\n 0.83984375, 0.83984375), (0.627450980392157, 0.82421875, 0.82421875\n ), (0.631372549019608, 0.80859375, 0.80859375), (0.635294117647059,\n 0.79296875, 0.79296875), (0.63921568627451, 0.77734375, 0.77734375),\n (0.643137254901961, 0.76171875, 0.76171875), (0.647058823529412, \n 0.74609375, 0.74609375), (0.650980392156863, 0.73046875, 0.73046875\n ), (0.654901960784314, 0.71484375, 0.71484375), (0.658823529411765,\n 0.69921875, 0.69921875), (0.662745098039216, 0.68359375, 0.68359375\n ), (0.666666666666667, 0.66796875, 0.66796875), (0.670588235294118,\n 0.65234375, 0.65234375), (0.674509803921569, 0.63671875, 0.63671875\n ), (0.67843137254902, 0.62109375, 0.62109375), (0.682352941176471, \n 0.60546875, 0.60546875), (0.686274509803922, 0.58984375, 0.58984375\n ), (0.690196078431373, 0.57421875, 0.57421875), (0.694117647058824,\n 0.55859375, 0.55859375), (0.698039215686274, 0.54296875, 0.54296875\n ), (0.701960784313725, 0.52734375, 0.52734375), (0.705882352941177,\n 0.51171875, 0.51171875), (0.709803921568627, 0.49609375, 0.49609375\n ), (0.713725490196078, 0.48046875, 0.48046875), (0.717647058823529,\n 0.46484375, 0.46484375), (0.72156862745098, 0.44921875, 0.44921875),\n (0.725490196078431, 
0.43359375, 0.43359375), (0.729411764705882, \n 0.41796875, 0.41796875), (0.733333333333333, 0.40234375, 0.40234375\n ), (0.737254901960784, 0.38671875, 0.38671875), (0.741176470588235,\n 0.37109375, 0.37109375), (0.745098039215686, 0.35546875, 0.35546875\n ), (0.749019607843137, 0.33984375, 0.33984375), (0.752941176470588,\n 0.32421875, 0.32421875), (0.756862745098039, 0.30859375, 0.30859375\n ), (0.76078431372549, 0.29296875, 0.29296875), (0.764705882352941, \n 0.27734375, 0.27734375), (0.768627450980392, 0.26171875, 0.26171875\n ), (0.772549019607843, 0.24609375, 0.24609375), (0.776470588235294,\n 0.23046875, 0.23046875), (0.780392156862745, 0.21484375, 0.21484375\n ), (0.784313725490196, 0.22663359375, 0.22663359375), (\n 0.788235294117647, 0.2384234375, 0.2384234375), (0.792156862745098,\n 0.250212890625, 0.250212890625), (0.796078431372549, 0.262002734375,\n 0.262002734375), (0.8, 0.273792578125, 0.273792578125), (\n 0.803921568627451, 0.285582421875, 0.285582421875), (\n 0.807843137254902, 0.297372265625, 0.297372265625), (\n 0.811764705882353, 0.309162109375, 0.309162109375), (\n 0.815686274509804, 0.3209515625, 0.3209515625), (0.819607843137255,\n 0.33274140625, 0.33274140625), (0.823529411764706, 0.34453125, \n 0.34453125), (0.827450980392157, 0.35632109375, 0.35632109375), (\n 0.831372549019608, 0.3681109375, 0.3681109375), (0.835294117647059,\n 0.379900390625, 0.379900390625), (0.83921568627451, 0.39169140625, \n 0.39169140625), (0.843137254901961, 0.40348046875, 0.40348046875),\n (0.847058823529412, 0.41526953125, 0.41526953125), (\n 0.850980392156863, 0.42705859375, 0.42705859375), (\n 0.854901960784314, 0.43884765625, 0.43884765625), (\n 0.858823529411765, 0.450640625, 0.450640625), (0.862745098039216, \n 0.4624296875, 0.4624296875), (0.866666666666667, 0.47421875, \n 0.47421875), (0.870588235294118, 0.4860078125, 0.4860078125), (\n 0.874509803921569, 0.497796875, 0.497796875), (0.87843137254902, \n 0.50958984375, 0.50958984375), (0.882352941176471, 0.52137890625, \n 0.52137890625), (0.886274509803922, 0.53316796875, 0.53316796875),\n (0.890196078431373, 0.54495703125, 0.54495703125), (\n 0.894117647058824, 0.55674609375, 0.55674609375), (\n 0.898039215686275, 0.56853515625, 0.56853515625), (\n 0.901960784313726, 0.580328125, 0.580328125), (0.905882352941176, \n 0.5921171875, 0.5921171875), (0.909803921568627, 0.60390625, \n 0.60390625), (0.913725490196078, 0.6156953125, 0.6156953125), (\n 0.917647058823529, 0.627484375, 0.627484375), (0.92156862745098, \n 0.63927734375, 0.63927734375), (0.925490196078431, 0.65106640625, \n 0.65106640625), (0.929411764705882, 0.66285546875, 0.66285546875),\n (0.933333333333333, 0.67464453125, 0.67464453125), (\n 0.937254901960784, 0.68643359375, 0.68643359375), (\n 0.941176470588235, 0.69822265625, 0.69822265625), (\n 0.945098039215686, 0.710015625, 0.710015625), (0.949019607843137, \n 0.7218046875, 0.7218046875), (0.952941176470588, 0.73359375, \n 0.73359375), (0.956862745098039, 0.7453828125, 0.7453828125), (\n 0.96078431372549, 0.757171875, 0.757171875), (0.964705882352941, \n 0.76896484375, 0.76896484375), (0.968627450980392, 0.78075390625, \n 0.78075390625), (0.972549019607843, 0.79254296875, 0.79254296875),\n (0.976470588235294, 0.80433203125, 0.80433203125), (\n 0.980392156862745, 0.81612109375, 0.81612109375), (\n 0.984313725490196, 0.82791015625, 0.82791015625), (\n 0.988235294117647, 0.839703125, 0.839703125), (0.992156862745098, \n 0.8514921875, 0.8514921875), (0.996078431372549, 0.86328125, \n 0.86328125), (1.0, 0.86328125, 
0.86328125)), 'green': ((0.0, \n 0.02984375, 0.02984375), (0.00392156862745098, 0.02984375, \n 0.02984375), (0.00784313725490196, 0.044765625, 0.044765625), (\n 0.0117647058823529, 0.0596875, 0.0596875), (0.0156862745098039, \n 0.074609375, 0.074609375), (0.0196078431372549, 0.08953125, \n 0.08953125), (0.0235294117647059, 0.104453125, 0.104453125), (\n 0.0274509803921569, 0.119375, 0.119375), (0.0313725490196078, \n 0.134296875, 0.134296875), (0.0352941176470588, 0.14921875, \n 0.14921875), (0.0392156862745098, 0.164140625, 0.164140625), (\n 0.0431372549019608, 0.1790625, 0.1790625), (0.0470588235294118, \n 0.193984375, 0.193984375), (0.0509803921568627, 0.20890625, \n 0.20890625), (0.0549019607843137, 0.223828125, 0.223828125), (\n 0.0588235294117647, 0.23875, 0.23875), (0.0627450980392157, \n 0.253671875, 0.253671875), (0.0666666666666667, 0.26859375, \n 0.26859375), (0.0705882352941176, 0.283515625, 0.283515625), (\n 0.0745098039215686, 0.2984375, 0.2984375), (0.0784313725490196, \n 0.313359375, 0.313359375), (0.0823529411764706, 0.32828125, \n 0.32828125), (0.0862745098039216, 0.343203125, 0.343203125), (\n 0.0901960784313725, 0.358125, 0.358125), (0.0941176470588235, \n 0.373046875, 0.373046875), (0.0980392156862745, 0.38796875, \n 0.38796875), (0.101960784313725, 0.402890625, 0.402890625), (\n 0.105882352941176, 0.4178125, 0.4178125), (0.109803921568627, \n 0.432734375, 0.432734375), (0.113725490196078, 0.44765625, \n 0.44765625), (0.117647058823529, 0.462578125, 0.462578125), (\n 0.12156862745098, 0.4775, 0.4775), (0.125490196078431, 0.492421875,\n 0.492421875), (0.129411764705882, 0.50734375, 0.50734375), (\n 0.133333333333333, 0.522265625, 0.522265625), (0.137254901960784, \n 0.5371875, 0.5371875), (0.141176470588235, 0.552109375, 0.552109375\n ), (0.145098039215686, 0.56703125, 0.56703125), (0.149019607843137,\n 0.581953125, 0.581953125), (0.152941176470588, 0.596875, 0.596875),\n (0.156862745098039, 0.611796875, 0.611796875), (0.16078431372549, \n 0.62671875, 0.62671875), (0.164705882352941, 0.641640625, \n 0.641640625), (0.168627450980392, 0.6565625, 0.6565625), (\n 0.172549019607843, 0.671484375, 0.671484375), (0.176470588235294, \n 0.68640625, 0.68640625), (0.180392156862745, 0.701328125, \n 0.701328125), (0.184313725490196, 0.71625, 0.71625), (\n 0.188235294117647, 0.731171875, 0.731171875), (0.192156862745098, \n 0.74609375, 0.74609375), (0.196078431372549, 0.731171875, \n 0.731171875), (0.2, 0.71625, 0.71625), (0.203921568627451, \n 0.701328125, 0.701328125), (0.207843137254902, 0.68640625, \n 0.68640625), (0.211764705882353, 0.671484375, 0.671484375), (\n 0.215686274509804, 0.6565625, 0.6565625), (0.219607843137255, \n 0.641640625, 0.641640625), (0.223529411764706, 0.62671875, \n 0.62671875), (0.227450980392157, 0.611796875, 0.611796875), (\n 0.231372549019608, 0.596875, 0.596875), (0.235294117647059, \n 0.581953125, 0.581953125), (0.23921568627451, 0.56703125, \n 0.56703125), (0.243137254901961, 0.552109375, 0.552109375), (\n 0.247058823529412, 0.5371875, 0.5371875), (0.250980392156863, \n 0.522265625, 0.522265625), (0.254901960784314, 0.50734375, \n 0.50734375), (0.258823529411765, 0.492421875, 0.492421875), (\n 0.262745098039216, 0.4775, 0.4775), (0.266666666666667, 0.462578125,\n 0.462578125), (0.270588235294118, 0.44765625, 0.44765625), (\n 0.274509803921569, 0.432734375, 0.432734375), (0.27843137254902, \n 0.4178125, 0.4178125), (0.282352941176471, 0.402890625, 0.402890625\n ), (0.286274509803922, 0.38796875, 0.38796875), (0.290196078431373,\n 0.373046875, 
0.373046875), (0.294117647058824, 0.358125, 0.358125),\n (0.298039215686275, 0.343203125, 0.343203125), (0.301960784313725, \n 0.32828125, 0.32828125), (0.305882352941176, 0.313359375, \n 0.313359375), (0.309803921568627, 0.2984375, 0.2984375), (\n 0.313725490196078, 0.283515625, 0.283515625), (0.317647058823529, \n 0.26859375, 0.26859375), (0.32156862745098, 0.253671875, \n 0.253671875), (0.325490196078431, 0.23875, 0.23875), (\n 0.329411764705882, 0.223828125, 0.223828125), (0.333333333333333, \n 0.20890625, 0.20890625), (0.337254901960784, 0.193984375, \n 0.193984375), (0.341176470588235, 0.1790625, 0.1790625), (\n 0.345098039215686, 0.164140625, 0.164140625), (0.349019607843137, \n 0.14921875, 0.14921875), (0.352941176470588, 0.134296875, \n 0.134296875), (0.356862745098039, 0.119375, 0.119375), (\n 0.36078431372549, 0.104453125, 0.104453125), (0.364705882352941, \n 0.08953125, 0.08953125), (0.368627450980392, 0.074609375, \n 0.074609375), (0.372549019607843, 0.0596875, 0.0596875), (\n 0.376470588235294, 0.044765625, 0.044765625), (0.380392156862745, \n 0.0298437890625, 0.0298437890625), (0.384313725490196, 0.014921875,\n 0.014921875), (0.388235294117647, 0, 0), (0.392156862745098, \n 0.012890625, 0.012890625), (0.396078431372549, 0.02578125, \n 0.02578125), (0.4, 0.038671875, 0.038671875), (0.403921568627451, \n 0.0515625, 0.0515625), (0.407843137254902, 0.064453125, 0.064453125\n ), (0.411764705882353, 0.07734375, 0.07734375), (0.415686274509804,\n 0.090234375, 0.090234375), (0.419607843137255, 0.103125, 0.103125),\n (0.423529411764706, 0.116015625, 0.116015625), (0.427450980392157, \n 0.12890625, 0.12890625), (0.431372549019608, 0.141796875, \n 0.141796875), (0.435294117647059, 0.1546875, 0.1546875), (\n 0.43921568627451, 0.167578125, 0.167578125), (0.443137254901961, \n 0.18046875, 0.18046875), (0.447058823529412, 0.193359375, \n 0.193359375), (0.450980392156863, 0.20625, 0.20625), (\n 0.454901960784314, 0.219140625, 0.219140625), (0.458823529411765, \n 0.23203125, 0.23203125), (0.462745098039216, 0.244921875, \n 0.244921875), (0.466666666666667, 0.2578125, 0.2578125), (\n 0.470588235294118, 0.270703125, 0.270703125), (0.474509803921569, \n 0.28359375, 0.28359375), (0.47843137254902, 0.296484375, \n 0.296484375), (0.482352941176471, 0.309375, 0.309375), (\n 0.486274509803922, 0.322265625, 0.322265625), (0.490196078431373, \n 0.33515625, 0.33515625), (0.494117647058824, 0.348046875, \n 0.348046875), (0.498039215686275, 0.3609375, 0.3609375), (\n 0.501960784313725, 0.373828125, 0.373828125), (0.505882352941176, \n 0.38671875, 0.38671875), (0.509803921568627, 0.399609375, \n 0.399609375), (0.513725490196078, 0.4125, 0.4125), (\n 0.517647058823529, 0.425390625, 0.425390625), (0.52156862745098, \n 0.43828125, 0.43828125), (0.525490196078431, 0.451171875, \n 0.451171875), (0.529411764705882, 0.4640625, 0.4640625), (\n 0.533333333333333, 0.476953125, 0.476953125), (0.537254901960784, \n 0.48984375, 0.48984375), (0.541176470588235, 0.502734375, \n 0.502734375), (0.545098039215686, 0.515625, 0.515625), (\n 0.549019607843137, 0.528515625, 0.528515625), (0.552941176470588, \n 0.54140625, 0.54140625), (0.556862745098039, 0.554296875, \n 0.554296875), (0.56078431372549, 0.5671875, 0.5671875), (\n 0.564705882352941, 0.580078125, 0.580078125), (0.568627450980392, \n 0.59296875, 0.59296875), (0.572549019607843, 0.605859375, \n 0.605859375), (0.576470588235294, 0.61875, 0.61875), (\n 0.580392156862745, 0.631640625, 0.631640625), (0.584313725490196, \n 0.64453125, 0.64453125), (0.588235294117647, 
0.6359375, 0.6359375),\n (0.592156862745098, 0.62734375, 0.62734375), (0.596078431372549, \n 0.61875, 0.61875), (0.6, 0.61015625, 0.61015625), (\n 0.603921568627451, 0.6015625, 0.6015625), (0.607843137254902, \n 0.59296875, 0.59296875), (0.611764705882353, 0.584375, 0.584375), (\n 0.615686274509804, 0.57578125, 0.57578125), (0.619607843137255, \n 0.5671875, 0.5671875), (0.623529411764706, 0.55859375, 0.55859375),\n (0.627450980392157, 0.55, 0.55), (0.631372549019608, 0.54140625, \n 0.54140625), (0.635294117647059, 0.5328125, 0.5328125), (\n 0.63921568627451, 0.52421875, 0.52421875), (0.643137254901961, \n 0.515625, 0.515625), (0.647058823529412, 0.50703125, 0.50703125), (\n 0.650980392156863, 0.4984375, 0.4984375), (0.654901960784314, \n 0.48984375, 0.48984375), (0.658823529411765, 0.48125, 0.48125), (\n 0.662745098039216, 0.47265625, 0.47265625), (0.666666666666667, \n 0.4640625, 0.4640625), (0.670588235294118, 0.45546875, 0.45546875),\n (0.674509803921569, 0.446875, 0.446875), (0.67843137254902, \n 0.43828125, 0.43828125), (0.682352941176471, 0.4296875, 0.4296875),\n (0.686274509803922, 0.42109375, 0.42109375), (0.690196078431373, \n 0.4125, 0.4125), (0.694117647058824, 0.40390625, 0.40390625), (\n 0.698039215686274, 0.3953125, 0.3953125), (0.701960784313725, \n 0.38671875, 0.38671875), (0.705882352941177, 0.378125, 0.378125), (\n 0.709803921568627, 0.36953125, 0.36953125), (0.713725490196078, \n 0.3609375, 0.3609375), (0.717647058823529, 0.35234375, 0.35234375),\n (0.72156862745098, 0.34375, 0.34375), (0.725490196078431, \n 0.33515625, 0.33515625), (0.729411764705882, 0.3265625, 0.3265625),\n (0.733333333333333, 0.31796875, 0.31796875), (0.737254901960784, \n 0.309375, 0.309375), (0.741176470588235, 0.30078125, 0.30078125), (\n 0.745098039215686, 0.2921875, 0.2921875), (0.749019607843137, \n 0.28359375, 0.28359375), (0.752941176470588, 0.275, 0.275), (\n 0.756862745098039, 0.26640625, 0.26640625), (0.76078431372549, \n 0.2578125, 0.2578125), (0.764705882352941, 0.24921875, 0.24921875),\n (0.768627450980392, 0.240625, 0.240625), (0.772549019607843, \n 0.23203125, 0.23203125), (0.776470588235294, 0.2234375, 0.2234375),\n (0.780392156862745, 0.21484375, 0.21484375), (0.784313725490196, \n 0.222301171875, 0.222301171875), (0.788235294117647, 0.22975859375,\n 0.22975859375), (0.792156862745098, 0.237216015625, 0.237216015625),\n (0.796078431372549, 0.2446734375, 0.2446734375), (0.8, \n 0.252130859375, 0.252130859375), (0.803921568627451, 0.259587890625,\n 0.259587890625), (0.807843137254902, 0.2670453125, 0.2670453125), (\n 0.811764705882353, 0.274502734375, 0.274502734375), (\n 0.815686274509804, 0.28196015625, 0.28196015625), (\n 0.819607843137255, 0.289417578125, 0.289417578125), (\n 0.823529411764706, 0.296875, 0.296875), (0.827450980392157, \n 0.304332421875, 0.304332421875), (0.831372549019608, 0.31178984375,\n 0.31178984375), (0.835294117647059, 0.319247265625, 0.319247265625),\n (0.83921568627451, 0.3267046875, 0.3267046875), (0.843137254901961,\n 0.334162109375, 0.334162109375), (0.847058823529412, 0.34161953125,\n 0.34161953125), (0.850980392156863, 0.3490765625, 0.3490765625), (\n 0.854901960784314, 0.356533984375, 0.356533984375), (\n 0.858823529411765, 0.36399140625, 0.36399140625), (\n 0.862745098039216, 0.371448828125, 0.371448828125), (\n 0.866666666666667, 0.37890625, 0.37890625), (0.870588235294118, \n 0.386363671875, 0.386363671875), (0.874509803921569, 0.3938203125, \n 0.3938203125), (0.87843137254902, 0.40127734375, 0.40127734375), (\n 0.882352941176471, 0.408734375, 
0.408734375), (0.886274509803922, \n 0.41619140625, 0.41619140625), (0.890196078431373, 0.42365234375, \n 0.42365234375), (0.894117647058824, 0.431109375, 0.431109375), (\n 0.898039215686275, 0.43856640625, 0.43856640625), (\n 0.901960784313726, 0.4460234375, 0.4460234375), (0.905882352941176,\n 0.45348046875, 0.45348046875), (0.909803921568627, 0.4609375, \n 0.4609375), (0.913725490196078, 0.46839453125, 0.46839453125), (\n 0.917647058823529, 0.4758515625, 0.4758515625), (0.92156862745098, \n 0.48330859375, 0.48330859375), (0.925490196078431, 0.490765625, \n 0.490765625), (0.929411764705882, 0.49822265625, 0.49822265625), (\n 0.933333333333333, 0.50568359375, 0.50568359375), (\n 0.937254901960784, 0.513140625, 0.513140625), (0.941176470588235, \n 0.52059765625, 0.52059765625), (0.945098039215686, 0.5280546875, \n 0.5280546875), (0.949019607843137, 0.53551171875, 0.53551171875), (\n 0.952941176470588, 0.54296875, 0.54296875), (0.956862745098039, \n 0.55042578125, 0.55042578125), (0.96078431372549, 0.5578828125, \n 0.5578828125), (0.964705882352941, 0.56533984375, 0.56533984375), (\n 0.968627450980392, 0.572796875, 0.572796875), (0.972549019607843, \n 0.58025390625, 0.58025390625), (0.976470588235294, 0.58771484375, \n 0.58771484375), (0.980392156862745, 0.595171875, 0.595171875), (\n 0.984313725490196, 0.60262890625, 0.60262890625), (\n 0.988235294117647, 0.6100859375, 0.6100859375), (0.992156862745098,\n 0.61754296875, 0.61754296875), (0.996078431372549, 0.625, 0.625), (\n 1.0, 0.625, 0.625)), 'blue': ((0.0, 0.51984375, 0.51984375), (\n 0.00392156862745098, 0.51984375, 0.51984375), (0.00784313725490196,\n 0.529765625, 0.529765625), (0.0117647058823529, 0.5396875, \n 0.5396875), (0.0156862745098039, 0.549609375, 0.549609375), (\n 0.0196078431372549, 0.55953125, 0.55953125), (0.0235294117647059, \n 0.569453125, 0.569453125), (0.0274509803921569, 0.579375, 0.579375),\n (0.0313725490196078, 0.589296875, 0.589296875), (0.0352941176470588,\n 0.59921875, 0.59921875), (0.0392156862745098, 0.609140625, \n 0.609140625), (0.0431372549019608, 0.6190625, 0.6190625), (\n 0.0470588235294118, 0.628984375, 0.628984375), (0.0509803921568627,\n 0.63890625, 0.63890625), (0.0549019607843137, 0.648828125, \n 0.648828125), (0.0588235294117647, 0.65875, 0.65875), (\n 0.0627450980392157, 0.668671875, 0.668671875), (0.0666666666666667,\n 0.67859375, 0.67859375), (0.0705882352941176, 0.688515625, \n 0.688515625), (0.0745098039215686, 0.6984375, 0.6984375), (\n 0.0784313725490196, 0.708359375, 0.708359375), (0.0823529411764706,\n 0.71828125, 0.71828125), (0.0862745098039216, 0.728203125, \n 0.728203125), (0.0901960784313725, 0.738125, 0.738125), (\n 0.0941176470588235, 0.748046875, 0.748046875), (0.0980392156862745,\n 0.75796875, 0.75796875), (0.101960784313725, 0.767890625, \n 0.767890625), (0.105882352941176, 0.7778125, 0.7778125), (\n 0.109803921568627, 0.787734375, 0.787734375), (0.113725490196078, \n 0.79765625, 0.79765625), (0.117647058823529, 0.807578125, \n 0.807578125), (0.12156862745098, 0.8175, 0.8175), (\n 0.125490196078431, 0.827421875, 0.827421875), (0.129411764705882, \n 0.83734375, 0.83734375), (0.133333333333333, 0.847265625, \n 0.847265625), (0.137254901960784, 0.8571875, 0.8571875), (\n 0.141176470588235, 0.867109375, 0.867109375), (0.145098039215686, \n 0.87703125, 0.87703125), (0.149019607843137, 0.886953125, \n 0.886953125), (0.152941176470588, 0.896875, 0.896875), (\n 0.156862745098039, 0.906796875, 0.906796875), (0.16078431372549, \n 0.91671875, 0.91671875), (0.164705882352941, 0.926640625, 
\n 0.926640625), (0.168627450980392, 0.9365625, 0.9365625), (\n 0.172549019607843, 0.946484375, 0.946484375), (0.176470588235294, \n 0.95640625, 0.95640625), (0.180392156862745, 0.966328125, \n 0.966328125), (0.184313725490196, 0.97625, 0.97625), (\n 0.188235294117647, 0.986171875, 0.986171875), (0.192156862745098, \n 0.99609375, 0.99609375), (0.196078431372549, 0.976171875, \n 0.976171875), (0.2, 0.95625, 0.95625), (0.203921568627451, \n 0.936328125, 0.936328125), (0.207843137254902, 0.91640625, \n 0.91640625), (0.211764705882353, 0.896484375, 0.896484375), (\n 0.215686274509804, 0.8765625, 0.8765625), (0.219607843137255, \n 0.856640625, 0.856640625), (0.223529411764706, 0.83671875, \n 0.83671875), (0.227450980392157, 0.816796875, 0.816796875), (\n 0.231372549019608, 0.796875, 0.796875), (0.235294117647059, \n 0.776953125, 0.776953125), (0.23921568627451, 0.75703125, \n 0.75703125), (0.243137254901961, 0.737109375, 0.737109375), (\n 0.247058823529412, 0.7171875, 0.7171875), (0.250980392156863, \n 0.697265625, 0.697265625), (0.254901960784314, 0.67734375, \n 0.67734375), (0.258823529411765, 0.657421875, 0.657421875), (\n 0.262745098039216, 0.6375, 0.6375), (0.266666666666667, 0.617578125,\n 0.617578125), (0.270588235294118, 0.59765625, 0.59765625), (\n 0.274509803921569, 0.577734375, 0.577734375), (0.27843137254902, \n 0.5578125, 0.5578125), (0.282352941176471, 0.537890625, 0.537890625\n ), (0.286274509803922, 0.51796875, 0.51796875), (0.290196078431373,\n 0.498046875, 0.498046875), (0.294117647058824, 0.478125, 0.478125),\n (0.298039215686275, 0.458203125, 0.458203125), (0.301960784313725, \n 0.43828125, 0.43828125), (0.305882352941176, 0.418359375, \n 0.418359375), (0.309803921568627, 0.3984375, 0.3984375), (\n 0.313725490196078, 0.378515625, 0.378515625), (0.317647058823529, \n 0.35859375, 0.35859375), (0.32156862745098, 0.338671875, \n 0.338671875), (0.325490196078431, 0.31875, 0.31875), (\n 0.329411764705882, 0.298828125, 0.298828125), (0.333333333333333, \n 0.27890625, 0.27890625), (0.337254901960784, 0.258984375, \n 0.258984375), (0.341176470588235, 0.2390625, 0.2390625), (\n 0.345098039215686, 0.219140625, 0.219140625), (0.349019607843137, \n 0.19921875, 0.19921875), (0.352941176470588, 0.179296875, \n 0.179296875), (0.356862745098039, 0.159375, 0.159375), (\n 0.36078431372549, 0.139453125, 0.139453125), (0.364705882352941, \n 0.11953125, 0.11953125), (0.368627450980392, 0.099609375, \n 0.099609375), (0.372549019607843, 0.0796875, 0.0796875), (\n 0.376470588235294, 0.059765625, 0.059765625), (0.380392156862745, \n 0.03984375, 0.03984375), (0.384313725490196, 0.019921875, \n 0.019921875), (0.388235294117647, 0, 0), (0.392156862745098, 0, 0),\n (0.396078431372549, 0, 0), (0.4, 0, 0), (0.403921568627451, 0, 0),\n (0.407843137254902, 0, 0), (0.411764705882353, 0, 0), (\n 0.415686274509804, 0, 0), (0.419607843137255, 0, 0), (\n 0.423529411764706, 0, 0), (0.427450980392157, 0, 0), (\n 0.431372549019608, 0, 0), (0.435294117647059, 0, 0), (\n 0.43921568627451, 0, 0), (0.443137254901961, 0, 0), (\n 0.447058823529412, 0, 0), (0.450980392156863, 0, 0), (\n 0.454901960784314, 0, 0), (0.458823529411765, 0, 0), (\n 0.462745098039216, 0, 0), (0.466666666666667, 0, 0), (\n 0.470588235294118, 0, 0), (0.474509803921569, 0, 0), (\n 0.47843137254902, 0, 0), (0.482352941176471, 0, 0), (\n 0.486274509803922, 0, 0), (0.490196078431373, 0, 0), (\n 0.494117647058824, 0, 0), (0.498039215686275, 0, 0), (\n 0.501960784313725, 0, 0), (0.505882352941176, 0, 0), (\n 0.509803921568627, 0, 0), (0.513725490196078, 
0, 0), (\n 0.517647058823529, 0, 0), (0.52156862745098, 0, 0), (\n 0.525490196078431, 0, 0), (0.529411764705882, 0, 0), (\n 0.533333333333333, 0, 0), (0.537254901960784, 0, 0), (\n 0.541176470588235, 0, 0), (0.545098039215686, 0, 0), (\n 0.549019607843137, 0, 0), (0.552941176470588, 0, 0), (\n 0.556862745098039, 0, 0), (0.56078431372549, 0, 0), (\n 0.564705882352941, 0, 0), (0.568627450980392, 0, 0), (\n 0.572549019607843, 0, 0), (0.576470588235294, 0, 0), (\n 0.580392156862745, 0, 0), (0.584313725490196, 0, 0), (\n 0.588235294117647, 0.004296875, 0.004296875), (0.592156862745098, \n 0.00859375, 0.00859375), (0.596078431372549, 0.012890625, \n 0.012890625), (0.6, 0.0171875, 0.0171875), (0.603921568627451, \n 0.021484375, 0.021484375), (0.607843137254902, 0.02578125, \n 0.02578125), (0.611764705882353, 0.030078125, 0.030078125), (\n 0.615686274509804, 0.034375, 0.034375), (0.619607843137255, \n 0.038671875, 0.038671875), (0.623529411764706, 0.04296875, \n 0.04296875), (0.627450980392157, 0.047265625, 0.047265625), (\n 0.631372549019608, 0.0515625, 0.0515625), (0.635294117647059, \n 0.055859375, 0.055859375), (0.63921568627451, 0.06015625, \n 0.06015625), (0.643137254901961, 0.064453125, 0.064453125), (\n 0.647058823529412, 0.06875, 0.06875), (0.650980392156863, \n 0.073046875, 0.073046875), (0.654901960784314, 0.07734375, \n 0.07734375), (0.658823529411765, 0.081640625, 0.081640625), (\n 0.662745098039216, 0.0859375, 0.0859375), (0.666666666666667, \n 0.090234375, 0.090234375), (0.670588235294118, 0.09453125, \n 0.09453125), (0.674509803921569, 0.098828125, 0.098828125), (\n 0.67843137254902, 0.103125, 0.103125), (0.682352941176471, \n 0.107421875, 0.107421875), (0.686274509803922, 0.11171875, \n 0.11171875), (0.690196078431373, 0.116015625, 0.116015625), (\n 0.694117647058824, 0.1203125, 0.1203125), (0.698039215686274, \n 0.124609375, 0.124609375), (0.701960784313725, 0.12890625, \n 0.12890625), (0.705882352941177, 0.133203125, 0.133203125), (\n 0.709803921568627, 0.1375, 0.1375), (0.713725490196078, 0.141796875,\n 0.141796875), (0.717647058823529, 0.14609375, 0.14609375), (\n 0.72156862745098, 0.150390625, 0.150390625), (0.725490196078431, \n 0.1546875, 0.1546875), (0.729411764705882, 0.158984375, 0.158984375\n ), (0.733333333333333, 0.16328125, 0.16328125), (0.737254901960784,\n 0.167578125, 0.167578125), (0.741176470588235, 0.171875, 0.171875),\n (0.745098039215686, 0.176171875, 0.176171875), (0.749019607843137, \n 0.18046875, 0.18046875), (0.752941176470588, 0.184765625, \n 0.184765625), (0.756862745098039, 0.1890625, 0.1890625), (\n 0.76078431372549, 0.193359375, 0.193359375), (0.764705882352941, \n 0.19765625, 0.19765625), (0.768627450980392, 0.201953125, \n 0.201953125), (0.772549019607843, 0.20625, 0.20625), (\n 0.776470588235294, 0.210546875, 0.210546875), (0.780392156862745, \n 0.21484375, 0.21484375), (0.784313725490196, 0.22663359375, \n 0.22663359375), (0.788235294117647, 0.2384234375, 0.2384234375), (\n 0.792156862745098, 0.250212890625, 0.250212890625), (\n 0.796078431372549, 0.262002734375, 0.262002734375), (0.8, \n 0.273792578125, 0.273792578125), (0.803921568627451, 0.285582421875,\n 0.285582421875), (0.807843137254902, 0.297372265625, 0.297372265625\n ), (0.811764705882353, 0.309162109375, 0.309162109375), (\n 0.815686274509804, 0.3209515625, 0.3209515625), (0.819607843137255,\n 0.33274140625, 0.33274140625), (0.823529411764706, 0.34453125, \n 0.34453125), (0.827450980392157, 0.35632109375, 0.35632109375), (\n 0.831372549019608, 0.3681109375, 0.3681109375), 
(0.835294117647059,\n 0.379900390625, 0.379900390625), (0.83921568627451, 0.39169140625, \n 0.39169140625), (0.843137254901961, 0.40348046875, 0.40348046875),\n (0.847058823529412, 0.41526953125, 0.41526953125), (\n 0.850980392156863, 0.42705859375, 0.42705859375), (\n 0.854901960784314, 0.43884765625, 0.43884765625), (\n 0.858823529411765, 0.450640625, 0.450640625), (0.862745098039216, \n 0.4624296875, 0.4624296875), (0.866666666666667, 0.47421875, \n 0.47421875), (0.870588235294118, 0.4860078125, 0.4860078125), (\n 0.874509803921569, 0.497796875, 0.497796875), (0.87843137254902, \n 0.50958984375, 0.50958984375), (0.882352941176471, 0.52137890625, \n 0.52137890625), (0.886274509803922, 0.53316796875, 0.53316796875),\n (0.890196078431373, 0.54495703125, 0.54495703125), (\n 0.894117647058824, 0.55674609375, 0.55674609375), (\n 0.898039215686275, 0.56853515625, 0.56853515625), (\n 0.901960784313726, 0.580328125, 0.580328125), (0.905882352941176, \n 0.5921171875, 0.5921171875), (0.909803921568627, 0.60390625, \n 0.60390625), (0.913725490196078, 0.6156953125, 0.6156953125), (\n 0.917647058823529, 0.627484375, 0.627484375), (0.92156862745098, \n 0.63927734375, 0.63927734375), (0.925490196078431, 0.65106640625, \n 0.65106640625), (0.929411764705882, 0.66285546875, 0.66285546875),\n (0.933333333333333, 0.67464453125, 0.67464453125), (\n 0.937254901960784, 0.68643359375, 0.68643359375), (\n 0.941176470588235, 0.69822265625, 0.69822265625), (\n 0.945098039215686, 0.710015625, 0.710015625), (0.949019607843137, \n 0.7218046875, 0.7218046875), (0.952941176470588, 0.73359375, \n 0.73359375), (0.956862745098039, 0.7453828125, 0.7453828125), (\n 0.96078431372549, 0.757171875, 0.757171875), (0.964705882352941, \n 0.76896484375, 0.76896484375), (0.968627450980392, 0.78075390625, \n 0.78075390625), (0.972549019607843, 0.79254296875, 0.79254296875),\n (0.976470588235294, 0.80433203125, 0.80433203125), (\n 0.980392156862745, 0.81612109375, 0.81612109375), (\n 0.984313725490196, 0.82791015625, 0.82791015625), (\n 0.988235294117647, 0.839703125, 0.839703125), (0.992156862745098, \n 0.8514921875, 0.8514921875), (0.996078431372549, 0.86328125, \n 0.86328125), (1.0, 0.86328125, 0.86328125))}\n vcdict = {'red': ((0, 1, 1), (0.00392156862745098, 0.54508984375, \n 0.54508984375), (0.00784313725490196, 0.5285703125, 0.5285703125),\n (0.0117647058823529, 0.5120546875, 0.5120546875), (\n 0.0156862745098039, 0.49553515625, 0.49553515625), (\n 0.0196078431372549, 0.47901953125, 0.47901953125), (\n 0.0235294117647059, 0.4625, 0.4625), (0.0274509803921569, \n 0.44598046875, 0.44598046875), (0.0313725490196078, 0.42946484375, \n 0.42946484375), (0.0352941176470588, 0.4129453125, 0.4129453125), (\n 0.0392156862745098, 0.3964296875, 0.3964296875), (\n 0.0431372549019608, 0.379910546875, 0.379910546875), (\n 0.0470588235294118, 0.36339296875, 0.36339296875), (\n 0.0509803921568627, 0.346875, 0.346875), (0.0549019607843137, \n 0.33035703125, 0.33035703125), (0.0588235294117647, 0.313839453125,\n 0.313839453125), (0.0627450980392157, 0.297321484375, \n 0.297321484375), (0.0666666666666667, 0.280803515625, \n 0.280803515625), (0.0705882352941176, 0.2642859375, 0.2642859375),\n (0.0745098039215686, 0.24776796875, 0.24776796875), (\n 0.0784313725490196, 0.23125, 0.23125), (0.0823529411764706, \n 0.21473203125, 0.21473203125), (0.0862745098039216, 0.198214453125,\n 0.198214453125), (0.0901960784313725, 0.181696484375, \n 0.181696484375), (0.0941176470588235, 0.165178515625, \n 0.165178515625), (0.0980392156862745, 0.148660546875, \n 
0.148660546875), (0.101960784313725, 0.13214296875, 0.13214296875),\n (0.105882352941176, 0.115625, 0.115625), (0.109803921568627, \n 0.09910703125, 0.09910703125), (0.113725490196078, 0.082589453125, \n 0.082589453125), (0.117647058823529, 0.066071484375, 0.066071484375\n ), (0.12156862745098, 0.049553515625, 0.049553515625), (\n 0.125490196078431, 0.0330357421875, 0.0330357421875), (\n 0.129411764705882, 0.016517890625, 0.016517890625), (\n 0.133333333333333, 0, 0), (0.137254901960784, 0, 0), (\n 0.141176470588235, 0, 0), (0.145098039215686, 0, 0), (\n 0.149019607843137, 0, 0), (0.152941176470588, 0, 0), (\n 0.156862745098039, 0, 0), (0.16078431372549, 0, 0), (\n 0.164705882352941, 0, 0), (0.168627450980392, 0, 0), (\n 0.172549019607843, 0, 0), (0.176470588235294, 0, 0), (\n 0.180392156862745, 0, 0), (0.184313725490196, 0, 0), (\n 0.188235294117647, 0, 0), (0.192156862745098, 0, 0), (\n 0.196078431372549, 0, 0), (0.2, 0, 0), (0.203921568627451, 0, 0), (\n 0.207843137254902, 0, 0), (0.211764705882353, 0, 0), (\n 0.215686274509804, 0, 0), (0.219607843137255, 0, 0), (\n 0.223529411764706, 0, 0), (0.227450980392157, 0, 0), (\n 0.231372549019608, 0, 0), (0.235294117647059, 0, 0), (\n 0.23921568627451, 0, 0), (0.243137254901961, 0, 0), (\n 0.247058823529412, 0, 0), (0.250980392156863, 0, 0), (\n 0.254901960784314, 0, 0), (0.258823529411765, 0, 0), (\n 0.262745098039216, 0, 0), (0.266666666666667, 0, 0), (\n 0.270588235294118, 0, 0), (0.274509803921569, 0, 0), (\n 0.27843137254902, 0, 0), (0.282352941176471, 0, 0), (\n 0.286274509803922, 0, 0), (0.290196078431373, 0, 0), (\n 0.294117647058824, 0, 0), (0.298039215686275, 0, 0), (\n 0.301960784313725, 0, 0), (0.305882352941176, 0, 0), (\n 0.309803921568627, 0, 0), (0.313725490196078, 0, 0), (\n 0.317647058823529, 0, 0), (0.32156862745098, 0, 0), (\n 0.325490196078431, 0, 0), (0.329411764705882, 0, 0), (\n 0.333333333333333, 0, 0), (0.337254901960784, 0, 0), (\n 0.341176470588235, 0, 0), (0.345098039215686, 0, 0), (\n 0.349019607843137, 0, 0), (0.352941176470588, 0.0061383984375, \n 0.0061383984375), (0.356862745098039, 0.012276796875, \n 0.012276796875), (0.36078431372549, 0.0184151953125, \n 0.0184151953125), (0.364705882352941, 0.0245535546875, \n 0.0245535546875), (0.368627450980392, 0.030691953125, \n 0.030691953125), (0.372549019607843, 0.0368303515625, \n 0.0368303515625), (0.376470588235294, 0.04296875, 0.04296875), (\n 0.380392156862745, 0.04910703125, 0.04910703125), (\n 0.384313725490196, 0.055245703125, 0.055245703125), (\n 0.388235294117647, 0.061383984375, 0.061383984375), (\n 0.392156862745098, 0.067522265625, 0.067522265625), (\n 0.396078431372549, 0.073660546875, 0.073660546875), (0.4, \n 0.07979921875, 0.07979921875), (0.403921568627451, 0.0859375, \n 0.0859375), (0.407843137254902, 0.09207578125, 0.09207578125), (\n 0.411764705882353, 0.098214453125, 0.098214453125), (\n 0.415686274509804, 0.104352734375, 0.104352734375), (\n 0.419607843137255, 0.110491015625, 0.110491015625), (\n 0.423529411764706, 0.116629296875, 0.116629296875), (\n 0.427450980392157, 0.12276796875, 0.12276796875), (\n 0.431372549019608, 0.12890625, 0.12890625), (0.435294117647059, \n 0.13504453125, 0.13504453125), (0.43921568627451, 0.141183203125, \n 0.141183203125), (0.443137254901961, 0.147321484375, 0.147321484375\n ), (0.447058823529412, 0.153459765625, 0.153459765625), (\n 0.450980392156863, 0.159598046875, 0.159598046875), (\n 0.454901960784314, 0.16573671875, 0.16573671875), (\n 0.458823529411765, 0.171875, 0.171875), (0.462745098039216, \n 
0.17801328125, 0.17801328125), (0.466666666666667, 0.184151953125, \n 0.184151953125), (0.470588235294118, 0.190290234375, 0.190290234375\n ), (0.474509803921569, 0.196428515625, 0.196428515625), (\n 0.47843137254902, 0.202566796875, 0.202566796875), (\n 0.482352941176471, 0.20870546875, 0.20870546875), (\n 0.486274509803922, 0.21484375, 0.21484375), (0.490196078431373, \n 0.233370703125, 0.233370703125), (0.494117647058824, 0.251897265625,\n 0.251897265625), (0.498039215686275, 0.27042421875, 0.27042421875),\n (0.501960784313725, 0.28895078125, 0.28895078125), (\n 0.505882352941176, 0.307477734375, 0.307477734375), (\n 0.509803921568627, 0.326004296875, 0.326004296875), (\n 0.513725490196078, 0.34453125, 0.34453125), (0.517647058823529, \n 0.363058203125, 0.363058203125), (0.52156862745098, 0.381584765625,\n 0.381584765625), (0.525490196078431, 0.40011328125, 0.40011328125),\n (0.529411764705882, 0.41863671875, 0.41863671875), (\n 0.533333333333333, 0.4371640625, 0.4371640625), (0.537254901960784,\n 0.45569140625, 0.45569140625), (0.541176470588235, 0.47421875, \n 0.47421875), (0.545098039215686, 0.49274609375, 0.49274609375), (\n 0.549019607843137, 0.5112734375, 0.5112734375), (0.552941176470588,\n 0.52980078125, 0.52980078125), (0.556862745098039, 0.54832421875, \n 0.54832421875), (0.56078431372549, 0.5668515625, 0.5668515625), (\n 0.564705882352941, 0.58537890625, 0.58537890625), (\n 0.568627450980392, 0.60390625, 0.60390625), (0.572549019607843, \n 0.62243359375, 0.62243359375), (0.576470588235294, 0.6409609375, \n 0.6409609375), (0.580392156862745, 0.65948828125, 0.65948828125), (\n 0.584313725490196, 0.67801171875, 0.67801171875), (\n 0.588235294117647, 0.6965390625, 0.6965390625), (0.592156862745098,\n 0.71506640625, 0.71506640625), (0.596078431372549, 0.73359375, \n 0.73359375), (0.6, 0.75212109375, 0.75212109375), (\n 0.603921568627451, 0.7706484375, 0.7706484375), (0.607843137254902,\n 0.78917578125, 0.78917578125), (0.611764705882353, 0.80769921875, \n 0.80769921875), (0.615686274509804, 0.8262265625, 0.8262265625), (\n 0.619607843137255, 0.84475390625, 0.84475390625), (\n 0.623529411764706, 0.86328125, 0.86328125), (0.627450980392157, \n 0.86549609375, 0.86549609375), (0.631372549019608, 0.86770703125, \n 0.86770703125), (0.635294117647059, 0.869921875, 0.869921875), (\n 0.63921568627451, 0.87213671875, 0.87213671875), (0.643137254901961,\n 0.87434765625, 0.87434765625), (0.647058823529412, 0.8765625, \n 0.8765625), (0.650980392156863, 0.87877734375, 0.87877734375), (\n 0.654901960784314, 0.88098828125, 0.88098828125), (\n 0.658823529411765, 0.883203125, 0.883203125), (0.662745098039216, \n 0.88541796875, 0.88541796875), (0.666666666666667, 0.88762890625, \n 0.88762890625), (0.670588235294118, 0.88984375, 0.88984375), (\n 0.674509803921569, 0.89205859375, 0.89205859375), (0.67843137254902,\n 0.89426953125, 0.89426953125), (0.682352941176471, 0.896484375, \n 0.896484375), (0.686274509803922, 0.89869921875, 0.89869921875), (\n 0.690196078431373, 0.90091015625, 0.90091015625), (\n 0.694117647058824, 0.903125, 0.903125), (0.698039215686274, \n 0.90533984375, 0.90533984375), (0.701960784313725, 0.90755078125, \n 0.90755078125), (0.705882352941177, 0.909765625, 0.909765625), (\n 0.709803921568627, 0.91198046875, 0.91198046875), (\n 0.713725490196078, 0.91419140625, 0.91419140625), (\n 0.717647058823529, 0.91640625, 0.91640625), (0.72156862745098, \n 0.91862109375, 0.91862109375), (0.725490196078431, 0.92083203125, \n 0.92083203125), (0.729411764705882, 0.923046875, 0.923046875), (\n 
0.733333333333333, 0.92526171875, 0.92526171875), (\n 0.737254901960784, 0.92747265625, 0.92747265625), (\n 0.741176470588235, 0.9296875, 0.9296875), (0.745098039215686, \n 0.93190234375, 0.93190234375), (0.749019607843137, 0.93411328125, \n 0.93411328125), (0.752941176470588, 0.936328125, 0.936328125), (\n 0.756862745098039, 0.93854296875, 0.93854296875), (0.76078431372549,\n 0.94075390625, 0.94075390625), (0.764705882352941, 0.94296875, \n 0.94296875), (0.768627450980392, 0.94518359375, 0.94518359375), (\n 0.772549019607843, 0.94739453125, 0.94739453125), (\n 0.776470588235294, 0.949609375, 0.949609375), (0.780392156862745, \n 0.95182421875, 0.95182421875), (0.784313725490196, 0.95403515625, \n 0.95403515625), (0.788235294117647, 0.95625, 0.95625), (\n 0.792156862745098, 0.95846484375, 0.95846484375), (\n 0.796078431372549, 0.96067578125, 0.96067578125), (0.8, 0.962890625,\n 0.962890625), (0.803921568627451, 0.96510546875, 0.96510546875), (\n 0.807843137254902, 0.96731640625, 0.96731640625), (\n 0.811764705882353, 0.96953125, 0.96953125), (0.815686274509804, \n 0.97174609375, 0.97174609375), (0.819607843137255, 0.97395703125, \n 0.97395703125), (0.823529411764706, 0.976171875, 0.976171875), (\n 0.827450980392157, 0.97838671875, 0.97838671875), (\n 0.831372549019608, 0.98059765625, 0.98059765625), (\n 0.835294117647059, 0.9828125, 0.9828125), (0.83921568627451, \n 0.98502734375, 0.98502734375), (0.843137254901961, 0.98723828125, \n 0.98723828125), (0.847058823529412, 0.989453125, 0.989453125), (\n 0.850980392156863, 0.99166796875, 0.99166796875), (\n 0.854901960784314, 0.99387890625, 0.99387890625), (\n 0.858823529411765, 0.99609375, 0.99609375), (0.862745098039216, \n 0.99609375, 0.99609375), (0.866666666666667, 0.99609375, 0.99609375\n ), (0.870588235294118, 0.99609375, 0.99609375), (0.874509803921569,\n 0.99609375, 0.99609375), (0.87843137254902, 0.99609375, 0.99609375),\n (0.882352941176471, 0.99609375, 0.99609375), (0.886274509803922, \n 0.99609375, 0.99609375), (0.890196078431373, 0.99609375, 0.99609375\n ), (0.894117647058824, 0.99609375, 0.99609375), (0.898039215686275,\n 0.99609375, 0.99609375), (0.901960784313726, 0.99609375, 0.99609375\n ), (0.905882352941176, 0.99609375, 0.99609375), (0.909803921568627,\n 0.99609375, 0.99609375), (0.913725490196078, 0.99609375, 0.99609375\n ), (0.917647058823529, 0.99609375, 0.99609375), (0.92156862745098, \n 0.99609375, 0.99609375), (0.925490196078431, 0.99609375, 0.99609375\n ), (0.929411764705882, 0.99609375, 0.99609375), (0.933333333333333,\n 0.99609375, 0.99609375), (0.937254901960784, 0.99609375, 0.99609375\n ), (0.941176470588235, 0.99609375, 0.99609375), (0.945098039215686,\n 0.99609375, 0.99609375), (0.949019607843137, 0.99609375, 0.99609375\n ), (0.952941176470588, 0.99609375, 0.99609375), (0.956862745098039,\n 0.99609375, 0.99609375), (0.96078431372549, 0.99609375, 0.99609375),\n (0.964705882352941, 0.99609375, 0.99609375), (0.968627450980392, \n 0.99609375, 0.99609375), (0.972549019607843, 0.99609375, 0.99609375\n ), (0.976470588235294, 0.99609375, 0.99609375), (0.980392156862745,\n 0.99609375, 0.99609375), (0.984313725490196, 0.99609375, 0.99609375\n ), (0.988235294117647, 0.99609375, 0.99609375), (0.992156862745098,\n 0.99609375, 0.99609375), (0.996078431372549, 0.99609375, 0.99609375\n ), (1, 0.99609375, 0.99609375)), 'green': ((0, 1, 1), (\n 0.00392156862745098, 0, 0), (0.00784313725490196, 0, 0), (\n 0.0117647058823529, 0, 0), (0.0156862745098039, 0, 0), (\n 0.0196078431372549, 0, 0), (0.0235294117647059, 0, 0), (\n 
0.0274509803921569, 0, 0), (0.0313725490196078, 0, 0), (\n 0.0352941176470588, 0, 0), (0.0392156862745098, 0, 0), (\n 0.0431372549019608, 0, 0), (0.0470588235294118, 0, 0), (\n 0.0509803921568627, 0, 0), (0.0549019607843137, 0, 0), (\n 0.0588235294117647, 0, 0), (0.0627450980392157, 0, 0), (\n 0.0666666666666667, 0, 0), (0.0705882352941176, 0, 0), (\n 0.0745098039215686, 0, 0), (0.0784313725490196, 0, 0), (\n 0.0823529411764706, 0, 0), (0.0862745098039216, 0, 0), (\n 0.0901960784313725, 0, 0), (0.0941176470588235, 0, 0), (\n 0.0980392156862745, 0, 0), (0.101960784313725, 0, 0), (\n 0.105882352941176, 0, 0), (0.109803921568627, 0, 0), (\n 0.113725490196078, 0, 0), (0.117647058823529, 0, 0), (\n 0.12156862745098, 0, 0), (0.125490196078431, 0, 0), (\n 0.129411764705882, 0, 0), (0.133333333333333, 0, 0), (\n 0.137254901960784, 0.0135653515625, 0.0135653515625), (\n 0.141176470588235, 0.0271306640625, 0.0271306640625), (\n 0.145098039215686, 0.04069609375, 0.04069609375), (\n 0.149019607843137, 0.054261328125, 0.054261328125), (\n 0.152941176470588, 0.0678265625, 0.0678265625), (0.156862745098039,\n 0.0813921875, 0.0813921875), (0.16078431372549, 0.094957421875, \n 0.094957421875), (0.164705882352941, 0.10852265625, 0.10852265625),\n (0.168627450980392, 0.122087890625, 0.122087890625), (\n 0.172549019607843, 0.135653515625, 0.135653515625), (\n 0.176470588235294, 0.14921875, 0.14921875), (0.180392156862745, \n 0.162783984375, 0.162783984375), (0.184313725490196, 0.176349609375,\n 0.176349609375), (0.188235294117647, 0.18991484375, 0.18991484375),\n (0.192156862745098, 0.203480078125, 0.203480078125), (\n 0.196078431372549, 0.2170453125, 0.2170453125), (0.2, 0.2306109375,\n 0.2306109375), (0.203921568627451, 0.244176171875, 0.244176171875),\n (0.207843137254902, 0.25774140625, 0.25774140625), (\n 0.211764705882353, 0.27130703125, 0.27130703125), (\n 0.215686274509804, 0.284872265625, 0.284872265625), (\n 0.219607843137255, 0.2984375, 0.2984375), (0.223529411764706, \n 0.312002734375, 0.312002734375), (0.227450980392157, 0.325568359375,\n 0.325568359375), (0.231372549019608, 0.33913359375, 0.33913359375),\n (0.235294117647059, 0.352698828125, 0.352698828125), (\n 0.23921568627451, 0.3662640625, 0.3662640625), (0.243137254901961, \n 0.3798296875, 0.3798296875), (0.247058823529412, 0.39339453125, \n 0.39339453125), (0.250980392156863, 0.4069609375, 0.4069609375), (\n 0.254901960784314, 0.42052734375, 0.42052734375), (\n 0.258823529411765, 0.43408984375, 0.43408984375), (\n 0.262745098039216, 0.44765625, 0.44765625), (0.266666666666667, \n 0.46122265625, 0.46122265625), (0.270588235294118, 0.47478515625, \n 0.47478515625), (0.274509803921569, 0.4883515625, 0.4883515625), (\n 0.27843137254902, 0.50191796875, 0.50191796875), (0.282352941176471,\n 0.515484375, 0.515484375), (0.286274509803922, 0.529046875, \n 0.529046875), (0.290196078431373, 0.54261328125, 0.54261328125), (\n 0.294117647058824, 0.5561796875, 0.5561796875), (0.298039215686275,\n 0.56974609375, 0.56974609375), (0.301960784313725, 0.58330859375, \n 0.58330859375), (0.305882352941176, 0.596875, 0.596875), (\n 0.309803921568627, 0.61044140625, 0.61044140625), (\n 0.313725490196078, 0.62400390625, 0.62400390625), (\n 0.317647058823529, 0.6375703125, 0.6375703125), (0.32156862745098, \n 0.65113671875, 0.65113671875), (0.325490196078431, 0.664703125, \n 0.664703125), (0.329411764705882, 0.678265625, 0.678265625), (\n 0.333333333333333, 0.69183203125, 0.69183203125), (\n 0.337254901960784, 0.7053984375, 0.7053984375), (0.341176470588235,\n 
0.71896484375, 0.71896484375), (0.345098039215686, 0.73252734375, \n 0.73252734375), (0.349019607843137, 0.74609375, 0.74609375), (\n 0.352941176470588, 0.7309140625, 0.7309140625), (0.356862745098039,\n 0.71573828125, 0.71573828125), (0.36078431372549, 0.70055859375, \n 0.70055859375), (0.364705882352941, 0.68537890625, 0.68537890625),\n (0.368627450980392, 0.67019921875, 0.67019921875), (\n 0.372549019607843, 0.6550234375, 0.6550234375), (0.376470588235294,\n 0.63984375, 0.63984375), (0.380392156862745, 0.6246640625, \n 0.6246640625), (0.384313725490196, 0.60948828125, 0.60948828125), (\n 0.388235294117647, 0.59430859375, 0.59430859375), (\n 0.392156862745098, 0.57912890625, 0.57912890625), (\n 0.396078431372549, 0.56394921875, 0.56394921875), (0.4, \n 0.5487734375, 0.5487734375), (0.403921568627451, 0.53359375, \n 0.53359375), (0.407843137254902, 0.5184140625, 0.5184140625), (\n 0.411764705882353, 0.50323828125, 0.50323828125), (\n 0.415686274509804, 0.48805859375, 0.48805859375), (\n 0.419607843137255, 0.47287890625, 0.47287890625), (\n 0.423529411764706, 0.45769921875, 0.45769921875), (\n 0.427450980392157, 0.4425234375, 0.4425234375), (0.431372549019608,\n 0.42734375, 0.42734375), (0.435294117647059, 0.4121640625, \n 0.4121640625), (0.43921568627451, 0.39698828125, 0.39698828125), (\n 0.443137254901961, 0.381808203125, 0.381808203125), (\n 0.447058823529412, 0.366629296875, 0.366629296875), (\n 0.450980392156863, 0.35145078125, 0.35145078125), (\n 0.454901960784314, 0.336272265625, 0.336272265625), (\n 0.458823529411765, 0.32109375, 0.32109375), (0.462745098039216, \n 0.305915234375, 0.305915234375), (0.466666666666667, 0.29073671875,\n 0.29073671875), (0.470588235294118, 0.2755578125, 0.2755578125), (\n 0.474509803921569, 0.260379296875, 0.260379296875), (\n 0.47843137254902, 0.24520078125, 0.24520078125), (0.482352941176471,\n 0.230022265625, 0.230022265625), (0.486274509803922, 0.21484375, \n 0.21484375), (0.490196078431373, 0.2265625, 0.2265625), (\n 0.494117647058824, 0.23828125, 0.23828125), (0.498039215686275, \n 0.25, 0.25), (0.501960784313725, 0.26171875, 0.26171875), (\n 0.505882352941176, 0.2734375, 0.2734375), (0.509803921568627, \n 0.28515625, 0.28515625), (0.513725490196078, 0.296875, 0.296875), (\n 0.517647058823529, 0.30859375, 0.30859375), (0.52156862745098, \n 0.3203125, 0.3203125), (0.525490196078431, 0.33203125, 0.33203125),\n (0.529411764705882, 0.34375, 0.34375), (0.533333333333333, \n 0.35546875, 0.35546875), (0.537254901960784, 0.3671875, 0.3671875),\n (0.541176470588235, 0.37890625, 0.37890625), (0.545098039215686, \n 0.390625, 0.390625), (0.549019607843137, 0.40234375, 0.40234375), (\n 0.552941176470588, 0.4140625, 0.4140625), (0.556862745098039, \n 0.42578125, 0.42578125), (0.56078431372549, 0.4375, 0.4375), (\n 0.564705882352941, 0.44921875, 0.44921875), (0.568627450980392, \n 0.4609375, 0.4609375), (0.572549019607843, 0.47265625, 0.47265625),\n (0.576470588235294, 0.484375, 0.484375), (0.580392156862745, \n 0.49609375, 0.49609375), (0.584313725490196, 0.5078125, 0.5078125),\n (0.588235294117647, 0.51953125, 0.51953125), (0.592156862745098, \n 0.53125, 0.53125), (0.596078431372549, 0.54296875, 0.54296875), (\n 0.6, 0.5546875, 0.5546875), (0.603921568627451, 0.56640625, \n 0.56640625), (0.607843137254902, 0.578125, 0.578125), (\n 0.611764705882353, 0.58984375, 0.58984375), (0.615686274509804, \n 0.6015625, 0.6015625), (0.619607843137255, 0.61328125, 0.61328125),\n (0.623529411764706, 0.625, 0.625), (0.627450980392157, \n 0.61458203125, 0.61458203125), 
(0.631372549019608, 0.60416796875, \n 0.60416796875), (0.635294117647059, 0.59375, 0.59375), (\n 0.63921568627451, 0.58333203125, 0.58333203125), (0.643137254901961,\n 0.57291796875, 0.57291796875), (0.647058823529412, 0.5625, 0.5625),\n (0.650980392156863, 0.55208203125, 0.55208203125), (\n 0.654901960784314, 0.54166796875, 0.54166796875), (\n 0.658823529411765, 0.53125, 0.53125), (0.662745098039216, \n 0.52083203125, 0.52083203125), (0.666666666666667, 0.51041796875, \n 0.51041796875), (0.670588235294118, 0.5, 0.5), (0.674509803921569, \n 0.48958203125, 0.48958203125), (0.67843137254902, 0.47916796875, \n 0.47916796875), (0.682352941176471, 0.46875, 0.46875), (\n 0.686274509803922, 0.45833203125, 0.45833203125), (\n 0.690196078431373, 0.44791796875, 0.44791796875), (\n 0.694117647058824, 0.4375, 0.4375), (0.698039215686274, \n 0.42708203125, 0.42708203125), (0.701960784313725, 0.41666796875, \n 0.41666796875), (0.705882352941177, 0.40625, 0.40625), (\n 0.709803921568627, 0.39583203125, 0.39583203125), (\n 0.713725490196078, 0.385416796875, 0.385416796875), (\n 0.717647058823529, 0.375, 0.375), (0.72156862745098, 0.364583203125,\n 0.364583203125), (0.725490196078431, 0.354166796875, 0.354166796875\n ), (0.729411764705882, 0.34375, 0.34375), (0.733333333333333, \n 0.333333203125, 0.333333203125), (0.737254901960784, 0.322916796875,\n 0.322916796875), (0.741176470588235, 0.3125, 0.3125), (\n 0.745098039215686, 0.302083203125, 0.302083203125), (\n 0.749019607843137, 0.291666796875, 0.291666796875), (\n 0.752941176470588, 0.28125, 0.28125), (0.756862745098039, \n 0.270833203125, 0.270833203125), (0.76078431372549, 0.260416796875,\n 0.260416796875), (0.764705882352941, 0.25, 0.25), (\n 0.768627450980392, 0.239583203125, 0.239583203125), (\n 0.772549019607843, 0.229166796875, 0.229166796875), (\n 0.776470588235294, 0.21875, 0.21875), (0.780392156862745, \n 0.208333203125, 0.208333203125), (0.784313725490196, 0.197916796875,\n 0.197916796875), (0.788235294117647, 0.1875, 0.1875), (\n 0.792156862745098, 0.177083203125, 0.177083203125), (\n 0.796078431372549, 0.166666796875, 0.166666796875), (0.8, 0.15625, \n 0.15625), (0.803921568627451, 0.145833203125, 0.145833203125), (\n 0.807843137254902, 0.135416796875, 0.135416796875), (\n 0.811764705882353, 0.125, 0.125), (0.815686274509804, \n 0.114583203125, 0.114583203125), (0.819607843137255, 0.104166796875,\n 0.104166796875), (0.823529411764706, 0.09375, 0.09375), (\n 0.827450980392157, 0.083333203125, 0.083333203125), (\n 0.831372549019608, 0.072916796875, 0.072916796875), (\n 0.835294117647059, 0.0625, 0.0625), (0.83921568627451, \n 0.052083203125, 0.052083203125), (0.843137254901961, 0.041666796875,\n 0.041666796875), (0.847058823529412, 0.03125, 0.03125), (\n 0.850980392156863, 0.0208333203125, 0.0208333203125), (\n 0.854901960784314, 0.0104166796875, 0.0104166796875), (\n 0.858823529411765, 0, 0), (0.862745098039216, 0.0184151953125, \n 0.0184151953125), (0.866666666666667, 0.0368303515625, \n 0.0368303515625), (0.870588235294118, 0.055245703125, \n 0.055245703125), (0.874509803921569, 0.073660546875, 0.073660546875\n ), (0.87843137254902, 0.09207578125, 0.09207578125), (\n 0.882352941176471, 0.110491015625, 0.110491015625), (\n 0.886274509803922, 0.12890625, 0.12890625), (0.890196078431373, \n 0.147321484375, 0.147321484375), (0.894117647058824, 0.16573671875,\n 0.16573671875), (0.898039215686275, 0.184151953125, 0.184151953125),\n (0.901960784313726, 0.202566796875, 0.202566796875), (\n 0.905882352941176, 0.22098203125, 0.22098203125), (\n 
0.909803921568627, 0.239397265625, 0.239397265625), (\n 0.913725490196078, 0.2578125, 0.2578125), (0.917647058823529, \n 0.276227734375, 0.276227734375), (0.92156862745098, 0.29464296875, \n 0.29464296875), (0.925490196078431, 0.313058203125, 0.313058203125),\n (0.929411764705882, 0.331473046875, 0.331473046875), (\n 0.933333333333333, 0.34988828125, 0.34988828125), (\n 0.937254901960784, 0.368303515625, 0.368303515625), (\n 0.941176470588235, 0.38671875, 0.38671875), (0.945098039215686, \n 0.4051328125, 0.4051328125), (0.949019607843137, 0.42355078125, \n 0.42355078125), (0.952941176470588, 0.44196484375, 0.44196484375),\n (0.956862745098039, 0.46037890625, 0.46037890625), (\n 0.96078431372549, 0.47879296875, 0.47879296875), (0.964705882352941,\n 0.4972109375, 0.4972109375), (0.968627450980392, 0.515625, 0.515625\n ), (0.972549019607843, 0.5340390625, 0.5340390625), (\n 0.976470588235294, 0.55245703125, 0.55245703125), (\n 0.980392156862745, 0.57087109375, 0.57087109375), (\n 0.984313725490196, 0.58928515625, 0.58928515625), (\n 0.988235294117647, 0.60769921875, 0.60769921875), (\n 0.992156862745098, 0.6261171875, 0.6261171875), (0.996078431372549,\n 0.64453125, 0.64453125), (1, 0.64453125, 0.64453125)), 'blue': ((0,\n 1, 1), (0.00392156862745098, 0.80569140625, 0.80569140625), (\n 0.00784313725490196, 0.7964296875, 0.7964296875), (\n 0.0117647058823529, 0.7871640625, 0.7871640625), (\n 0.0156862745098039, 0.77790234375, 0.77790234375), (\n 0.0196078431372549, 0.76863671875, 0.76863671875), (\n 0.0235294117647059, 0.759375, 0.759375), (0.0274509803921569, \n 0.75011328125, 0.75011328125), (0.0313725490196078, 0.74084765625, \n 0.74084765625), (0.0352941176470588, 0.7315859375, 0.7315859375), (\n 0.0392156862745098, 0.7223203125, 0.7223203125), (\n 0.0431372549019608, 0.71305859375, 0.71305859375), (\n 0.0470588235294118, 0.70379296875, 0.70379296875), (\n 0.0509803921568627, 0.69453125, 0.69453125), (0.0549019607843137, \n 0.68526953125, 0.68526953125), (0.0588235294117647, 0.67600390625, \n 0.67600390625), (0.0627450980392157, 0.6667421875, 0.6667421875), (\n 0.0666666666666667, 0.6574765625, 0.6574765625), (\n 0.0705882352941176, 0.64821484375, 0.64821484375), (\n 0.0745098039215686, 0.63894921875, 0.63894921875), (\n 0.0784313725490196, 0.6296875, 0.6296875), (0.0823529411764706, \n 0.62042578125, 0.62042578125), (0.0862745098039216, 0.61116015625, \n 0.61116015625), (0.0901960784313725, 0.6018984375, 0.6018984375), (\n 0.0941176470588235, 0.5926328125, 0.5926328125), (\n 0.0980392156862745, 0.58337109375, 0.58337109375), (\n 0.101960784313725, 0.57410546875, 0.57410546875), (\n 0.105882352941176, 0.56484375, 0.56484375), (0.109803921568627, \n 0.55558203125, 0.55558203125), (0.113725490196078, 0.54631640625, \n 0.54631640625), (0.117647058823529, 0.5370546875, 0.5370546875), (\n 0.12156862745098, 0.5277890625, 0.5277890625), (0.125490196078431, \n 0.51852734375, 0.51852734375), (0.129411764705882, 0.50926171875, \n 0.50926171875), (0.133333333333333, 0.5, 0.5), (0.137254901960784, \n 0.50901953125, 0.50901953125), (0.141176470588235, 0.5180390625, \n 0.5180390625), (0.145098039215686, 0.52705859375, 0.52705859375), (\n 0.149019607843137, 0.536078125, 0.536078125), (0.152941176470588, \n 0.54509765625, 0.54509765625), (0.156862745098039, 0.55412109375, \n 0.55412109375), (0.16078431372549, 0.563140625, 0.563140625), (\n 0.164705882352941, 0.57216015625, 0.57216015625), (\n 0.168627450980392, 0.5811796875, 0.5811796875), (0.172549019607843,\n 0.59019921875, 0.59019921875), 
(0.176470588235294, 0.59921875, \n 0.59921875), (0.180392156862745, 0.60823828125, 0.60823828125), (\n 0.184313725490196, 0.6172578125, 0.6172578125), (0.188235294117647,\n 0.62627734375, 0.62627734375), (0.192156862745098, 0.635296875, \n 0.635296875), (0.196078431372549, 0.64431640625, 0.64431640625), (\n 0.2, 0.65333984375, 0.65333984375), (0.203921568627451, 0.662359375,\n 0.662359375), (0.207843137254902, 0.67137890625, 0.67137890625), (\n 0.211764705882353, 0.6803984375, 0.6803984375), (0.215686274509804,\n 0.68941796875, 0.68941796875), (0.219607843137255, 0.6984375, \n 0.6984375), (0.223529411764706, 0.70745703125, 0.70745703125), (\n 0.227450980392157, 0.7164765625, 0.7164765625), (0.231372549019608,\n 0.72549609375, 0.72549609375), (0.235294117647059, 0.734515625, \n 0.734515625), (0.23921568627451, 0.74353515625, 0.74353515625), (\n 0.243137254901961, 0.75255859375, 0.75255859375), (\n 0.247058823529412, 0.761578125, 0.761578125), (0.250980392156863, \n 0.77059765625, 0.77059765625), (0.254901960784314, 0.7796171875, \n 0.7796171875), (0.258823529411765, 0.78863671875, 0.78863671875), (\n 0.262745098039216, 0.79765625, 0.79765625), (0.266666666666667, \n 0.80667578125, 0.80667578125), (0.270588235294118, 0.8156953125, \n 0.8156953125), (0.274509803921569, 0.82471484375, 0.82471484375), (\n 0.27843137254902, 0.833734375, 0.833734375), (0.282352941176471, \n 0.84275390625, 0.84275390625), (0.286274509803922, 0.85177734375, \n 0.85177734375), (0.290196078431373, 0.860796875, 0.860796875), (\n 0.294117647058824, 0.86981640625, 0.86981640625), (\n 0.298039215686275, 0.8788359375, 0.8788359375), (0.301960784313725,\n 0.88785546875, 0.88785546875), (0.305882352941176, 0.896875, \n 0.896875), (0.309803921568627, 0.90589453125, 0.90589453125), (\n 0.313725490196078, 0.9149140625, 0.9149140625), (0.317647058823529,\n 0.92393359375, 0.92393359375), (0.32156862745098, 0.932953125, \n 0.932953125), (0.325490196078431, 0.94197265625, 0.94197265625), (\n 0.329411764705882, 0.95099609375, 0.95099609375), (\n 0.333333333333333, 0.960015625, 0.960015625), (0.337254901960784, \n 0.96903515625, 0.96903515625), (0.341176470588235, 0.9780546875, \n 0.9780546875), (0.345098039215686, 0.98707421875, 0.98707421875), (\n 0.349019607843137, 0.99609375, 0.99609375), (0.352941176470588, \n 0.9737734375, 0.9737734375), (0.356862745098039, 0.95144921875, \n 0.95144921875), (0.36078431372549, 0.92912890625, 0.92912890625), (\n 0.364705882352941, 0.90680859375, 0.90680859375), (\n 0.368627450980392, 0.88448828125, 0.88448828125), (\n 0.372549019607843, 0.8621640625, 0.8621640625), (0.376470588235294,\n 0.83984375, 0.83984375), (0.380392156862745, 0.8175234375, \n 0.8175234375), (0.384313725490196, 0.79519921875, 0.79519921875), (\n 0.388235294117647, 0.77287890625, 0.77287890625), (\n 0.392156862745098, 0.75055859375, 0.75055859375), (\n 0.396078431372549, 0.72823828125, 0.72823828125), (0.4, \n 0.7059140625, 0.7059140625), (0.403921568627451, 0.68359375, \n 0.68359375), (0.407843137254902, 0.6612734375, 0.6612734375), (\n 0.411764705882353, 0.63894921875, 0.63894921875), (\n 0.415686274509804, 0.61662890625, 0.61662890625), (\n 0.419607843137255, 0.59430859375, 0.59430859375), (\n 0.423529411764706, 0.57198828125, 0.57198828125), (\n 0.427450980392157, 0.5496640625, 0.5496640625), (0.431372549019608,\n 0.52734375, 0.52734375), (0.435294117647059, 0.5050234375, \n 0.5050234375), (0.43921568627451, 0.48269921875, 0.48269921875), (\n 0.443137254901961, 0.46037890625, 0.46037890625), (\n 0.447058823529412, 
0.43805859375, 0.43805859375), (\n 0.450980392156863, 0.41573828125, 0.41573828125), (\n 0.454901960784314, 0.3934140625, 0.3934140625), (0.458823529411765,\n 0.37109375, 0.37109375), (0.462745098039216, 0.348772265625, \n 0.348772265625), (0.466666666666667, 0.32645078125, 0.32645078125),\n (0.470588235294118, 0.304129296875, 0.304129296875), (\n 0.474509803921569, 0.281808203125, 0.281808203125), (\n 0.47843137254902, 0.25948671875, 0.25948671875), (0.482352941176471,\n 0.237165234375, 0.237165234375), (0.486274509803922, 0.21484375, \n 0.21484375), (0.490196078431373, 0.233370703125, 0.233370703125), (\n 0.494117647058824, 0.251897265625, 0.251897265625), (\n 0.498039215686275, 0.27042421875, 0.27042421875), (\n 0.501960784313725, 0.28895078125, 0.28895078125), (\n 0.505882352941176, 0.307477734375, 0.307477734375), (\n 0.509803921568627, 0.326004296875, 0.326004296875), (\n 0.513725490196078, 0.34453125, 0.34453125), (0.517647058823529, \n 0.363058203125, 0.363058203125), (0.52156862745098, 0.381584765625,\n 0.381584765625), (0.525490196078431, 0.40011328125, 0.40011328125),\n (0.529411764705882, 0.41863671875, 0.41863671875), (\n 0.533333333333333, 0.4371640625, 0.4371640625), (0.537254901960784,\n 0.45569140625, 0.45569140625), (0.541176470588235, 0.47421875, \n 0.47421875), (0.545098039215686, 0.49274609375, 0.49274609375), (\n 0.549019607843137, 0.5112734375, 0.5112734375), (0.552941176470588,\n 0.52980078125, 0.52980078125), (0.556862745098039, 0.54832421875, \n 0.54832421875), (0.56078431372549, 0.5668515625, 0.5668515625), (\n 0.564705882352941, 0.58537890625, 0.58537890625), (\n 0.568627450980392, 0.60390625, 0.60390625), (0.572549019607843, \n 0.62243359375, 0.62243359375), (0.576470588235294, 0.6409609375, \n 0.6409609375), (0.580392156862745, 0.65948828125, 0.65948828125), (\n 0.584313725490196, 0.67801171875, 0.67801171875), (\n 0.588235294117647, 0.6965390625, 0.6965390625), (0.592156862745098,\n 0.71506640625, 0.71506640625), (0.596078431372549, 0.73359375, \n 0.73359375), (0.6, 0.75212109375, 0.75212109375), (\n 0.603921568627451, 0.7706484375, 0.7706484375), (0.607843137254902,\n 0.78917578125, 0.78917578125), (0.611764705882353, 0.80769921875, \n 0.80769921875), (0.615686274509804, 0.8262265625, 0.8262265625), (\n 0.619607843137255, 0.84475390625, 0.84475390625), (\n 0.623529411764706, 0.86328125, 0.86328125), (0.627450980392157, \n 0.84889453125, 0.84889453125), (0.631372549019608, 0.83450390625, \n 0.83450390625), (0.635294117647059, 0.8201171875, 0.8201171875), (\n 0.63921568627451, 0.80573046875, 0.80573046875), (0.643137254901961,\n 0.79133984375, 0.79133984375), (0.647058823529412, 0.776953125, \n 0.776953125), (0.650980392156863, 0.76256640625, 0.76256640625), (\n 0.654901960784314, 0.74817578125, 0.74817578125), (\n 0.658823529411765, 0.7337890625, 0.7337890625), (0.662745098039216,\n 0.71940234375, 0.71940234375), (0.666666666666667, 0.70501171875, \n 0.70501171875), (0.670588235294118, 0.690625, 0.690625), (\n 0.674509803921569, 0.67623828125, 0.67623828125), (0.67843137254902,\n 0.66184765625, 0.66184765625), (0.682352941176471, 0.6474609375, \n 0.6474609375), (0.686274509803922, 0.63307421875, 0.63307421875), (\n 0.690196078431373, 0.61868359375, 0.61868359375), (\n 0.694117647058824, 0.604296875, 0.604296875), (0.698039215686274, \n 0.58991015625, 0.58991015625), (0.701960784313725, 0.57551953125, \n 0.57551953125), (0.705882352941177, 0.5611328125, 0.5611328125), (\n 0.709803921568627, 0.54674609375, 0.54674609375), (\n 0.713725490196078, 0.53235546875, 
0.53235546875), (\n 0.717647058823529, 0.51796875, 0.51796875), (0.72156862745098, \n 0.50358203125, 0.50358203125), (0.725490196078431, 0.48919140625, \n 0.48919140625), (0.729411764705882, 0.4748046875, 0.4748046875), (\n 0.733333333333333, 0.46041796875, 0.46041796875), (\n 0.737254901960784, 0.44602734375, 0.44602734375), (\n 0.741176470588235, 0.431640625, 0.431640625), (0.745098039215686, \n 0.41725390625, 0.41725390625), (0.749019607843137, 0.40286328125, \n 0.40286328125), (0.752941176470588, 0.3884765625, 0.3884765625), (\n 0.756862745098039, 0.374088671875, 0.374088671875), (\n 0.76078431372549, 0.359700390625, 0.359700390625), (\n 0.764705882352941, 0.3453125, 0.3453125), (0.768627450980392, \n 0.330924609375, 0.330924609375), (0.772549019607843, 0.316536328125,\n 0.316536328125), (0.776470588235294, 0.3021484375, 0.3021484375), (\n 0.780392156862745, 0.287760546875, 0.287760546875), (\n 0.784313725490196, 0.273372265625, 0.273372265625), (\n 0.788235294117647, 0.258984375, 0.258984375), (0.792156862745098, \n 0.244596484375, 0.244596484375), (0.796078431372549, 0.230208203125,\n 0.230208203125), (0.8, 0.2158203125, 0.2158203125), (\n 0.803921568627451, 0.201432421875, 0.201432421875), (\n 0.807843137254902, 0.187044140625, 0.187044140625), (\n 0.811764705882353, 0.17265625, 0.17265625), (0.815686274509804, \n 0.158268359375, 0.158268359375), (0.819607843137255, 0.143880078125,\n 0.143880078125), (0.823529411764706, 0.1294921875, 0.1294921875), (\n 0.827450980392157, 0.115104296875, 0.115104296875), (\n 0.831372549019608, 0.100716015625, 0.100716015625), (\n 0.835294117647059, 0.086328125, 0.086328125), (0.83921568627451, \n 0.071940234375, 0.071940234375), (0.843137254901961, 0.057551953125,\n 0.057551953125), (0.847058823529412, 0.0431640625, 0.0431640625), (\n 0.850980392156863, 0.028776015625, 0.028776015625), (\n 0.854901960784314, 0.01438796875, 0.01438796875), (\n 0.858823529411765, 0, 0), (0.862745098039216, 0, 0), (\n 0.866666666666667, 0, 0), (0.870588235294118, 0, 0), (\n 0.874509803921569, 0, 0), (0.87843137254902, 0, 0), (\n 0.882352941176471, 0, 0), (0.886274509803922, 0, 0), (\n 0.890196078431373, 0, 0), (0.894117647058824, 0, 0), (\n 0.898039215686275, 0, 0), (0.901960784313726, 0, 0), (\n 0.905882352941176, 0, 0), (0.909803921568627, 0, 0), (\n 0.913725490196078, 0, 0), (0.917647058823529, 0, 0), (\n 0.92156862745098, 0, 0), (0.925490196078431, 0, 0), (\n 0.929411764705882, 0, 0), (0.933333333333333, 0, 0), (\n 0.937254901960784, 0, 0), (0.941176470588235, 0, 0), (\n 0.945098039215686, 0, 0), (0.949019607843137, 0, 0), (\n 0.952941176470588, 0, 0), (0.956862745098039, 0, 0), (\n 0.96078431372549, 0, 0), (0.964705882352941, 0, 0), (\n 0.968627450980392, 0, 0), (0.972549019607843, 0, 0), (\n 0.976470588235294, 0, 0), (0.980392156862745, 0, 0), (\n 0.984313725490196, 0, 0), (0.988235294117647, 0, 0), (\n 0.992156862745098, 0, 0), (0.996078431372549, 0, 0), (1, 0, 0))}\n califa = mcol.LinearSegmentedColormap('califa', cdict)\n vcalifa = mcol.LinearSegmentedColormap('vcalifa', vcdict)\n if option == 'v':\n return vcalifa\n else:\n return califa\n\n\ndef A_l(R_v, lw):\n lw = lw / 10000\n x = 1 / lw\n if x > 1.1:\n y = x - 1.82\n a_x = (1.0 + 0.17699 * y - 0.50447 * y ** 2 - 0.02427 * y ** 3 + \n 0.72085 * y ** 4 + 0.01979 * y ** 5 - 0.7753 * y ** 6 + 0.32999 *\n y ** 7)\n b_x = (1.41338 * y + 2.28305 * y ** 2 + 1.07233 * y ** 3 - 5.38434 *\n y ** 4 - 0.62251 * y ** 5 + 5.3026 * y ** 6 - 2.09002 * y ** 7)\n else:\n a_x = 0.574 * x ** 1.61\n b_x = -0.527 * x ** 1.61\n 
A_l_ = a_x + b_x / R_v\n return A_l_\n",
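        # The serialized block above ends with an `A_l(R_v, lw)` helper that appears to follow a
        # Cardelli, Clayton & Mathis (1989)-style extinction curve, returning A(lambda)/A(V) for a
        # wavelength `lw` given in Angstroms and a total-to-selective extinction ratio `R_v`.
        # Minimal usage sketch (hypothetical values; assumes the function has been deserialized
        # and is importable — it is not defined in this file itself):
        #   A_Ha = A_l(3.1, 6563.0)     # relative extinction at H-alpha for the Galactic average R_v = 3.1
        #   A_Hb = A_l(3.1, 4861.0)     # relative extinction at H-beta
        #   balmer_ratio = A_Ha / A_Hb  # handy when dust-correcting a Balmer-decrement estimate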
"<import token>\n\n\ndef color_map_califa_old(option='json'):\n if option == 'json':\n cmap_cal_dic = json.load(open('code/cmap_cal_json.txt'))\n elif option == 'pickle':\n with open('cmap_cal_pickle.txt', 'rb') as handle:\n cmap_cal_dic = pickle.loads(handle.read())\n cmap_cal = mcol.LinearSegmentedColormap('cmap_CALIFA', cmap_cal_dic)\n return cmap_cal\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\ndef Gr_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_grazy = np.linspace(x_min, -0.2, 100)\n ax.plot(x_set_grazy, grazy(x_set_grazy), label='Stasinska+03', **kwargs)\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.01, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_OI_curve_plot(ax=None, x_min=-3.5, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.35)\n ax.plot(x_set_line, O3O1_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_curve_plot(ax=None, x_min=-2.0, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.08, 100)\n ax.plot(x_set_line, espinosa(x_set_line), label=label, **kwargs)\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\ndef kewley(logNIIHa):\n val = 0.61 / (logNIIHa - 0.47) + 1.19\n return val\n\n\ndef grazy(logNIIHa):\n x = logNIIHa\n val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409 * x\n ) - 31.093\n return val\n\n\ndef AGNline(logSIIHa):\n val = 0.72 / (logSIIHa - 0.32) + 1.3\n return val\n\n\ndef LINSy2line(logSIIHa):\n val = 1.89 * logSIIHa + 0.76\n return val\n\n\ndef AGNline2(logOIHa):\n val = 0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\ndef LINSy2line2(logOIHa):\n val = 1.18 * logOIHa + 1.3\n return val\n\n\ndef espinosa(logNIIHa):\n val = 0.12579066 / (logNIIHa - 0.00302777) + 0.56846872\n return val\n\n\ndef O3S2_line_c(x):\n val = 
0.04074804 / (x + 0.01253238) + 0.58154113\n return val\n\n\ndef O3O1_line_c(x):\n val = 0.05612915 / (x + 0.39641533) + 0.60969495\n return val\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.02, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\ndef kewley(logNIIHa):\n val = 0.61 / (logNIIHa - 0.47) + 1.19\n return val\n\n\ndef grazy(logNIIHa):\n x = logNIIHa\n val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409 * x\n ) - 31.093\n return val\n\n\ndef A_l(R_v, lw):\n lw = lw / 10000\n x = 1 / lw\n if x > 1.1:\n y = x - 1.82\n a_x = (1.0 + 0.17699 * y - 0.50447 * y ** 2 - 0.02427 * y ** 3 + \n 0.72085 * y ** 4 + 0.01979 * y ** 5 - 0.7753 * y ** 6 + 0.32999 *\n y ** 7)\n b_x = (1.41338 * y + 2.28305 * y ** 2 + 1.07233 * y ** 3 - 5.38434 *\n y ** 4 - 0.62251 * y ** 5 + 5.3026 * y ** 6 - 2.09002 * y ** 7)\n else:\n a_x = 0.574 * x ** 1.61\n b_x = -0.527 * x ** 1.61\n A_l_ = a_x + b_x / R_v\n return A_l_\n\n\ndef color_map_califa(option='v'):\n cdict = {'red': ((0.0, 0, 0), (0.00392156862745098, 0, 0), (\n 0.00784313725490196, 0, 0), (0.0117647058823529, 0, 0), (\n 0.0156862745098039, 0, 0), (0.0196078431372549, 0, 0), (\n 0.0235294117647059, 0, 0), (0.0274509803921569, 0, 0), (\n 0.0313725490196078, 0, 0), (0.0352941176470588, 0, 0), (\n 0.0392156862745098, 0, 0), (0.0431372549019608, 0, 0), (\n 0.0470588235294118, 0, 0), (0.0509803921568627, 0, 0), (\n 0.0549019607843137, 0, 0), (0.0588235294117647, 0, 0), (\n 0.0627450980392157, 0, 0), (0.0666666666666667, 0, 0), (\n 0.0705882352941176, 0, 0), (0.0745098039215686, 0, 0), (\n 0.0784313725490196, 0, 0), (0.0823529411764706, 0, 0), (\n 0.0862745098039216, 0, 0), (0.0901960784313725, 0, 0), (\n 0.0941176470588235, 0, 0), (0.0980392156862745, 0, 0), (\n 0.101960784313725, 0, 0), (0.105882352941176, 0, 0), (\n 0.109803921568627, 0, 0), (0.113725490196078, 0, 0), (\n 0.117647058823529, 0, 0), 
(0.12156862745098, 0, 0), (\n 0.125490196078431, 0, 0), (0.129411764705882, 0, 0), (\n 0.133333333333333, 0, 0), (0.137254901960784, 0, 0), (\n 0.141176470588235, 0, 0), (0.145098039215686, 0, 0), (\n 0.149019607843137, 0, 0), (0.152941176470588, 0, 0), (\n 0.156862745098039, 0, 0), (0.16078431372549, 0, 0), (\n 0.164705882352941, 0, 0), (0.168627450980392, 0, 0), (\n 0.172549019607843, 0, 0), (0.176470588235294, 0, 0), (\n 0.180392156862745, 0, 0), (0.184313725490196, 0, 0), (\n 0.188235294117647, 0, 0), (0.192156862745098, 0, 0), (\n 0.196078431372549, 0.019921875, 0.019921875), (0.2, 0.03984375, \n 0.03984375), (0.203921568627451, 0.059765625, 0.059765625), (\n 0.207843137254902, 0.0796875, 0.0796875), (0.211764705882353, \n 0.099609375, 0.099609375), (0.215686274509804, 0.11953125, \n 0.11953125), (0.219607843137255, 0.139453125, 0.139453125), (\n 0.223529411764706, 0.159375, 0.159375), (0.227450980392157, \n 0.179296875, 0.179296875), (0.231372549019608, 0.19921875, \n 0.19921875), (0.235294117647059, 0.219140625, 0.219140625), (\n 0.23921568627451, 0.2390625, 0.2390625), (0.243137254901961, \n 0.258984375, 0.258984375), (0.247058823529412, 0.27890625, \n 0.27890625), (0.250980392156863, 0.298828125, 0.298828125), (\n 0.254901960784314, 0.31875, 0.31875), (0.258823529411765, \n 0.338671875, 0.338671875), (0.262745098039216, 0.35859375, \n 0.35859375), (0.266666666666667, 0.378515625, 0.378515625), (\n 0.270588235294118, 0.3984375, 0.3984375), (0.274509803921569, \n 0.418359375, 0.418359375), (0.27843137254902, 0.43828125, \n 0.43828125), (0.282352941176471, 0.458203125, 0.458203125), (\n 0.286274509803922, 0.478125, 0.478125), (0.290196078431373, \n 0.498046875, 0.498046875), (0.294117647058824, 0.51796875, \n 0.51796875), (0.298039215686275, 0.537890625, 0.537890625), (\n 0.301960784313725, 0.5578125, 0.5578125), (0.305882352941176, \n 0.577734375, 0.577734375), (0.309803921568627, 0.59765625, \n 0.59765625), (0.313725490196078, 0.617578125, 0.617578125), (\n 0.317647058823529, 0.6375, 0.6375), (0.32156862745098, 0.657421875,\n 0.657421875), (0.325490196078431, 0.67734375, 0.67734375), (\n 0.329411764705882, 0.697265625, 0.697265625), (0.333333333333333, \n 0.7171875, 0.7171875), (0.337254901960784, 0.737109375, 0.737109375\n ), (0.341176470588235, 0.75703125, 0.75703125), (0.345098039215686,\n 0.776953125, 0.776953125), (0.349019607843137, 0.796875, 0.796875),\n (0.352941176470588, 0.816796875, 0.816796875), (0.356862745098039, \n 0.83671875, 0.83671875), (0.36078431372549, 0.856640625, \n 0.856640625), (0.364705882352941, 0.8765625, 0.8765625), (\n 0.368627450980392, 0.896484375, 0.896484375), (0.372549019607843, \n 0.91640625, 0.91640625), (0.376470588235294, 0.936328125, \n 0.936328125), (0.380392156862745, 0.95625, 0.95625), (\n 0.384313725490196, 0.976171875, 0.976171875), (0.388235294117647, \n 0.99609375, 0.99609375), (0.392156862745098, 0.99609375, 0.99609375\n ), (0.396078431372549, 0.99609375, 0.99609375), (0.4, 0.99609375, \n 0.99609375), (0.403921568627451, 0.99609375, 0.99609375), (\n 0.407843137254902, 0.99609375, 0.99609375), (0.411764705882353, \n 0.99609375, 0.99609375), (0.415686274509804, 0.99609375, 0.99609375\n ), (0.419607843137255, 0.99609375, 0.99609375), (0.423529411764706,\n 0.99609375, 0.99609375), (0.427450980392157, 0.99609375, 0.99609375\n ), (0.431372549019608, 0.99609375, 0.99609375), (0.435294117647059,\n 0.99609375, 0.99609375), (0.43921568627451, 0.99609375, 0.99609375),\n (0.443137254901961, 0.99609375, 0.99609375), (0.447058823529412, \n 
0.99609375, 0.99609375), (0.450980392156863, 0.99609375, 0.99609375\n ), (0.454901960784314, 0.99609375, 0.99609375), (0.458823529411765,\n 0.99609375, 0.99609375), (0.462745098039216, 0.99609375, 0.99609375\n ), (0.466666666666667, 0.99609375, 0.99609375), (0.470588235294118,\n 0.99609375, 0.99609375), (0.474509803921569, 0.99609375, 0.99609375\n ), (0.47843137254902, 0.99609375, 0.99609375), (0.482352941176471, \n 0.99609375, 0.99609375), (0.486274509803922, 0.99609375, 0.99609375\n ), (0.490196078431373, 0.99609375, 0.99609375), (0.494117647058824,\n 0.99609375, 0.99609375), (0.498039215686275, 0.99609375, 0.99609375\n ), (0.501960784313725, 0.99609375, 0.99609375), (0.505882352941176,\n 0.99609375, 0.99609375), (0.509803921568627, 0.99609375, 0.99609375\n ), (0.513725490196078, 0.99609375, 0.99609375), (0.517647058823529,\n 0.99609375, 0.99609375), (0.52156862745098, 0.99609375, 0.99609375),\n (0.525490196078431, 0.99609375, 0.99609375), (0.529411764705882, \n 0.99609375, 0.99609375), (0.533333333333333, 0.99609375, 0.99609375\n ), (0.537254901960784, 0.99609375, 0.99609375), (0.541176470588235,\n 0.99609375, 0.99609375), (0.545098039215686, 0.99609375, 0.99609375\n ), (0.549019607843137, 0.99609375, 0.99609375), (0.552941176470588,\n 0.99609375, 0.99609375), (0.556862745098039, 0.99609375, 0.99609375\n ), (0.56078431372549, 0.99609375, 0.99609375), (0.564705882352941, \n 0.99609375, 0.99609375), (0.568627450980392, 0.99609375, 0.99609375\n ), (0.572549019607843, 0.99609375, 0.99609375), (0.576470588235294,\n 0.99609375, 0.99609375), (0.580392156862745, 0.99609375, 0.99609375\n ), (0.584313725490196, 0.99609375, 0.99609375), (0.588235294117647,\n 0.98046875, 0.98046875), (0.592156862745098, 0.96484375, 0.96484375\n ), (0.596078431372549, 0.94921875, 0.94921875), (0.6, 0.93359375, \n 0.93359375), (0.603921568627451, 0.91796875, 0.91796875), (\n 0.607843137254902, 0.90234375, 0.90234375), (0.611764705882353, \n 0.88671875, 0.88671875), (0.615686274509804, 0.87109375, 0.87109375\n ), (0.619607843137255, 0.85546875, 0.85546875), (0.623529411764706,\n 0.83984375, 0.83984375), (0.627450980392157, 0.82421875, 0.82421875\n ), (0.631372549019608, 0.80859375, 0.80859375), (0.635294117647059,\n 0.79296875, 0.79296875), (0.63921568627451, 0.77734375, 0.77734375),\n (0.643137254901961, 0.76171875, 0.76171875), (0.647058823529412, \n 0.74609375, 0.74609375), (0.650980392156863, 0.73046875, 0.73046875\n ), (0.654901960784314, 0.71484375, 0.71484375), (0.658823529411765,\n 0.69921875, 0.69921875), (0.662745098039216, 0.68359375, 0.68359375\n ), (0.666666666666667, 0.66796875, 0.66796875), (0.670588235294118,\n 0.65234375, 0.65234375), (0.674509803921569, 0.63671875, 0.63671875\n ), (0.67843137254902, 0.62109375, 0.62109375), (0.682352941176471, \n 0.60546875, 0.60546875), (0.686274509803922, 0.58984375, 0.58984375\n ), (0.690196078431373, 0.57421875, 0.57421875), (0.694117647058824,\n 0.55859375, 0.55859375), (0.698039215686274, 0.54296875, 0.54296875\n ), (0.701960784313725, 0.52734375, 0.52734375), (0.705882352941177,\n 0.51171875, 0.51171875), (0.709803921568627, 0.49609375, 0.49609375\n ), (0.713725490196078, 0.48046875, 0.48046875), (0.717647058823529,\n 0.46484375, 0.46484375), (0.72156862745098, 0.44921875, 0.44921875),\n (0.725490196078431, 0.43359375, 0.43359375), (0.729411764705882, \n 0.41796875, 0.41796875), (0.733333333333333, 0.40234375, 0.40234375\n ), (0.737254901960784, 0.38671875, 0.38671875), (0.741176470588235,\n 0.37109375, 0.37109375), (0.745098039215686, 0.35546875, 0.35546875\n 
), (0.749019607843137, 0.33984375, 0.33984375), (0.752941176470588,\n 0.32421875, 0.32421875), (0.756862745098039, 0.30859375, 0.30859375\n ), (0.76078431372549, 0.29296875, 0.29296875), (0.764705882352941, \n 0.27734375, 0.27734375), (0.768627450980392, 0.26171875, 0.26171875\n ), (0.772549019607843, 0.24609375, 0.24609375), (0.776470588235294,\n 0.23046875, 0.23046875), (0.780392156862745, 0.21484375, 0.21484375\n ), (0.784313725490196, 0.22663359375, 0.22663359375), (\n 0.788235294117647, 0.2384234375, 0.2384234375), (0.792156862745098,\n 0.250212890625, 0.250212890625), (0.796078431372549, 0.262002734375,\n 0.262002734375), (0.8, 0.273792578125, 0.273792578125), (\n 0.803921568627451, 0.285582421875, 0.285582421875), (\n 0.807843137254902, 0.297372265625, 0.297372265625), (\n 0.811764705882353, 0.309162109375, 0.309162109375), (\n 0.815686274509804, 0.3209515625, 0.3209515625), (0.819607843137255,\n 0.33274140625, 0.33274140625), (0.823529411764706, 0.34453125, \n 0.34453125), (0.827450980392157, 0.35632109375, 0.35632109375), (\n 0.831372549019608, 0.3681109375, 0.3681109375), (0.835294117647059,\n 0.379900390625, 0.379900390625), (0.83921568627451, 0.39169140625, \n 0.39169140625), (0.843137254901961, 0.40348046875, 0.40348046875),\n (0.847058823529412, 0.41526953125, 0.41526953125), (\n 0.850980392156863, 0.42705859375, 0.42705859375), (\n 0.854901960784314, 0.43884765625, 0.43884765625), (\n 0.858823529411765, 0.450640625, 0.450640625), (0.862745098039216, \n 0.4624296875, 0.4624296875), (0.866666666666667, 0.47421875, \n 0.47421875), (0.870588235294118, 0.4860078125, 0.4860078125), (\n 0.874509803921569, 0.497796875, 0.497796875), (0.87843137254902, \n 0.50958984375, 0.50958984375), (0.882352941176471, 0.52137890625, \n 0.52137890625), (0.886274509803922, 0.53316796875, 0.53316796875),\n (0.890196078431373, 0.54495703125, 0.54495703125), (\n 0.894117647058824, 0.55674609375, 0.55674609375), (\n 0.898039215686275, 0.56853515625, 0.56853515625), (\n 0.901960784313726, 0.580328125, 0.580328125), (0.905882352941176, \n 0.5921171875, 0.5921171875), (0.909803921568627, 0.60390625, \n 0.60390625), (0.913725490196078, 0.6156953125, 0.6156953125), (\n 0.917647058823529, 0.627484375, 0.627484375), (0.92156862745098, \n 0.63927734375, 0.63927734375), (0.925490196078431, 0.65106640625, \n 0.65106640625), (0.929411764705882, 0.66285546875, 0.66285546875),\n (0.933333333333333, 0.67464453125, 0.67464453125), (\n 0.937254901960784, 0.68643359375, 0.68643359375), (\n 0.941176470588235, 0.69822265625, 0.69822265625), (\n 0.945098039215686, 0.710015625, 0.710015625), (0.949019607843137, \n 0.7218046875, 0.7218046875), (0.952941176470588, 0.73359375, \n 0.73359375), (0.956862745098039, 0.7453828125, 0.7453828125), (\n 0.96078431372549, 0.757171875, 0.757171875), (0.964705882352941, \n 0.76896484375, 0.76896484375), (0.968627450980392, 0.78075390625, \n 0.78075390625), (0.972549019607843, 0.79254296875, 0.79254296875),\n (0.976470588235294, 0.80433203125, 0.80433203125), (\n 0.980392156862745, 0.81612109375, 0.81612109375), (\n 0.984313725490196, 0.82791015625, 0.82791015625), (\n 0.988235294117647, 0.839703125, 0.839703125), (0.992156862745098, \n 0.8514921875, 0.8514921875), (0.996078431372549, 0.86328125, \n 0.86328125), (1.0, 0.86328125, 0.86328125)), 'green': ((0.0, \n 0.02984375, 0.02984375), (0.00392156862745098, 0.02984375, \n 0.02984375), (0.00784313725490196, 0.044765625, 0.044765625), (\n 0.0117647058823529, 0.0596875, 0.0596875), (0.0156862745098039, \n 0.074609375, 0.074609375), 
(0.0196078431372549, 0.08953125, \n 0.08953125), (0.0235294117647059, 0.104453125, 0.104453125), (\n 0.0274509803921569, 0.119375, 0.119375), (0.0313725490196078, \n 0.134296875, 0.134296875), (0.0352941176470588, 0.14921875, \n 0.14921875), (0.0392156862745098, 0.164140625, 0.164140625), (\n 0.0431372549019608, 0.1790625, 0.1790625), (0.0470588235294118, \n 0.193984375, 0.193984375), (0.0509803921568627, 0.20890625, \n 0.20890625), (0.0549019607843137, 0.223828125, 0.223828125), (\n 0.0588235294117647, 0.23875, 0.23875), (0.0627450980392157, \n 0.253671875, 0.253671875), (0.0666666666666667, 0.26859375, \n 0.26859375), (0.0705882352941176, 0.283515625, 0.283515625), (\n 0.0745098039215686, 0.2984375, 0.2984375), (0.0784313725490196, \n 0.313359375, 0.313359375), (0.0823529411764706, 0.32828125, \n 0.32828125), (0.0862745098039216, 0.343203125, 0.343203125), (\n 0.0901960784313725, 0.358125, 0.358125), (0.0941176470588235, \n 0.373046875, 0.373046875), (0.0980392156862745, 0.38796875, \n 0.38796875), (0.101960784313725, 0.402890625, 0.402890625), (\n 0.105882352941176, 0.4178125, 0.4178125), (0.109803921568627, \n 0.432734375, 0.432734375), (0.113725490196078, 0.44765625, \n 0.44765625), (0.117647058823529, 0.462578125, 0.462578125), (\n 0.12156862745098, 0.4775, 0.4775), (0.125490196078431, 0.492421875,\n 0.492421875), (0.129411764705882, 0.50734375, 0.50734375), (\n 0.133333333333333, 0.522265625, 0.522265625), (0.137254901960784, \n 0.5371875, 0.5371875), (0.141176470588235, 0.552109375, 0.552109375\n ), (0.145098039215686, 0.56703125, 0.56703125), (0.149019607843137,\n 0.581953125, 0.581953125), (0.152941176470588, 0.596875, 0.596875),\n (0.156862745098039, 0.611796875, 0.611796875), (0.16078431372549, \n 0.62671875, 0.62671875), (0.164705882352941, 0.641640625, \n 0.641640625), (0.168627450980392, 0.6565625, 0.6565625), (\n 0.172549019607843, 0.671484375, 0.671484375), (0.176470588235294, \n 0.68640625, 0.68640625), (0.180392156862745, 0.701328125, \n 0.701328125), (0.184313725490196, 0.71625, 0.71625), (\n 0.188235294117647, 0.731171875, 0.731171875), (0.192156862745098, \n 0.74609375, 0.74609375), (0.196078431372549, 0.731171875, \n 0.731171875), (0.2, 0.71625, 0.71625), (0.203921568627451, \n 0.701328125, 0.701328125), (0.207843137254902, 0.68640625, \n 0.68640625), (0.211764705882353, 0.671484375, 0.671484375), (\n 0.215686274509804, 0.6565625, 0.6565625), (0.219607843137255, \n 0.641640625, 0.641640625), (0.223529411764706, 0.62671875, \n 0.62671875), (0.227450980392157, 0.611796875, 0.611796875), (\n 0.231372549019608, 0.596875, 0.596875), (0.235294117647059, \n 0.581953125, 0.581953125), (0.23921568627451, 0.56703125, \n 0.56703125), (0.243137254901961, 0.552109375, 0.552109375), (\n 0.247058823529412, 0.5371875, 0.5371875), (0.250980392156863, \n 0.522265625, 0.522265625), (0.254901960784314, 0.50734375, \n 0.50734375), (0.258823529411765, 0.492421875, 0.492421875), (\n 0.262745098039216, 0.4775, 0.4775), (0.266666666666667, 0.462578125,\n 0.462578125), (0.270588235294118, 0.44765625, 0.44765625), (\n 0.274509803921569, 0.432734375, 0.432734375), (0.27843137254902, \n 0.4178125, 0.4178125), (0.282352941176471, 0.402890625, 0.402890625\n ), (0.286274509803922, 0.38796875, 0.38796875), (0.290196078431373,\n 0.373046875, 0.373046875), (0.294117647058824, 0.358125, 0.358125),\n (0.298039215686275, 0.343203125, 0.343203125), (0.301960784313725, \n 0.32828125, 0.32828125), (0.305882352941176, 0.313359375, \n 0.313359375), (0.309803921568627, 0.2984375, 0.2984375), (\n 
0.313725490196078, 0.283515625, 0.283515625), (0.317647058823529, \n 0.26859375, 0.26859375), (0.32156862745098, 0.253671875, \n 0.253671875), (0.325490196078431, 0.23875, 0.23875), (\n 0.329411764705882, 0.223828125, 0.223828125), (0.333333333333333, \n 0.20890625, 0.20890625), (0.337254901960784, 0.193984375, \n 0.193984375), (0.341176470588235, 0.1790625, 0.1790625), (\n 0.345098039215686, 0.164140625, 0.164140625), (0.349019607843137, \n 0.14921875, 0.14921875), (0.352941176470588, 0.134296875, \n 0.134296875), (0.356862745098039, 0.119375, 0.119375), (\n 0.36078431372549, 0.104453125, 0.104453125), (0.364705882352941, \n 0.08953125, 0.08953125), (0.368627450980392, 0.074609375, \n 0.074609375), (0.372549019607843, 0.0596875, 0.0596875), (\n 0.376470588235294, 0.044765625, 0.044765625), (0.380392156862745, \n 0.0298437890625, 0.0298437890625), (0.384313725490196, 0.014921875,\n 0.014921875), (0.388235294117647, 0, 0), (0.392156862745098, \n 0.012890625, 0.012890625), (0.396078431372549, 0.02578125, \n 0.02578125), (0.4, 0.038671875, 0.038671875), (0.403921568627451, \n 0.0515625, 0.0515625), (0.407843137254902, 0.064453125, 0.064453125\n ), (0.411764705882353, 0.07734375, 0.07734375), (0.415686274509804,\n 0.090234375, 0.090234375), (0.419607843137255, 0.103125, 0.103125),\n (0.423529411764706, 0.116015625, 0.116015625), (0.427450980392157, \n 0.12890625, 0.12890625), (0.431372549019608, 0.141796875, \n 0.141796875), (0.435294117647059, 0.1546875, 0.1546875), (\n 0.43921568627451, 0.167578125, 0.167578125), (0.443137254901961, \n 0.18046875, 0.18046875), (0.447058823529412, 0.193359375, \n 0.193359375), (0.450980392156863, 0.20625, 0.20625), (\n 0.454901960784314, 0.219140625, 0.219140625), (0.458823529411765, \n 0.23203125, 0.23203125), (0.462745098039216, 0.244921875, \n 0.244921875), (0.466666666666667, 0.2578125, 0.2578125), (\n 0.470588235294118, 0.270703125, 0.270703125), (0.474509803921569, \n 0.28359375, 0.28359375), (0.47843137254902, 0.296484375, \n 0.296484375), (0.482352941176471, 0.309375, 0.309375), (\n 0.486274509803922, 0.322265625, 0.322265625), (0.490196078431373, \n 0.33515625, 0.33515625), (0.494117647058824, 0.348046875, \n 0.348046875), (0.498039215686275, 0.3609375, 0.3609375), (\n 0.501960784313725, 0.373828125, 0.373828125), (0.505882352941176, \n 0.38671875, 0.38671875), (0.509803921568627, 0.399609375, \n 0.399609375), (0.513725490196078, 0.4125, 0.4125), (\n 0.517647058823529, 0.425390625, 0.425390625), (0.52156862745098, \n 0.43828125, 0.43828125), (0.525490196078431, 0.451171875, \n 0.451171875), (0.529411764705882, 0.4640625, 0.4640625), (\n 0.533333333333333, 0.476953125, 0.476953125), (0.537254901960784, \n 0.48984375, 0.48984375), (0.541176470588235, 0.502734375, \n 0.502734375), (0.545098039215686, 0.515625, 0.515625), (\n 0.549019607843137, 0.528515625, 0.528515625), (0.552941176470588, \n 0.54140625, 0.54140625), (0.556862745098039, 0.554296875, \n 0.554296875), (0.56078431372549, 0.5671875, 0.5671875), (\n 0.564705882352941, 0.580078125, 0.580078125), (0.568627450980392, \n 0.59296875, 0.59296875), (0.572549019607843, 0.605859375, \n 0.605859375), (0.576470588235294, 0.61875, 0.61875), (\n 0.580392156862745, 0.631640625, 0.631640625), (0.584313725490196, \n 0.64453125, 0.64453125), (0.588235294117647, 0.6359375, 0.6359375),\n (0.592156862745098, 0.62734375, 0.62734375), (0.596078431372549, \n 0.61875, 0.61875), (0.6, 0.61015625, 0.61015625), (\n 0.603921568627451, 0.6015625, 0.6015625), (0.607843137254902, \n 0.59296875, 0.59296875), 
(0.611764705882353, 0.584375, 0.584375), (\n 0.615686274509804, 0.57578125, 0.57578125), (0.619607843137255, \n 0.5671875, 0.5671875), (0.623529411764706, 0.55859375, 0.55859375),\n (0.627450980392157, 0.55, 0.55), (0.631372549019608, 0.54140625, \n 0.54140625), (0.635294117647059, 0.5328125, 0.5328125), (\n 0.63921568627451, 0.52421875, 0.52421875), (0.643137254901961, \n 0.515625, 0.515625), (0.647058823529412, 0.50703125, 0.50703125), (\n 0.650980392156863, 0.4984375, 0.4984375), (0.654901960784314, \n 0.48984375, 0.48984375), (0.658823529411765, 0.48125, 0.48125), (\n 0.662745098039216, 0.47265625, 0.47265625), (0.666666666666667, \n 0.4640625, 0.4640625), (0.670588235294118, 0.45546875, 0.45546875),\n (0.674509803921569, 0.446875, 0.446875), (0.67843137254902, \n 0.43828125, 0.43828125), (0.682352941176471, 0.4296875, 0.4296875),\n (0.686274509803922, 0.42109375, 0.42109375), (0.690196078431373, \n 0.4125, 0.4125), (0.694117647058824, 0.40390625, 0.40390625), (\n 0.698039215686274, 0.3953125, 0.3953125), (0.701960784313725, \n 0.38671875, 0.38671875), (0.705882352941177, 0.378125, 0.378125), (\n 0.709803921568627, 0.36953125, 0.36953125), (0.713725490196078, \n 0.3609375, 0.3609375), (0.717647058823529, 0.35234375, 0.35234375),\n (0.72156862745098, 0.34375, 0.34375), (0.725490196078431, \n 0.33515625, 0.33515625), (0.729411764705882, 0.3265625, 0.3265625),\n (0.733333333333333, 0.31796875, 0.31796875), (0.737254901960784, \n 0.309375, 0.309375), (0.741176470588235, 0.30078125, 0.30078125), (\n 0.745098039215686, 0.2921875, 0.2921875), (0.749019607843137, \n 0.28359375, 0.28359375), (0.752941176470588, 0.275, 0.275), (\n 0.756862745098039, 0.26640625, 0.26640625), (0.76078431372549, \n 0.2578125, 0.2578125), (0.764705882352941, 0.24921875, 0.24921875),\n (0.768627450980392, 0.240625, 0.240625), (0.772549019607843, \n 0.23203125, 0.23203125), (0.776470588235294, 0.2234375, 0.2234375),\n (0.780392156862745, 0.21484375, 0.21484375), (0.784313725490196, \n 0.222301171875, 0.222301171875), (0.788235294117647, 0.22975859375,\n 0.22975859375), (0.792156862745098, 0.237216015625, 0.237216015625),\n (0.796078431372549, 0.2446734375, 0.2446734375), (0.8, \n 0.252130859375, 0.252130859375), (0.803921568627451, 0.259587890625,\n 0.259587890625), (0.807843137254902, 0.2670453125, 0.2670453125), (\n 0.811764705882353, 0.274502734375, 0.274502734375), (\n 0.815686274509804, 0.28196015625, 0.28196015625), (\n 0.819607843137255, 0.289417578125, 0.289417578125), (\n 0.823529411764706, 0.296875, 0.296875), (0.827450980392157, \n 0.304332421875, 0.304332421875), (0.831372549019608, 0.31178984375,\n 0.31178984375), (0.835294117647059, 0.319247265625, 0.319247265625),\n (0.83921568627451, 0.3267046875, 0.3267046875), (0.843137254901961,\n 0.334162109375, 0.334162109375), (0.847058823529412, 0.34161953125,\n 0.34161953125), (0.850980392156863, 0.3490765625, 0.3490765625), (\n 0.854901960784314, 0.356533984375, 0.356533984375), (\n 0.858823529411765, 0.36399140625, 0.36399140625), (\n 0.862745098039216, 0.371448828125, 0.371448828125), (\n 0.866666666666667, 0.37890625, 0.37890625), (0.870588235294118, \n 0.386363671875, 0.386363671875), (0.874509803921569, 0.3938203125, \n 0.3938203125), (0.87843137254902, 0.40127734375, 0.40127734375), (\n 0.882352941176471, 0.408734375, 0.408734375), (0.886274509803922, \n 0.41619140625, 0.41619140625), (0.890196078431373, 0.42365234375, \n 0.42365234375), (0.894117647058824, 0.431109375, 0.431109375), (\n 0.898039215686275, 0.43856640625, 0.43856640625), (\n 
0.901960784313726, 0.4460234375, 0.4460234375), (0.905882352941176,\n 0.45348046875, 0.45348046875), (0.909803921568627, 0.4609375, \n 0.4609375), (0.913725490196078, 0.46839453125, 0.46839453125), (\n 0.917647058823529, 0.4758515625, 0.4758515625), (0.92156862745098, \n 0.48330859375, 0.48330859375), (0.925490196078431, 0.490765625, \n 0.490765625), (0.929411764705882, 0.49822265625, 0.49822265625), (\n 0.933333333333333, 0.50568359375, 0.50568359375), (\n 0.937254901960784, 0.513140625, 0.513140625), (0.941176470588235, \n 0.52059765625, 0.52059765625), (0.945098039215686, 0.5280546875, \n 0.5280546875), (0.949019607843137, 0.53551171875, 0.53551171875), (\n 0.952941176470588, 0.54296875, 0.54296875), (0.956862745098039, \n 0.55042578125, 0.55042578125), (0.96078431372549, 0.5578828125, \n 0.5578828125), (0.964705882352941, 0.56533984375, 0.56533984375), (\n 0.968627450980392, 0.572796875, 0.572796875), (0.972549019607843, \n 0.58025390625, 0.58025390625), (0.976470588235294, 0.58771484375, \n 0.58771484375), (0.980392156862745, 0.595171875, 0.595171875), (\n 0.984313725490196, 0.60262890625, 0.60262890625), (\n 0.988235294117647, 0.6100859375, 0.6100859375), (0.992156862745098,\n 0.61754296875, 0.61754296875), (0.996078431372549, 0.625, 0.625), (\n 1.0, 0.625, 0.625)), 'blue': ((0.0, 0.51984375, 0.51984375), (\n 0.00392156862745098, 0.51984375, 0.51984375), (0.00784313725490196,\n 0.529765625, 0.529765625), (0.0117647058823529, 0.5396875, \n 0.5396875), (0.0156862745098039, 0.549609375, 0.549609375), (\n 0.0196078431372549, 0.55953125, 0.55953125), (0.0235294117647059, \n 0.569453125, 0.569453125), (0.0274509803921569, 0.579375, 0.579375),\n (0.0313725490196078, 0.589296875, 0.589296875), (0.0352941176470588,\n 0.59921875, 0.59921875), (0.0392156862745098, 0.609140625, \n 0.609140625), (0.0431372549019608, 0.6190625, 0.6190625), (\n 0.0470588235294118, 0.628984375, 0.628984375), (0.0509803921568627,\n 0.63890625, 0.63890625), (0.0549019607843137, 0.648828125, \n 0.648828125), (0.0588235294117647, 0.65875, 0.65875), (\n 0.0627450980392157, 0.668671875, 0.668671875), (0.0666666666666667,\n 0.67859375, 0.67859375), (0.0705882352941176, 0.688515625, \n 0.688515625), (0.0745098039215686, 0.6984375, 0.6984375), (\n 0.0784313725490196, 0.708359375, 0.708359375), (0.0823529411764706,\n 0.71828125, 0.71828125), (0.0862745098039216, 0.728203125, \n 0.728203125), (0.0901960784313725, 0.738125, 0.738125), (\n 0.0941176470588235, 0.748046875, 0.748046875), (0.0980392156862745,\n 0.75796875, 0.75796875), (0.101960784313725, 0.767890625, \n 0.767890625), (0.105882352941176, 0.7778125, 0.7778125), (\n 0.109803921568627, 0.787734375, 0.787734375), (0.113725490196078, \n 0.79765625, 0.79765625), (0.117647058823529, 0.807578125, \n 0.807578125), (0.12156862745098, 0.8175, 0.8175), (\n 0.125490196078431, 0.827421875, 0.827421875), (0.129411764705882, \n 0.83734375, 0.83734375), (0.133333333333333, 0.847265625, \n 0.847265625), (0.137254901960784, 0.8571875, 0.8571875), (\n 0.141176470588235, 0.867109375, 0.867109375), (0.145098039215686, \n 0.87703125, 0.87703125), (0.149019607843137, 0.886953125, \n 0.886953125), (0.152941176470588, 0.896875, 0.896875), (\n 0.156862745098039, 0.906796875, 0.906796875), (0.16078431372549, \n 0.91671875, 0.91671875), (0.164705882352941, 0.926640625, \n 0.926640625), (0.168627450980392, 0.9365625, 0.9365625), (\n 0.172549019607843, 0.946484375, 0.946484375), (0.176470588235294, \n 0.95640625, 0.95640625), (0.180392156862745, 0.966328125, \n 0.966328125), (0.184313725490196, 
0.97625, 0.97625), (\n 0.188235294117647, 0.986171875, 0.986171875), (0.192156862745098, \n 0.99609375, 0.99609375), (0.196078431372549, 0.976171875, \n 0.976171875), (0.2, 0.95625, 0.95625), (0.203921568627451, \n 0.936328125, 0.936328125), (0.207843137254902, 0.91640625, \n 0.91640625), (0.211764705882353, 0.896484375, 0.896484375), (\n 0.215686274509804, 0.8765625, 0.8765625), (0.219607843137255, \n 0.856640625, 0.856640625), (0.223529411764706, 0.83671875, \n 0.83671875), (0.227450980392157, 0.816796875, 0.816796875), (\n 0.231372549019608, 0.796875, 0.796875), (0.235294117647059, \n 0.776953125, 0.776953125), (0.23921568627451, 0.75703125, \n 0.75703125), (0.243137254901961, 0.737109375, 0.737109375), (\n 0.247058823529412, 0.7171875, 0.7171875), (0.250980392156863, \n 0.697265625, 0.697265625), (0.254901960784314, 0.67734375, \n 0.67734375), (0.258823529411765, 0.657421875, 0.657421875), (\n 0.262745098039216, 0.6375, 0.6375), (0.266666666666667, 0.617578125,\n 0.617578125), (0.270588235294118, 0.59765625, 0.59765625), (\n 0.274509803921569, 0.577734375, 0.577734375), (0.27843137254902, \n 0.5578125, 0.5578125), (0.282352941176471, 0.537890625, 0.537890625\n ), (0.286274509803922, 0.51796875, 0.51796875), (0.290196078431373,\n 0.498046875, 0.498046875), (0.294117647058824, 0.478125, 0.478125),\n (0.298039215686275, 0.458203125, 0.458203125), (0.301960784313725, \n 0.43828125, 0.43828125), (0.305882352941176, 0.418359375, \n 0.418359375), (0.309803921568627, 0.3984375, 0.3984375), (\n 0.313725490196078, 0.378515625, 0.378515625), (0.317647058823529, \n 0.35859375, 0.35859375), (0.32156862745098, 0.338671875, \n 0.338671875), (0.325490196078431, 0.31875, 0.31875), (\n 0.329411764705882, 0.298828125, 0.298828125), (0.333333333333333, \n 0.27890625, 0.27890625), (0.337254901960784, 0.258984375, \n 0.258984375), (0.341176470588235, 0.2390625, 0.2390625), (\n 0.345098039215686, 0.219140625, 0.219140625), (0.349019607843137, \n 0.19921875, 0.19921875), (0.352941176470588, 0.179296875, \n 0.179296875), (0.356862745098039, 0.159375, 0.159375), (\n 0.36078431372549, 0.139453125, 0.139453125), (0.364705882352941, \n 0.11953125, 0.11953125), (0.368627450980392, 0.099609375, \n 0.099609375), (0.372549019607843, 0.0796875, 0.0796875), (\n 0.376470588235294, 0.059765625, 0.059765625), (0.380392156862745, \n 0.03984375, 0.03984375), (0.384313725490196, 0.019921875, \n 0.019921875), (0.388235294117647, 0, 0), (0.392156862745098, 0, 0),\n (0.396078431372549, 0, 0), (0.4, 0, 0), (0.403921568627451, 0, 0),\n (0.407843137254902, 0, 0), (0.411764705882353, 0, 0), (\n 0.415686274509804, 0, 0), (0.419607843137255, 0, 0), (\n 0.423529411764706, 0, 0), (0.427450980392157, 0, 0), (\n 0.431372549019608, 0, 0), (0.435294117647059, 0, 0), (\n 0.43921568627451, 0, 0), (0.443137254901961, 0, 0), (\n 0.447058823529412, 0, 0), (0.450980392156863, 0, 0), (\n 0.454901960784314, 0, 0), (0.458823529411765, 0, 0), (\n 0.462745098039216, 0, 0), (0.466666666666667, 0, 0), (\n 0.470588235294118, 0, 0), (0.474509803921569, 0, 0), (\n 0.47843137254902, 0, 0), (0.482352941176471, 0, 0), (\n 0.486274509803922, 0, 0), (0.490196078431373, 0, 0), (\n 0.494117647058824, 0, 0), (0.498039215686275, 0, 0), (\n 0.501960784313725, 0, 0), (0.505882352941176, 0, 0), (\n 0.509803921568627, 0, 0), (0.513725490196078, 0, 0), (\n 0.517647058823529, 0, 0), (0.52156862745098, 0, 0), (\n 0.525490196078431, 0, 0), (0.529411764705882, 0, 0), (\n 0.533333333333333, 0, 0), (0.537254901960784, 0, 0), (\n 0.541176470588235, 0, 0), (0.545098039215686, 
0, 0), (\n 0.549019607843137, 0, 0), (0.552941176470588, 0, 0), (\n 0.556862745098039, 0, 0), (0.56078431372549, 0, 0), (\n 0.564705882352941, 0, 0), (0.568627450980392, 0, 0), (\n 0.572549019607843, 0, 0), (0.576470588235294, 0, 0), (\n 0.580392156862745, 0, 0), (0.584313725490196, 0, 0), (\n 0.588235294117647, 0.004296875, 0.004296875), (0.592156862745098, \n 0.00859375, 0.00859375), (0.596078431372549, 0.012890625, \n 0.012890625), (0.6, 0.0171875, 0.0171875), (0.603921568627451, \n 0.021484375, 0.021484375), (0.607843137254902, 0.02578125, \n 0.02578125), (0.611764705882353, 0.030078125, 0.030078125), (\n 0.615686274509804, 0.034375, 0.034375), (0.619607843137255, \n 0.038671875, 0.038671875), (0.623529411764706, 0.04296875, \n 0.04296875), (0.627450980392157, 0.047265625, 0.047265625), (\n 0.631372549019608, 0.0515625, 0.0515625), (0.635294117647059, \n 0.055859375, 0.055859375), (0.63921568627451, 0.06015625, \n 0.06015625), (0.643137254901961, 0.064453125, 0.064453125), (\n 0.647058823529412, 0.06875, 0.06875), (0.650980392156863, \n 0.073046875, 0.073046875), (0.654901960784314, 0.07734375, \n 0.07734375), (0.658823529411765, 0.081640625, 0.081640625), (\n 0.662745098039216, 0.0859375, 0.0859375), (0.666666666666667, \n 0.090234375, 0.090234375), (0.670588235294118, 0.09453125, \n 0.09453125), (0.674509803921569, 0.098828125, 0.098828125), (\n 0.67843137254902, 0.103125, 0.103125), (0.682352941176471, \n 0.107421875, 0.107421875), (0.686274509803922, 0.11171875, \n 0.11171875), (0.690196078431373, 0.116015625, 0.116015625), (\n 0.694117647058824, 0.1203125, 0.1203125), (0.698039215686274, \n 0.124609375, 0.124609375), (0.701960784313725, 0.12890625, \n 0.12890625), (0.705882352941177, 0.133203125, 0.133203125), (\n 0.709803921568627, 0.1375, 0.1375), (0.713725490196078, 0.141796875,\n 0.141796875), (0.717647058823529, 0.14609375, 0.14609375), (\n 0.72156862745098, 0.150390625, 0.150390625), (0.725490196078431, \n 0.1546875, 0.1546875), (0.729411764705882, 0.158984375, 0.158984375\n ), (0.733333333333333, 0.16328125, 0.16328125), (0.737254901960784,\n 0.167578125, 0.167578125), (0.741176470588235, 0.171875, 0.171875),\n (0.745098039215686, 0.176171875, 0.176171875), (0.749019607843137, \n 0.18046875, 0.18046875), (0.752941176470588, 0.184765625, \n 0.184765625), (0.756862745098039, 0.1890625, 0.1890625), (\n 0.76078431372549, 0.193359375, 0.193359375), (0.764705882352941, \n 0.19765625, 0.19765625), (0.768627450980392, 0.201953125, \n 0.201953125), (0.772549019607843, 0.20625, 0.20625), (\n 0.776470588235294, 0.210546875, 0.210546875), (0.780392156862745, \n 0.21484375, 0.21484375), (0.784313725490196, 0.22663359375, \n 0.22663359375), (0.788235294117647, 0.2384234375, 0.2384234375), (\n 0.792156862745098, 0.250212890625, 0.250212890625), (\n 0.796078431372549, 0.262002734375, 0.262002734375), (0.8, \n 0.273792578125, 0.273792578125), (0.803921568627451, 0.285582421875,\n 0.285582421875), (0.807843137254902, 0.297372265625, 0.297372265625\n ), (0.811764705882353, 0.309162109375, 0.309162109375), (\n 0.815686274509804, 0.3209515625, 0.3209515625), (0.819607843137255,\n 0.33274140625, 0.33274140625), (0.823529411764706, 0.34453125, \n 0.34453125), (0.827450980392157, 0.35632109375, 0.35632109375), (\n 0.831372549019608, 0.3681109375, 0.3681109375), (0.835294117647059,\n 0.379900390625, 0.379900390625), (0.83921568627451, 0.39169140625, \n 0.39169140625), (0.843137254901961, 0.40348046875, 0.40348046875),\n (0.847058823529412, 0.41526953125, 0.41526953125), (\n 0.850980392156863, 
0.42705859375, 0.42705859375), (\n 0.854901960784314, 0.43884765625, 0.43884765625), (\n 0.858823529411765, 0.450640625, 0.450640625), (0.862745098039216, \n 0.4624296875, 0.4624296875), (0.866666666666667, 0.47421875, \n 0.47421875), (0.870588235294118, 0.4860078125, 0.4860078125), (\n 0.874509803921569, 0.497796875, 0.497796875), (0.87843137254902, \n 0.50958984375, 0.50958984375), (0.882352941176471, 0.52137890625, \n 0.52137890625), (0.886274509803922, 0.53316796875, 0.53316796875),\n (0.890196078431373, 0.54495703125, 0.54495703125), (\n 0.894117647058824, 0.55674609375, 0.55674609375), (\n 0.898039215686275, 0.56853515625, 0.56853515625), (\n 0.901960784313726, 0.580328125, 0.580328125), (0.905882352941176, \n 0.5921171875, 0.5921171875), (0.909803921568627, 0.60390625, \n 0.60390625), (0.913725490196078, 0.6156953125, 0.6156953125), (\n 0.917647058823529, 0.627484375, 0.627484375), (0.92156862745098, \n 0.63927734375, 0.63927734375), (0.925490196078431, 0.65106640625, \n 0.65106640625), (0.929411764705882, 0.66285546875, 0.66285546875),\n (0.933333333333333, 0.67464453125, 0.67464453125), (\n 0.937254901960784, 0.68643359375, 0.68643359375), (\n 0.941176470588235, 0.69822265625, 0.69822265625), (\n 0.945098039215686, 0.710015625, 0.710015625), (0.949019607843137, \n 0.7218046875, 0.7218046875), (0.952941176470588, 0.73359375, \n 0.73359375), (0.956862745098039, 0.7453828125, 0.7453828125), (\n 0.96078431372549, 0.757171875, 0.757171875), (0.964705882352941, \n 0.76896484375, 0.76896484375), (0.968627450980392, 0.78075390625, \n 0.78075390625), (0.972549019607843, 0.79254296875, 0.79254296875),\n (0.976470588235294, 0.80433203125, 0.80433203125), (\n 0.980392156862745, 0.81612109375, 0.81612109375), (\n 0.984313725490196, 0.82791015625, 0.82791015625), (\n 0.988235294117647, 0.839703125, 0.839703125), (0.992156862745098, \n 0.8514921875, 0.8514921875), (0.996078431372549, 0.86328125, \n 0.86328125), (1.0, 0.86328125, 0.86328125))}\n vcdict = {'red': ((0, 1, 1), (0.00392156862745098, 0.54508984375, \n 0.54508984375), (0.00784313725490196, 0.5285703125, 0.5285703125),\n (0.0117647058823529, 0.5120546875, 0.5120546875), (\n 0.0156862745098039, 0.49553515625, 0.49553515625), (\n 0.0196078431372549, 0.47901953125, 0.47901953125), (\n 0.0235294117647059, 0.4625, 0.4625), (0.0274509803921569, \n 0.44598046875, 0.44598046875), (0.0313725490196078, 0.42946484375, \n 0.42946484375), (0.0352941176470588, 0.4129453125, 0.4129453125), (\n 0.0392156862745098, 0.3964296875, 0.3964296875), (\n 0.0431372549019608, 0.379910546875, 0.379910546875), (\n 0.0470588235294118, 0.36339296875, 0.36339296875), (\n 0.0509803921568627, 0.346875, 0.346875), (0.0549019607843137, \n 0.33035703125, 0.33035703125), (0.0588235294117647, 0.313839453125,\n 0.313839453125), (0.0627450980392157, 0.297321484375, \n 0.297321484375), (0.0666666666666667, 0.280803515625, \n 0.280803515625), (0.0705882352941176, 0.2642859375, 0.2642859375),\n (0.0745098039215686, 0.24776796875, 0.24776796875), (\n 0.0784313725490196, 0.23125, 0.23125), (0.0823529411764706, \n 0.21473203125, 0.21473203125), (0.0862745098039216, 0.198214453125,\n 0.198214453125), (0.0901960784313725, 0.181696484375, \n 0.181696484375), (0.0941176470588235, 0.165178515625, \n 0.165178515625), (0.0980392156862745, 0.148660546875, \n 0.148660546875), (0.101960784313725, 0.13214296875, 0.13214296875),\n (0.105882352941176, 0.115625, 0.115625), (0.109803921568627, \n 0.09910703125, 0.09910703125), (0.113725490196078, 0.082589453125, \n 0.082589453125), 
(0.117647058823529, 0.066071484375, 0.066071484375\n ), (0.12156862745098, 0.049553515625, 0.049553515625), (\n 0.125490196078431, 0.0330357421875, 0.0330357421875), (\n 0.129411764705882, 0.016517890625, 0.016517890625), (\n 0.133333333333333, 0, 0), (0.137254901960784, 0, 0), (\n 0.141176470588235, 0, 0), (0.145098039215686, 0, 0), (\n 0.149019607843137, 0, 0), (0.152941176470588, 0, 0), (\n 0.156862745098039, 0, 0), (0.16078431372549, 0, 0), (\n 0.164705882352941, 0, 0), (0.168627450980392, 0, 0), (\n 0.172549019607843, 0, 0), (0.176470588235294, 0, 0), (\n 0.180392156862745, 0, 0), (0.184313725490196, 0, 0), (\n 0.188235294117647, 0, 0), (0.192156862745098, 0, 0), (\n 0.196078431372549, 0, 0), (0.2, 0, 0), (0.203921568627451, 0, 0), (\n 0.207843137254902, 0, 0), (0.211764705882353, 0, 0), (\n 0.215686274509804, 0, 0), (0.219607843137255, 0, 0), (\n 0.223529411764706, 0, 0), (0.227450980392157, 0, 0), (\n 0.231372549019608, 0, 0), (0.235294117647059, 0, 0), (\n 0.23921568627451, 0, 0), (0.243137254901961, 0, 0), (\n 0.247058823529412, 0, 0), (0.250980392156863, 0, 0), (\n 0.254901960784314, 0, 0), (0.258823529411765, 0, 0), (\n 0.262745098039216, 0, 0), (0.266666666666667, 0, 0), (\n 0.270588235294118, 0, 0), (0.274509803921569, 0, 0), (\n 0.27843137254902, 0, 0), (0.282352941176471, 0, 0), (\n 0.286274509803922, 0, 0), (0.290196078431373, 0, 0), (\n 0.294117647058824, 0, 0), (0.298039215686275, 0, 0), (\n 0.301960784313725, 0, 0), (0.305882352941176, 0, 0), (\n 0.309803921568627, 0, 0), (0.313725490196078, 0, 0), (\n 0.317647058823529, 0, 0), (0.32156862745098, 0, 0), (\n 0.325490196078431, 0, 0), (0.329411764705882, 0, 0), (\n 0.333333333333333, 0, 0), (0.337254901960784, 0, 0), (\n 0.341176470588235, 0, 0), (0.345098039215686, 0, 0), (\n 0.349019607843137, 0, 0), (0.352941176470588, 0.0061383984375, \n 0.0061383984375), (0.356862745098039, 0.012276796875, \n 0.012276796875), (0.36078431372549, 0.0184151953125, \n 0.0184151953125), (0.364705882352941, 0.0245535546875, \n 0.0245535546875), (0.368627450980392, 0.030691953125, \n 0.030691953125), (0.372549019607843, 0.0368303515625, \n 0.0368303515625), (0.376470588235294, 0.04296875, 0.04296875), (\n 0.380392156862745, 0.04910703125, 0.04910703125), (\n 0.384313725490196, 0.055245703125, 0.055245703125), (\n 0.388235294117647, 0.061383984375, 0.061383984375), (\n 0.392156862745098, 0.067522265625, 0.067522265625), (\n 0.396078431372549, 0.073660546875, 0.073660546875), (0.4, \n 0.07979921875, 0.07979921875), (0.403921568627451, 0.0859375, \n 0.0859375), (0.407843137254902, 0.09207578125, 0.09207578125), (\n 0.411764705882353, 0.098214453125, 0.098214453125), (\n 0.415686274509804, 0.104352734375, 0.104352734375), (\n 0.419607843137255, 0.110491015625, 0.110491015625), (\n 0.423529411764706, 0.116629296875, 0.116629296875), (\n 0.427450980392157, 0.12276796875, 0.12276796875), (\n 0.431372549019608, 0.12890625, 0.12890625), (0.435294117647059, \n 0.13504453125, 0.13504453125), (0.43921568627451, 0.141183203125, \n 0.141183203125), (0.443137254901961, 0.147321484375, 0.147321484375\n ), (0.447058823529412, 0.153459765625, 0.153459765625), (\n 0.450980392156863, 0.159598046875, 0.159598046875), (\n 0.454901960784314, 0.16573671875, 0.16573671875), (\n 0.458823529411765, 0.171875, 0.171875), (0.462745098039216, \n 0.17801328125, 0.17801328125), (0.466666666666667, 0.184151953125, \n 0.184151953125), (0.470588235294118, 0.190290234375, 0.190290234375\n ), (0.474509803921569, 0.196428515625, 0.196428515625), (\n 0.47843137254902, 
0.202566796875, 0.202566796875), (\n 0.482352941176471, 0.20870546875, 0.20870546875), (\n 0.486274509803922, 0.21484375, 0.21484375), (0.490196078431373, \n 0.233370703125, 0.233370703125), (0.494117647058824, 0.251897265625,\n 0.251897265625), (0.498039215686275, 0.27042421875, 0.27042421875),\n (0.501960784313725, 0.28895078125, 0.28895078125), (\n 0.505882352941176, 0.307477734375, 0.307477734375), (\n 0.509803921568627, 0.326004296875, 0.326004296875), (\n 0.513725490196078, 0.34453125, 0.34453125), (0.517647058823529, \n 0.363058203125, 0.363058203125), (0.52156862745098, 0.381584765625,\n 0.381584765625), (0.525490196078431, 0.40011328125, 0.40011328125),\n (0.529411764705882, 0.41863671875, 0.41863671875), (\n 0.533333333333333, 0.4371640625, 0.4371640625), (0.537254901960784,\n 0.45569140625, 0.45569140625), (0.541176470588235, 0.47421875, \n 0.47421875), (0.545098039215686, 0.49274609375, 0.49274609375), (\n 0.549019607843137, 0.5112734375, 0.5112734375), (0.552941176470588,\n 0.52980078125, 0.52980078125), (0.556862745098039, 0.54832421875, \n 0.54832421875), (0.56078431372549, 0.5668515625, 0.5668515625), (\n 0.564705882352941, 0.58537890625, 0.58537890625), (\n 0.568627450980392, 0.60390625, 0.60390625), (0.572549019607843, \n 0.62243359375, 0.62243359375), (0.576470588235294, 0.6409609375, \n 0.6409609375), (0.580392156862745, 0.65948828125, 0.65948828125), (\n 0.584313725490196, 0.67801171875, 0.67801171875), (\n 0.588235294117647, 0.6965390625, 0.6965390625), (0.592156862745098,\n 0.71506640625, 0.71506640625), (0.596078431372549, 0.73359375, \n 0.73359375), (0.6, 0.75212109375, 0.75212109375), (\n 0.603921568627451, 0.7706484375, 0.7706484375), (0.607843137254902,\n 0.78917578125, 0.78917578125), (0.611764705882353, 0.80769921875, \n 0.80769921875), (0.615686274509804, 0.8262265625, 0.8262265625), (\n 0.619607843137255, 0.84475390625, 0.84475390625), (\n 0.623529411764706, 0.86328125, 0.86328125), (0.627450980392157, \n 0.86549609375, 0.86549609375), (0.631372549019608, 0.86770703125, \n 0.86770703125), (0.635294117647059, 0.869921875, 0.869921875), (\n 0.63921568627451, 0.87213671875, 0.87213671875), (0.643137254901961,\n 0.87434765625, 0.87434765625), (0.647058823529412, 0.8765625, \n 0.8765625), (0.650980392156863, 0.87877734375, 0.87877734375), (\n 0.654901960784314, 0.88098828125, 0.88098828125), (\n 0.658823529411765, 0.883203125, 0.883203125), (0.662745098039216, \n 0.88541796875, 0.88541796875), (0.666666666666667, 0.88762890625, \n 0.88762890625), (0.670588235294118, 0.88984375, 0.88984375), (\n 0.674509803921569, 0.89205859375, 0.89205859375), (0.67843137254902,\n 0.89426953125, 0.89426953125), (0.682352941176471, 0.896484375, \n 0.896484375), (0.686274509803922, 0.89869921875, 0.89869921875), (\n 0.690196078431373, 0.90091015625, 0.90091015625), (\n 0.694117647058824, 0.903125, 0.903125), (0.698039215686274, \n 0.90533984375, 0.90533984375), (0.701960784313725, 0.90755078125, \n 0.90755078125), (0.705882352941177, 0.909765625, 0.909765625), (\n 0.709803921568627, 0.91198046875, 0.91198046875), (\n 0.713725490196078, 0.91419140625, 0.91419140625), (\n 0.717647058823529, 0.91640625, 0.91640625), (0.72156862745098, \n 0.91862109375, 0.91862109375), (0.725490196078431, 0.92083203125, \n 0.92083203125), (0.729411764705882, 0.923046875, 0.923046875), (\n 0.733333333333333, 0.92526171875, 0.92526171875), (\n 0.737254901960784, 0.92747265625, 0.92747265625), (\n 0.741176470588235, 0.9296875, 0.9296875), (0.745098039215686, \n 0.93190234375, 0.93190234375), 
(0.749019607843137, 0.93411328125, \n 0.93411328125), (0.752941176470588, 0.936328125, 0.936328125), (\n 0.756862745098039, 0.93854296875, 0.93854296875), (0.76078431372549,\n 0.94075390625, 0.94075390625), (0.764705882352941, 0.94296875, \n 0.94296875), (0.768627450980392, 0.94518359375, 0.94518359375), (\n 0.772549019607843, 0.94739453125, 0.94739453125), (\n 0.776470588235294, 0.949609375, 0.949609375), (0.780392156862745, \n 0.95182421875, 0.95182421875), (0.784313725490196, 0.95403515625, \n 0.95403515625), (0.788235294117647, 0.95625, 0.95625), (\n 0.792156862745098, 0.95846484375, 0.95846484375), (\n 0.796078431372549, 0.96067578125, 0.96067578125), (0.8, 0.962890625,\n 0.962890625), (0.803921568627451, 0.96510546875, 0.96510546875), (\n 0.807843137254902, 0.96731640625, 0.96731640625), (\n 0.811764705882353, 0.96953125, 0.96953125), (0.815686274509804, \n 0.97174609375, 0.97174609375), (0.819607843137255, 0.97395703125, \n 0.97395703125), (0.823529411764706, 0.976171875, 0.976171875), (\n 0.827450980392157, 0.97838671875, 0.97838671875), (\n 0.831372549019608, 0.98059765625, 0.98059765625), (\n 0.835294117647059, 0.9828125, 0.9828125), (0.83921568627451, \n 0.98502734375, 0.98502734375), (0.843137254901961, 0.98723828125, \n 0.98723828125), (0.847058823529412, 0.989453125, 0.989453125), (\n 0.850980392156863, 0.99166796875, 0.99166796875), (\n 0.854901960784314, 0.99387890625, 0.99387890625), (\n 0.858823529411765, 0.99609375, 0.99609375), (0.862745098039216, \n 0.99609375, 0.99609375), (0.866666666666667, 0.99609375, 0.99609375\n ), (0.870588235294118, 0.99609375, 0.99609375), (0.874509803921569,\n 0.99609375, 0.99609375), (0.87843137254902, 0.99609375, 0.99609375),\n (0.882352941176471, 0.99609375, 0.99609375), (0.886274509803922, \n 0.99609375, 0.99609375), (0.890196078431373, 0.99609375, 0.99609375\n ), (0.894117647058824, 0.99609375, 0.99609375), (0.898039215686275,\n 0.99609375, 0.99609375), (0.901960784313726, 0.99609375, 0.99609375\n ), (0.905882352941176, 0.99609375, 0.99609375), (0.909803921568627,\n 0.99609375, 0.99609375), (0.913725490196078, 0.99609375, 0.99609375\n ), (0.917647058823529, 0.99609375, 0.99609375), (0.92156862745098, \n 0.99609375, 0.99609375), (0.925490196078431, 0.99609375, 0.99609375\n ), (0.929411764705882, 0.99609375, 0.99609375), (0.933333333333333,\n 0.99609375, 0.99609375), (0.937254901960784, 0.99609375, 0.99609375\n ), (0.941176470588235, 0.99609375, 0.99609375), (0.945098039215686,\n 0.99609375, 0.99609375), (0.949019607843137, 0.99609375, 0.99609375\n ), (0.952941176470588, 0.99609375, 0.99609375), (0.956862745098039,\n 0.99609375, 0.99609375), (0.96078431372549, 0.99609375, 0.99609375),\n (0.964705882352941, 0.99609375, 0.99609375), (0.968627450980392, \n 0.99609375, 0.99609375), (0.972549019607843, 0.99609375, 0.99609375\n ), (0.976470588235294, 0.99609375, 0.99609375), (0.980392156862745,\n 0.99609375, 0.99609375), (0.984313725490196, 0.99609375, 0.99609375\n ), (0.988235294117647, 0.99609375, 0.99609375), (0.992156862745098,\n 0.99609375, 0.99609375), (0.996078431372549, 0.99609375, 0.99609375\n ), (1, 0.99609375, 0.99609375)), 'green': ((0, 1, 1), (\n 0.00392156862745098, 0, 0), (0.00784313725490196, 0, 0), (\n 0.0117647058823529, 0, 0), (0.0156862745098039, 0, 0), (\n 0.0196078431372549, 0, 0), (0.0235294117647059, 0, 0), (\n 0.0274509803921569, 0, 0), (0.0313725490196078, 0, 0), (\n 0.0352941176470588, 0, 0), (0.0392156862745098, 0, 0), (\n 0.0431372549019608, 0, 0), (0.0470588235294118, 0, 0), (\n 0.0509803921568627, 0, 0), 
(0.0549019607843137, 0, 0), (\n 0.0588235294117647, 0, 0), (0.0627450980392157, 0, 0), (\n 0.0666666666666667, 0, 0), (0.0705882352941176, 0, 0), (\n 0.0745098039215686, 0, 0), (0.0784313725490196, 0, 0), (\n 0.0823529411764706, 0, 0), (0.0862745098039216, 0, 0), (\n 0.0901960784313725, 0, 0), (0.0941176470588235, 0, 0), (\n 0.0980392156862745, 0, 0), (0.101960784313725, 0, 0), (\n 0.105882352941176, 0, 0), (0.109803921568627, 0, 0), (\n 0.113725490196078, 0, 0), (0.117647058823529, 0, 0), (\n 0.12156862745098, 0, 0), (0.125490196078431, 0, 0), (\n 0.129411764705882, 0, 0), (0.133333333333333, 0, 0), (\n 0.137254901960784, 0.0135653515625, 0.0135653515625), (\n 0.141176470588235, 0.0271306640625, 0.0271306640625), (\n 0.145098039215686, 0.04069609375, 0.04069609375), (\n 0.149019607843137, 0.054261328125, 0.054261328125), (\n 0.152941176470588, 0.0678265625, 0.0678265625), (0.156862745098039,\n 0.0813921875, 0.0813921875), (0.16078431372549, 0.094957421875, \n 0.094957421875), (0.164705882352941, 0.10852265625, 0.10852265625),\n (0.168627450980392, 0.122087890625, 0.122087890625), (\n 0.172549019607843, 0.135653515625, 0.135653515625), (\n 0.176470588235294, 0.14921875, 0.14921875), (0.180392156862745, \n 0.162783984375, 0.162783984375), (0.184313725490196, 0.176349609375,\n 0.176349609375), (0.188235294117647, 0.18991484375, 0.18991484375),\n (0.192156862745098, 0.203480078125, 0.203480078125), (\n 0.196078431372549, 0.2170453125, 0.2170453125), (0.2, 0.2306109375,\n 0.2306109375), (0.203921568627451, 0.244176171875, 0.244176171875),\n (0.207843137254902, 0.25774140625, 0.25774140625), (\n 0.211764705882353, 0.27130703125, 0.27130703125), (\n 0.215686274509804, 0.284872265625, 0.284872265625), (\n 0.219607843137255, 0.2984375, 0.2984375), (0.223529411764706, \n 0.312002734375, 0.312002734375), (0.227450980392157, 0.325568359375,\n 0.325568359375), (0.231372549019608, 0.33913359375, 0.33913359375),\n (0.235294117647059, 0.352698828125, 0.352698828125), (\n 0.23921568627451, 0.3662640625, 0.3662640625), (0.243137254901961, \n 0.3798296875, 0.3798296875), (0.247058823529412, 0.39339453125, \n 0.39339453125), (0.250980392156863, 0.4069609375, 0.4069609375), (\n 0.254901960784314, 0.42052734375, 0.42052734375), (\n 0.258823529411765, 0.43408984375, 0.43408984375), (\n 0.262745098039216, 0.44765625, 0.44765625), (0.266666666666667, \n 0.46122265625, 0.46122265625), (0.270588235294118, 0.47478515625, \n 0.47478515625), (0.274509803921569, 0.4883515625, 0.4883515625), (\n 0.27843137254902, 0.50191796875, 0.50191796875), (0.282352941176471,\n 0.515484375, 0.515484375), (0.286274509803922, 0.529046875, \n 0.529046875), (0.290196078431373, 0.54261328125, 0.54261328125), (\n 0.294117647058824, 0.5561796875, 0.5561796875), (0.298039215686275,\n 0.56974609375, 0.56974609375), (0.301960784313725, 0.58330859375, \n 0.58330859375), (0.305882352941176, 0.596875, 0.596875), (\n 0.309803921568627, 0.61044140625, 0.61044140625), (\n 0.313725490196078, 0.62400390625, 0.62400390625), (\n 0.317647058823529, 0.6375703125, 0.6375703125), (0.32156862745098, \n 0.65113671875, 0.65113671875), (0.325490196078431, 0.664703125, \n 0.664703125), (0.329411764705882, 0.678265625, 0.678265625), (\n 0.333333333333333, 0.69183203125, 0.69183203125), (\n 0.337254901960784, 0.7053984375, 0.7053984375), (0.341176470588235,\n 0.71896484375, 0.71896484375), (0.345098039215686, 0.73252734375, \n 0.73252734375), (0.349019607843137, 0.74609375, 0.74609375), (\n 0.352941176470588, 0.7309140625, 0.7309140625), (0.356862745098039,\n 
0.71573828125, 0.71573828125), (0.36078431372549, 0.70055859375, \n 0.70055859375), (0.364705882352941, 0.68537890625, 0.68537890625),\n (0.368627450980392, 0.67019921875, 0.67019921875), (\n 0.372549019607843, 0.6550234375, 0.6550234375), (0.376470588235294,\n 0.63984375, 0.63984375), (0.380392156862745, 0.6246640625, \n 0.6246640625), (0.384313725490196, 0.60948828125, 0.60948828125), (\n 0.388235294117647, 0.59430859375, 0.59430859375), (\n 0.392156862745098, 0.57912890625, 0.57912890625), (\n 0.396078431372549, 0.56394921875, 0.56394921875), (0.4, \n 0.5487734375, 0.5487734375), (0.403921568627451, 0.53359375, \n 0.53359375), (0.407843137254902, 0.5184140625, 0.5184140625), (\n 0.411764705882353, 0.50323828125, 0.50323828125), (\n 0.415686274509804, 0.48805859375, 0.48805859375), (\n 0.419607843137255, 0.47287890625, 0.47287890625), (\n 0.423529411764706, 0.45769921875, 0.45769921875), (\n 0.427450980392157, 0.4425234375, 0.4425234375), (0.431372549019608,\n 0.42734375, 0.42734375), (0.435294117647059, 0.4121640625, \n 0.4121640625), (0.43921568627451, 0.39698828125, 0.39698828125), (\n 0.443137254901961, 0.381808203125, 0.381808203125), (\n 0.447058823529412, 0.366629296875, 0.366629296875), (\n 0.450980392156863, 0.35145078125, 0.35145078125), (\n 0.454901960784314, 0.336272265625, 0.336272265625), (\n 0.458823529411765, 0.32109375, 0.32109375), (0.462745098039216, \n 0.305915234375, 0.305915234375), (0.466666666666667, 0.29073671875,\n 0.29073671875), (0.470588235294118, 0.2755578125, 0.2755578125), (\n 0.474509803921569, 0.260379296875, 0.260379296875), (\n 0.47843137254902, 0.24520078125, 0.24520078125), (0.482352941176471,\n 0.230022265625, 0.230022265625), (0.486274509803922, 0.21484375, \n 0.21484375), (0.490196078431373, 0.2265625, 0.2265625), (\n 0.494117647058824, 0.23828125, 0.23828125), (0.498039215686275, \n 0.25, 0.25), (0.501960784313725, 0.26171875, 0.26171875), (\n 0.505882352941176, 0.2734375, 0.2734375), (0.509803921568627, \n 0.28515625, 0.28515625), (0.513725490196078, 0.296875, 0.296875), (\n 0.517647058823529, 0.30859375, 0.30859375), (0.52156862745098, \n 0.3203125, 0.3203125), (0.525490196078431, 0.33203125, 0.33203125),\n (0.529411764705882, 0.34375, 0.34375), (0.533333333333333, \n 0.35546875, 0.35546875), (0.537254901960784, 0.3671875, 0.3671875),\n (0.541176470588235, 0.37890625, 0.37890625), (0.545098039215686, \n 0.390625, 0.390625), (0.549019607843137, 0.40234375, 0.40234375), (\n 0.552941176470588, 0.4140625, 0.4140625), (0.556862745098039, \n 0.42578125, 0.42578125), (0.56078431372549, 0.4375, 0.4375), (\n 0.564705882352941, 0.44921875, 0.44921875), (0.568627450980392, \n 0.4609375, 0.4609375), (0.572549019607843, 0.47265625, 0.47265625),\n (0.576470588235294, 0.484375, 0.484375), (0.580392156862745, \n 0.49609375, 0.49609375), (0.584313725490196, 0.5078125, 0.5078125),\n (0.588235294117647, 0.51953125, 0.51953125), (0.592156862745098, \n 0.53125, 0.53125), (0.596078431372549, 0.54296875, 0.54296875), (\n 0.6, 0.5546875, 0.5546875), (0.603921568627451, 0.56640625, \n 0.56640625), (0.607843137254902, 0.578125, 0.578125), (\n 0.611764705882353, 0.58984375, 0.58984375), (0.615686274509804, \n 0.6015625, 0.6015625), (0.619607843137255, 0.61328125, 0.61328125),\n (0.623529411764706, 0.625, 0.625), (0.627450980392157, \n 0.61458203125, 0.61458203125), (0.631372549019608, 0.60416796875, \n 0.60416796875), (0.635294117647059, 0.59375, 0.59375), (\n 0.63921568627451, 0.58333203125, 0.58333203125), (0.643137254901961,\n 0.57291796875, 0.57291796875), 
(0.647058823529412, 0.5625, 0.5625),\n (0.650980392156863, 0.55208203125, 0.55208203125), (\n 0.654901960784314, 0.54166796875, 0.54166796875), (\n 0.658823529411765, 0.53125, 0.53125), (0.662745098039216, \n 0.52083203125, 0.52083203125), (0.666666666666667, 0.51041796875, \n 0.51041796875), (0.670588235294118, 0.5, 0.5), (0.674509803921569, \n 0.48958203125, 0.48958203125), (0.67843137254902, 0.47916796875, \n 0.47916796875), (0.682352941176471, 0.46875, 0.46875), (\n 0.686274509803922, 0.45833203125, 0.45833203125), (\n 0.690196078431373, 0.44791796875, 0.44791796875), (\n 0.694117647058824, 0.4375, 0.4375), (0.698039215686274, \n 0.42708203125, 0.42708203125), (0.701960784313725, 0.41666796875, \n 0.41666796875), (0.705882352941177, 0.40625, 0.40625), (\n 0.709803921568627, 0.39583203125, 0.39583203125), (\n 0.713725490196078, 0.385416796875, 0.385416796875), (\n 0.717647058823529, 0.375, 0.375), (0.72156862745098, 0.364583203125,\n 0.364583203125), (0.725490196078431, 0.354166796875, 0.354166796875\n ), (0.729411764705882, 0.34375, 0.34375), (0.733333333333333, \n 0.333333203125, 0.333333203125), (0.737254901960784, 0.322916796875,\n 0.322916796875), (0.741176470588235, 0.3125, 0.3125), (\n 0.745098039215686, 0.302083203125, 0.302083203125), (\n 0.749019607843137, 0.291666796875, 0.291666796875), (\n 0.752941176470588, 0.28125, 0.28125), (0.756862745098039, \n 0.270833203125, 0.270833203125), (0.76078431372549, 0.260416796875,\n 0.260416796875), (0.764705882352941, 0.25, 0.25), (\n 0.768627450980392, 0.239583203125, 0.239583203125), (\n 0.772549019607843, 0.229166796875, 0.229166796875), (\n 0.776470588235294, 0.21875, 0.21875), (0.780392156862745, \n 0.208333203125, 0.208333203125), (0.784313725490196, 0.197916796875,\n 0.197916796875), (0.788235294117647, 0.1875, 0.1875), (\n 0.792156862745098, 0.177083203125, 0.177083203125), (\n 0.796078431372549, 0.166666796875, 0.166666796875), (0.8, 0.15625, \n 0.15625), (0.803921568627451, 0.145833203125, 0.145833203125), (\n 0.807843137254902, 0.135416796875, 0.135416796875), (\n 0.811764705882353, 0.125, 0.125), (0.815686274509804, \n 0.114583203125, 0.114583203125), (0.819607843137255, 0.104166796875,\n 0.104166796875), (0.823529411764706, 0.09375, 0.09375), (\n 0.827450980392157, 0.083333203125, 0.083333203125), (\n 0.831372549019608, 0.072916796875, 0.072916796875), (\n 0.835294117647059, 0.0625, 0.0625), (0.83921568627451, \n 0.052083203125, 0.052083203125), (0.843137254901961, 0.041666796875,\n 0.041666796875), (0.847058823529412, 0.03125, 0.03125), (\n 0.850980392156863, 0.0208333203125, 0.0208333203125), (\n 0.854901960784314, 0.0104166796875, 0.0104166796875), (\n 0.858823529411765, 0, 0), (0.862745098039216, 0.0184151953125, \n 0.0184151953125), (0.866666666666667, 0.0368303515625, \n 0.0368303515625), (0.870588235294118, 0.055245703125, \n 0.055245703125), (0.874509803921569, 0.073660546875, 0.073660546875\n ), (0.87843137254902, 0.09207578125, 0.09207578125), (\n 0.882352941176471, 0.110491015625, 0.110491015625), (\n 0.886274509803922, 0.12890625, 0.12890625), (0.890196078431373, \n 0.147321484375, 0.147321484375), (0.894117647058824, 0.16573671875,\n 0.16573671875), (0.898039215686275, 0.184151953125, 0.184151953125),\n (0.901960784313726, 0.202566796875, 0.202566796875), (\n 0.905882352941176, 0.22098203125, 0.22098203125), (\n 0.909803921568627, 0.239397265625, 0.239397265625), (\n 0.913725490196078, 0.2578125, 0.2578125), (0.917647058823529, \n 0.276227734375, 0.276227734375), (0.92156862745098, 0.29464296875, \n 
0.29464296875), (0.925490196078431, 0.313058203125, 0.313058203125),\n (0.929411764705882, 0.331473046875, 0.331473046875), (\n 0.933333333333333, 0.34988828125, 0.34988828125), (\n 0.937254901960784, 0.368303515625, 0.368303515625), (\n 0.941176470588235, 0.38671875, 0.38671875), (0.945098039215686, \n 0.4051328125, 0.4051328125), (0.949019607843137, 0.42355078125, \n 0.42355078125), (0.952941176470588, 0.44196484375, 0.44196484375),\n (0.956862745098039, 0.46037890625, 0.46037890625), (\n 0.96078431372549, 0.47879296875, 0.47879296875), (0.964705882352941,\n 0.4972109375, 0.4972109375), (0.968627450980392, 0.515625, 0.515625\n ), (0.972549019607843, 0.5340390625, 0.5340390625), (\n 0.976470588235294, 0.55245703125, 0.55245703125), (\n 0.980392156862745, 0.57087109375, 0.57087109375), (\n 0.984313725490196, 0.58928515625, 0.58928515625), (\n 0.988235294117647, 0.60769921875, 0.60769921875), (\n 0.992156862745098, 0.6261171875, 0.6261171875), (0.996078431372549,\n 0.64453125, 0.64453125), (1, 0.64453125, 0.64453125)), 'blue': ((0,\n 1, 1), (0.00392156862745098, 0.80569140625, 0.80569140625), (\n 0.00784313725490196, 0.7964296875, 0.7964296875), (\n 0.0117647058823529, 0.7871640625, 0.7871640625), (\n 0.0156862745098039, 0.77790234375, 0.77790234375), (\n 0.0196078431372549, 0.76863671875, 0.76863671875), (\n 0.0235294117647059, 0.759375, 0.759375), (0.0274509803921569, \n 0.75011328125, 0.75011328125), (0.0313725490196078, 0.74084765625, \n 0.74084765625), (0.0352941176470588, 0.7315859375, 0.7315859375), (\n 0.0392156862745098, 0.7223203125, 0.7223203125), (\n 0.0431372549019608, 0.71305859375, 0.71305859375), (\n 0.0470588235294118, 0.70379296875, 0.70379296875), (\n 0.0509803921568627, 0.69453125, 0.69453125), (0.0549019607843137, \n 0.68526953125, 0.68526953125), (0.0588235294117647, 0.67600390625, \n 0.67600390625), (0.0627450980392157, 0.6667421875, 0.6667421875), (\n 0.0666666666666667, 0.6574765625, 0.6574765625), (\n 0.0705882352941176, 0.64821484375, 0.64821484375), (\n 0.0745098039215686, 0.63894921875, 0.63894921875), (\n 0.0784313725490196, 0.6296875, 0.6296875), (0.0823529411764706, \n 0.62042578125, 0.62042578125), (0.0862745098039216, 0.61116015625, \n 0.61116015625), (0.0901960784313725, 0.6018984375, 0.6018984375), (\n 0.0941176470588235, 0.5926328125, 0.5926328125), (\n 0.0980392156862745, 0.58337109375, 0.58337109375), (\n 0.101960784313725, 0.57410546875, 0.57410546875), (\n 0.105882352941176, 0.56484375, 0.56484375), (0.109803921568627, \n 0.55558203125, 0.55558203125), (0.113725490196078, 0.54631640625, \n 0.54631640625), (0.117647058823529, 0.5370546875, 0.5370546875), (\n 0.12156862745098, 0.5277890625, 0.5277890625), (0.125490196078431, \n 0.51852734375, 0.51852734375), (0.129411764705882, 0.50926171875, \n 0.50926171875), (0.133333333333333, 0.5, 0.5), (0.137254901960784, \n 0.50901953125, 0.50901953125), (0.141176470588235, 0.5180390625, \n 0.5180390625), (0.145098039215686, 0.52705859375, 0.52705859375), (\n 0.149019607843137, 0.536078125, 0.536078125), (0.152941176470588, \n 0.54509765625, 0.54509765625), (0.156862745098039, 0.55412109375, \n 0.55412109375), (0.16078431372549, 0.563140625, 0.563140625), (\n 0.164705882352941, 0.57216015625, 0.57216015625), (\n 0.168627450980392, 0.5811796875, 0.5811796875), (0.172549019607843,\n 0.59019921875, 0.59019921875), (0.176470588235294, 0.59921875, \n 0.59921875), (0.180392156862745, 0.60823828125, 0.60823828125), (\n 0.184313725490196, 0.6172578125, 0.6172578125), (0.188235294117647,\n 0.62627734375, 0.62627734375), 
(0.192156862745098, 0.635296875, \n 0.635296875), (0.196078431372549, 0.64431640625, 0.64431640625), (\n 0.2, 0.65333984375, 0.65333984375), (0.203921568627451, 0.662359375,\n 0.662359375), (0.207843137254902, 0.67137890625, 0.67137890625), (\n 0.211764705882353, 0.6803984375, 0.6803984375), (0.215686274509804,\n 0.68941796875, 0.68941796875), (0.219607843137255, 0.6984375, \n 0.6984375), (0.223529411764706, 0.70745703125, 0.70745703125), (\n 0.227450980392157, 0.7164765625, 0.7164765625), (0.231372549019608,\n 0.72549609375, 0.72549609375), (0.235294117647059, 0.734515625, \n 0.734515625), (0.23921568627451, 0.74353515625, 0.74353515625), (\n 0.243137254901961, 0.75255859375, 0.75255859375), (\n 0.247058823529412, 0.761578125, 0.761578125), (0.250980392156863, \n 0.77059765625, 0.77059765625), (0.254901960784314, 0.7796171875, \n 0.7796171875), (0.258823529411765, 0.78863671875, 0.78863671875), (\n 0.262745098039216, 0.79765625, 0.79765625), (0.266666666666667, \n 0.80667578125, 0.80667578125), (0.270588235294118, 0.8156953125, \n 0.8156953125), (0.274509803921569, 0.82471484375, 0.82471484375), (\n 0.27843137254902, 0.833734375, 0.833734375), (0.282352941176471, \n 0.84275390625, 0.84275390625), (0.286274509803922, 0.85177734375, \n 0.85177734375), (0.290196078431373, 0.860796875, 0.860796875), (\n 0.294117647058824, 0.86981640625, 0.86981640625), (\n 0.298039215686275, 0.8788359375, 0.8788359375), (0.301960784313725,\n 0.88785546875, 0.88785546875), (0.305882352941176, 0.896875, \n 0.896875), (0.309803921568627, 0.90589453125, 0.90589453125), (\n 0.313725490196078, 0.9149140625, 0.9149140625), (0.317647058823529,\n 0.92393359375, 0.92393359375), (0.32156862745098, 0.932953125, \n 0.932953125), (0.325490196078431, 0.94197265625, 0.94197265625), (\n 0.329411764705882, 0.95099609375, 0.95099609375), (\n 0.333333333333333, 0.960015625, 0.960015625), (0.337254901960784, \n 0.96903515625, 0.96903515625), (0.341176470588235, 0.9780546875, \n 0.9780546875), (0.345098039215686, 0.98707421875, 0.98707421875), (\n 0.349019607843137, 0.99609375, 0.99609375), (0.352941176470588, \n 0.9737734375, 0.9737734375), (0.356862745098039, 0.95144921875, \n 0.95144921875), (0.36078431372549, 0.92912890625, 0.92912890625), (\n 0.364705882352941, 0.90680859375, 0.90680859375), (\n 0.368627450980392, 0.88448828125, 0.88448828125), (\n 0.372549019607843, 0.8621640625, 0.8621640625), (0.376470588235294,\n 0.83984375, 0.83984375), (0.380392156862745, 0.8175234375, \n 0.8175234375), (0.384313725490196, 0.79519921875, 0.79519921875), (\n 0.388235294117647, 0.77287890625, 0.77287890625), (\n 0.392156862745098, 0.75055859375, 0.75055859375), (\n 0.396078431372549, 0.72823828125, 0.72823828125), (0.4, \n 0.7059140625, 0.7059140625), (0.403921568627451, 0.68359375, \n 0.68359375), (0.407843137254902, 0.6612734375, 0.6612734375), (\n 0.411764705882353, 0.63894921875, 0.63894921875), (\n 0.415686274509804, 0.61662890625, 0.61662890625), (\n 0.419607843137255, 0.59430859375, 0.59430859375), (\n 0.423529411764706, 0.57198828125, 0.57198828125), (\n 0.427450980392157, 0.5496640625, 0.5496640625), (0.431372549019608,\n 0.52734375, 0.52734375), (0.435294117647059, 0.5050234375, \n 0.5050234375), (0.43921568627451, 0.48269921875, 0.48269921875), (\n 0.443137254901961, 0.46037890625, 0.46037890625), (\n 0.447058823529412, 0.43805859375, 0.43805859375), (\n 0.450980392156863, 0.41573828125, 0.41573828125), (\n 0.454901960784314, 0.3934140625, 0.3934140625), (0.458823529411765,\n 0.37109375, 0.37109375), (0.462745098039216, 
0.348772265625, \n 0.348772265625), (0.466666666666667, 0.32645078125, 0.32645078125),\n (0.470588235294118, 0.304129296875, 0.304129296875), (\n 0.474509803921569, 0.281808203125, 0.281808203125), (\n 0.47843137254902, 0.25948671875, 0.25948671875), (0.482352941176471,\n 0.237165234375, 0.237165234375), (0.486274509803922, 0.21484375, \n 0.21484375), (0.490196078431373, 0.233370703125, 0.233370703125), (\n 0.494117647058824, 0.251897265625, 0.251897265625), (\n 0.498039215686275, 0.27042421875, 0.27042421875), (\n 0.501960784313725, 0.28895078125, 0.28895078125), (\n 0.505882352941176, 0.307477734375, 0.307477734375), (\n 0.509803921568627, 0.326004296875, 0.326004296875), (\n 0.513725490196078, 0.34453125, 0.34453125), (0.517647058823529, \n 0.363058203125, 0.363058203125), (0.52156862745098, 0.381584765625,\n 0.381584765625), (0.525490196078431, 0.40011328125, 0.40011328125),\n (0.529411764705882, 0.41863671875, 0.41863671875), (\n 0.533333333333333, 0.4371640625, 0.4371640625), (0.537254901960784,\n 0.45569140625, 0.45569140625), (0.541176470588235, 0.47421875, \n 0.47421875), (0.545098039215686, 0.49274609375, 0.49274609375), (\n 0.549019607843137, 0.5112734375, 0.5112734375), (0.552941176470588,\n 0.52980078125, 0.52980078125), (0.556862745098039, 0.54832421875, \n 0.54832421875), (0.56078431372549, 0.5668515625, 0.5668515625), (\n 0.564705882352941, 0.58537890625, 0.58537890625), (\n 0.568627450980392, 0.60390625, 0.60390625), (0.572549019607843, \n 0.62243359375, 0.62243359375), (0.576470588235294, 0.6409609375, \n 0.6409609375), (0.580392156862745, 0.65948828125, 0.65948828125), (\n 0.584313725490196, 0.67801171875, 0.67801171875), (\n 0.588235294117647, 0.6965390625, 0.6965390625), (0.592156862745098,\n 0.71506640625, 0.71506640625), (0.596078431372549, 0.73359375, \n 0.73359375), (0.6, 0.75212109375, 0.75212109375), (\n 0.603921568627451, 0.7706484375, 0.7706484375), (0.607843137254902,\n 0.78917578125, 0.78917578125), (0.611764705882353, 0.80769921875, \n 0.80769921875), (0.615686274509804, 0.8262265625, 0.8262265625), (\n 0.619607843137255, 0.84475390625, 0.84475390625), (\n 0.623529411764706, 0.86328125, 0.86328125), (0.627450980392157, \n 0.84889453125, 0.84889453125), (0.631372549019608, 0.83450390625, \n 0.83450390625), (0.635294117647059, 0.8201171875, 0.8201171875), (\n 0.63921568627451, 0.80573046875, 0.80573046875), (0.643137254901961,\n 0.79133984375, 0.79133984375), (0.647058823529412, 0.776953125, \n 0.776953125), (0.650980392156863, 0.76256640625, 0.76256640625), (\n 0.654901960784314, 0.74817578125, 0.74817578125), (\n 0.658823529411765, 0.7337890625, 0.7337890625), (0.662745098039216,\n 0.71940234375, 0.71940234375), (0.666666666666667, 0.70501171875, \n 0.70501171875), (0.670588235294118, 0.690625, 0.690625), (\n 0.674509803921569, 0.67623828125, 0.67623828125), (0.67843137254902,\n 0.66184765625, 0.66184765625), (0.682352941176471, 0.6474609375, \n 0.6474609375), (0.686274509803922, 0.63307421875, 0.63307421875), (\n 0.690196078431373, 0.61868359375, 0.61868359375), (\n 0.694117647058824, 0.604296875, 0.604296875), (0.698039215686274, \n 0.58991015625, 0.58991015625), (0.701960784313725, 0.57551953125, \n 0.57551953125), (0.705882352941177, 0.5611328125, 0.5611328125), (\n 0.709803921568627, 0.54674609375, 0.54674609375), (\n 0.713725490196078, 0.53235546875, 0.53235546875), (\n 0.717647058823529, 0.51796875, 0.51796875), (0.72156862745098, \n 0.50358203125, 0.50358203125), (0.725490196078431, 0.48919140625, \n 0.48919140625), (0.729411764705882, 0.4748046875, 
0.4748046875), (\n 0.733333333333333, 0.46041796875, 0.46041796875), (\n 0.737254901960784, 0.44602734375, 0.44602734375), (\n 0.741176470588235, 0.431640625, 0.431640625), (0.745098039215686, \n 0.41725390625, 0.41725390625), (0.749019607843137, 0.40286328125, \n 0.40286328125), (0.752941176470588, 0.3884765625, 0.3884765625), (\n 0.756862745098039, 0.374088671875, 0.374088671875), (\n 0.76078431372549, 0.359700390625, 0.359700390625), (\n 0.764705882352941, 0.3453125, 0.3453125), (0.768627450980392, \n 0.330924609375, 0.330924609375), (0.772549019607843, 0.316536328125,\n 0.316536328125), (0.776470588235294, 0.3021484375, 0.3021484375), (\n 0.780392156862745, 0.287760546875, 0.287760546875), (\n 0.784313725490196, 0.273372265625, 0.273372265625), (\n 0.788235294117647, 0.258984375, 0.258984375), (0.792156862745098, \n 0.244596484375, 0.244596484375), (0.796078431372549, 0.230208203125,\n 0.230208203125), (0.8, 0.2158203125, 0.2158203125), (\n 0.803921568627451, 0.201432421875, 0.201432421875), (\n 0.807843137254902, 0.187044140625, 0.187044140625), (\n 0.811764705882353, 0.17265625, 0.17265625), (0.815686274509804, \n 0.158268359375, 0.158268359375), (0.819607843137255, 0.143880078125,\n 0.143880078125), (0.823529411764706, 0.1294921875, 0.1294921875), (\n 0.827450980392157, 0.115104296875, 0.115104296875), (\n 0.831372549019608, 0.100716015625, 0.100716015625), (\n 0.835294117647059, 0.086328125, 0.086328125), (0.83921568627451, \n 0.071940234375, 0.071940234375), (0.843137254901961, 0.057551953125,\n 0.057551953125), (0.847058823529412, 0.0431640625, 0.0431640625), (\n 0.850980392156863, 0.028776015625, 0.028776015625), (\n 0.854901960784314, 0.01438796875, 0.01438796875), (\n 0.858823529411765, 0, 0), (0.862745098039216, 0, 0), (\n 0.866666666666667, 0, 0), (0.870588235294118, 0, 0), (\n 0.874509803921569, 0, 0), (0.87843137254902, 0, 0), (\n 0.882352941176471, 0, 0), (0.886274509803922, 0, 0), (\n 0.890196078431373, 0, 0), (0.894117647058824, 0, 0), (\n 0.898039215686275, 0, 0), (0.901960784313726, 0, 0), (\n 0.905882352941176, 0, 0), (0.909803921568627, 0, 0), (\n 0.913725490196078, 0, 0), (0.917647058823529, 0, 0), (\n 0.92156862745098, 0, 0), (0.925490196078431, 0, 0), (\n 0.929411764705882, 0, 0), (0.933333333333333, 0, 0), (\n 0.937254901960784, 0, 0), (0.941176470588235, 0, 0), (\n 0.945098039215686, 0, 0), (0.949019607843137, 0, 0), (\n 0.952941176470588, 0, 0), (0.956862745098039, 0, 0), (\n 0.96078431372549, 0, 0), (0.964705882352941, 0, 0), (\n 0.968627450980392, 0, 0), (0.972549019607843, 0, 0), (\n 0.976470588235294, 0, 0), (0.980392156862745, 0, 0), (\n 0.984313725490196, 0, 0), (0.988235294117647, 0, 0), (\n 0.992156862745098, 0, 0), (0.996078431372549, 0, 0), (1, 0, 0))}\n califa = mcol.LinearSegmentedColormap('califa', cdict)\n vcalifa = mcol.LinearSegmentedColormap('vcalifa', vcdict)\n if option == 'v':\n return vcalifa\n else:\n return califa\n\n\ndef A_l(R_v, lw):\n lw = lw / 10000\n x = 1 / lw\n if x > 1.1:\n y = x - 1.82\n a_x = (1.0 + 0.17699 * y - 0.50447 * y ** 2 - 0.02427 * y ** 3 + \n 0.72085 * y ** 4 + 0.01979 * y ** 5 - 0.7753 * y ** 6 + 0.32999 *\n y ** 7)\n b_x = (1.41338 * y + 2.28305 * y ** 2 + 1.07233 * y ** 3 - 5.38434 *\n y ** 4 - 0.62251 * y ** 5 + 5.3026 * y ** 6 - 2.09002 * y ** 7)\n else:\n a_x = 0.574 * x ** 1.61\n b_x = -0.527 * x ** 1.61\n A_l_ = a_x + b_x / R_v\n return A_l_\n",
"<import token>\n\n\ndef color_map_califa_old(option='json'):\n if option == 'json':\n cmap_cal_dic = json.load(open('code/cmap_cal_json.txt'))\n elif option == 'pickle':\n with open('cmap_cal_pickle.txt', 'rb') as handle:\n cmap_cal_dic = pickle.loads(handle.read())\n cmap_cal = mcol.LinearSegmentedColormap('cmap_CALIFA', cmap_cal_dic)\n return cmap_cal\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\ndef Gr_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_grazy = np.linspace(x_min, -0.2, 100)\n ax.plot(x_set_grazy, grazy(x_set_grazy), label='Stasinska+03', **kwargs)\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.01, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_OI_curve_plot(ax=None, x_min=-3.5, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.35)\n ax.plot(x_set_line, O3O1_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_curve_plot(ax=None, x_min=-2.0, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.08, 100)\n ax.plot(x_set_line, espinosa(x_set_line), label=label, **kwargs)\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\ndef kewley(logNIIHa):\n val = 0.61 / (logNIIHa - 0.47) + 1.19\n return val\n\n\ndef grazy(logNIIHa):\n x = logNIIHa\n val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409 * x\n ) - 31.093\n return val\n\n\ndef AGNline(logSIIHa):\n val = 0.72 / (logSIIHa - 0.32) + 1.3\n return val\n\n\ndef LINSy2line(logSIIHa):\n val = 1.89 * logSIIHa + 0.76\n return val\n\n\ndef AGNline2(logOIHa):\n val = 0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\ndef LINSy2line2(logOIHa):\n val = 1.18 * logOIHa + 1.3\n return val\n\n\ndef espinosa(logNIIHa):\n val = 0.12579066 / (logNIIHa - 0.00302777) + 0.56846872\n return val\n\n\ndef O3S2_line_c(x):\n val = 
0.04074804 / (x + 0.01253238) + 0.58154113\n return val\n\n\n<function token>\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.02, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\ndef kewley(logNIIHa):\n val = 0.61 / (logNIIHa - 0.47) + 1.19\n return val\n\n\ndef grazy(logNIIHa):\n x = logNIIHa\n val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409 * x\n ) - 31.093\n return val\n\n\ndef A_l(R_v, lw):\n lw = lw / 10000\n x = 1 / lw\n if x > 1.1:\n y = x - 1.82\n a_x = (1.0 + 0.17699 * y - 0.50447 * y ** 2 - 0.02427 * y ** 3 + \n 0.72085 * y ** 4 + 0.01979 * y ** 5 - 0.7753 * y ** 6 + 0.32999 *\n y ** 7)\n b_x = (1.41338 * y + 2.28305 * y ** 2 + 1.07233 * y ** 3 - 5.38434 *\n y ** 4 - 0.62251 * y ** 5 + 5.3026 * y ** 6 - 2.09002 * y ** 7)\n else:\n a_x = 0.574 * x ** 1.61\n b_x = -0.527 * x ** 1.61\n A_l_ = a_x + b_x / R_v\n return A_l_\n\n\ndef color_map_califa(option='v'):\n cdict = {'red': ((0.0, 0, 0), (0.00392156862745098, 0, 0), (\n 0.00784313725490196, 0, 0), (0.0117647058823529, 0, 0), (\n 0.0156862745098039, 0, 0), (0.0196078431372549, 0, 0), (\n 0.0235294117647059, 0, 0), (0.0274509803921569, 0, 0), (\n 0.0313725490196078, 0, 0), (0.0352941176470588, 0, 0), (\n 0.0392156862745098, 0, 0), (0.0431372549019608, 0, 0), (\n 0.0470588235294118, 0, 0), (0.0509803921568627, 0, 0), (\n 0.0549019607843137, 0, 0), (0.0588235294117647, 0, 0), (\n 0.0627450980392157, 0, 0), (0.0666666666666667, 0, 0), (\n 0.0705882352941176, 0, 0), (0.0745098039215686, 0, 0), (\n 0.0784313725490196, 0, 0), (0.0823529411764706, 0, 0), (\n 0.0862745098039216, 0, 0), (0.0901960784313725, 0, 0), (\n 0.0941176470588235, 0, 0), (0.0980392156862745, 0, 0), (\n 0.101960784313725, 0, 0), (0.105882352941176, 0, 0), (\n 0.109803921568627, 0, 0), (0.113725490196078, 0, 0), (\n 0.117647058823529, 0, 0), (0.12156862745098, 0, 0), (\n 0.125490196078431, 0, 0), 
(0.129411764705882, 0, 0), (\n 0.133333333333333, 0, 0), (0.137254901960784, 0, 0), (\n 0.141176470588235, 0, 0), (0.145098039215686, 0, 0), (\n 0.149019607843137, 0, 0), (0.152941176470588, 0, 0), (\n 0.156862745098039, 0, 0), (0.16078431372549, 0, 0), (\n 0.164705882352941, 0, 0), (0.168627450980392, 0, 0), (\n 0.172549019607843, 0, 0), (0.176470588235294, 0, 0), (\n 0.180392156862745, 0, 0), (0.184313725490196, 0, 0), (\n 0.188235294117647, 0, 0), (0.192156862745098, 0, 0), (\n 0.196078431372549, 0.019921875, 0.019921875), (0.2, 0.03984375, \n 0.03984375), (0.203921568627451, 0.059765625, 0.059765625), (\n 0.207843137254902, 0.0796875, 0.0796875), (0.211764705882353, \n 0.099609375, 0.099609375), (0.215686274509804, 0.11953125, \n 0.11953125), (0.219607843137255, 0.139453125, 0.139453125), (\n 0.223529411764706, 0.159375, 0.159375), (0.227450980392157, \n 0.179296875, 0.179296875), (0.231372549019608, 0.19921875, \n 0.19921875), (0.235294117647059, 0.219140625, 0.219140625), (\n 0.23921568627451, 0.2390625, 0.2390625), (0.243137254901961, \n 0.258984375, 0.258984375), (0.247058823529412, 0.27890625, \n 0.27890625), (0.250980392156863, 0.298828125, 0.298828125), (\n 0.254901960784314, 0.31875, 0.31875), (0.258823529411765, \n 0.338671875, 0.338671875), (0.262745098039216, 0.35859375, \n 0.35859375), (0.266666666666667, 0.378515625, 0.378515625), (\n 0.270588235294118, 0.3984375, 0.3984375), (0.274509803921569, \n 0.418359375, 0.418359375), (0.27843137254902, 0.43828125, \n 0.43828125), (0.282352941176471, 0.458203125, 0.458203125), (\n 0.286274509803922, 0.478125, 0.478125), (0.290196078431373, \n 0.498046875, 0.498046875), (0.294117647058824, 0.51796875, \n 0.51796875), (0.298039215686275, 0.537890625, 0.537890625), (\n 0.301960784313725, 0.5578125, 0.5578125), (0.305882352941176, \n 0.577734375, 0.577734375), (0.309803921568627, 0.59765625, \n 0.59765625), (0.313725490196078, 0.617578125, 0.617578125), (\n 0.317647058823529, 0.6375, 0.6375), (0.32156862745098, 0.657421875,\n 0.657421875), (0.325490196078431, 0.67734375, 0.67734375), (\n 0.329411764705882, 0.697265625, 0.697265625), (0.333333333333333, \n 0.7171875, 0.7171875), (0.337254901960784, 0.737109375, 0.737109375\n ), (0.341176470588235, 0.75703125, 0.75703125), (0.345098039215686,\n 0.776953125, 0.776953125), (0.349019607843137, 0.796875, 0.796875),\n (0.352941176470588, 0.816796875, 0.816796875), (0.356862745098039, \n 0.83671875, 0.83671875), (0.36078431372549, 0.856640625, \n 0.856640625), (0.364705882352941, 0.8765625, 0.8765625), (\n 0.368627450980392, 0.896484375, 0.896484375), (0.372549019607843, \n 0.91640625, 0.91640625), (0.376470588235294, 0.936328125, \n 0.936328125), (0.380392156862745, 0.95625, 0.95625), (\n 0.384313725490196, 0.976171875, 0.976171875), (0.388235294117647, \n 0.99609375, 0.99609375), (0.392156862745098, 0.99609375, 0.99609375\n ), (0.396078431372549, 0.99609375, 0.99609375), (0.4, 0.99609375, \n 0.99609375), (0.403921568627451, 0.99609375, 0.99609375), (\n 0.407843137254902, 0.99609375, 0.99609375), (0.411764705882353, \n 0.99609375, 0.99609375), (0.415686274509804, 0.99609375, 0.99609375\n ), (0.419607843137255, 0.99609375, 0.99609375), (0.423529411764706,\n 0.99609375, 0.99609375), (0.427450980392157, 0.99609375, 0.99609375\n ), (0.431372549019608, 0.99609375, 0.99609375), (0.435294117647059,\n 0.99609375, 0.99609375), (0.43921568627451, 0.99609375, 0.99609375),\n (0.443137254901961, 0.99609375, 0.99609375), (0.447058823529412, \n 0.99609375, 0.99609375), (0.450980392156863, 0.99609375, 
0.99609375\n ), (0.454901960784314, 0.99609375, 0.99609375), (0.458823529411765,\n 0.99609375, 0.99609375), (0.462745098039216, 0.99609375, 0.99609375\n ), (0.466666666666667, 0.99609375, 0.99609375), (0.470588235294118,\n 0.99609375, 0.99609375), (0.474509803921569, 0.99609375, 0.99609375\n ), (0.47843137254902, 0.99609375, 0.99609375), (0.482352941176471, \n 0.99609375, 0.99609375), (0.486274509803922, 0.99609375, 0.99609375\n ), (0.490196078431373, 0.99609375, 0.99609375), (0.494117647058824,\n 0.99609375, 0.99609375), (0.498039215686275, 0.99609375, 0.99609375\n ), (0.501960784313725, 0.99609375, 0.99609375), (0.505882352941176,\n 0.99609375, 0.99609375), (0.509803921568627, 0.99609375, 0.99609375\n ), (0.513725490196078, 0.99609375, 0.99609375), (0.517647058823529,\n 0.99609375, 0.99609375), (0.52156862745098, 0.99609375, 0.99609375),\n (0.525490196078431, 0.99609375, 0.99609375), (0.529411764705882, \n 0.99609375, 0.99609375), (0.533333333333333, 0.99609375, 0.99609375\n ), (0.537254901960784, 0.99609375, 0.99609375), (0.541176470588235,\n 0.99609375, 0.99609375), (0.545098039215686, 0.99609375, 0.99609375\n ), (0.549019607843137, 0.99609375, 0.99609375), (0.552941176470588,\n 0.99609375, 0.99609375), (0.556862745098039, 0.99609375, 0.99609375\n ), (0.56078431372549, 0.99609375, 0.99609375), (0.564705882352941, \n 0.99609375, 0.99609375), (0.568627450980392, 0.99609375, 0.99609375\n ), (0.572549019607843, 0.99609375, 0.99609375), (0.576470588235294,\n 0.99609375, 0.99609375), (0.580392156862745, 0.99609375, 0.99609375\n ), (0.584313725490196, 0.99609375, 0.99609375), (0.588235294117647,\n 0.98046875, 0.98046875), (0.592156862745098, 0.96484375, 0.96484375\n ), (0.596078431372549, 0.94921875, 0.94921875), (0.6, 0.93359375, \n 0.93359375), (0.603921568627451, 0.91796875, 0.91796875), (\n 0.607843137254902, 0.90234375, 0.90234375), (0.611764705882353, \n 0.88671875, 0.88671875), (0.615686274509804, 0.87109375, 0.87109375\n ), (0.619607843137255, 0.85546875, 0.85546875), (0.623529411764706,\n 0.83984375, 0.83984375), (0.627450980392157, 0.82421875, 0.82421875\n ), (0.631372549019608, 0.80859375, 0.80859375), (0.635294117647059,\n 0.79296875, 0.79296875), (0.63921568627451, 0.77734375, 0.77734375),\n (0.643137254901961, 0.76171875, 0.76171875), (0.647058823529412, \n 0.74609375, 0.74609375), (0.650980392156863, 0.73046875, 0.73046875\n ), (0.654901960784314, 0.71484375, 0.71484375), (0.658823529411765,\n 0.69921875, 0.69921875), (0.662745098039216, 0.68359375, 0.68359375\n ), (0.666666666666667, 0.66796875, 0.66796875), (0.670588235294118,\n 0.65234375, 0.65234375), (0.674509803921569, 0.63671875, 0.63671875\n ), (0.67843137254902, 0.62109375, 0.62109375), (0.682352941176471, \n 0.60546875, 0.60546875), (0.686274509803922, 0.58984375, 0.58984375\n ), (0.690196078431373, 0.57421875, 0.57421875), (0.694117647058824,\n 0.55859375, 0.55859375), (0.698039215686274, 0.54296875, 0.54296875\n ), (0.701960784313725, 0.52734375, 0.52734375), (0.705882352941177,\n 0.51171875, 0.51171875), (0.709803921568627, 0.49609375, 0.49609375\n ), (0.713725490196078, 0.48046875, 0.48046875), (0.717647058823529,\n 0.46484375, 0.46484375), (0.72156862745098, 0.44921875, 0.44921875),\n (0.725490196078431, 0.43359375, 0.43359375), (0.729411764705882, \n 0.41796875, 0.41796875), (0.733333333333333, 0.40234375, 0.40234375\n ), (0.737254901960784, 0.38671875, 0.38671875), (0.741176470588235,\n 0.37109375, 0.37109375), (0.745098039215686, 0.35546875, 0.35546875\n ), (0.749019607843137, 0.33984375, 0.33984375), 
(0.752941176470588,\n 0.32421875, 0.32421875), (0.756862745098039, 0.30859375, 0.30859375\n ), (0.76078431372549, 0.29296875, 0.29296875), (0.764705882352941, \n 0.27734375, 0.27734375), (0.768627450980392, 0.26171875, 0.26171875\n ), (0.772549019607843, 0.24609375, 0.24609375), (0.776470588235294,\n 0.23046875, 0.23046875), (0.780392156862745, 0.21484375, 0.21484375\n ), (0.784313725490196, 0.22663359375, 0.22663359375), (\n 0.788235294117647, 0.2384234375, 0.2384234375), (0.792156862745098,\n 0.250212890625, 0.250212890625), (0.796078431372549, 0.262002734375,\n 0.262002734375), (0.8, 0.273792578125, 0.273792578125), (\n 0.803921568627451, 0.285582421875, 0.285582421875), (\n 0.807843137254902, 0.297372265625, 0.297372265625), (\n 0.811764705882353, 0.309162109375, 0.309162109375), (\n 0.815686274509804, 0.3209515625, 0.3209515625), (0.819607843137255,\n 0.33274140625, 0.33274140625), (0.823529411764706, 0.34453125, \n 0.34453125), (0.827450980392157, 0.35632109375, 0.35632109375), (\n 0.831372549019608, 0.3681109375, 0.3681109375), (0.835294117647059,\n 0.379900390625, 0.379900390625), (0.83921568627451, 0.39169140625, \n 0.39169140625), (0.843137254901961, 0.40348046875, 0.40348046875),\n (0.847058823529412, 0.41526953125, 0.41526953125), (\n 0.850980392156863, 0.42705859375, 0.42705859375), (\n 0.854901960784314, 0.43884765625, 0.43884765625), (\n 0.858823529411765, 0.450640625, 0.450640625), (0.862745098039216, \n 0.4624296875, 0.4624296875), (0.866666666666667, 0.47421875, \n 0.47421875), (0.870588235294118, 0.4860078125, 0.4860078125), (\n 0.874509803921569, 0.497796875, 0.497796875), (0.87843137254902, \n 0.50958984375, 0.50958984375), (0.882352941176471, 0.52137890625, \n 0.52137890625), (0.886274509803922, 0.53316796875, 0.53316796875),\n (0.890196078431373, 0.54495703125, 0.54495703125), (\n 0.894117647058824, 0.55674609375, 0.55674609375), (\n 0.898039215686275, 0.56853515625, 0.56853515625), (\n 0.901960784313726, 0.580328125, 0.580328125), (0.905882352941176, \n 0.5921171875, 0.5921171875), (0.909803921568627, 0.60390625, \n 0.60390625), (0.913725490196078, 0.6156953125, 0.6156953125), (\n 0.917647058823529, 0.627484375, 0.627484375), (0.92156862745098, \n 0.63927734375, 0.63927734375), (0.925490196078431, 0.65106640625, \n 0.65106640625), (0.929411764705882, 0.66285546875, 0.66285546875),\n (0.933333333333333, 0.67464453125, 0.67464453125), (\n 0.937254901960784, 0.68643359375, 0.68643359375), (\n 0.941176470588235, 0.69822265625, 0.69822265625), (\n 0.945098039215686, 0.710015625, 0.710015625), (0.949019607843137, \n 0.7218046875, 0.7218046875), (0.952941176470588, 0.73359375, \n 0.73359375), (0.956862745098039, 0.7453828125, 0.7453828125), (\n 0.96078431372549, 0.757171875, 0.757171875), (0.964705882352941, \n 0.76896484375, 0.76896484375), (0.968627450980392, 0.78075390625, \n 0.78075390625), (0.972549019607843, 0.79254296875, 0.79254296875),\n (0.976470588235294, 0.80433203125, 0.80433203125), (\n 0.980392156862745, 0.81612109375, 0.81612109375), (\n 0.984313725490196, 0.82791015625, 0.82791015625), (\n 0.988235294117647, 0.839703125, 0.839703125), (0.992156862745098, \n 0.8514921875, 0.8514921875), (0.996078431372549, 0.86328125, \n 0.86328125), (1.0, 0.86328125, 0.86328125)), 'green': ((0.0, \n 0.02984375, 0.02984375), (0.00392156862745098, 0.02984375, \n 0.02984375), (0.00784313725490196, 0.044765625, 0.044765625), (\n 0.0117647058823529, 0.0596875, 0.0596875), (0.0156862745098039, \n 0.074609375, 0.074609375), (0.0196078431372549, 0.08953125, \n 0.08953125), 
(0.0235294117647059, 0.104453125, 0.104453125), (\n 0.0274509803921569, 0.119375, 0.119375), (0.0313725490196078, \n 0.134296875, 0.134296875), (0.0352941176470588, 0.14921875, \n 0.14921875), (0.0392156862745098, 0.164140625, 0.164140625), (\n 0.0431372549019608, 0.1790625, 0.1790625), (0.0470588235294118, \n 0.193984375, 0.193984375), (0.0509803921568627, 0.20890625, \n 0.20890625), (0.0549019607843137, 0.223828125, 0.223828125), (\n 0.0588235294117647, 0.23875, 0.23875), (0.0627450980392157, \n 0.253671875, 0.253671875), (0.0666666666666667, 0.26859375, \n 0.26859375), (0.0705882352941176, 0.283515625, 0.283515625), (\n 0.0745098039215686, 0.2984375, 0.2984375), (0.0784313725490196, \n 0.313359375, 0.313359375), (0.0823529411764706, 0.32828125, \n 0.32828125), (0.0862745098039216, 0.343203125, 0.343203125), (\n 0.0901960784313725, 0.358125, 0.358125), (0.0941176470588235, \n 0.373046875, 0.373046875), (0.0980392156862745, 0.38796875, \n 0.38796875), (0.101960784313725, 0.402890625, 0.402890625), (\n 0.105882352941176, 0.4178125, 0.4178125), (0.109803921568627, \n 0.432734375, 0.432734375), (0.113725490196078, 0.44765625, \n 0.44765625), (0.117647058823529, 0.462578125, 0.462578125), (\n 0.12156862745098, 0.4775, 0.4775), (0.125490196078431, 0.492421875,\n 0.492421875), (0.129411764705882, 0.50734375, 0.50734375), (\n 0.133333333333333, 0.522265625, 0.522265625), (0.137254901960784, \n 0.5371875, 0.5371875), (0.141176470588235, 0.552109375, 0.552109375\n ), (0.145098039215686, 0.56703125, 0.56703125), (0.149019607843137,\n 0.581953125, 0.581953125), (0.152941176470588, 0.596875, 0.596875),\n (0.156862745098039, 0.611796875, 0.611796875), (0.16078431372549, \n 0.62671875, 0.62671875), (0.164705882352941, 0.641640625, \n 0.641640625), (0.168627450980392, 0.6565625, 0.6565625), (\n 0.172549019607843, 0.671484375, 0.671484375), (0.176470588235294, \n 0.68640625, 0.68640625), (0.180392156862745, 0.701328125, \n 0.701328125), (0.184313725490196, 0.71625, 0.71625), (\n 0.188235294117647, 0.731171875, 0.731171875), (0.192156862745098, \n 0.74609375, 0.74609375), (0.196078431372549, 0.731171875, \n 0.731171875), (0.2, 0.71625, 0.71625), (0.203921568627451, \n 0.701328125, 0.701328125), (0.207843137254902, 0.68640625, \n 0.68640625), (0.211764705882353, 0.671484375, 0.671484375), (\n 0.215686274509804, 0.6565625, 0.6565625), (0.219607843137255, \n 0.641640625, 0.641640625), (0.223529411764706, 0.62671875, \n 0.62671875), (0.227450980392157, 0.611796875, 0.611796875), (\n 0.231372549019608, 0.596875, 0.596875), (0.235294117647059, \n 0.581953125, 0.581953125), (0.23921568627451, 0.56703125, \n 0.56703125), (0.243137254901961, 0.552109375, 0.552109375), (\n 0.247058823529412, 0.5371875, 0.5371875), (0.250980392156863, \n 0.522265625, 0.522265625), (0.254901960784314, 0.50734375, \n 0.50734375), (0.258823529411765, 0.492421875, 0.492421875), (\n 0.262745098039216, 0.4775, 0.4775), (0.266666666666667, 0.462578125,\n 0.462578125), (0.270588235294118, 0.44765625, 0.44765625), (\n 0.274509803921569, 0.432734375, 0.432734375), (0.27843137254902, \n 0.4178125, 0.4178125), (0.282352941176471, 0.402890625, 0.402890625\n ), (0.286274509803922, 0.38796875, 0.38796875), (0.290196078431373,\n 0.373046875, 0.373046875), (0.294117647058824, 0.358125, 0.358125),\n (0.298039215686275, 0.343203125, 0.343203125), (0.301960784313725, \n 0.32828125, 0.32828125), (0.305882352941176, 0.313359375, \n 0.313359375), (0.309803921568627, 0.2984375, 0.2984375), (\n 0.313725490196078, 0.283515625, 0.283515625), 
(0.317647058823529, \n 0.26859375, 0.26859375), (0.32156862745098, 0.253671875, \n 0.253671875), (0.325490196078431, 0.23875, 0.23875), (\n 0.329411764705882, 0.223828125, 0.223828125), (0.333333333333333, \n 0.20890625, 0.20890625), (0.337254901960784, 0.193984375, \n 0.193984375), (0.341176470588235, 0.1790625, 0.1790625), (\n 0.345098039215686, 0.164140625, 0.164140625), (0.349019607843137, \n 0.14921875, 0.14921875), (0.352941176470588, 0.134296875, \n 0.134296875), (0.356862745098039, 0.119375, 0.119375), (\n 0.36078431372549, 0.104453125, 0.104453125), (0.364705882352941, \n 0.08953125, 0.08953125), (0.368627450980392, 0.074609375, \n 0.074609375), (0.372549019607843, 0.0596875, 0.0596875), (\n 0.376470588235294, 0.044765625, 0.044765625), (0.380392156862745, \n 0.0298437890625, 0.0298437890625), (0.384313725490196, 0.014921875,\n 0.014921875), (0.388235294117647, 0, 0), (0.392156862745098, \n 0.012890625, 0.012890625), (0.396078431372549, 0.02578125, \n 0.02578125), (0.4, 0.038671875, 0.038671875), (0.403921568627451, \n 0.0515625, 0.0515625), (0.407843137254902, 0.064453125, 0.064453125\n ), (0.411764705882353, 0.07734375, 0.07734375), (0.415686274509804,\n 0.090234375, 0.090234375), (0.419607843137255, 0.103125, 0.103125),\n (0.423529411764706, 0.116015625, 0.116015625), (0.427450980392157, \n 0.12890625, 0.12890625), (0.431372549019608, 0.141796875, \n 0.141796875), (0.435294117647059, 0.1546875, 0.1546875), (\n 0.43921568627451, 0.167578125, 0.167578125), (0.443137254901961, \n 0.18046875, 0.18046875), (0.447058823529412, 0.193359375, \n 0.193359375), (0.450980392156863, 0.20625, 0.20625), (\n 0.454901960784314, 0.219140625, 0.219140625), (0.458823529411765, \n 0.23203125, 0.23203125), (0.462745098039216, 0.244921875, \n 0.244921875), (0.466666666666667, 0.2578125, 0.2578125), (\n 0.470588235294118, 0.270703125, 0.270703125), (0.474509803921569, \n 0.28359375, 0.28359375), (0.47843137254902, 0.296484375, \n 0.296484375), (0.482352941176471, 0.309375, 0.309375), (\n 0.486274509803922, 0.322265625, 0.322265625), (0.490196078431373, \n 0.33515625, 0.33515625), (0.494117647058824, 0.348046875, \n 0.348046875), (0.498039215686275, 0.3609375, 0.3609375), (\n 0.501960784313725, 0.373828125, 0.373828125), (0.505882352941176, \n 0.38671875, 0.38671875), (0.509803921568627, 0.399609375, \n 0.399609375), (0.513725490196078, 0.4125, 0.4125), (\n 0.517647058823529, 0.425390625, 0.425390625), (0.52156862745098, \n 0.43828125, 0.43828125), (0.525490196078431, 0.451171875, \n 0.451171875), (0.529411764705882, 0.4640625, 0.4640625), (\n 0.533333333333333, 0.476953125, 0.476953125), (0.537254901960784, \n 0.48984375, 0.48984375), (0.541176470588235, 0.502734375, \n 0.502734375), (0.545098039215686, 0.515625, 0.515625), (\n 0.549019607843137, 0.528515625, 0.528515625), (0.552941176470588, \n 0.54140625, 0.54140625), (0.556862745098039, 0.554296875, \n 0.554296875), (0.56078431372549, 0.5671875, 0.5671875), (\n 0.564705882352941, 0.580078125, 0.580078125), (0.568627450980392, \n 0.59296875, 0.59296875), (0.572549019607843, 0.605859375, \n 0.605859375), (0.576470588235294, 0.61875, 0.61875), (\n 0.580392156862745, 0.631640625, 0.631640625), (0.584313725490196, \n 0.64453125, 0.64453125), (0.588235294117647, 0.6359375, 0.6359375),\n (0.592156862745098, 0.62734375, 0.62734375), (0.596078431372549, \n 0.61875, 0.61875), (0.6, 0.61015625, 0.61015625), (\n 0.603921568627451, 0.6015625, 0.6015625), (0.607843137254902, \n 0.59296875, 0.59296875), (0.611764705882353, 0.584375, 0.584375), (\n 
0.615686274509804, 0.57578125, 0.57578125), (0.619607843137255, \n 0.5671875, 0.5671875), (0.623529411764706, 0.55859375, 0.55859375),\n (0.627450980392157, 0.55, 0.55), (0.631372549019608, 0.54140625, \n 0.54140625), (0.635294117647059, 0.5328125, 0.5328125), (\n 0.63921568627451, 0.52421875, 0.52421875), (0.643137254901961, \n 0.515625, 0.515625), (0.647058823529412, 0.50703125, 0.50703125), (\n 0.650980392156863, 0.4984375, 0.4984375), (0.654901960784314, \n 0.48984375, 0.48984375), (0.658823529411765, 0.48125, 0.48125), (\n 0.662745098039216, 0.47265625, 0.47265625), (0.666666666666667, \n 0.4640625, 0.4640625), (0.670588235294118, 0.45546875, 0.45546875),\n (0.674509803921569, 0.446875, 0.446875), (0.67843137254902, \n 0.43828125, 0.43828125), (0.682352941176471, 0.4296875, 0.4296875),\n (0.686274509803922, 0.42109375, 0.42109375), (0.690196078431373, \n 0.4125, 0.4125), (0.694117647058824, 0.40390625, 0.40390625), (\n 0.698039215686274, 0.3953125, 0.3953125), (0.701960784313725, \n 0.38671875, 0.38671875), (0.705882352941177, 0.378125, 0.378125), (\n 0.709803921568627, 0.36953125, 0.36953125), (0.713725490196078, \n 0.3609375, 0.3609375), (0.717647058823529, 0.35234375, 0.35234375),\n (0.72156862745098, 0.34375, 0.34375), (0.725490196078431, \n 0.33515625, 0.33515625), (0.729411764705882, 0.3265625, 0.3265625),\n (0.733333333333333, 0.31796875, 0.31796875), (0.737254901960784, \n 0.309375, 0.309375), (0.741176470588235, 0.30078125, 0.30078125), (\n 0.745098039215686, 0.2921875, 0.2921875), (0.749019607843137, \n 0.28359375, 0.28359375), (0.752941176470588, 0.275, 0.275), (\n 0.756862745098039, 0.26640625, 0.26640625), (0.76078431372549, \n 0.2578125, 0.2578125), (0.764705882352941, 0.24921875, 0.24921875),\n (0.768627450980392, 0.240625, 0.240625), (0.772549019607843, \n 0.23203125, 0.23203125), (0.776470588235294, 0.2234375, 0.2234375),\n (0.780392156862745, 0.21484375, 0.21484375), (0.784313725490196, \n 0.222301171875, 0.222301171875), (0.788235294117647, 0.22975859375,\n 0.22975859375), (0.792156862745098, 0.237216015625, 0.237216015625),\n (0.796078431372549, 0.2446734375, 0.2446734375), (0.8, \n 0.252130859375, 0.252130859375), (0.803921568627451, 0.259587890625,\n 0.259587890625), (0.807843137254902, 0.2670453125, 0.2670453125), (\n 0.811764705882353, 0.274502734375, 0.274502734375), (\n 0.815686274509804, 0.28196015625, 0.28196015625), (\n 0.819607843137255, 0.289417578125, 0.289417578125), (\n 0.823529411764706, 0.296875, 0.296875), (0.827450980392157, \n 0.304332421875, 0.304332421875), (0.831372549019608, 0.31178984375,\n 0.31178984375), (0.835294117647059, 0.319247265625, 0.319247265625),\n (0.83921568627451, 0.3267046875, 0.3267046875), (0.843137254901961,\n 0.334162109375, 0.334162109375), (0.847058823529412, 0.34161953125,\n 0.34161953125), (0.850980392156863, 0.3490765625, 0.3490765625), (\n 0.854901960784314, 0.356533984375, 0.356533984375), (\n 0.858823529411765, 0.36399140625, 0.36399140625), (\n 0.862745098039216, 0.371448828125, 0.371448828125), (\n 0.866666666666667, 0.37890625, 0.37890625), (0.870588235294118, \n 0.386363671875, 0.386363671875), (0.874509803921569, 0.3938203125, \n 0.3938203125), (0.87843137254902, 0.40127734375, 0.40127734375), (\n 0.882352941176471, 0.408734375, 0.408734375), (0.886274509803922, \n 0.41619140625, 0.41619140625), (0.890196078431373, 0.42365234375, \n 0.42365234375), (0.894117647058824, 0.431109375, 0.431109375), (\n 0.898039215686275, 0.43856640625, 0.43856640625), (\n 0.901960784313726, 0.4460234375, 0.4460234375), 
(0.905882352941176,\n 0.45348046875, 0.45348046875), (0.909803921568627, 0.4609375, \n 0.4609375), (0.913725490196078, 0.46839453125, 0.46839453125), (\n 0.917647058823529, 0.4758515625, 0.4758515625), (0.92156862745098, \n 0.48330859375, 0.48330859375), (0.925490196078431, 0.490765625, \n 0.490765625), (0.929411764705882, 0.49822265625, 0.49822265625), (\n 0.933333333333333, 0.50568359375, 0.50568359375), (\n 0.937254901960784, 0.513140625, 0.513140625), (0.941176470588235, \n 0.52059765625, 0.52059765625), (0.945098039215686, 0.5280546875, \n 0.5280546875), (0.949019607843137, 0.53551171875, 0.53551171875), (\n 0.952941176470588, 0.54296875, 0.54296875), (0.956862745098039, \n 0.55042578125, 0.55042578125), (0.96078431372549, 0.5578828125, \n 0.5578828125), (0.964705882352941, 0.56533984375, 0.56533984375), (\n 0.968627450980392, 0.572796875, 0.572796875), (0.972549019607843, \n 0.58025390625, 0.58025390625), (0.976470588235294, 0.58771484375, \n 0.58771484375), (0.980392156862745, 0.595171875, 0.595171875), (\n 0.984313725490196, 0.60262890625, 0.60262890625), (\n 0.988235294117647, 0.6100859375, 0.6100859375), (0.992156862745098,\n 0.61754296875, 0.61754296875), (0.996078431372549, 0.625, 0.625), (\n 1.0, 0.625, 0.625)), 'blue': ((0.0, 0.51984375, 0.51984375), (\n 0.00392156862745098, 0.51984375, 0.51984375), (0.00784313725490196,\n 0.529765625, 0.529765625), (0.0117647058823529, 0.5396875, \n 0.5396875), (0.0156862745098039, 0.549609375, 0.549609375), (\n 0.0196078431372549, 0.55953125, 0.55953125), (0.0235294117647059, \n 0.569453125, 0.569453125), (0.0274509803921569, 0.579375, 0.579375),\n (0.0313725490196078, 0.589296875, 0.589296875), (0.0352941176470588,\n 0.59921875, 0.59921875), (0.0392156862745098, 0.609140625, \n 0.609140625), (0.0431372549019608, 0.6190625, 0.6190625), (\n 0.0470588235294118, 0.628984375, 0.628984375), (0.0509803921568627,\n 0.63890625, 0.63890625), (0.0549019607843137, 0.648828125, \n 0.648828125), (0.0588235294117647, 0.65875, 0.65875), (\n 0.0627450980392157, 0.668671875, 0.668671875), (0.0666666666666667,\n 0.67859375, 0.67859375), (0.0705882352941176, 0.688515625, \n 0.688515625), (0.0745098039215686, 0.6984375, 0.6984375), (\n 0.0784313725490196, 0.708359375, 0.708359375), (0.0823529411764706,\n 0.71828125, 0.71828125), (0.0862745098039216, 0.728203125, \n 0.728203125), (0.0901960784313725, 0.738125, 0.738125), (\n 0.0941176470588235, 0.748046875, 0.748046875), (0.0980392156862745,\n 0.75796875, 0.75796875), (0.101960784313725, 0.767890625, \n 0.767890625), (0.105882352941176, 0.7778125, 0.7778125), (\n 0.109803921568627, 0.787734375, 0.787734375), (0.113725490196078, \n 0.79765625, 0.79765625), (0.117647058823529, 0.807578125, \n 0.807578125), (0.12156862745098, 0.8175, 0.8175), (\n 0.125490196078431, 0.827421875, 0.827421875), (0.129411764705882, \n 0.83734375, 0.83734375), (0.133333333333333, 0.847265625, \n 0.847265625), (0.137254901960784, 0.8571875, 0.8571875), (\n 0.141176470588235, 0.867109375, 0.867109375), (0.145098039215686, \n 0.87703125, 0.87703125), (0.149019607843137, 0.886953125, \n 0.886953125), (0.152941176470588, 0.896875, 0.896875), (\n 0.156862745098039, 0.906796875, 0.906796875), (0.16078431372549, \n 0.91671875, 0.91671875), (0.164705882352941, 0.926640625, \n 0.926640625), (0.168627450980392, 0.9365625, 0.9365625), (\n 0.172549019607843, 0.946484375, 0.946484375), (0.176470588235294, \n 0.95640625, 0.95640625), (0.180392156862745, 0.966328125, \n 0.966328125), (0.184313725490196, 0.97625, 0.97625), (\n 0.188235294117647, 
0.986171875, 0.986171875), (0.192156862745098, \n 0.99609375, 0.99609375), (0.196078431372549, 0.976171875, \n 0.976171875), (0.2, 0.95625, 0.95625), (0.203921568627451, \n 0.936328125, 0.936328125), (0.207843137254902, 0.91640625, \n 0.91640625), (0.211764705882353, 0.896484375, 0.896484375), (\n 0.215686274509804, 0.8765625, 0.8765625), (0.219607843137255, \n 0.856640625, 0.856640625), (0.223529411764706, 0.83671875, \n 0.83671875), (0.227450980392157, 0.816796875, 0.816796875), (\n 0.231372549019608, 0.796875, 0.796875), (0.235294117647059, \n 0.776953125, 0.776953125), (0.23921568627451, 0.75703125, \n 0.75703125), (0.243137254901961, 0.737109375, 0.737109375), (\n 0.247058823529412, 0.7171875, 0.7171875), (0.250980392156863, \n 0.697265625, 0.697265625), (0.254901960784314, 0.67734375, \n 0.67734375), (0.258823529411765, 0.657421875, 0.657421875), (\n 0.262745098039216, 0.6375, 0.6375), (0.266666666666667, 0.617578125,\n 0.617578125), (0.270588235294118, 0.59765625, 0.59765625), (\n 0.274509803921569, 0.577734375, 0.577734375), (0.27843137254902, \n 0.5578125, 0.5578125), (0.282352941176471, 0.537890625, 0.537890625\n ), (0.286274509803922, 0.51796875, 0.51796875), (0.290196078431373,\n 0.498046875, 0.498046875), (0.294117647058824, 0.478125, 0.478125),\n (0.298039215686275, 0.458203125, 0.458203125), (0.301960784313725, \n 0.43828125, 0.43828125), (0.305882352941176, 0.418359375, \n 0.418359375), (0.309803921568627, 0.3984375, 0.3984375), (\n 0.313725490196078, 0.378515625, 0.378515625), (0.317647058823529, \n 0.35859375, 0.35859375), (0.32156862745098, 0.338671875, \n 0.338671875), (0.325490196078431, 0.31875, 0.31875), (\n 0.329411764705882, 0.298828125, 0.298828125), (0.333333333333333, \n 0.27890625, 0.27890625), (0.337254901960784, 0.258984375, \n 0.258984375), (0.341176470588235, 0.2390625, 0.2390625), (\n 0.345098039215686, 0.219140625, 0.219140625), (0.349019607843137, \n 0.19921875, 0.19921875), (0.352941176470588, 0.179296875, \n 0.179296875), (0.356862745098039, 0.159375, 0.159375), (\n 0.36078431372549, 0.139453125, 0.139453125), (0.364705882352941, \n 0.11953125, 0.11953125), (0.368627450980392, 0.099609375, \n 0.099609375), (0.372549019607843, 0.0796875, 0.0796875), (\n 0.376470588235294, 0.059765625, 0.059765625), (0.380392156862745, \n 0.03984375, 0.03984375), (0.384313725490196, 0.019921875, \n 0.019921875), (0.388235294117647, 0, 0), (0.392156862745098, 0, 0),\n (0.396078431372549, 0, 0), (0.4, 0, 0), (0.403921568627451, 0, 0),\n (0.407843137254902, 0, 0), (0.411764705882353, 0, 0), (\n 0.415686274509804, 0, 0), (0.419607843137255, 0, 0), (\n 0.423529411764706, 0, 0), (0.427450980392157, 0, 0), (\n 0.431372549019608, 0, 0), (0.435294117647059, 0, 0), (\n 0.43921568627451, 0, 0), (0.443137254901961, 0, 0), (\n 0.447058823529412, 0, 0), (0.450980392156863, 0, 0), (\n 0.454901960784314, 0, 0), (0.458823529411765, 0, 0), (\n 0.462745098039216, 0, 0), (0.466666666666667, 0, 0), (\n 0.470588235294118, 0, 0), (0.474509803921569, 0, 0), (\n 0.47843137254902, 0, 0), (0.482352941176471, 0, 0), (\n 0.486274509803922, 0, 0), (0.490196078431373, 0, 0), (\n 0.494117647058824, 0, 0), (0.498039215686275, 0, 0), (\n 0.501960784313725, 0, 0), (0.505882352941176, 0, 0), (\n 0.509803921568627, 0, 0), (0.513725490196078, 0, 0), (\n 0.517647058823529, 0, 0), (0.52156862745098, 0, 0), (\n 0.525490196078431, 0, 0), (0.529411764705882, 0, 0), (\n 0.533333333333333, 0, 0), (0.537254901960784, 0, 0), (\n 0.541176470588235, 0, 0), (0.545098039215686, 0, 0), (\n 0.549019607843137, 0, 0), 
(0.552941176470588, 0, 0), (\n 0.556862745098039, 0, 0), (0.56078431372549, 0, 0), (\n 0.564705882352941, 0, 0), (0.568627450980392, 0, 0), (\n 0.572549019607843, 0, 0), (0.576470588235294, 0, 0), (\n 0.580392156862745, 0, 0), (0.584313725490196, 0, 0), (\n 0.588235294117647, 0.004296875, 0.004296875), (0.592156862745098, \n 0.00859375, 0.00859375), (0.596078431372549, 0.012890625, \n 0.012890625), (0.6, 0.0171875, 0.0171875), (0.603921568627451, \n 0.021484375, 0.021484375), (0.607843137254902, 0.02578125, \n 0.02578125), (0.611764705882353, 0.030078125, 0.030078125), (\n 0.615686274509804, 0.034375, 0.034375), (0.619607843137255, \n 0.038671875, 0.038671875), (0.623529411764706, 0.04296875, \n 0.04296875), (0.627450980392157, 0.047265625, 0.047265625), (\n 0.631372549019608, 0.0515625, 0.0515625), (0.635294117647059, \n 0.055859375, 0.055859375), (0.63921568627451, 0.06015625, \n 0.06015625), (0.643137254901961, 0.064453125, 0.064453125), (\n 0.647058823529412, 0.06875, 0.06875), (0.650980392156863, \n 0.073046875, 0.073046875), (0.654901960784314, 0.07734375, \n 0.07734375), (0.658823529411765, 0.081640625, 0.081640625), (\n 0.662745098039216, 0.0859375, 0.0859375), (0.666666666666667, \n 0.090234375, 0.090234375), (0.670588235294118, 0.09453125, \n 0.09453125), (0.674509803921569, 0.098828125, 0.098828125), (\n 0.67843137254902, 0.103125, 0.103125), (0.682352941176471, \n 0.107421875, 0.107421875), (0.686274509803922, 0.11171875, \n 0.11171875), (0.690196078431373, 0.116015625, 0.116015625), (\n 0.694117647058824, 0.1203125, 0.1203125), (0.698039215686274, \n 0.124609375, 0.124609375), (0.701960784313725, 0.12890625, \n 0.12890625), (0.705882352941177, 0.133203125, 0.133203125), (\n 0.709803921568627, 0.1375, 0.1375), (0.713725490196078, 0.141796875,\n 0.141796875), (0.717647058823529, 0.14609375, 0.14609375), (\n 0.72156862745098, 0.150390625, 0.150390625), (0.725490196078431, \n 0.1546875, 0.1546875), (0.729411764705882, 0.158984375, 0.158984375\n ), (0.733333333333333, 0.16328125, 0.16328125), (0.737254901960784,\n 0.167578125, 0.167578125), (0.741176470588235, 0.171875, 0.171875),\n (0.745098039215686, 0.176171875, 0.176171875), (0.749019607843137, \n 0.18046875, 0.18046875), (0.752941176470588, 0.184765625, \n 0.184765625), (0.756862745098039, 0.1890625, 0.1890625), (\n 0.76078431372549, 0.193359375, 0.193359375), (0.764705882352941, \n 0.19765625, 0.19765625), (0.768627450980392, 0.201953125, \n 0.201953125), (0.772549019607843, 0.20625, 0.20625), (\n 0.776470588235294, 0.210546875, 0.210546875), (0.780392156862745, \n 0.21484375, 0.21484375), (0.784313725490196, 0.22663359375, \n 0.22663359375), (0.788235294117647, 0.2384234375, 0.2384234375), (\n 0.792156862745098, 0.250212890625, 0.250212890625), (\n 0.796078431372549, 0.262002734375, 0.262002734375), (0.8, \n 0.273792578125, 0.273792578125), (0.803921568627451, 0.285582421875,\n 0.285582421875), (0.807843137254902, 0.297372265625, 0.297372265625\n ), (0.811764705882353, 0.309162109375, 0.309162109375), (\n 0.815686274509804, 0.3209515625, 0.3209515625), (0.819607843137255,\n 0.33274140625, 0.33274140625), (0.823529411764706, 0.34453125, \n 0.34453125), (0.827450980392157, 0.35632109375, 0.35632109375), (\n 0.831372549019608, 0.3681109375, 0.3681109375), (0.835294117647059,\n 0.379900390625, 0.379900390625), (0.83921568627451, 0.39169140625, \n 0.39169140625), (0.843137254901961, 0.40348046875, 0.40348046875),\n (0.847058823529412, 0.41526953125, 0.41526953125), (\n 0.850980392156863, 0.42705859375, 0.42705859375), (\n 
0.854901960784314, 0.43884765625, 0.43884765625), (\n 0.858823529411765, 0.450640625, 0.450640625), (0.862745098039216, \n 0.4624296875, 0.4624296875), (0.866666666666667, 0.47421875, \n 0.47421875), (0.870588235294118, 0.4860078125, 0.4860078125), (\n 0.874509803921569, 0.497796875, 0.497796875), (0.87843137254902, \n 0.50958984375, 0.50958984375), (0.882352941176471, 0.52137890625, \n 0.52137890625), (0.886274509803922, 0.53316796875, 0.53316796875),\n (0.890196078431373, 0.54495703125, 0.54495703125), (\n 0.894117647058824, 0.55674609375, 0.55674609375), (\n 0.898039215686275, 0.56853515625, 0.56853515625), (\n 0.901960784313726, 0.580328125, 0.580328125), (0.905882352941176, \n 0.5921171875, 0.5921171875), (0.909803921568627, 0.60390625, \n 0.60390625), (0.913725490196078, 0.6156953125, 0.6156953125), (\n 0.917647058823529, 0.627484375, 0.627484375), (0.92156862745098, \n 0.63927734375, 0.63927734375), (0.925490196078431, 0.65106640625, \n 0.65106640625), (0.929411764705882, 0.66285546875, 0.66285546875),\n (0.933333333333333, 0.67464453125, 0.67464453125), (\n 0.937254901960784, 0.68643359375, 0.68643359375), (\n 0.941176470588235, 0.69822265625, 0.69822265625), (\n 0.945098039215686, 0.710015625, 0.710015625), (0.949019607843137, \n 0.7218046875, 0.7218046875), (0.952941176470588, 0.73359375, \n 0.73359375), (0.956862745098039, 0.7453828125, 0.7453828125), (\n 0.96078431372549, 0.757171875, 0.757171875), (0.964705882352941, \n 0.76896484375, 0.76896484375), (0.968627450980392, 0.78075390625, \n 0.78075390625), (0.972549019607843, 0.79254296875, 0.79254296875),\n (0.976470588235294, 0.80433203125, 0.80433203125), (\n 0.980392156862745, 0.81612109375, 0.81612109375), (\n 0.984313725490196, 0.82791015625, 0.82791015625), (\n 0.988235294117647, 0.839703125, 0.839703125), (0.992156862745098, \n 0.8514921875, 0.8514921875), (0.996078431372549, 0.86328125, \n 0.86328125), (1.0, 0.86328125, 0.86328125))}\n vcdict = {'red': ((0, 1, 1), (0.00392156862745098, 0.54508984375, \n 0.54508984375), (0.00784313725490196, 0.5285703125, 0.5285703125),\n (0.0117647058823529, 0.5120546875, 0.5120546875), (\n 0.0156862745098039, 0.49553515625, 0.49553515625), (\n 0.0196078431372549, 0.47901953125, 0.47901953125), (\n 0.0235294117647059, 0.4625, 0.4625), (0.0274509803921569, \n 0.44598046875, 0.44598046875), (0.0313725490196078, 0.42946484375, \n 0.42946484375), (0.0352941176470588, 0.4129453125, 0.4129453125), (\n 0.0392156862745098, 0.3964296875, 0.3964296875), (\n 0.0431372549019608, 0.379910546875, 0.379910546875), (\n 0.0470588235294118, 0.36339296875, 0.36339296875), (\n 0.0509803921568627, 0.346875, 0.346875), (0.0549019607843137, \n 0.33035703125, 0.33035703125), (0.0588235294117647, 0.313839453125,\n 0.313839453125), (0.0627450980392157, 0.297321484375, \n 0.297321484375), (0.0666666666666667, 0.280803515625, \n 0.280803515625), (0.0705882352941176, 0.2642859375, 0.2642859375),\n (0.0745098039215686, 0.24776796875, 0.24776796875), (\n 0.0784313725490196, 0.23125, 0.23125), (0.0823529411764706, \n 0.21473203125, 0.21473203125), (0.0862745098039216, 0.198214453125,\n 0.198214453125), (0.0901960784313725, 0.181696484375, \n 0.181696484375), (0.0941176470588235, 0.165178515625, \n 0.165178515625), (0.0980392156862745, 0.148660546875, \n 0.148660546875), (0.101960784313725, 0.13214296875, 0.13214296875),\n (0.105882352941176, 0.115625, 0.115625), (0.109803921568627, \n 0.09910703125, 0.09910703125), (0.113725490196078, 0.082589453125, \n 0.082589453125), (0.117647058823529, 0.066071484375, 
0.066071484375\n ), (0.12156862745098, 0.049553515625, 0.049553515625), (\n 0.125490196078431, 0.0330357421875, 0.0330357421875), (\n 0.129411764705882, 0.016517890625, 0.016517890625), (\n 0.133333333333333, 0, 0), (0.137254901960784, 0, 0), (\n 0.141176470588235, 0, 0), (0.145098039215686, 0, 0), (\n 0.149019607843137, 0, 0), (0.152941176470588, 0, 0), (\n 0.156862745098039, 0, 0), (0.16078431372549, 0, 0), (\n 0.164705882352941, 0, 0), (0.168627450980392, 0, 0), (\n 0.172549019607843, 0, 0), (0.176470588235294, 0, 0), (\n 0.180392156862745, 0, 0), (0.184313725490196, 0, 0), (\n 0.188235294117647, 0, 0), (0.192156862745098, 0, 0), (\n 0.196078431372549, 0, 0), (0.2, 0, 0), (0.203921568627451, 0, 0), (\n 0.207843137254902, 0, 0), (0.211764705882353, 0, 0), (\n 0.215686274509804, 0, 0), (0.219607843137255, 0, 0), (\n 0.223529411764706, 0, 0), (0.227450980392157, 0, 0), (\n 0.231372549019608, 0, 0), (0.235294117647059, 0, 0), (\n 0.23921568627451, 0, 0), (0.243137254901961, 0, 0), (\n 0.247058823529412, 0, 0), (0.250980392156863, 0, 0), (\n 0.254901960784314, 0, 0), (0.258823529411765, 0, 0), (\n 0.262745098039216, 0, 0), (0.266666666666667, 0, 0), (\n 0.270588235294118, 0, 0), (0.274509803921569, 0, 0), (\n 0.27843137254902, 0, 0), (0.282352941176471, 0, 0), (\n 0.286274509803922, 0, 0), (0.290196078431373, 0, 0), (\n 0.294117647058824, 0, 0), (0.298039215686275, 0, 0), (\n 0.301960784313725, 0, 0), (0.305882352941176, 0, 0), (\n 0.309803921568627, 0, 0), (0.313725490196078, 0, 0), (\n 0.317647058823529, 0, 0), (0.32156862745098, 0, 0), (\n 0.325490196078431, 0, 0), (0.329411764705882, 0, 0), (\n 0.333333333333333, 0, 0), (0.337254901960784, 0, 0), (\n 0.341176470588235, 0, 0), (0.345098039215686, 0, 0), (\n 0.349019607843137, 0, 0), (0.352941176470588, 0.0061383984375, \n 0.0061383984375), (0.356862745098039, 0.012276796875, \n 0.012276796875), (0.36078431372549, 0.0184151953125, \n 0.0184151953125), (0.364705882352941, 0.0245535546875, \n 0.0245535546875), (0.368627450980392, 0.030691953125, \n 0.030691953125), (0.372549019607843, 0.0368303515625, \n 0.0368303515625), (0.376470588235294, 0.04296875, 0.04296875), (\n 0.380392156862745, 0.04910703125, 0.04910703125), (\n 0.384313725490196, 0.055245703125, 0.055245703125), (\n 0.388235294117647, 0.061383984375, 0.061383984375), (\n 0.392156862745098, 0.067522265625, 0.067522265625), (\n 0.396078431372549, 0.073660546875, 0.073660546875), (0.4, \n 0.07979921875, 0.07979921875), (0.403921568627451, 0.0859375, \n 0.0859375), (0.407843137254902, 0.09207578125, 0.09207578125), (\n 0.411764705882353, 0.098214453125, 0.098214453125), (\n 0.415686274509804, 0.104352734375, 0.104352734375), (\n 0.419607843137255, 0.110491015625, 0.110491015625), (\n 0.423529411764706, 0.116629296875, 0.116629296875), (\n 0.427450980392157, 0.12276796875, 0.12276796875), (\n 0.431372549019608, 0.12890625, 0.12890625), (0.435294117647059, \n 0.13504453125, 0.13504453125), (0.43921568627451, 0.141183203125, \n 0.141183203125), (0.443137254901961, 0.147321484375, 0.147321484375\n ), (0.447058823529412, 0.153459765625, 0.153459765625), (\n 0.450980392156863, 0.159598046875, 0.159598046875), (\n 0.454901960784314, 0.16573671875, 0.16573671875), (\n 0.458823529411765, 0.171875, 0.171875), (0.462745098039216, \n 0.17801328125, 0.17801328125), (0.466666666666667, 0.184151953125, \n 0.184151953125), (0.470588235294118, 0.190290234375, 0.190290234375\n ), (0.474509803921569, 0.196428515625, 0.196428515625), (\n 0.47843137254902, 0.202566796875, 0.202566796875), (\n 
0.482352941176471, 0.20870546875, 0.20870546875), (\n 0.486274509803922, 0.21484375, 0.21484375), (0.490196078431373, \n 0.233370703125, 0.233370703125), (0.494117647058824, 0.251897265625,\n 0.251897265625), (0.498039215686275, 0.27042421875, 0.27042421875),\n (0.501960784313725, 0.28895078125, 0.28895078125), (\n 0.505882352941176, 0.307477734375, 0.307477734375), (\n 0.509803921568627, 0.326004296875, 0.326004296875), (\n 0.513725490196078, 0.34453125, 0.34453125), (0.517647058823529, \n 0.363058203125, 0.363058203125), (0.52156862745098, 0.381584765625,\n 0.381584765625), (0.525490196078431, 0.40011328125, 0.40011328125),\n (0.529411764705882, 0.41863671875, 0.41863671875), (\n 0.533333333333333, 0.4371640625, 0.4371640625), (0.537254901960784,\n 0.45569140625, 0.45569140625), (0.541176470588235, 0.47421875, \n 0.47421875), (0.545098039215686, 0.49274609375, 0.49274609375), (\n 0.549019607843137, 0.5112734375, 0.5112734375), (0.552941176470588,\n 0.52980078125, 0.52980078125), (0.556862745098039, 0.54832421875, \n 0.54832421875), (0.56078431372549, 0.5668515625, 0.5668515625), (\n 0.564705882352941, 0.58537890625, 0.58537890625), (\n 0.568627450980392, 0.60390625, 0.60390625), (0.572549019607843, \n 0.62243359375, 0.62243359375), (0.576470588235294, 0.6409609375, \n 0.6409609375), (0.580392156862745, 0.65948828125, 0.65948828125), (\n 0.584313725490196, 0.67801171875, 0.67801171875), (\n 0.588235294117647, 0.6965390625, 0.6965390625), (0.592156862745098,\n 0.71506640625, 0.71506640625), (0.596078431372549, 0.73359375, \n 0.73359375), (0.6, 0.75212109375, 0.75212109375), (\n 0.603921568627451, 0.7706484375, 0.7706484375), (0.607843137254902,\n 0.78917578125, 0.78917578125), (0.611764705882353, 0.80769921875, \n 0.80769921875), (0.615686274509804, 0.8262265625, 0.8262265625), (\n 0.619607843137255, 0.84475390625, 0.84475390625), (\n 0.623529411764706, 0.86328125, 0.86328125), (0.627450980392157, \n 0.86549609375, 0.86549609375), (0.631372549019608, 0.86770703125, \n 0.86770703125), (0.635294117647059, 0.869921875, 0.869921875), (\n 0.63921568627451, 0.87213671875, 0.87213671875), (0.643137254901961,\n 0.87434765625, 0.87434765625), (0.647058823529412, 0.8765625, \n 0.8765625), (0.650980392156863, 0.87877734375, 0.87877734375), (\n 0.654901960784314, 0.88098828125, 0.88098828125), (\n 0.658823529411765, 0.883203125, 0.883203125), (0.662745098039216, \n 0.88541796875, 0.88541796875), (0.666666666666667, 0.88762890625, \n 0.88762890625), (0.670588235294118, 0.88984375, 0.88984375), (\n 0.674509803921569, 0.89205859375, 0.89205859375), (0.67843137254902,\n 0.89426953125, 0.89426953125), (0.682352941176471, 0.896484375, \n 0.896484375), (0.686274509803922, 0.89869921875, 0.89869921875), (\n 0.690196078431373, 0.90091015625, 0.90091015625), (\n 0.694117647058824, 0.903125, 0.903125), (0.698039215686274, \n 0.90533984375, 0.90533984375), (0.701960784313725, 0.90755078125, \n 0.90755078125), (0.705882352941177, 0.909765625, 0.909765625), (\n 0.709803921568627, 0.91198046875, 0.91198046875), (\n 0.713725490196078, 0.91419140625, 0.91419140625), (\n 0.717647058823529, 0.91640625, 0.91640625), (0.72156862745098, \n 0.91862109375, 0.91862109375), (0.725490196078431, 0.92083203125, \n 0.92083203125), (0.729411764705882, 0.923046875, 0.923046875), (\n 0.733333333333333, 0.92526171875, 0.92526171875), (\n 0.737254901960784, 0.92747265625, 0.92747265625), (\n 0.741176470588235, 0.9296875, 0.9296875), (0.745098039215686, \n 0.93190234375, 0.93190234375), (0.749019607843137, 0.93411328125, \n 
0.93411328125), (0.752941176470588, 0.936328125, 0.936328125), (\n 0.756862745098039, 0.93854296875, 0.93854296875), (0.76078431372549,\n 0.94075390625, 0.94075390625), (0.764705882352941, 0.94296875, \n 0.94296875), (0.768627450980392, 0.94518359375, 0.94518359375), (\n 0.772549019607843, 0.94739453125, 0.94739453125), (\n 0.776470588235294, 0.949609375, 0.949609375), (0.780392156862745, \n 0.95182421875, 0.95182421875), (0.784313725490196, 0.95403515625, \n 0.95403515625), (0.788235294117647, 0.95625, 0.95625), (\n 0.792156862745098, 0.95846484375, 0.95846484375), (\n 0.796078431372549, 0.96067578125, 0.96067578125), (0.8, 0.962890625,\n 0.962890625), (0.803921568627451, 0.96510546875, 0.96510546875), (\n 0.807843137254902, 0.96731640625, 0.96731640625), (\n 0.811764705882353, 0.96953125, 0.96953125), (0.815686274509804, \n 0.97174609375, 0.97174609375), (0.819607843137255, 0.97395703125, \n 0.97395703125), (0.823529411764706, 0.976171875, 0.976171875), (\n 0.827450980392157, 0.97838671875, 0.97838671875), (\n 0.831372549019608, 0.98059765625, 0.98059765625), (\n 0.835294117647059, 0.9828125, 0.9828125), (0.83921568627451, \n 0.98502734375, 0.98502734375), (0.843137254901961, 0.98723828125, \n 0.98723828125), (0.847058823529412, 0.989453125, 0.989453125), (\n 0.850980392156863, 0.99166796875, 0.99166796875), (\n 0.854901960784314, 0.99387890625, 0.99387890625), (\n 0.858823529411765, 0.99609375, 0.99609375), (0.862745098039216, \n 0.99609375, 0.99609375), (0.866666666666667, 0.99609375, 0.99609375\n ), (0.870588235294118, 0.99609375, 0.99609375), (0.874509803921569,\n 0.99609375, 0.99609375), (0.87843137254902, 0.99609375, 0.99609375),\n (0.882352941176471, 0.99609375, 0.99609375), (0.886274509803922, \n 0.99609375, 0.99609375), (0.890196078431373, 0.99609375, 0.99609375\n ), (0.894117647058824, 0.99609375, 0.99609375), (0.898039215686275,\n 0.99609375, 0.99609375), (0.901960784313726, 0.99609375, 0.99609375\n ), (0.905882352941176, 0.99609375, 0.99609375), (0.909803921568627,\n 0.99609375, 0.99609375), (0.913725490196078, 0.99609375, 0.99609375\n ), (0.917647058823529, 0.99609375, 0.99609375), (0.92156862745098, \n 0.99609375, 0.99609375), (0.925490196078431, 0.99609375, 0.99609375\n ), (0.929411764705882, 0.99609375, 0.99609375), (0.933333333333333,\n 0.99609375, 0.99609375), (0.937254901960784, 0.99609375, 0.99609375\n ), (0.941176470588235, 0.99609375, 0.99609375), (0.945098039215686,\n 0.99609375, 0.99609375), (0.949019607843137, 0.99609375, 0.99609375\n ), (0.952941176470588, 0.99609375, 0.99609375), (0.956862745098039,\n 0.99609375, 0.99609375), (0.96078431372549, 0.99609375, 0.99609375),\n (0.964705882352941, 0.99609375, 0.99609375), (0.968627450980392, \n 0.99609375, 0.99609375), (0.972549019607843, 0.99609375, 0.99609375\n ), (0.976470588235294, 0.99609375, 0.99609375), (0.980392156862745,\n 0.99609375, 0.99609375), (0.984313725490196, 0.99609375, 0.99609375\n ), (0.988235294117647, 0.99609375, 0.99609375), (0.992156862745098,\n 0.99609375, 0.99609375), (0.996078431372549, 0.99609375, 0.99609375\n ), (1, 0.99609375, 0.99609375)), 'green': ((0, 1, 1), (\n 0.00392156862745098, 0, 0), (0.00784313725490196, 0, 0), (\n 0.0117647058823529, 0, 0), (0.0156862745098039, 0, 0), (\n 0.0196078431372549, 0, 0), (0.0235294117647059, 0, 0), (\n 0.0274509803921569, 0, 0), (0.0313725490196078, 0, 0), (\n 0.0352941176470588, 0, 0), (0.0392156862745098, 0, 0), (\n 0.0431372549019608, 0, 0), (0.0470588235294118, 0, 0), (\n 0.0509803921568627, 0, 0), (0.0549019607843137, 0, 0), (\n 
0.0588235294117647, 0, 0), (0.0627450980392157, 0, 0), (\n 0.0666666666666667, 0, 0), (0.0705882352941176, 0, 0), (\n 0.0745098039215686, 0, 0), (0.0784313725490196, 0, 0), (\n 0.0823529411764706, 0, 0), (0.0862745098039216, 0, 0), (\n 0.0901960784313725, 0, 0), (0.0941176470588235, 0, 0), (\n 0.0980392156862745, 0, 0), (0.101960784313725, 0, 0), (\n 0.105882352941176, 0, 0), (0.109803921568627, 0, 0), (\n 0.113725490196078, 0, 0), (0.117647058823529, 0, 0), (\n 0.12156862745098, 0, 0), (0.125490196078431, 0, 0), (\n 0.129411764705882, 0, 0), (0.133333333333333, 0, 0), (\n 0.137254901960784, 0.0135653515625, 0.0135653515625), (\n 0.141176470588235, 0.0271306640625, 0.0271306640625), (\n 0.145098039215686, 0.04069609375, 0.04069609375), (\n 0.149019607843137, 0.054261328125, 0.054261328125), (\n 0.152941176470588, 0.0678265625, 0.0678265625), (0.156862745098039,\n 0.0813921875, 0.0813921875), (0.16078431372549, 0.094957421875, \n 0.094957421875), (0.164705882352941, 0.10852265625, 0.10852265625),\n (0.168627450980392, 0.122087890625, 0.122087890625), (\n 0.172549019607843, 0.135653515625, 0.135653515625), (\n 0.176470588235294, 0.14921875, 0.14921875), (0.180392156862745, \n 0.162783984375, 0.162783984375), (0.184313725490196, 0.176349609375,\n 0.176349609375), (0.188235294117647, 0.18991484375, 0.18991484375),\n (0.192156862745098, 0.203480078125, 0.203480078125), (\n 0.196078431372549, 0.2170453125, 0.2170453125), (0.2, 0.2306109375,\n 0.2306109375), (0.203921568627451, 0.244176171875, 0.244176171875),\n (0.207843137254902, 0.25774140625, 0.25774140625), (\n 0.211764705882353, 0.27130703125, 0.27130703125), (\n 0.215686274509804, 0.284872265625, 0.284872265625), (\n 0.219607843137255, 0.2984375, 0.2984375), (0.223529411764706, \n 0.312002734375, 0.312002734375), (0.227450980392157, 0.325568359375,\n 0.325568359375), (0.231372549019608, 0.33913359375, 0.33913359375),\n (0.235294117647059, 0.352698828125, 0.352698828125), (\n 0.23921568627451, 0.3662640625, 0.3662640625), (0.243137254901961, \n 0.3798296875, 0.3798296875), (0.247058823529412, 0.39339453125, \n 0.39339453125), (0.250980392156863, 0.4069609375, 0.4069609375), (\n 0.254901960784314, 0.42052734375, 0.42052734375), (\n 0.258823529411765, 0.43408984375, 0.43408984375), (\n 0.262745098039216, 0.44765625, 0.44765625), (0.266666666666667, \n 0.46122265625, 0.46122265625), (0.270588235294118, 0.47478515625, \n 0.47478515625), (0.274509803921569, 0.4883515625, 0.4883515625), (\n 0.27843137254902, 0.50191796875, 0.50191796875), (0.282352941176471,\n 0.515484375, 0.515484375), (0.286274509803922, 0.529046875, \n 0.529046875), (0.290196078431373, 0.54261328125, 0.54261328125), (\n 0.294117647058824, 0.5561796875, 0.5561796875), (0.298039215686275,\n 0.56974609375, 0.56974609375), (0.301960784313725, 0.58330859375, \n 0.58330859375), (0.305882352941176, 0.596875, 0.596875), (\n 0.309803921568627, 0.61044140625, 0.61044140625), (\n 0.313725490196078, 0.62400390625, 0.62400390625), (\n 0.317647058823529, 0.6375703125, 0.6375703125), (0.32156862745098, \n 0.65113671875, 0.65113671875), (0.325490196078431, 0.664703125, \n 0.664703125), (0.329411764705882, 0.678265625, 0.678265625), (\n 0.333333333333333, 0.69183203125, 0.69183203125), (\n 0.337254901960784, 0.7053984375, 0.7053984375), (0.341176470588235,\n 0.71896484375, 0.71896484375), (0.345098039215686, 0.73252734375, \n 0.73252734375), (0.349019607843137, 0.74609375, 0.74609375), (\n 0.352941176470588, 0.7309140625, 0.7309140625), (0.356862745098039,\n 0.71573828125, 0.71573828125), 
(0.36078431372549, 0.70055859375, \n 0.70055859375), (0.364705882352941, 0.68537890625, 0.68537890625),\n (0.368627450980392, 0.67019921875, 0.67019921875), (\n 0.372549019607843, 0.6550234375, 0.6550234375), (0.376470588235294,\n 0.63984375, 0.63984375), (0.380392156862745, 0.6246640625, \n 0.6246640625), (0.384313725490196, 0.60948828125, 0.60948828125), (\n 0.388235294117647, 0.59430859375, 0.59430859375), (\n 0.392156862745098, 0.57912890625, 0.57912890625), (\n 0.396078431372549, 0.56394921875, 0.56394921875), (0.4, \n 0.5487734375, 0.5487734375), (0.403921568627451, 0.53359375, \n 0.53359375), (0.407843137254902, 0.5184140625, 0.5184140625), (\n 0.411764705882353, 0.50323828125, 0.50323828125), (\n 0.415686274509804, 0.48805859375, 0.48805859375), (\n 0.419607843137255, 0.47287890625, 0.47287890625), (\n 0.423529411764706, 0.45769921875, 0.45769921875), (\n 0.427450980392157, 0.4425234375, 0.4425234375), (0.431372549019608,\n 0.42734375, 0.42734375), (0.435294117647059, 0.4121640625, \n 0.4121640625), (0.43921568627451, 0.39698828125, 0.39698828125), (\n 0.443137254901961, 0.381808203125, 0.381808203125), (\n 0.447058823529412, 0.366629296875, 0.366629296875), (\n 0.450980392156863, 0.35145078125, 0.35145078125), (\n 0.454901960784314, 0.336272265625, 0.336272265625), (\n 0.458823529411765, 0.32109375, 0.32109375), (0.462745098039216, \n 0.305915234375, 0.305915234375), (0.466666666666667, 0.29073671875,\n 0.29073671875), (0.470588235294118, 0.2755578125, 0.2755578125), (\n 0.474509803921569, 0.260379296875, 0.260379296875), (\n 0.47843137254902, 0.24520078125, 0.24520078125), (0.482352941176471,\n 0.230022265625, 0.230022265625), (0.486274509803922, 0.21484375, \n 0.21484375), (0.490196078431373, 0.2265625, 0.2265625), (\n 0.494117647058824, 0.23828125, 0.23828125), (0.498039215686275, \n 0.25, 0.25), (0.501960784313725, 0.26171875, 0.26171875), (\n 0.505882352941176, 0.2734375, 0.2734375), (0.509803921568627, \n 0.28515625, 0.28515625), (0.513725490196078, 0.296875, 0.296875), (\n 0.517647058823529, 0.30859375, 0.30859375), (0.52156862745098, \n 0.3203125, 0.3203125), (0.525490196078431, 0.33203125, 0.33203125),\n (0.529411764705882, 0.34375, 0.34375), (0.533333333333333, \n 0.35546875, 0.35546875), (0.537254901960784, 0.3671875, 0.3671875),\n (0.541176470588235, 0.37890625, 0.37890625), (0.545098039215686, \n 0.390625, 0.390625), (0.549019607843137, 0.40234375, 0.40234375), (\n 0.552941176470588, 0.4140625, 0.4140625), (0.556862745098039, \n 0.42578125, 0.42578125), (0.56078431372549, 0.4375, 0.4375), (\n 0.564705882352941, 0.44921875, 0.44921875), (0.568627450980392, \n 0.4609375, 0.4609375), (0.572549019607843, 0.47265625, 0.47265625),\n (0.576470588235294, 0.484375, 0.484375), (0.580392156862745, \n 0.49609375, 0.49609375), (0.584313725490196, 0.5078125, 0.5078125),\n (0.588235294117647, 0.51953125, 0.51953125), (0.592156862745098, \n 0.53125, 0.53125), (0.596078431372549, 0.54296875, 0.54296875), (\n 0.6, 0.5546875, 0.5546875), (0.603921568627451, 0.56640625, \n 0.56640625), (0.607843137254902, 0.578125, 0.578125), (\n 0.611764705882353, 0.58984375, 0.58984375), (0.615686274509804, \n 0.6015625, 0.6015625), (0.619607843137255, 0.61328125, 0.61328125),\n (0.623529411764706, 0.625, 0.625), (0.627450980392157, \n 0.61458203125, 0.61458203125), (0.631372549019608, 0.60416796875, \n 0.60416796875), (0.635294117647059, 0.59375, 0.59375), (\n 0.63921568627451, 0.58333203125, 0.58333203125), (0.643137254901961,\n 0.57291796875, 0.57291796875), (0.647058823529412, 0.5625, 0.5625),\n 
(0.650980392156863, 0.55208203125, 0.55208203125), (\n 0.654901960784314, 0.54166796875, 0.54166796875), (\n 0.658823529411765, 0.53125, 0.53125), (0.662745098039216, \n 0.52083203125, 0.52083203125), (0.666666666666667, 0.51041796875, \n 0.51041796875), (0.670588235294118, 0.5, 0.5), (0.674509803921569, \n 0.48958203125, 0.48958203125), (0.67843137254902, 0.47916796875, \n 0.47916796875), (0.682352941176471, 0.46875, 0.46875), (\n 0.686274509803922, 0.45833203125, 0.45833203125), (\n 0.690196078431373, 0.44791796875, 0.44791796875), (\n 0.694117647058824, 0.4375, 0.4375), (0.698039215686274, \n 0.42708203125, 0.42708203125), (0.701960784313725, 0.41666796875, \n 0.41666796875), (0.705882352941177, 0.40625, 0.40625), (\n 0.709803921568627, 0.39583203125, 0.39583203125), (\n 0.713725490196078, 0.385416796875, 0.385416796875), (\n 0.717647058823529, 0.375, 0.375), (0.72156862745098, 0.364583203125,\n 0.364583203125), (0.725490196078431, 0.354166796875, 0.354166796875\n ), (0.729411764705882, 0.34375, 0.34375), (0.733333333333333, \n 0.333333203125, 0.333333203125), (0.737254901960784, 0.322916796875,\n 0.322916796875), (0.741176470588235, 0.3125, 0.3125), (\n 0.745098039215686, 0.302083203125, 0.302083203125), (\n 0.749019607843137, 0.291666796875, 0.291666796875), (\n 0.752941176470588, 0.28125, 0.28125), (0.756862745098039, \n 0.270833203125, 0.270833203125), (0.76078431372549, 0.260416796875,\n 0.260416796875), (0.764705882352941, 0.25, 0.25), (\n 0.768627450980392, 0.239583203125, 0.239583203125), (\n 0.772549019607843, 0.229166796875, 0.229166796875), (\n 0.776470588235294, 0.21875, 0.21875), (0.780392156862745, \n 0.208333203125, 0.208333203125), (0.784313725490196, 0.197916796875,\n 0.197916796875), (0.788235294117647, 0.1875, 0.1875), (\n 0.792156862745098, 0.177083203125, 0.177083203125), (\n 0.796078431372549, 0.166666796875, 0.166666796875), (0.8, 0.15625, \n 0.15625), (0.803921568627451, 0.145833203125, 0.145833203125), (\n 0.807843137254902, 0.135416796875, 0.135416796875), (\n 0.811764705882353, 0.125, 0.125), (0.815686274509804, \n 0.114583203125, 0.114583203125), (0.819607843137255, 0.104166796875,\n 0.104166796875), (0.823529411764706, 0.09375, 0.09375), (\n 0.827450980392157, 0.083333203125, 0.083333203125), (\n 0.831372549019608, 0.072916796875, 0.072916796875), (\n 0.835294117647059, 0.0625, 0.0625), (0.83921568627451, \n 0.052083203125, 0.052083203125), (0.843137254901961, 0.041666796875,\n 0.041666796875), (0.847058823529412, 0.03125, 0.03125), (\n 0.850980392156863, 0.0208333203125, 0.0208333203125), (\n 0.854901960784314, 0.0104166796875, 0.0104166796875), (\n 0.858823529411765, 0, 0), (0.862745098039216, 0.0184151953125, \n 0.0184151953125), (0.866666666666667, 0.0368303515625, \n 0.0368303515625), (0.870588235294118, 0.055245703125, \n 0.055245703125), (0.874509803921569, 0.073660546875, 0.073660546875\n ), (0.87843137254902, 0.09207578125, 0.09207578125), (\n 0.882352941176471, 0.110491015625, 0.110491015625), (\n 0.886274509803922, 0.12890625, 0.12890625), (0.890196078431373, \n 0.147321484375, 0.147321484375), (0.894117647058824, 0.16573671875,\n 0.16573671875), (0.898039215686275, 0.184151953125, 0.184151953125),\n (0.901960784313726, 0.202566796875, 0.202566796875), (\n 0.905882352941176, 0.22098203125, 0.22098203125), (\n 0.909803921568627, 0.239397265625, 0.239397265625), (\n 0.913725490196078, 0.2578125, 0.2578125), (0.917647058823529, \n 0.276227734375, 0.276227734375), (0.92156862745098, 0.29464296875, \n 0.29464296875), (0.925490196078431, 0.313058203125, 
0.313058203125),\n (0.929411764705882, 0.331473046875, 0.331473046875), (\n 0.933333333333333, 0.34988828125, 0.34988828125), (\n 0.937254901960784, 0.368303515625, 0.368303515625), (\n 0.941176470588235, 0.38671875, 0.38671875), (0.945098039215686, \n 0.4051328125, 0.4051328125), (0.949019607843137, 0.42355078125, \n 0.42355078125), (0.952941176470588, 0.44196484375, 0.44196484375),\n (0.956862745098039, 0.46037890625, 0.46037890625), (\n 0.96078431372549, 0.47879296875, 0.47879296875), (0.964705882352941,\n 0.4972109375, 0.4972109375), (0.968627450980392, 0.515625, 0.515625\n ), (0.972549019607843, 0.5340390625, 0.5340390625), (\n 0.976470588235294, 0.55245703125, 0.55245703125), (\n 0.980392156862745, 0.57087109375, 0.57087109375), (\n 0.984313725490196, 0.58928515625, 0.58928515625), (\n 0.988235294117647, 0.60769921875, 0.60769921875), (\n 0.992156862745098, 0.6261171875, 0.6261171875), (0.996078431372549,\n 0.64453125, 0.64453125), (1, 0.64453125, 0.64453125)), 'blue': ((0,\n 1, 1), (0.00392156862745098, 0.80569140625, 0.80569140625), (\n 0.00784313725490196, 0.7964296875, 0.7964296875), (\n 0.0117647058823529, 0.7871640625, 0.7871640625), (\n 0.0156862745098039, 0.77790234375, 0.77790234375), (\n 0.0196078431372549, 0.76863671875, 0.76863671875), (\n 0.0235294117647059, 0.759375, 0.759375), (0.0274509803921569, \n 0.75011328125, 0.75011328125), (0.0313725490196078, 0.74084765625, \n 0.74084765625), (0.0352941176470588, 0.7315859375, 0.7315859375), (\n 0.0392156862745098, 0.7223203125, 0.7223203125), (\n 0.0431372549019608, 0.71305859375, 0.71305859375), (\n 0.0470588235294118, 0.70379296875, 0.70379296875), (\n 0.0509803921568627, 0.69453125, 0.69453125), (0.0549019607843137, \n 0.68526953125, 0.68526953125), (0.0588235294117647, 0.67600390625, \n 0.67600390625), (0.0627450980392157, 0.6667421875, 0.6667421875), (\n 0.0666666666666667, 0.6574765625, 0.6574765625), (\n 0.0705882352941176, 0.64821484375, 0.64821484375), (\n 0.0745098039215686, 0.63894921875, 0.63894921875), (\n 0.0784313725490196, 0.6296875, 0.6296875), (0.0823529411764706, \n 0.62042578125, 0.62042578125), (0.0862745098039216, 0.61116015625, \n 0.61116015625), (0.0901960784313725, 0.6018984375, 0.6018984375), (\n 0.0941176470588235, 0.5926328125, 0.5926328125), (\n 0.0980392156862745, 0.58337109375, 0.58337109375), (\n 0.101960784313725, 0.57410546875, 0.57410546875), (\n 0.105882352941176, 0.56484375, 0.56484375), (0.109803921568627, \n 0.55558203125, 0.55558203125), (0.113725490196078, 0.54631640625, \n 0.54631640625), (0.117647058823529, 0.5370546875, 0.5370546875), (\n 0.12156862745098, 0.5277890625, 0.5277890625), (0.125490196078431, \n 0.51852734375, 0.51852734375), (0.129411764705882, 0.50926171875, \n 0.50926171875), (0.133333333333333, 0.5, 0.5), (0.137254901960784, \n 0.50901953125, 0.50901953125), (0.141176470588235, 0.5180390625, \n 0.5180390625), (0.145098039215686, 0.52705859375, 0.52705859375), (\n 0.149019607843137, 0.536078125, 0.536078125), (0.152941176470588, \n 0.54509765625, 0.54509765625), (0.156862745098039, 0.55412109375, \n 0.55412109375), (0.16078431372549, 0.563140625, 0.563140625), (\n 0.164705882352941, 0.57216015625, 0.57216015625), (\n 0.168627450980392, 0.5811796875, 0.5811796875), (0.172549019607843,\n 0.59019921875, 0.59019921875), (0.176470588235294, 0.59921875, \n 0.59921875), (0.180392156862745, 0.60823828125, 0.60823828125), (\n 0.184313725490196, 0.6172578125, 0.6172578125), (0.188235294117647,\n 0.62627734375, 0.62627734375), (0.192156862745098, 0.635296875, \n 0.635296875), 
(0.196078431372549, 0.64431640625, 0.64431640625), (\n 0.2, 0.65333984375, 0.65333984375), (0.203921568627451, 0.662359375,\n 0.662359375), (0.207843137254902, 0.67137890625, 0.67137890625), (\n 0.211764705882353, 0.6803984375, 0.6803984375), (0.215686274509804,\n 0.68941796875, 0.68941796875), (0.219607843137255, 0.6984375, \n 0.6984375), (0.223529411764706, 0.70745703125, 0.70745703125), (\n 0.227450980392157, 0.7164765625, 0.7164765625), (0.231372549019608,\n 0.72549609375, 0.72549609375), (0.235294117647059, 0.734515625, \n 0.734515625), (0.23921568627451, 0.74353515625, 0.74353515625), (\n 0.243137254901961, 0.75255859375, 0.75255859375), (\n 0.247058823529412, 0.761578125, 0.761578125), (0.250980392156863, \n 0.77059765625, 0.77059765625), (0.254901960784314, 0.7796171875, \n 0.7796171875), (0.258823529411765, 0.78863671875, 0.78863671875), (\n 0.262745098039216, 0.79765625, 0.79765625), (0.266666666666667, \n 0.80667578125, 0.80667578125), (0.270588235294118, 0.8156953125, \n 0.8156953125), (0.274509803921569, 0.82471484375, 0.82471484375), (\n 0.27843137254902, 0.833734375, 0.833734375), (0.282352941176471, \n 0.84275390625, 0.84275390625), (0.286274509803922, 0.85177734375, \n 0.85177734375), (0.290196078431373, 0.860796875, 0.860796875), (\n 0.294117647058824, 0.86981640625, 0.86981640625), (\n 0.298039215686275, 0.8788359375, 0.8788359375), (0.301960784313725,\n 0.88785546875, 0.88785546875), (0.305882352941176, 0.896875, \n 0.896875), (0.309803921568627, 0.90589453125, 0.90589453125), (\n 0.313725490196078, 0.9149140625, 0.9149140625), (0.317647058823529,\n 0.92393359375, 0.92393359375), (0.32156862745098, 0.932953125, \n 0.932953125), (0.325490196078431, 0.94197265625, 0.94197265625), (\n 0.329411764705882, 0.95099609375, 0.95099609375), (\n 0.333333333333333, 0.960015625, 0.960015625), (0.337254901960784, \n 0.96903515625, 0.96903515625), (0.341176470588235, 0.9780546875, \n 0.9780546875), (0.345098039215686, 0.98707421875, 0.98707421875), (\n 0.349019607843137, 0.99609375, 0.99609375), (0.352941176470588, \n 0.9737734375, 0.9737734375), (0.356862745098039, 0.95144921875, \n 0.95144921875), (0.36078431372549, 0.92912890625, 0.92912890625), (\n 0.364705882352941, 0.90680859375, 0.90680859375), (\n 0.368627450980392, 0.88448828125, 0.88448828125), (\n 0.372549019607843, 0.8621640625, 0.8621640625), (0.376470588235294,\n 0.83984375, 0.83984375), (0.380392156862745, 0.8175234375, \n 0.8175234375), (0.384313725490196, 0.79519921875, 0.79519921875), (\n 0.388235294117647, 0.77287890625, 0.77287890625), (\n 0.392156862745098, 0.75055859375, 0.75055859375), (\n 0.396078431372549, 0.72823828125, 0.72823828125), (0.4, \n 0.7059140625, 0.7059140625), (0.403921568627451, 0.68359375, \n 0.68359375), (0.407843137254902, 0.6612734375, 0.6612734375), (\n 0.411764705882353, 0.63894921875, 0.63894921875), (\n 0.415686274509804, 0.61662890625, 0.61662890625), (\n 0.419607843137255, 0.59430859375, 0.59430859375), (\n 0.423529411764706, 0.57198828125, 0.57198828125), (\n 0.427450980392157, 0.5496640625, 0.5496640625), (0.431372549019608,\n 0.52734375, 0.52734375), (0.435294117647059, 0.5050234375, \n 0.5050234375), (0.43921568627451, 0.48269921875, 0.48269921875), (\n 0.443137254901961, 0.46037890625, 0.46037890625), (\n 0.447058823529412, 0.43805859375, 0.43805859375), (\n 0.450980392156863, 0.41573828125, 0.41573828125), (\n 0.454901960784314, 0.3934140625, 0.3934140625), (0.458823529411765,\n 0.37109375, 0.37109375), (0.462745098039216, 0.348772265625, \n 0.348772265625), (0.466666666666667, 
0.32645078125, 0.32645078125),\n (0.470588235294118, 0.304129296875, 0.304129296875), (\n 0.474509803921569, 0.281808203125, 0.281808203125), (\n 0.47843137254902, 0.25948671875, 0.25948671875), (0.482352941176471,\n 0.237165234375, 0.237165234375), (0.486274509803922, 0.21484375, \n 0.21484375), (0.490196078431373, 0.233370703125, 0.233370703125), (\n 0.494117647058824, 0.251897265625, 0.251897265625), (\n 0.498039215686275, 0.27042421875, 0.27042421875), (\n 0.501960784313725, 0.28895078125, 0.28895078125), (\n 0.505882352941176, 0.307477734375, 0.307477734375), (\n 0.509803921568627, 0.326004296875, 0.326004296875), (\n 0.513725490196078, 0.34453125, 0.34453125), (0.517647058823529, \n 0.363058203125, 0.363058203125), (0.52156862745098, 0.381584765625,\n 0.381584765625), (0.525490196078431, 0.40011328125, 0.40011328125),\n (0.529411764705882, 0.41863671875, 0.41863671875), (\n 0.533333333333333, 0.4371640625, 0.4371640625), (0.537254901960784,\n 0.45569140625, 0.45569140625), (0.541176470588235, 0.47421875, \n 0.47421875), (0.545098039215686, 0.49274609375, 0.49274609375), (\n 0.549019607843137, 0.5112734375, 0.5112734375), (0.552941176470588,\n 0.52980078125, 0.52980078125), (0.556862745098039, 0.54832421875, \n 0.54832421875), (0.56078431372549, 0.5668515625, 0.5668515625), (\n 0.564705882352941, 0.58537890625, 0.58537890625), (\n 0.568627450980392, 0.60390625, 0.60390625), (0.572549019607843, \n 0.62243359375, 0.62243359375), (0.576470588235294, 0.6409609375, \n 0.6409609375), (0.580392156862745, 0.65948828125, 0.65948828125), (\n 0.584313725490196, 0.67801171875, 0.67801171875), (\n 0.588235294117647, 0.6965390625, 0.6965390625), (0.592156862745098,\n 0.71506640625, 0.71506640625), (0.596078431372549, 0.73359375, \n 0.73359375), (0.6, 0.75212109375, 0.75212109375), (\n 0.603921568627451, 0.7706484375, 0.7706484375), (0.607843137254902,\n 0.78917578125, 0.78917578125), (0.611764705882353, 0.80769921875, \n 0.80769921875), (0.615686274509804, 0.8262265625, 0.8262265625), (\n 0.619607843137255, 0.84475390625, 0.84475390625), (\n 0.623529411764706, 0.86328125, 0.86328125), (0.627450980392157, \n 0.84889453125, 0.84889453125), (0.631372549019608, 0.83450390625, \n 0.83450390625), (0.635294117647059, 0.8201171875, 0.8201171875), (\n 0.63921568627451, 0.80573046875, 0.80573046875), (0.643137254901961,\n 0.79133984375, 0.79133984375), (0.647058823529412, 0.776953125, \n 0.776953125), (0.650980392156863, 0.76256640625, 0.76256640625), (\n 0.654901960784314, 0.74817578125, 0.74817578125), (\n 0.658823529411765, 0.7337890625, 0.7337890625), (0.662745098039216,\n 0.71940234375, 0.71940234375), (0.666666666666667, 0.70501171875, \n 0.70501171875), (0.670588235294118, 0.690625, 0.690625), (\n 0.674509803921569, 0.67623828125, 0.67623828125), (0.67843137254902,\n 0.66184765625, 0.66184765625), (0.682352941176471, 0.6474609375, \n 0.6474609375), (0.686274509803922, 0.63307421875, 0.63307421875), (\n 0.690196078431373, 0.61868359375, 0.61868359375), (\n 0.694117647058824, 0.604296875, 0.604296875), (0.698039215686274, \n 0.58991015625, 0.58991015625), (0.701960784313725, 0.57551953125, \n 0.57551953125), (0.705882352941177, 0.5611328125, 0.5611328125), (\n 0.709803921568627, 0.54674609375, 0.54674609375), (\n 0.713725490196078, 0.53235546875, 0.53235546875), (\n 0.717647058823529, 0.51796875, 0.51796875), (0.72156862745098, \n 0.50358203125, 0.50358203125), (0.725490196078431, 0.48919140625, \n 0.48919140625), (0.729411764705882, 0.4748046875, 0.4748046875), (\n 0.733333333333333, 0.46041796875, 
0.46041796875), (\n 0.737254901960784, 0.44602734375, 0.44602734375), (\n 0.741176470588235, 0.431640625, 0.431640625), (0.745098039215686, \n 0.41725390625, 0.41725390625), (0.749019607843137, 0.40286328125, \n 0.40286328125), (0.752941176470588, 0.3884765625, 0.3884765625), (\n 0.756862745098039, 0.374088671875, 0.374088671875), (\n 0.76078431372549, 0.359700390625, 0.359700390625), (\n 0.764705882352941, 0.3453125, 0.3453125), (0.768627450980392, \n 0.330924609375, 0.330924609375), (0.772549019607843, 0.316536328125,\n 0.316536328125), (0.776470588235294, 0.3021484375, 0.3021484375), (\n 0.780392156862745, 0.287760546875, 0.287760546875), (\n 0.784313725490196, 0.273372265625, 0.273372265625), (\n 0.788235294117647, 0.258984375, 0.258984375), (0.792156862745098, \n 0.244596484375, 0.244596484375), (0.796078431372549, 0.230208203125,\n 0.230208203125), (0.8, 0.2158203125, 0.2158203125), (\n 0.803921568627451, 0.201432421875, 0.201432421875), (\n 0.807843137254902, 0.187044140625, 0.187044140625), (\n 0.811764705882353, 0.17265625, 0.17265625), (0.815686274509804, \n 0.158268359375, 0.158268359375), (0.819607843137255, 0.143880078125,\n 0.143880078125), (0.823529411764706, 0.1294921875, 0.1294921875), (\n 0.827450980392157, 0.115104296875, 0.115104296875), (\n 0.831372549019608, 0.100716015625, 0.100716015625), (\n 0.835294117647059, 0.086328125, 0.086328125), (0.83921568627451, \n 0.071940234375, 0.071940234375), (0.843137254901961, 0.057551953125,\n 0.057551953125), (0.847058823529412, 0.0431640625, 0.0431640625), (\n 0.850980392156863, 0.028776015625, 0.028776015625), (\n 0.854901960784314, 0.01438796875, 0.01438796875), (\n 0.858823529411765, 0, 0), (0.862745098039216, 0, 0), (\n 0.866666666666667, 0, 0), (0.870588235294118, 0, 0), (\n 0.874509803921569, 0, 0), (0.87843137254902, 0, 0), (\n 0.882352941176471, 0, 0), (0.886274509803922, 0, 0), (\n 0.890196078431373, 0, 0), (0.894117647058824, 0, 0), (\n 0.898039215686275, 0, 0), (0.901960784313726, 0, 0), (\n 0.905882352941176, 0, 0), (0.909803921568627, 0, 0), (\n 0.913725490196078, 0, 0), (0.917647058823529, 0, 0), (\n 0.92156862745098, 0, 0), (0.925490196078431, 0, 0), (\n 0.929411764705882, 0, 0), (0.933333333333333, 0, 0), (\n 0.937254901960784, 0, 0), (0.941176470588235, 0, 0), (\n 0.945098039215686, 0, 0), (0.949019607843137, 0, 0), (\n 0.952941176470588, 0, 0), (0.956862745098039, 0, 0), (\n 0.96078431372549, 0, 0), (0.964705882352941, 0, 0), (\n 0.968627450980392, 0, 0), (0.972549019607843, 0, 0), (\n 0.976470588235294, 0, 0), (0.980392156862745, 0, 0), (\n 0.984313725490196, 0, 0), (0.988235294117647, 0, 0), (\n 0.992156862745098, 0, 0), (0.996078431372549, 0, 0), (1, 0, 0))}\n califa = mcol.LinearSegmentedColormap('califa', cdict)\n vcalifa = mcol.LinearSegmentedColormap('vcalifa', vcdict)\n if option == 'v':\n return vcalifa\n else:\n return califa\n\n\ndef A_l(R_v, lw):\n lw = lw / 10000\n x = 1 / lw\n if x > 1.1:\n y = x - 1.82\n a_x = (1.0 + 0.17699 * y - 0.50447 * y ** 2 - 0.02427 * y ** 3 + \n 0.72085 * y ** 4 + 0.01979 * y ** 5 - 0.7753 * y ** 6 + 0.32999 *\n y ** 7)\n b_x = (1.41338 * y + 2.28305 * y ** 2 + 1.07233 * y ** 3 - 5.38434 *\n y ** 4 - 0.62251 * y ** 5 + 5.3026 * y ** 6 - 2.09002 * y ** 7)\n else:\n a_x = 0.574 * x ** 1.61\n b_x = -0.527 * x ** 1.61\n A_l_ = a_x + b_x / R_v\n return A_l_\n",
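The `A_l(R_v, lw)` helper that closes the blob above is a CCM-style extinction curve: it returns A(λ)/A(V) for a given R_V and a wavelength in Ångström. The following sketch is illustrative only and is not part of the stored data; it assumes `A_l` is in scope and uses hypothetical numerical inputs to show how the helper could be combined with an observed Balmer decrement to estimate A_V and deredden a line flux.

```python
# Illustrative sketch only -- assumes the A_l(R_v, lw) helper defined in the
# blob above is in scope; all numerical inputs below are hypothetical.
import numpy as np

R_v = 3.1                    # typical Milky-Way total-to-selective extinction
l_Ha, l_Hb = 6563.0, 4861.0  # Halpha / Hbeta rest wavelengths in Angstrom

k_Ha = A_l(R_v, l_Ha)        # A(Halpha) / A(V)
k_Hb = A_l(R_v, l_Hb)        # A(Hbeta)  / A(V)

ratio_obs = 3.5              # observed Halpha/Hbeta (hypothetical)
ratio_int = 2.86             # case-B intrinsic Balmer decrement

# ratio_obs = ratio_int * 10**(-0.4 * A_V * (k_Ha - k_Hb))  =>  solve for A_V
A_V = 2.5 * np.log10(ratio_obs / ratio_int) / (k_Hb - k_Ha)

flux_obs = 1.0e-16           # observed Halpha flux (hypothetical units)
flux_corr = flux_obs * 10 ** (0.4 * A_V * k_Ha)
print(f"A_V ~ {A_V:.2f} mag, dereddened Halpha flux ~ {flux_corr:.3e}")
```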
"<import token>\n\n\ndef color_map_califa_old(option='json'):\n if option == 'json':\n cmap_cal_dic = json.load(open('code/cmap_cal_json.txt'))\n elif option == 'pickle':\n with open('cmap_cal_pickle.txt', 'rb') as handle:\n cmap_cal_dic = pickle.loads(handle.read())\n cmap_cal = mcol.LinearSegmentedColormap('cmap_CALIFA', cmap_cal_dic)\n return cmap_cal\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\ndef Gr_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_grazy = np.linspace(x_min, -0.2, 100)\n ax.plot(x_set_grazy, grazy(x_set_grazy), label='Stasinska+03', **kwargs)\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.01, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_OI_curve_plot(ax=None, x_min=-3.5, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.35)\n ax.plot(x_set_line, O3O1_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_curve_plot(ax=None, x_min=-2.0, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.08, 100)\n ax.plot(x_set_line, espinosa(x_set_line), label=label, **kwargs)\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\ndef kewley(logNIIHa):\n val = 0.61 / (logNIIHa - 0.47) + 1.19\n return val\n\n\ndef grazy(logNIIHa):\n x = logNIIHa\n val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409 * x\n ) - 31.093\n return val\n\n\ndef AGNline(logSIIHa):\n val = 0.72 / (logSIIHa - 0.32) + 1.3\n return val\n\n\ndef LINSy2line(logSIIHa):\n val = 1.89 * logSIIHa + 0.76\n return val\n\n\ndef AGNline2(logOIHa):\n val = 0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\ndef LINSy2line2(logOIHa):\n val = 1.18 * logOIHa + 1.3\n return val\n\n\ndef espinosa(logNIIHa):\n val = 0.12579066 / (logNIIHa - 0.00302777) + 0.56846872\n return val\n\n\ndef O3S2_line_c(x):\n val = 
0.04074804 / (x + 0.01253238) + 0.58154113\n return val\n\n\n<function token>\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.02, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\ndef kewley(logNIIHa):\n val = 0.61 / (logNIIHa - 0.47) + 1.19\n return val\n\n\ndef grazy(logNIIHa):\n x = logNIIHa\n val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409 * x\n ) - 31.093\n return val\n\n\ndef A_l(R_v, lw):\n lw = lw / 10000\n x = 1 / lw\n if x > 1.1:\n y = x - 1.82\n a_x = (1.0 + 0.17699 * y - 0.50447 * y ** 2 - 0.02427 * y ** 3 + \n 0.72085 * y ** 4 + 0.01979 * y ** 5 - 0.7753 * y ** 6 + 0.32999 *\n y ** 7)\n b_x = (1.41338 * y + 2.28305 * y ** 2 + 1.07233 * y ** 3 - 5.38434 *\n y ** 4 - 0.62251 * y ** 5 + 5.3026 * y ** 6 - 2.09002 * y ** 7)\n else:\n a_x = 0.574 * x ** 1.61\n b_x = -0.527 * x ** 1.61\n A_l_ = a_x + b_x / R_v\n return A_l_\n\n\n<function token>\n\n\ndef A_l(R_v, lw):\n lw = lw / 10000\n x = 1 / lw\n if x > 1.1:\n y = x - 1.82\n a_x = (1.0 + 0.17699 * y - 0.50447 * y ** 2 - 0.02427 * y ** 3 + \n 0.72085 * y ** 4 + 0.01979 * y ** 5 - 0.7753 * y ** 6 + 0.32999 *\n y ** 7)\n b_x = (1.41338 * y + 2.28305 * y ** 2 + 1.07233 * y ** 3 - 5.38434 *\n y ** 4 - 0.62251 * y ** 5 + 5.3026 * y ** 6 - 2.09002 * y ** 7)\n else:\n a_x = 0.574 * x ** 1.61\n b_x = -0.527 * x ** 1.61\n A_l_ = a_x + b_x / R_v\n return A_l_\n",
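The masked copies above still retain the small plotting helpers (`Kf_curve_plot`, `Kw_curve_plot`, ...), each of which draws one demarcation curve onto a supplied axis and forwards extra keyword arguments to `ax.plot`. A minimal usage sketch follows; it is illustrative only, not part of the stored steps, and assumes those helpers (and the `kauffmann`/`kewley` functions they call, plus the matplotlib/numpy imports) are available.

```python
# Illustrative sketch only -- assumes Kf_curve_plot / Kw_curve_plot and the
# curve functions they call are in scope, along with matplotlib and numpy.
import matplotlib.pyplot as plt

fig, ax = plt.subplots(figsize=(5, 5))
Kf_curve_plot(ax=ax, x_min=-2.0, color='k', ls='--')  # Kauffmann+03 curve
Kw_curve_plot(ax=ax, x_min=-2.0, color='k', ls='-')   # Kewley+01 curve
ax.set_xlabel(r'$\log\,[NII]/H\alpha$')
ax.set_ylabel(r'$\log\,[OIII]/H\beta$')
ax.set_xlim(-2.0, 1.0)
ax.set_ylim(-1.5, 1.5)
ax.legend()
plt.show()
```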
"<import token>\n\n\ndef color_map_califa_old(option='json'):\n if option == 'json':\n cmap_cal_dic = json.load(open('code/cmap_cal_json.txt'))\n elif option == 'pickle':\n with open('cmap_cal_pickle.txt', 'rb') as handle:\n cmap_cal_dic = pickle.loads(handle.read())\n cmap_cal = mcol.LinearSegmentedColormap('cmap_CALIFA', cmap_cal_dic)\n return cmap_cal\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\ndef Gr_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_grazy = np.linspace(x_min, -0.2, 100)\n ax.plot(x_set_grazy, grazy(x_set_grazy), label='Stasinska+03', **kwargs)\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.01, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_OI_curve_plot(ax=None, x_min=-3.5, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.35)\n ax.plot(x_set_line, O3O1_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_curve_plot(ax=None, x_min=-2.0, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.08, 100)\n ax.plot(x_set_line, espinosa(x_set_line), label=label, **kwargs)\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\ndef kewley(logNIIHa):\n val = 0.61 / (logNIIHa - 0.47) + 1.19\n return val\n\n\ndef grazy(logNIIHa):\n x = logNIIHa\n val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409 * x\n ) - 31.093\n return val\n\n\ndef AGNline(logSIIHa):\n val = 0.72 / (logSIIHa - 0.32) + 1.3\n return val\n\n\ndef LINSy2line(logSIIHa):\n val = 1.89 * logSIIHa + 0.76\n return val\n\n\ndef AGNline2(logOIHa):\n val = 0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\ndef LINSy2line2(logOIHa):\n val = 1.18 * logOIHa + 1.3\n return val\n\n\ndef espinosa(logNIIHa):\n val = 0.12579066 / (logNIIHa - 0.00302777) + 0.56846872\n return val\n\n\ndef O3S2_line_c(x):\n val = 
0.04074804 / (x + 0.01253238) + 0.58154113\n return val\n\n\n<function token>\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.02, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\ndef kewley(logNIIHa):\n val = 0.61 / (logNIIHa - 0.47) + 1.19\n return val\n\n\ndef grazy(logNIIHa):\n x = logNIIHa\n val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409 * x\n ) - 31.093\n return val\n\n\ndef A_l(R_v, lw):\n lw = lw / 10000\n x = 1 / lw\n if x > 1.1:\n y = x - 1.82\n a_x = (1.0 + 0.17699 * y - 0.50447 * y ** 2 - 0.02427 * y ** 3 + \n 0.72085 * y ** 4 + 0.01979 * y ** 5 - 0.7753 * y ** 6 + 0.32999 *\n y ** 7)\n b_x = (1.41338 * y + 2.28305 * y ** 2 + 1.07233 * y ** 3 - 5.38434 *\n y ** 4 - 0.62251 * y ** 5 + 5.3026 * y ** 6 - 2.09002 * y ** 7)\n else:\n a_x = 0.574 * x ** 1.61\n b_x = -0.527 * x ** 1.61\n A_l_ = a_x + b_x / R_v\n return A_l_\n\n\n<function token>\n<function token>\n",
"<import token>\n\n\ndef color_map_califa_old(option='json'):\n if option == 'json':\n cmap_cal_dic = json.load(open('code/cmap_cal_json.txt'))\n elif option == 'pickle':\n with open('cmap_cal_pickle.txt', 'rb') as handle:\n cmap_cal_dic = pickle.loads(handle.read())\n cmap_cal = mcol.LinearSegmentedColormap('cmap_CALIFA', cmap_cal_dic)\n return cmap_cal\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\ndef Gr_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_grazy = np.linspace(x_min, -0.2, 100)\n ax.plot(x_set_grazy, grazy(x_set_grazy), label='Stasinska+03', **kwargs)\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.01, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_OI_curve_plot(ax=None, x_min=-3.5, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.35)\n ax.plot(x_set_line, O3O1_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_curve_plot(ax=None, x_min=-2.0, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.08, 100)\n ax.plot(x_set_line, espinosa(x_set_line), label=label, **kwargs)\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\ndef kewley(logNIIHa):\n val = 0.61 / (logNIIHa - 0.47) + 1.19\n return val\n\n\ndef grazy(logNIIHa):\n x = logNIIHa\n val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409 * x\n ) - 31.093\n return val\n\n\ndef AGNline(logSIIHa):\n val = 0.72 / (logSIIHa - 0.32) + 1.3\n return val\n\n\ndef LINSy2line(logSIIHa):\n val = 1.89 * logSIIHa + 0.76\n return val\n\n\ndef AGNline2(logOIHa):\n val = 0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\ndef LINSy2line2(logOIHa):\n val = 1.18 * logOIHa + 1.3\n return val\n\n\ndef espinosa(logNIIHa):\n val = 0.12579066 / (logNIIHa - 0.00302777) + 0.56846872\n return val\n\n\ndef O3S2_line_c(x):\n val = 
0.04074804 / (x + 0.01253238) + 0.58154113\n return val\n\n\n<function token>\n<function token>\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.02, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\ndef kewley(logNIIHa):\n val = 0.61 / (logNIIHa - 0.47) + 1.19\n return val\n\n\ndef grazy(logNIIHa):\n x = logNIIHa\n val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409 * x\n ) - 31.093\n return val\n\n\ndef A_l(R_v, lw):\n lw = lw / 10000\n x = 1 / lw\n if x > 1.1:\n y = x - 1.82\n a_x = (1.0 + 0.17699 * y - 0.50447 * y ** 2 - 0.02427 * y ** 3 + \n 0.72085 * y ** 4 + 0.01979 * y ** 5 - 0.7753 * y ** 6 + 0.32999 *\n y ** 7)\n b_x = (1.41338 * y + 2.28305 * y ** 2 + 1.07233 * y ** 3 - 5.38434 *\n y ** 4 - 0.62251 * y ** 5 + 5.3026 * y ** 6 - 2.09002 * y ** 7)\n else:\n a_x = 0.574 * x ** 1.61\n b_x = -0.527 * x ** 1.61\n A_l_ = a_x + b_x / R_v\n return A_l_\n\n\n<function token>\n<function token>\n",
"<import token>\n\n\ndef color_map_califa_old(option='json'):\n if option == 'json':\n cmap_cal_dic = json.load(open('code/cmap_cal_json.txt'))\n elif option == 'pickle':\n with open('cmap_cal_pickle.txt', 'rb') as handle:\n cmap_cal_dic = pickle.loads(handle.read())\n cmap_cal = mcol.LinearSegmentedColormap('cmap_CALIFA', cmap_cal_dic)\n return cmap_cal\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\ndef Gr_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_grazy = np.linspace(x_min, -0.2, 100)\n ax.plot(x_set_grazy, grazy(x_set_grazy), label='Stasinska+03', **kwargs)\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.01, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_OI_curve_plot(ax=None, x_min=-3.5, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.35)\n ax.plot(x_set_line, O3O1_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_curve_plot(ax=None, x_min=-2.0, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.08, 100)\n ax.plot(x_set_line, espinosa(x_set_line), label=label, **kwargs)\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\ndef kewley(logNIIHa):\n val = 0.61 / (logNIIHa - 0.47) + 1.19\n return val\n\n\ndef grazy(logNIIHa):\n x = logNIIHa\n val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409 * x\n ) - 31.093\n return val\n\n\ndef AGNline(logSIIHa):\n val = 0.72 / (logSIIHa - 0.32) + 1.3\n return val\n\n\ndef LINSy2line(logSIIHa):\n val = 1.89 * logSIIHa + 0.76\n return val\n\n\ndef AGNline2(logOIHa):\n val = 0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\ndef LINSy2line2(logOIHa):\n val = 1.18 * logOIHa + 1.3\n return val\n\n\ndef espinosa(logNIIHa):\n val = 0.12579066 / (logNIIHa - 0.00302777) + 0.56846872\n return val\n\n\ndef O3S2_line_c(x):\n val = 
0.04074804 / (x + 0.01253238) + 0.58154113\n return val\n\n\n<function token>\n<function token>\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.02, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\n<function token>\n\n\ndef grazy(logNIIHa):\n x = logNIIHa\n val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409 * x\n ) - 31.093\n return val\n\n\ndef A_l(R_v, lw):\n lw = lw / 10000\n x = 1 / lw\n if x > 1.1:\n y = x - 1.82\n a_x = (1.0 + 0.17699 * y - 0.50447 * y ** 2 - 0.02427 * y ** 3 + \n 0.72085 * y ** 4 + 0.01979 * y ** 5 - 0.7753 * y ** 6 + 0.32999 *\n y ** 7)\n b_x = (1.41338 * y + 2.28305 * y ** 2 + 1.07233 * y ** 3 - 5.38434 *\n y ** 4 - 0.62251 * y ** 5 + 5.3026 * y ** 6 - 2.09002 * y ** 7)\n else:\n a_x = 0.574 * x ** 1.61\n b_x = -0.527 * x ** 1.61\n A_l_ = a_x + b_x / R_v\n return A_l_\n\n\n<function token>\n<function token>\n",
"<import token>\n\n\ndef color_map_califa_old(option='json'):\n if option == 'json':\n cmap_cal_dic = json.load(open('code/cmap_cal_json.txt'))\n elif option == 'pickle':\n with open('cmap_cal_pickle.txt', 'rb') as handle:\n cmap_cal_dic = pickle.loads(handle.read())\n cmap_cal = mcol.LinearSegmentedColormap('cmap_CALIFA', cmap_cal_dic)\n return cmap_cal\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\ndef Gr_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_grazy = np.linspace(x_min, -0.2, 100)\n ax.plot(x_set_grazy, grazy(x_set_grazy), label='Stasinska+03', **kwargs)\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.01, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_OI_curve_plot(ax=None, x_min=-3.5, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.35)\n ax.plot(x_set_line, O3O1_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_curve_plot(ax=None, x_min=-2.0, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.08, 100)\n ax.plot(x_set_line, espinosa(x_set_line), label=label, **kwargs)\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\ndef kewley(logNIIHa):\n val = 0.61 / (logNIIHa - 0.47) + 1.19\n return val\n\n\ndef grazy(logNIIHa):\n x = logNIIHa\n val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409 * x\n ) - 31.093\n return val\n\n\ndef AGNline(logSIIHa):\n val = 0.72 / (logSIIHa - 0.32) + 1.3\n return val\n\n\ndef LINSy2line(logSIIHa):\n val = 1.89 * logSIIHa + 0.76\n return val\n\n\ndef AGNline2(logOIHa):\n val = 0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\ndef LINSy2line2(logOIHa):\n val = 1.18 * logOIHa + 1.3\n return val\n\n\ndef espinosa(logNIIHa):\n val = 0.12579066 / (logNIIHa - 0.00302777) + 0.56846872\n return val\n\n\ndef O3S2_line_c(x):\n val = 
0.04074804 / (x + 0.01253238) + 0.58154113\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.02, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\n<function token>\n\n\ndef grazy(logNIIHa):\n x = logNIIHa\n val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409 * x\n ) - 31.093\n return val\n\n\ndef A_l(R_v, lw):\n lw = lw / 10000\n x = 1 / lw\n if x > 1.1:\n y = x - 1.82\n a_x = (1.0 + 0.17699 * y - 0.50447 * y ** 2 - 0.02427 * y ** 3 + \n 0.72085 * y ** 4 + 0.01979 * y ** 5 - 0.7753 * y ** 6 + 0.32999 *\n y ** 7)\n b_x = (1.41338 * y + 2.28305 * y ** 2 + 1.07233 * y ** 3 - 5.38434 *\n y ** 4 - 0.62251 * y ** 5 + 5.3026 * y ** 6 - 2.09002 * y ** 7)\n else:\n a_x = 0.574 * x ** 1.61\n b_x = -0.527 * x ** 1.61\n A_l_ = a_x + b_x / R_v\n return A_l_\n\n\n<function token>\n<function token>\n",
"<import token>\n\n\ndef color_map_califa_old(option='json'):\n if option == 'json':\n cmap_cal_dic = json.load(open('code/cmap_cal_json.txt'))\n elif option == 'pickle':\n with open('cmap_cal_pickle.txt', 'rb') as handle:\n cmap_cal_dic = pickle.loads(handle.read())\n cmap_cal = mcol.LinearSegmentedColormap('cmap_CALIFA', cmap_cal_dic)\n return cmap_cal\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\ndef Gr_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_grazy = np.linspace(x_min, -0.2, 100)\n ax.plot(x_set_grazy, grazy(x_set_grazy), label='Stasinska+03', **kwargs)\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.01, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_OI_curve_plot(ax=None, x_min=-3.5, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.35)\n ax.plot(x_set_line, O3O1_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_curve_plot(ax=None, x_min=-2.0, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.08, 100)\n ax.plot(x_set_line, espinosa(x_set_line), label=label, **kwargs)\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\ndef kewley(logNIIHa):\n val = 0.61 / (logNIIHa - 0.47) + 1.19\n return val\n\n\n<function token>\n\n\ndef AGNline(logSIIHa):\n val = 0.72 / (logSIIHa - 0.32) + 1.3\n return val\n\n\ndef LINSy2line(logSIIHa):\n val = 1.89 * logSIIHa + 0.76\n return val\n\n\ndef AGNline2(logOIHa):\n val = 0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\ndef LINSy2line2(logOIHa):\n val = 1.18 * logOIHa + 1.3\n return val\n\n\ndef espinosa(logNIIHa):\n val = 0.12579066 / (logNIIHa - 0.00302777) + 0.56846872\n return val\n\n\ndef O3S2_line_c(x):\n val = 0.04074804 / (x + 0.01253238) + 0.58154113\n return val\n\n\n<function token>\n<function token>\n<function 
token>\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.02, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\n<function token>\n\n\ndef grazy(logNIIHa):\n x = logNIIHa\n val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409 * x\n ) - 31.093\n return val\n\n\ndef A_l(R_v, lw):\n lw = lw / 10000\n x = 1 / lw\n if x > 1.1:\n y = x - 1.82\n a_x = (1.0 + 0.17699 * y - 0.50447 * y ** 2 - 0.02427 * y ** 3 + \n 0.72085 * y ** 4 + 0.01979 * y ** 5 - 0.7753 * y ** 6 + 0.32999 *\n y ** 7)\n b_x = (1.41338 * y + 2.28305 * y ** 2 + 1.07233 * y ** 3 - 5.38434 *\n y ** 4 - 0.62251 * y ** 5 + 5.3026 * y ** 6 - 2.09002 * y ** 7)\n else:\n a_x = 0.574 * x ** 1.61\n b_x = -0.527 * x ** 1.61\n A_l_ = a_x + b_x / R_v\n return A_l_\n\n\n<function token>\n<function token>\n",
"<import token>\n\n\ndef color_map_califa_old(option='json'):\n if option == 'json':\n cmap_cal_dic = json.load(open('code/cmap_cal_json.txt'))\n elif option == 'pickle':\n with open('cmap_cal_pickle.txt', 'rb') as handle:\n cmap_cal_dic = pickle.loads(handle.read())\n cmap_cal = mcol.LinearSegmentedColormap('cmap_CALIFA', cmap_cal_dic)\n return cmap_cal\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.01, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_OI_curve_plot(ax=None, x_min=-3.5, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.35)\n ax.plot(x_set_line, O3O1_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_curve_plot(ax=None, x_min=-2.0, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.08, 100)\n ax.plot(x_set_line, espinosa(x_set_line), label=label, **kwargs)\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\ndef kewley(logNIIHa):\n val = 0.61 / (logNIIHa - 0.47) + 1.19\n return val\n\n\n<function token>\n\n\ndef AGNline(logSIIHa):\n val = 0.72 / (logSIIHa - 0.32) + 1.3\n return val\n\n\ndef LINSy2line(logSIIHa):\n val = 1.89 * logSIIHa + 0.76\n return val\n\n\ndef AGNline2(logOIHa):\n val = 0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\ndef LINSy2line2(logOIHa):\n val = 1.18 * logOIHa + 1.3\n return val\n\n\ndef espinosa(logNIIHa):\n val = 0.12579066 / (logNIIHa - 0.00302777) + 0.56846872\n return val\n\n\ndef O3S2_line_c(x):\n val = 0.04074804 / (x + 0.01253238) + 0.58154113\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, 
AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.02, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\n<function token>\n\n\ndef grazy(logNIIHa):\n x = logNIIHa\n val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409 * x\n ) - 31.093\n return val\n\n\ndef A_l(R_v, lw):\n lw = lw / 10000\n x = 1 / lw\n if x > 1.1:\n y = x - 1.82\n a_x = (1.0 + 0.17699 * y - 0.50447 * y ** 2 - 0.02427 * y ** 3 + \n 0.72085 * y ** 4 + 0.01979 * y ** 5 - 0.7753 * y ** 6 + 0.32999 *\n y ** 7)\n b_x = (1.41338 * y + 2.28305 * y ** 2 + 1.07233 * y ** 3 - 5.38434 *\n y ** 4 - 0.62251 * y ** 5 + 5.3026 * y ** 6 - 2.09002 * y ** 7)\n else:\n a_x = 0.574 * x ** 1.61\n b_x = -0.527 * x ** 1.61\n A_l_ = a_x + b_x / R_v\n return A_l_\n\n\n<function token>\n<function token>\n",
"<import token>\n\n\ndef color_map_califa_old(option='json'):\n if option == 'json':\n cmap_cal_dic = json.load(open('code/cmap_cal_json.txt'))\n elif option == 'pickle':\n with open('cmap_cal_pickle.txt', 'rb') as handle:\n cmap_cal_dic = pickle.loads(handle.read())\n cmap_cal = mcol.LinearSegmentedColormap('cmap_CALIFA', cmap_cal_dic)\n return cmap_cal\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.01, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_OI_curve_plot(ax=None, x_min=-3.5, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.35)\n ax.plot(x_set_line, O3O1_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_curve_plot(ax=None, x_min=-2.0, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.08, 100)\n ax.plot(x_set_line, espinosa(x_set_line), label=label, **kwargs)\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\ndef kewley(logNIIHa):\n val = 0.61 / (logNIIHa - 0.47) + 1.19\n return val\n\n\n<function token>\n\n\ndef AGNline(logSIIHa):\n val = 0.72 / (logSIIHa - 0.32) + 1.3\n return val\n\n\ndef LINSy2line(logSIIHa):\n val = 1.89 * logSIIHa + 0.76\n return val\n\n\ndef AGNline2(logOIHa):\n val = 0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\ndef LINSy2line2(logOIHa):\n val = 1.18 * logOIHa + 1.3\n return val\n\n\ndef espinosa(logNIIHa):\n val = 0.12579066 / (logNIIHa - 0.00302777) + 0.56846872\n return val\n\n\ndef O3S2_line_c(x):\n val = 0.04074804 / (x + 0.01253238) + 0.58154113\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, 
AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.02, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\n<function token>\n\n\ndef grazy(logNIIHa):\n x = logNIIHa\n val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409 * x\n ) - 31.093\n return val\n\n\n<function token>\n<function token>\n<function token>\n",
"<import token>\n\n\ndef color_map_califa_old(option='json'):\n if option == 'json':\n cmap_cal_dic = json.load(open('code/cmap_cal_json.txt'))\n elif option == 'pickle':\n with open('cmap_cal_pickle.txt', 'rb') as handle:\n cmap_cal_dic = pickle.loads(handle.read())\n cmap_cal = mcol.LinearSegmentedColormap('cmap_CALIFA', cmap_cal_dic)\n return cmap_cal\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.01, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_OI_curve_plot(ax=None, x_min=-3.5, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.35)\n ax.plot(x_set_line, O3O1_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_curve_plot(ax=None, x_min=-2.0, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.08, 100)\n ax.plot(x_set_line, espinosa(x_set_line), label=label, **kwargs)\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\ndef kewley(logNIIHa):\n val = 0.61 / (logNIIHa - 0.47) + 1.19\n return val\n\n\n<function token>\n\n\ndef AGNline(logSIIHa):\n val = 0.72 / (logSIIHa - 0.32) + 1.3\n return val\n\n\ndef LINSy2line(logSIIHa):\n val = 1.89 * logSIIHa + 0.76\n return val\n\n\ndef AGNline2(logOIHa):\n val = 0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\n<function token>\n\n\ndef espinosa(logNIIHa):\n val = 0.12579066 / (logNIIHa - 0.00302777) + 0.56846872\n return val\n\n\ndef O3S2_line_c(x):\n val = 0.04074804 / (x + 0.01253238) + 0.58154113\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function 
token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.02, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\n<function token>\n\n\ndef grazy(logNIIHa):\n x = logNIIHa\n val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409 * x\n ) - 31.093\n return val\n\n\n<function token>\n<function token>\n<function token>\n",
"<import token>\n\n\ndef color_map_califa_old(option='json'):\n if option == 'json':\n cmap_cal_dic = json.load(open('code/cmap_cal_json.txt'))\n elif option == 'pickle':\n with open('cmap_cal_pickle.txt', 'rb') as handle:\n cmap_cal_dic = pickle.loads(handle.read())\n cmap_cal = mcol.LinearSegmentedColormap('cmap_CALIFA', cmap_cal_dic)\n return cmap_cal\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.01, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_OI_curve_plot(ax=None, x_min=-3.5, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.35)\n ax.plot(x_set_line, O3O1_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_curve_plot(ax=None, x_min=-2.0, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.08, 100)\n ax.plot(x_set_line, espinosa(x_set_line), label=label, **kwargs)\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\ndef kewley(logNIIHa):\n val = 0.61 / (logNIIHa - 0.47) + 1.19\n return val\n\n\n<function token>\n<function token>\n\n\ndef LINSy2line(logSIIHa):\n val = 1.89 * logSIIHa + 0.76\n return val\n\n\ndef AGNline2(logOIHa):\n val = 0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\n<function token>\n\n\ndef espinosa(logNIIHa):\n val = 0.12579066 / (logNIIHa - 0.00302777) + 0.56846872\n return val\n\n\ndef O3S2_line_c(x):\n val = 0.04074804 / (x + 0.01253238) + 0.58154113\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n 
if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.02, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\n<function token>\n\n\ndef grazy(logNIIHa):\n x = logNIIHa\n val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409 * x\n ) - 31.093\n return val\n\n\n<function token>\n<function token>\n<function token>\n",
"<import token>\n\n\ndef color_map_califa_old(option='json'):\n if option == 'json':\n cmap_cal_dic = json.load(open('code/cmap_cal_json.txt'))\n elif option == 'pickle':\n with open('cmap_cal_pickle.txt', 'rb') as handle:\n cmap_cal_dic = pickle.loads(handle.read())\n cmap_cal = mcol.LinearSegmentedColormap('cmap_CALIFA', cmap_cal_dic)\n return cmap_cal\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.01, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_OI_curve_plot(ax=None, x_min=-3.5, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.35)\n ax.plot(x_set_line, O3O1_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_curve_plot(ax=None, x_min=-2.0, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.08, 100)\n ax.plot(x_set_line, espinosa(x_set_line), label=label, **kwargs)\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\ndef kewley(logNIIHa):\n val = 0.61 / (logNIIHa - 0.47) + 1.19\n return val\n\n\n<function token>\n<function token>\n\n\ndef LINSy2line(logSIIHa):\n val = 1.89 * logSIIHa + 0.76\n return val\n\n\ndef AGNline2(logOIHa):\n val = 0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\n<function token>\n\n\ndef espinosa(logNIIHa):\n val = 0.12579066 / (logNIIHa - 0.00302777) + 0.56846872\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n 
ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.02, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\n<function token>\n\n\ndef grazy(logNIIHa):\n x = logNIIHa\n val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409 * x\n ) - 31.093\n return val\n\n\n<function token>\n<function token>\n<function token>\n",
"<import token>\n\n\ndef color_map_califa_old(option='json'):\n if option == 'json':\n cmap_cal_dic = json.load(open('code/cmap_cal_json.txt'))\n elif option == 'pickle':\n with open('cmap_cal_pickle.txt', 'rb') as handle:\n cmap_cal_dic = pickle.loads(handle.read())\n cmap_cal = mcol.LinearSegmentedColormap('cmap_CALIFA', cmap_cal_dic)\n return cmap_cal\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.01, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_OI_curve_plot(ax=None, x_min=-3.5, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.35)\n ax.plot(x_set_line, O3O1_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_curve_plot(ax=None, x_min=-2.0, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.08, 100)\n ax.plot(x_set_line, espinosa(x_set_line), label=label, **kwargs)\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef LINSy2line(logSIIHa):\n val = 1.89 * logSIIHa + 0.76\n return val\n\n\ndef AGNline2(logOIHa):\n val = 0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\n<function token>\n\n\ndef espinosa(logNIIHa):\n val = 0.12579066 / (logNIIHa - 0.00302777) + 0.56846872\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n 
ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.02, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\n<function token>\n\n\ndef grazy(logNIIHa):\n x = logNIIHa\n val = (-30.787 + 1.1358 * x + 0.27297 * x * x) * np.tanh(5.7409 * x\n ) - 31.093\n return val\n\n\n<function token>\n<function token>\n<function token>\n",
"<import token>\n\n\ndef color_map_califa_old(option='json'):\n if option == 'json':\n cmap_cal_dic = json.load(open('code/cmap_cal_json.txt'))\n elif option == 'pickle':\n with open('cmap_cal_pickle.txt', 'rb') as handle:\n cmap_cal_dic = pickle.loads(handle.read())\n cmap_cal = mcol.LinearSegmentedColormap('cmap_CALIFA', cmap_cal_dic)\n return cmap_cal\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.01, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_OI_curve_plot(ax=None, x_min=-3.5, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.35)\n ax.plot(x_set_line, O3O1_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_curve_plot(ax=None, x_min=-2.0, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.08, 100)\n ax.plot(x_set_line, espinosa(x_set_line), label=label, **kwargs)\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef LINSy2line(logSIIHa):\n val = 1.89 * logSIIHa + 0.76\n return val\n\n\ndef AGNline2(logOIHa):\n val = 0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\n<function token>\n\n\ndef espinosa(logNIIHa):\n val = 0.12579066 / (logNIIHa - 0.00302777) + 0.56846872\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n 
ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.02, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n\n\ndef color_map_califa_old(option='json'):\n if option == 'json':\n cmap_cal_dic = json.load(open('code/cmap_cal_json.txt'))\n elif option == 'pickle':\n with open('cmap_cal_pickle.txt', 'rb') as handle:\n cmap_cal_dic = pickle.loads(handle.read())\n cmap_cal = mcol.LinearSegmentedColormap('cmap_CALIFA', cmap_cal_dic)\n return cmap_cal\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.01, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_OI_curve_plot(ax=None, x_min=-3.5, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.35)\n ax.plot(x_set_line, O3O1_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_curve_plot(ax=None, x_min=-2.0, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.08, 100)\n ax.plot(x_set_line, espinosa(x_set_line), label=label, **kwargs)\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef LINSy2line(logSIIHa):\n val = 1.89 * logSIIHa + 0.76\n return val\n\n\ndef AGNline2(logOIHa):\n val = 0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\n<function token>\n\n\ndef espinosa(logNIIHa):\n val = 0.12579066 / (logNIIHa - 0.00302777) + 0.56846872\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n 
ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\n<function token>\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.02, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n\n\ndef color_map_califa_old(option='json'):\n if option == 'json':\n cmap_cal_dic = json.load(open('code/cmap_cal_json.txt'))\n elif option == 'pickle':\n with open('cmap_cal_pickle.txt', 'rb') as handle:\n cmap_cal_dic = pickle.loads(handle.read())\n cmap_cal = mcol.LinearSegmentedColormap('cmap_CALIFA', cmap_cal_dic)\n return cmap_cal\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.01, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_OI_curve_plot(ax=None, x_min=-3.5, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.35)\n ax.plot(x_set_line, O3O1_line_c(x_set_line), label=label, **kwargs)\n\n\ndef Es_curve_plot(ax=None, x_min=-2.0, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.08, 100)\n ax.plot(x_set_line, espinosa(x_set_line), label=label, **kwargs)\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef LINSy2line(logSIIHa):\n val = 1.89 * logSIIHa + 0.76\n return val\n\n\ndef AGNline2(logOIHa):\n val = 0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\n<function token>\n\n\ndef espinosa(logNIIHa):\n val = 0.12579066 / (logNIIHa - 0.00302777) + 0.56846872\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n 
else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.02, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n\n\ndef color_map_califa_old(option='json'):\n if option == 'json':\n cmap_cal_dic = json.load(open('code/cmap_cal_json.txt'))\n elif option == 'pickle':\n with open('cmap_cal_pickle.txt', 'rb') as handle:\n cmap_cal_dic = pickle.loads(handle.read())\n cmap_cal = mcol.LinearSegmentedColormap('cmap_CALIFA', cmap_cal_dic)\n return cmap_cal\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.01, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n\n\ndef Es_curve_plot(ax=None, x_min=-2.0, label='Espinosa-Ponce+20', **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.08, 100)\n ax.plot(x_set_line, espinosa(x_set_line), label=label, **kwargs)\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef LINSy2line(logSIIHa):\n val = 1.89 * logSIIHa + 0.76\n return val\n\n\ndef AGNline2(logOIHa):\n val = 0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\n<function token>\n\n\ndef espinosa(logNIIHa):\n val = 0.12579066 / (logNIIHa - 0.00302777) + 0.56846872\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.02, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\n<function 
token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n\n\ndef color_map_califa_old(option='json'):\n if option == 'json':\n cmap_cal_dic = json.load(open('code/cmap_cal_json.txt'))\n elif option == 'pickle':\n with open('cmap_cal_pickle.txt', 'rb') as handle:\n cmap_cal_dic = pickle.loads(handle.read())\n cmap_cal = mcol.LinearSegmentedColormap('cmap_CALIFA', cmap_cal_dic)\n return cmap_cal\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.01, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef LINSy2line(logSIIHa):\n val = 1.89 * logSIIHa + 0.76\n return val\n\n\ndef AGNline2(logOIHa):\n val = 0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\n<function token>\n\n\ndef espinosa(logNIIHa):\n val = 0.12579066 / (logNIIHa - 0.00302777) + 0.56846872\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.02, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n\n\ndef color_map_califa_old(option='json'):\n if option == 'json':\n cmap_cal_dic = json.load(open('code/cmap_cal_json.txt'))\n elif option == 'pickle':\n with open('cmap_cal_pickle.txt', 'rb') as handle:\n cmap_cal_dic = pickle.loads(handle.read())\n cmap_cal = mcol.LinearSegmentedColormap('cmap_CALIFA', cmap_cal_dic)\n return cmap_cal\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.01, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef LINSy2line(logSIIHa):\n val = 1.89 * logSIIHa + 0.76\n return val\n\n\ndef AGNline2(logOIHa):\n val = 0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.02, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n\n\ndef color_map_califa_old(option='json'):\n if option == 'json':\n cmap_cal_dic = json.load(open('code/cmap_cal_json.txt'))\n elif option == 'pickle':\n with open('cmap_cal_pickle.txt', 'rb') as handle:\n cmap_cal_dic = pickle.loads(handle.read())\n cmap_cal = mcol.LinearSegmentedColormap('cmap_CALIFA', cmap_cal_dic)\n return cmap_cal\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\ndef OI_LINERS_curve_plot(ax=None, x_min=-1.1, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.5, 100)\n ax.plot(x_set_line, LINSy2line2(x_set_line), label='LINER/Sy2 line', **\n kwargs)\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.01, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef LINSy2line(logSIIHa):\n val = 1.89 * logSIIHa + 0.76\n return val\n\n\ndef AGNline2(logOIHa):\n val = 0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.02, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n\n\ndef color_map_califa_old(option='json'):\n if option == 'json':\n cmap_cal_dic = json.load(open('code/cmap_cal_json.txt'))\n elif option == 'pickle':\n with open('cmap_cal_pickle.txt', 'rb') as handle:\n cmap_cal_dic = pickle.loads(handle.read())\n cmap_cal = mcol.LinearSegmentedColormap('cmap_CALIFA', cmap_cal_dic)\n return cmap_cal\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\n<function token>\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.01, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef LINSy2line(logSIIHa):\n val = 1.89 * logSIIHa + 0.76\n return val\n\n\ndef AGNline2(logOIHa):\n val = 0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.02, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n\n\ndef color_map_califa_old(option='json'):\n if option == 'json':\n cmap_cal_dic = json.load(open('code/cmap_cal_json.txt'))\n elif option == 'pickle':\n with open('cmap_cal_pickle.txt', 'rb') as handle:\n cmap_cal_dic = pickle.loads(handle.read())\n cmap_cal = mcol.LinearSegmentedColormap('cmap_CALIFA', cmap_cal_dic)\n return cmap_cal\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\n<function token>\n\n\ndef SII_AGN_curve_plot(ax=None, x_min=-1.5, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.3, 100)\n ax.plot(x_set_line, AGNline(x_set_line), label='AGN line', **kwargs)\n\n\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\n<function token>\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.01, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef LINSy2line(logSIIHa):\n val = 1.89 * logSIIHa + 0.76\n return val\n\n\ndef AGNline2(logOIHa):\n val = 0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.02, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n\n\ndef color_map_califa_old(option='json'):\n if option == 'json':\n cmap_cal_dic = json.load(open('code/cmap_cal_json.txt'))\n elif option == 'pickle':\n with open('cmap_cal_pickle.txt', 'rb') as handle:\n cmap_cal_dic = pickle.loads(handle.read())\n cmap_cal = mcol.LinearSegmentedColormap('cmap_CALIFA', cmap_cal_dic)\n return cmap_cal\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\n<function token>\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, 0.01, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef LINSy2line(logSIIHa):\n val = 1.89 * logSIIHa + 0.76\n return val\n\n\ndef AGNline2(logOIHa):\n val = 0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.02, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n\n\ndef color_map_califa_old(option='json'):\n if option == 'json':\n cmap_cal_dic = json.load(open('code/cmap_cal_json.txt'))\n elif option == 'pickle':\n with open('cmap_cal_pickle.txt', 'rb') as handle:\n cmap_cal_dic = pickle.loads(handle.read())\n cmap_cal = mcol.LinearSegmentedColormap('cmap_CALIFA', cmap_cal_dic)\n return cmap_cal\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef OI_AGN_curve_plot(ax=None, x_min=-4, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.6, 100)\n ax.plot(x_set_line, AGNline2(x_set_line), label='main AGN line', **kwargs)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef LINSy2line(logSIIHa):\n val = 1.89 * logSIIHa + 0.76\n return val\n\n\ndef AGNline2(logOIHa):\n val = 0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.02, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n\n\ndef color_map_califa_old(option='json'):\n if option == 'json':\n cmap_cal_dic = json.load(open('code/cmap_cal_json.txt'))\n elif option == 'pickle':\n with open('cmap_cal_pickle.txt', 'rb') as handle:\n cmap_cal_dic = pickle.loads(handle.read())\n cmap_cal = mcol.LinearSegmentedColormap('cmap_CALIFA', cmap_cal_dic)\n return cmap_cal\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef LINSy2line(logSIIHa):\n val = 1.89 * logSIIHa + 0.76\n return val\n\n\ndef AGNline2(logOIHa):\n val = 0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.02, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n\n\ndef color_map_califa_old(option='json'):\n if option == 'json':\n cmap_cal_dic = json.load(open('code/cmap_cal_json.txt'))\n elif option == 'pickle':\n with open('cmap_cal_pickle.txt', 'rb') as handle:\n cmap_cal_dic = pickle.loads(handle.read())\n cmap_cal = mcol.LinearSegmentedColormap('cmap_CALIFA', cmap_cal_dic)\n return cmap_cal\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef AGNline2(logOIHa):\n val = 0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.02, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n\n\ndef kauffmann(x):\n val = 0.61 / (x - 0.05) + 1.3\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n\n\ndef color_map_califa_old(option='json'):\n if option == 'json':\n cmap_cal_dic = json.load(open('code/cmap_cal_json.txt'))\n elif option == 'pickle':\n with open('cmap_cal_pickle.txt', 'rb') as handle:\n cmap_cal_dic = pickle.loads(handle.read())\n cmap_cal = mcol.LinearSegmentedColormap('cmap_CALIFA', cmap_cal_dic)\n return cmap_cal\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef AGNline2(logOIHa):\n val = 0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.02, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef AGNline2(logOIHa):\n val = 0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef Es_SII_curve_plot(ax=None, x_min=-1.5, label='Espinosa-Ponce+20', **kwargs\n ):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_line = np.linspace(x_min, -0.02, 100)\n ax.plot(x_set_line, O3S2_line_c(x_set_line), label=label, **kwargs)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\ndef Kw_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kw = np.linspace(x_min, 0.4, 100)\n ax.plot(x_set_kw, kewley(x_set_kw), label='Kewley+01', **kwargs)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef AGNline2(logOIHa):\n val = 0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n\n\ndef Kf_curve_plot(ax=None, x_min=-2.0, **kwargs):\n if ax is None:\n ax_flag = True\n fig, ax = plt.subplots()\n else:\n ax_flag = False\n x_set_kauff = np.linspace(x_min, 0.0, 100)\n ax.plot(x_set_kauff, kauffmann(x_set_kauff), label='Kauffmann+03', **kwargs\n )\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef AGNline2(logOIHa):\n val = 0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef AGNline2(logOIHa):\n val = 0.73 / (logOIHa + 0.59) + 1.33\n return val\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
98,593 |
5268071c520a0bde7eea8d362b6a8ce65472e906
|
"""Philips hue proxy code."""
import logging
import requests
import phue
from pi import proxy, scanning_proxy
class Hue(scanning_proxy.ScanningProxy):
"""Hue proxy object."""
def __init__(self, refresh_period, callback):
super(Hue, self).__init__(refresh_period)
self._callback = callback
self._bridges = {}
self._lights = {}
def _scan_once(self):
"""Find hue hubs on the network and tell appengine about them."""
logging.info('Starting hue bridge scan')
response = requests.get('https://www.meethue.com/api/nupnp')
assert response.status_code == 200, response.status_code
bridges = response.json()
for bridge in bridges:
bridge_id = bridge['id']
bridge_ip = bridge['internalipaddress']
bridge_name = None
      # Event explicitly doesn't contain ip (it might change)
      # or id (it's in the device path)
event = None
try:
bridge = phue.Bridge(ip=bridge_ip)
bridge_name = bridge.name
if bridge_id not in self._bridges:
self._bridges[bridge_id] = bridge
event = {'name': bridge_name, 'linked': True}
except phue.PhueRegistrationException:
if bridge_id in self._bridges:
del self._bridges[bridge_id]
event = {'linked': False}
if event is not None:
logging.debug('Hue bridge \'%s\' (%s) found at %s - linked=%s',
bridge_name, bridge_id, bridge_ip, event['linked'])
self._callback('hue_bridge', 'hue-%s' % bridge_id, event)
# Now find all the lights
for bridge_id, bridge in self._bridges.iteritems():
lights_by_id = bridge.get_light_objects(mode='id')
for light_id in lights_by_id.iterkeys():
light_details = bridge.get_light(light_id)
logging.debug('Hue light %d (\'%s\') found on bridge \'%s\', on=%s',
light_id, light_details['name'], bridge_id,
light_details['state']['on'])
light_id = 'hue-%s-%d' % (bridge_id, light_id)
if self._lights.get(light_id, None) != light_details:
self._callback('hue_light', light_id, light_details)
self._lights[light_id] = light_details
@proxy.command
def set_state(self, bridge_id, device_id, mode,
brightness=255, color_temperature=500):
"""Turn a light on or off."""
logging.info('bridge_id = %s, device_id = %d, mode = %s, '
'brightness = %s, color temp = %s',
bridge_id, device_id, mode, brightness,
color_temperature)
bridge = self._bridges.get(bridge_id, None)
if not bridge:
logging.error('Bridge %s not found!', bridge_id)
return
command = {'on' : mode,
'bri' : brightness}
if color_temperature is not None:
command['ct'] = color_temperature
bridge.set_light(device_id, command)
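
# --- Illustrative usage sketch (added for documentation; not part of the original
# --- proxy code). The callback signature is inferred from the self._callback(...)
# --- calls above; the bridge id, device id and refresh period are placeholder
# --- values, and proxy.command is assumed to leave set_state directly callable.
def _example_usage():
  def on_event(kind, device_id, details):
    logging.info('%s %s -> %s', kind, device_id, details)

  hue = Hue(refresh_period=60, callback=on_event)
  # Turn light 1 on the (hypothetical) bridge '0017880a1234' at half brightness.
  hue.set_state('0017880a1234', 1, True, brightness=128, color_temperature=366)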
|
[
"\"\"\"Philips hue proxy code.\"\"\"\n\nimport logging\n\nimport requests\nimport phue\n\nfrom pi import proxy, scanning_proxy\n\n\nclass Hue(scanning_proxy.ScanningProxy):\n \"\"\"Hue proxy object.\"\"\"\n\n def __init__(self, refresh_period, callback):\n super(Hue, self).__init__(refresh_period)\n\n self._callback = callback\n self._bridges = {}\n self._lights = {}\n\n def _scan_once(self):\n \"\"\"Find hue hubs on the network and tell appengine about them.\"\"\"\n logging.info('Starting hue bridge scan')\n response = requests.get('https://www.meethue.com/api/nupnp')\n assert response.status_code == 200, response.status_code\n bridges = response.json()\n for bridge in bridges:\n bridge_id = bridge['id']\n bridge_ip = bridge['internalipaddress']\n bridge_name = None\n\n # Event explicity doesn't contain ip (it might change)\n # or id (its in the device path)\n event = None\n try:\n bridge = phue.Bridge(ip=bridge_ip)\n bridge_name = bridge.name\n\n if bridge_id not in self._bridges:\n self._bridges[bridge_id] = bridge\n event = {'name': bridge_name, 'linked': True}\n except phue.PhueRegistrationException:\n if bridge_id in self._bridges:\n del self._bridges[bridge_id]\n event = {'linked': False}\n\n if event is not None:\n logging.debug('Hue bridge \\'%s\\' (%s) found at %s - linked=%s',\n bridge_name, bridge_id, bridge_ip, event['linked'])\n\n self._callback('hue_bridge', 'hue-%s' % bridge_id, event)\n\n # Now find all the lights\n for bridge_id, bridge in self._bridges.iteritems():\n lights_by_id = bridge.get_light_objects(mode='id')\n for light_id in lights_by_id.iterkeys():\n light_details = bridge.get_light(light_id)\n logging.debug('Hue light %d (\\'%s\\') found on bridge \\'%s\\', on=%s',\n light_id, light_details['name'], bridge_id,\n light_details['state']['on'])\n\n light_id = 'hue-%s-%d' % (bridge_id, light_id)\n if self._lights.get(light_id, None) != light_details:\n self._callback('hue_light', light_id, light_details)\n self._lights[light_id] = light_details\n\n @proxy.command\n def set_state(self, bridge_id, device_id, mode,\n brightness=255, color_temperature=500):\n \"\"\"Turn a light on or off.\"\"\"\n logging.info('bridge_id = %s, device_id = %d, mode = %s, '\n 'brightness = %s, color temp = %s',\n bridge_id, device_id, mode, brightness,\n color_temperature)\n\n bridge = self._bridges.get(bridge_id, None)\n if not bridge:\n logging.error('Bridge %s not found!', bridge_id)\n return\n\n command = {'on' : mode,\n 'bri' : brightness}\n if color_temperature is not None:\n command['ct'] = color_temperature\n\n bridge.set_light(device_id, command)\n",
"<docstring token>\nimport logging\nimport requests\nimport phue\nfrom pi import proxy, scanning_proxy\n\n\nclass Hue(scanning_proxy.ScanningProxy):\n \"\"\"Hue proxy object.\"\"\"\n\n def __init__(self, refresh_period, callback):\n super(Hue, self).__init__(refresh_period)\n self._callback = callback\n self._bridges = {}\n self._lights = {}\n\n def _scan_once(self):\n \"\"\"Find hue hubs on the network and tell appengine about them.\"\"\"\n logging.info('Starting hue bridge scan')\n response = requests.get('https://www.meethue.com/api/nupnp')\n assert response.status_code == 200, response.status_code\n bridges = response.json()\n for bridge in bridges:\n bridge_id = bridge['id']\n bridge_ip = bridge['internalipaddress']\n bridge_name = None\n event = None\n try:\n bridge = phue.Bridge(ip=bridge_ip)\n bridge_name = bridge.name\n if bridge_id not in self._bridges:\n self._bridges[bridge_id] = bridge\n event = {'name': bridge_name, 'linked': True}\n except phue.PhueRegistrationException:\n if bridge_id in self._bridges:\n del self._bridges[bridge_id]\n event = {'linked': False}\n if event is not None:\n logging.debug(\"Hue bridge '%s' (%s) found at %s - linked=%s\",\n bridge_name, bridge_id, bridge_ip, event['linked'])\n self._callback('hue_bridge', 'hue-%s' % bridge_id, event)\n for bridge_id, bridge in self._bridges.iteritems():\n lights_by_id = bridge.get_light_objects(mode='id')\n for light_id in lights_by_id.iterkeys():\n light_details = bridge.get_light(light_id)\n logging.debug(\"Hue light %d ('%s') found on bridge '%s', on=%s\"\n , light_id, light_details['name'], bridge_id,\n light_details['state']['on'])\n light_id = 'hue-%s-%d' % (bridge_id, light_id)\n if self._lights.get(light_id, None) != light_details:\n self._callback('hue_light', light_id, light_details)\n self._lights[light_id] = light_details\n\n @proxy.command\n def set_state(self, bridge_id, device_id, mode, brightness=255,\n color_temperature=500):\n \"\"\"Turn a light on or off.\"\"\"\n logging.info(\n 'bridge_id = %s, device_id = %d, mode = %s, brightness = %s, color temp = %s'\n , bridge_id, device_id, mode, brightness, color_temperature)\n bridge = self._bridges.get(bridge_id, None)\n if not bridge:\n logging.error('Bridge %s not found!', bridge_id)\n return\n command = {'on': mode, 'bri': brightness}\n if color_temperature is not None:\n command['ct'] = color_temperature\n bridge.set_light(device_id, command)\n",
"<docstring token>\n<import token>\n\n\nclass Hue(scanning_proxy.ScanningProxy):\n \"\"\"Hue proxy object.\"\"\"\n\n def __init__(self, refresh_period, callback):\n super(Hue, self).__init__(refresh_period)\n self._callback = callback\n self._bridges = {}\n self._lights = {}\n\n def _scan_once(self):\n \"\"\"Find hue hubs on the network and tell appengine about them.\"\"\"\n logging.info('Starting hue bridge scan')\n response = requests.get('https://www.meethue.com/api/nupnp')\n assert response.status_code == 200, response.status_code\n bridges = response.json()\n for bridge in bridges:\n bridge_id = bridge['id']\n bridge_ip = bridge['internalipaddress']\n bridge_name = None\n event = None\n try:\n bridge = phue.Bridge(ip=bridge_ip)\n bridge_name = bridge.name\n if bridge_id not in self._bridges:\n self._bridges[bridge_id] = bridge\n event = {'name': bridge_name, 'linked': True}\n except phue.PhueRegistrationException:\n if bridge_id in self._bridges:\n del self._bridges[bridge_id]\n event = {'linked': False}\n if event is not None:\n logging.debug(\"Hue bridge '%s' (%s) found at %s - linked=%s\",\n bridge_name, bridge_id, bridge_ip, event['linked'])\n self._callback('hue_bridge', 'hue-%s' % bridge_id, event)\n for bridge_id, bridge in self._bridges.iteritems():\n lights_by_id = bridge.get_light_objects(mode='id')\n for light_id in lights_by_id.iterkeys():\n light_details = bridge.get_light(light_id)\n logging.debug(\"Hue light %d ('%s') found on bridge '%s', on=%s\"\n , light_id, light_details['name'], bridge_id,\n light_details['state']['on'])\n light_id = 'hue-%s-%d' % (bridge_id, light_id)\n if self._lights.get(light_id, None) != light_details:\n self._callback('hue_light', light_id, light_details)\n self._lights[light_id] = light_details\n\n @proxy.command\n def set_state(self, bridge_id, device_id, mode, brightness=255,\n color_temperature=500):\n \"\"\"Turn a light on or off.\"\"\"\n logging.info(\n 'bridge_id = %s, device_id = %d, mode = %s, brightness = %s, color temp = %s'\n , bridge_id, device_id, mode, brightness, color_temperature)\n bridge = self._bridges.get(bridge_id, None)\n if not bridge:\n logging.error('Bridge %s not found!', bridge_id)\n return\n command = {'on': mode, 'bri': brightness}\n if color_temperature is not None:\n command['ct'] = color_temperature\n bridge.set_light(device_id, command)\n",
"<docstring token>\n<import token>\n\n\nclass Hue(scanning_proxy.ScanningProxy):\n <docstring token>\n\n def __init__(self, refresh_period, callback):\n super(Hue, self).__init__(refresh_period)\n self._callback = callback\n self._bridges = {}\n self._lights = {}\n\n def _scan_once(self):\n \"\"\"Find hue hubs on the network and tell appengine about them.\"\"\"\n logging.info('Starting hue bridge scan')\n response = requests.get('https://www.meethue.com/api/nupnp')\n assert response.status_code == 200, response.status_code\n bridges = response.json()\n for bridge in bridges:\n bridge_id = bridge['id']\n bridge_ip = bridge['internalipaddress']\n bridge_name = None\n event = None\n try:\n bridge = phue.Bridge(ip=bridge_ip)\n bridge_name = bridge.name\n if bridge_id not in self._bridges:\n self._bridges[bridge_id] = bridge\n event = {'name': bridge_name, 'linked': True}\n except phue.PhueRegistrationException:\n if bridge_id in self._bridges:\n del self._bridges[bridge_id]\n event = {'linked': False}\n if event is not None:\n logging.debug(\"Hue bridge '%s' (%s) found at %s - linked=%s\",\n bridge_name, bridge_id, bridge_ip, event['linked'])\n self._callback('hue_bridge', 'hue-%s' % bridge_id, event)\n for bridge_id, bridge in self._bridges.iteritems():\n lights_by_id = bridge.get_light_objects(mode='id')\n for light_id in lights_by_id.iterkeys():\n light_details = bridge.get_light(light_id)\n logging.debug(\"Hue light %d ('%s') found on bridge '%s', on=%s\"\n , light_id, light_details['name'], bridge_id,\n light_details['state']['on'])\n light_id = 'hue-%s-%d' % (bridge_id, light_id)\n if self._lights.get(light_id, None) != light_details:\n self._callback('hue_light', light_id, light_details)\n self._lights[light_id] = light_details\n\n @proxy.command\n def set_state(self, bridge_id, device_id, mode, brightness=255,\n color_temperature=500):\n \"\"\"Turn a light on or off.\"\"\"\n logging.info(\n 'bridge_id = %s, device_id = %d, mode = %s, brightness = %s, color temp = %s'\n , bridge_id, device_id, mode, brightness, color_temperature)\n bridge = self._bridges.get(bridge_id, None)\n if not bridge:\n logging.error('Bridge %s not found!', bridge_id)\n return\n command = {'on': mode, 'bri': brightness}\n if color_temperature is not None:\n command['ct'] = color_temperature\n bridge.set_light(device_id, command)\n",
"<docstring token>\n<import token>\n\n\nclass Hue(scanning_proxy.ScanningProxy):\n <docstring token>\n <function token>\n\n def _scan_once(self):\n \"\"\"Find hue hubs on the network and tell appengine about them.\"\"\"\n logging.info('Starting hue bridge scan')\n response = requests.get('https://www.meethue.com/api/nupnp')\n assert response.status_code == 200, response.status_code\n bridges = response.json()\n for bridge in bridges:\n bridge_id = bridge['id']\n bridge_ip = bridge['internalipaddress']\n bridge_name = None\n event = None\n try:\n bridge = phue.Bridge(ip=bridge_ip)\n bridge_name = bridge.name\n if bridge_id not in self._bridges:\n self._bridges[bridge_id] = bridge\n event = {'name': bridge_name, 'linked': True}\n except phue.PhueRegistrationException:\n if bridge_id in self._bridges:\n del self._bridges[bridge_id]\n event = {'linked': False}\n if event is not None:\n logging.debug(\"Hue bridge '%s' (%s) found at %s - linked=%s\",\n bridge_name, bridge_id, bridge_ip, event['linked'])\n self._callback('hue_bridge', 'hue-%s' % bridge_id, event)\n for bridge_id, bridge in self._bridges.iteritems():\n lights_by_id = bridge.get_light_objects(mode='id')\n for light_id in lights_by_id.iterkeys():\n light_details = bridge.get_light(light_id)\n logging.debug(\"Hue light %d ('%s') found on bridge '%s', on=%s\"\n , light_id, light_details['name'], bridge_id,\n light_details['state']['on'])\n light_id = 'hue-%s-%d' % (bridge_id, light_id)\n if self._lights.get(light_id, None) != light_details:\n self._callback('hue_light', light_id, light_details)\n self._lights[light_id] = light_details\n\n @proxy.command\n def set_state(self, bridge_id, device_id, mode, brightness=255,\n color_temperature=500):\n \"\"\"Turn a light on or off.\"\"\"\n logging.info(\n 'bridge_id = %s, device_id = %d, mode = %s, brightness = %s, color temp = %s'\n , bridge_id, device_id, mode, brightness, color_temperature)\n bridge = self._bridges.get(bridge_id, None)\n if not bridge:\n logging.error('Bridge %s not found!', bridge_id)\n return\n command = {'on': mode, 'bri': brightness}\n if color_temperature is not None:\n command['ct'] = color_temperature\n bridge.set_light(device_id, command)\n",
"<docstring token>\n<import token>\n\n\nclass Hue(scanning_proxy.ScanningProxy):\n <docstring token>\n <function token>\n <function token>\n\n @proxy.command\n def set_state(self, bridge_id, device_id, mode, brightness=255,\n color_temperature=500):\n \"\"\"Turn a light on or off.\"\"\"\n logging.info(\n 'bridge_id = %s, device_id = %d, mode = %s, brightness = %s, color temp = %s'\n , bridge_id, device_id, mode, brightness, color_temperature)\n bridge = self._bridges.get(bridge_id, None)\n if not bridge:\n logging.error('Bridge %s not found!', bridge_id)\n return\n command = {'on': mode, 'bri': brightness}\n if color_temperature is not None:\n command['ct'] = color_temperature\n bridge.set_light(device_id, command)\n",
"<docstring token>\n<import token>\n\n\nclass Hue(scanning_proxy.ScanningProxy):\n <docstring token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<class token>\n"
] | false |
98,594 |
df680afa58db834805ab7074009d2e244e3f29e9
|
#!/usr/bin/env python
# coding: utf-8
# In[25]:
### geostatistical tools
# Mickey MacKie
# In[23]:
import numpy as np
import numpy.linalg as linalg
import pandas as pd
import sklearn as sklearn
from sklearn.neighbors import KDTree
import math
from scipy.spatial import distance_matrix
from tqdm import tqdm
import random
# In[24]:
# covariance function definition
def covar(t, d, r):
h = d / r
if t == 1: # Spherical
c = 1 - h * (1.5 - 0.5 * np.square(h))
c[h > 1] = 0
elif t == 2: # Exponential
c = np.exp(-3 * h)
elif t == 3: # Gaussian
c = np.exp(-3 * np.square(h))
return c
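

# --- documentation sketch (added; the lag distances and range are example values,
# --- not from the original notebook) ---
def _example_covar():
    # Spherical model (t=1): covariance decays from 1 at zero lag to 0 at the range.
    lags = np.array([0.0, 50.0, 100.0, 150.0])
    return covar(1, lags, 100.0)  # approx. [1.0, 0.3125, 0.0, 0.0]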
# get variogram along the major or minor axis
def axis_var(lagh, nug, nstruct, cc, vtype, a):
lagh = lagh
nstruct = nstruct # number of variogram structures
vtype = vtype # variogram types (Gaussian, etc.)
a = a # range for axis in question
cc = cc # contribution of each structure
n = len(lagh)
gamma_model = np.zeros(shape = (n))
# for each lag distance
for j in range(0,n):
        c = 0 # covariance accumulator; the nugget is added back when forming the variance below
h = np.matrix(lagh[j])
# for each structure in the variogram
for i in range(nstruct):
Q = h.copy()
d = Q / a[i]
c = c + covar(vtype[i], d, 1) * cc[i] # covariance
        gamma_model[j] = 1 + nug - c # variance
return gamma_model
# make array of x,y coordinates based on corners and resolution
def pred_grid(xmin, xmax, ymin, ymax, pix):
cols = (xmax - xmin)/pix; rows = (ymax - ymin)/pix # number of rows and columns
x = np.arange(xmin,xmax,pix); y = np.arange(ymin,ymax,pix) # make arrays
xx, yy = np.meshgrid(x,y) # make grid
yy = np.flip(yy) # flip upside down
# shape into array
x = np.reshape(xx, (int(rows)*int(cols), 1))
y = np.reshape(yy, (int(rows)*int(cols), 1))
Pred_grid_xy = np.concatenate((x,y), axis = 1) # combine coordinates
return Pred_grid_xy
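

# --- documentation sketch (added; the corner coordinates and pixel size are
# --- placeholder values) ---
def _example_pred_grid():
    # 5 columns x 4 rows of coordinates -> a (20, 2) array of (x, y) pairs.
    grid = pred_grid(xmin=0, xmax=50, ymin=0, ymax=40, pix=10)
    return grid.shape  # (20, 2)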
# rotation matrix (Azimuth = major axis direction)
def Rot_Mat(Azimuth, a_max, a_min):
theta = (Azimuth / 180.0) * np.pi
Rot_Mat = np.dot(
np.array([[1 / a_max, 0], [0, 1 / a_min]]),
np.array(
[
[np.cos(theta), np.sin(theta)],
[-np.sin(theta), np.cos(theta)],
]
),
)
return Rot_Mat
# covariance model
def cov(h1, h2, k, vario):
# unpack variogram parameters
Azimuth = vario[0]
nug = vario[1]
nstruct = vario[2]
vtype = vario[3]
cc = vario[4]
a_max = vario[5]
a_min = vario[6]
c = -nug # nugget effect is made negative because we're calculating covariance instead of variance
for i in range(nstruct):
Q1 = h1.copy()
Q2 = h2.copy()
# covariances between measurements
if k == 0:
d = distance_matrix(
np.matmul(Q1, Rot_Mat(Azimuth, a_max[i], a_min[i])),
np.matmul(Q2, Rot_Mat(Azimuth, a_max[i], a_min[i])),
)
# covariances between measurements and unknown
elif k == 1:
d = np.sqrt(
np.square(
(np.matmul(Q1, Rot_Mat(Azimuth, a_max[i], a_min[i])))
- np.tile(
(
np.matmul(
Q2, Rot_Mat(Azimuth, a_max[i], a_min[i])
)
),
(k, 1),
)
).sum(axis=1)
)
d = np.asarray(d).reshape(len(d))
c = c + covar(vtype[i], d, 1) * cc[i]
return c
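

# --- documentation sketch (added): the 'vario' argument used by cov(), skrige(),
# --- okrige() and sgsim() is assumed to be a list laid out exactly as cov() unpacks
# --- it; the numbers below are placeholder values. ---
def _example_vario():
    # [Azimuth, nugget, nstruct, vtype list, cc list, a_max list, a_min list]
    # One spherical structure (vtype 1) with a 30 km major / 20 km minor range.
    return [45.0, 0.0, 1, [1], [1.0], [30000.0], [20000.0]]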
# simple kriging
def skrige(Pred_grid, df, xx, yy, data, k, vario):
Mean_1 = np.average(df[data]) # mean of data
Var_1 = np.var(df[data]); # variance of data
# make KDTree to search data for nearest neighbors
tree_data = KDTree(df[[xx,yy]].values)
# preallocate space for mean and variance
est_SK = np.zeros(shape=len(Pred_grid))
var_SK = np.zeros(shape=len(Pred_grid))
# preallocate space for data
X_Y = np.zeros((1, k, 2))
closematrix_Primary = np.zeros((1, k))
neardistmatrix = np.zeros((1, k))
for z in tqdm(range(0, len(Pred_grid))):
# find nearest data points
nearest_dist, nearest_ind = tree_data.query(Pred_grid[z : z + 1, :], k=k)
a = nearest_ind.ravel()
group = df.iloc[a, :]
closematrix_Primary[:] = group[data]
neardistmatrix[:] = nearest_dist
X_Y[:, :] = group[[xx, yy]]
# left hand side (covariance between data)
Kriging_Matrix = np.zeros(shape=((k, k)))
Kriging_Matrix = cov(X_Y[0], X_Y[0], 0, vario)
# Set up Right Hand Side (covariance between data and unknown)
r = np.zeros(shape=(k))
k_weights = r
r = cov(X_Y[0], np.tile(Pred_grid[z], (k, 1)), 1, vario)
Kriging_Matrix.reshape(((k)), ((k)))
# Calculate Kriging Weights
k_weights = np.dot(np.linalg.pinv(Kriging_Matrix), r)
# get estimates
        est_SK[z] = Mean_1 + np.sum(k_weights*(closematrix_Primary[:] - Mean_1)) # SK estimate: global mean plus weighted residuals
var_SK[z] = Var_1 - np.sum(k_weights*r)
return est_SK, var_SK
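

# --- documentation sketch (added; the DataFrame column names, grid extent and
# --- variogram values are placeholders, not from the original notebook) ---
def _example_skrige(df):
    # df is expected to hold coordinate columns ('X', 'Y') and a data column ('Z').
    grid = pred_grid(0, 1000, 0, 1000, 50)
    vario = [0.0, 0.0, 1, [2], [1.0], [300.0], [300.0]]  # single exponential structure
    return skrige(grid, df, 'X', 'Y', 'Z', 20, vario)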
# ordinary kriging
def okrige(Pred_grid, df, xx, yy, data, k, vario):
Var_1 = np.var(df[data]); # variance of data
# make KDTree to search data for nearest neighbors
tree_data = KDTree(df[[xx,yy]].values)
# preallocate space for mean and variance
est_OK = np.zeros(shape=len(Pred_grid))
var_OK = np.zeros(shape=len(Pred_grid))
# preallocate space for data
X_Y = np.zeros((1, k, 2))
closematrix_Primary = np.zeros((1, k))
neardistmatrix = np.zeros((1, k))
for z in tqdm(range(0, len(Pred_grid))):
# find nearest data points
nearest_dist, nearest_ind = tree_data.query(Pred_grid[z : z + 1, :], k=k)
a = nearest_ind.ravel()
group = df.iloc[a, :]
closematrix_Primary[:] = group[data]
neardistmatrix[:] = nearest_dist
X_Y[:, :] = group[[xx, yy]]
# left hand side (covariance between data)
Kriging_Matrix = np.zeros(shape=((k+1, k+1)))
Kriging_Matrix[0:k,0:k] = cov(X_Y[0], X_Y[0], 0, vario)
Kriging_Matrix[k,0:k] = 1
Kriging_Matrix[0:k,k] = 1
# Set up Right Hand Side (covariance between data and unknown)
r = np.zeros(shape=(k+1))
k_weights = r
r[0:k] = cov(X_Y[0], np.tile(Pred_grid[z], (k, 1)), 1, vario)
r[k] = 1 # unbiasedness constraint
Kriging_Matrix.reshape(((k+1)), ((k+1)))
# Calculate Kriging Weights
k_weights = np.dot(np.linalg.pinv(Kriging_Matrix), r)
# get estimates
est_OK[z] = np.sum(k_weights[0:k]*closematrix_Primary[:])
var_OK[z] = Var_1 - np.sum(k_weights[0:k]*r[0:k])
return est_OK, var_OK
# sequential Gaussian simulation
def sgsim(Pred_grid, df, xx, yy, data, k, vario):
# generate random array for simulation order
xyindex = np.arange(len(Pred_grid))
random.shuffle(xyindex)
Var_1 = np.var(df[data]); # variance of data
# preallocate space for simulation
sgs = np.zeros(shape=len(Pred_grid))
# preallocate space for data
X_Y = np.zeros((1, k, 2))
closematrix_Primary = np.zeros((1, k))
neardistmatrix = np.zeros((1, k))
with tqdm(total=len(Pred_grid), position=0, leave=True) as pbar:
for i in tqdm(range(0, len(Pred_grid)), position=0, leave=True):
pbar.update()
z = xyindex[i]
# make KDTree to search data for nearest neighbors
tree_data = KDTree(df[[xx,yy]].values)
# find nearest data points
nearest_dist, nearest_ind = tree_data.query(Pred_grid[z : z + 1, :], k=k)
a = nearest_ind.ravel()
group = df.iloc[a, :]
closematrix_Primary[:] = group[data]
neardistmatrix[:] = nearest_dist
X_Y[:, :] = group[[xx, yy]]
# left hand side (covariance between data)
Kriging_Matrix = np.zeros(shape=((k+1, k+1)))
Kriging_Matrix[0:k,0:k] = cov(X_Y[0], X_Y[0], 0, vario)
Kriging_Matrix[k,0:k] = 1
Kriging_Matrix[0:k,k] = 1
# Set up Right Hand Side (covariance between data and unknown)
r = np.zeros(shape=(k+1))
k_weights = r
r[0:k] = cov(X_Y[0], np.tile(Pred_grid[z], (k, 1)), 1, vario)
r[k] = 1 # unbiasedness constraint
Kriging_Matrix.reshape(((k+1)), ((k+1)))
# Calculate Kriging Weights
k_weights = np.dot(np.linalg.pinv(Kriging_Matrix), r)
# get estimates
est = np.sum(k_weights[0:k]*closematrix_Primary[:]) # kriging mean
var = Var_1 - np.sum(k_weights[0:k]*r[0:k]) # kriging variance
if (var < 0): # make sure variances are non-negative
var = 0
sgs[z] = np.random.normal(est,math.sqrt(var),1) # simulate by randomly sampling a value
# update the conditioning data
coords = Pred_grid[z:z+1,:]
dnew = {xx: [coords[0,0]], yy: [coords[0,1]], data: [sgs[z]]}
dfnew = pd.DataFrame(data = dnew)
df = pd.concat([df,dfnew], sort=False) # add new points by concatenating dataframes
return sgs
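

# --- documentation sketch (added; placeholder column names, grid extent and
# --- variogram values). sgsim() visits the grid along a random path and draws each
# --- node from its local kriging distribution, conditioning on previously simulated
# --- nodes; data are usually normal-score transformed first (not done here). ---
def _example_sgsim(df):
    grid = pred_grid(0, 1000, 0, 1000, 50)
    vario = [0.0, 0.0, 1, [2], [1.0], [300.0], [300.0]]
    return sgsim(grid, df, 'X', 'Y', 'Z', 20, vario)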
# In[ ]:
|
[
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[25]:\n\n\n### geostatistical tools\n# Mickey MacKie\n\n\n# In[23]:\n\n\nimport numpy as np\nimport numpy.linalg as linalg\nimport pandas as pd\nimport sklearn as sklearn\nfrom sklearn.neighbors import KDTree\nimport math\nfrom scipy.spatial import distance_matrix\nfrom tqdm import tqdm\nimport random\n\n\n# In[24]:\n\n\n# covariance function definition\ndef covar(t, d, r):\n h = d / r\n if t == 1: # Spherical\n c = 1 - h * (1.5 - 0.5 * np.square(h))\n c[h > 1] = 0\n elif t == 2: # Exponential\n c = np.exp(-3 * h)\n elif t == 3: # Gaussian\n c = np.exp(-3 * np.square(h))\n return c\n\n\n\n# get variogram along the major or minor axis\ndef axis_var(lagh, nug, nstruct, cc, vtype, a):\n lagh = lagh\n nstruct = nstruct # number of variogram structures\n vtype = vtype # variogram types (Gaussian, etc.)\n a = a # range for axis in question\n cc = cc # contribution of each structure\n \n n = len(lagh)\n gamma_model = np.zeros(shape = (n))\n \n # for each lag distance\n for j in range(0,n):\n c = nug\n c = 0\n h = np.matrix(lagh[j])\n \n # for each structure in the variogram\n for i in range(nstruct):\n Q = h.copy()\n d = Q / a[i]\n c = c + covar(vtype[i], d, 1) * cc[i] # covariance\n \n gamma_model[j] = 1+ nug - c # variance\n return gamma_model\n\n\n\n# make array of x,y coordinates based on corners and resolution\ndef pred_grid(xmin, xmax, ymin, ymax, pix):\n cols = (xmax - xmin)/pix; rows = (ymax - ymin)/pix # number of rows and columns\n x = np.arange(xmin,xmax,pix); y = np.arange(ymin,ymax,pix) # make arrays\n\n xx, yy = np.meshgrid(x,y) # make grid\n yy = np.flip(yy) # flip upside down\n\n # shape into array\n x = np.reshape(xx, (int(rows)*int(cols), 1))\n y = np.reshape(yy, (int(rows)*int(cols), 1))\n\n Pred_grid_xy = np.concatenate((x,y), axis = 1) # combine coordinates\n return Pred_grid_xy\n\n\n\n# rotation matrix (Azimuth = major axis direction)\ndef Rot_Mat(Azimuth, a_max, a_min):\n theta = (Azimuth / 180.0) * np.pi\n Rot_Mat = np.dot(\n np.array([[1 / a_max, 0], [0, 1 / a_min]]),\n np.array(\n [\n [np.cos(theta), np.sin(theta)],\n [-np.sin(theta), np.cos(theta)],\n ]\n ),\n )\n return Rot_Mat\n\n\n\n# covariance model\ndef cov(h1, h2, k, vario):\n # unpack variogram parameters\n Azimuth = vario[0]\n nug = vario[1]\n nstruct = vario[2]\n vtype = vario[3]\n cc = vario[4]\n a_max = vario[5]\n a_min = vario[6]\n \n c = -nug # nugget effect is made negative because we're calculating covariance instead of variance\n for i in range(nstruct):\n Q1 = h1.copy()\n Q2 = h2.copy()\n \n # covariances between measurements\n if k == 0:\n d = distance_matrix(\n np.matmul(Q1, Rot_Mat(Azimuth, a_max[i], a_min[i])),\n np.matmul(Q2, Rot_Mat(Azimuth, a_max[i], a_min[i])),\n )\n \n # covariances between measurements and unknown\n elif k == 1:\n d = np.sqrt(\n np.square(\n (np.matmul(Q1, Rot_Mat(Azimuth, a_max[i], a_min[i])))\n - np.tile(\n (\n np.matmul(\n Q2, Rot_Mat(Azimuth, a_max[i], a_min[i])\n )\n ),\n (k, 1),\n )\n ).sum(axis=1)\n )\n d = np.asarray(d).reshape(len(d))\n c = c + covar(vtype[i], d, 1) * cc[i]\n return c\n\n\n\n# simple kriging\ndef skrige(Pred_grid, df, xx, yy, data, k, vario):\n \n Mean_1 = np.average(df[data]) # mean of data\n Var_1 = np.var(df[data]); # variance of data \n \n # make KDTree to search data for nearest neighbors\n tree_data = KDTree(df[[xx,yy]].values) \n \n # preallocate space for mean and variance\n est_SK = np.zeros(shape=len(Pred_grid))\n var_SK = np.zeros(shape=len(Pred_grid))\n \n # preallocate space for data\n X_Y = 
np.zeros((1, k, 2))\n closematrix_Primary = np.zeros((1, k)) \n neardistmatrix = np.zeros((1, k))\n \n for z in tqdm(range(0, len(Pred_grid))):\n # find nearest data points\n nearest_dist, nearest_ind = tree_data.query(Pred_grid[z : z + 1, :], k=k)\n a = nearest_ind.ravel()\n group = df.iloc[a, :]\n closematrix_Primary[:] = group[data]\n neardistmatrix[:] = nearest_dist\n X_Y[:, :] = group[[xx, yy]]\n \n # left hand side (covariance between data)\n Kriging_Matrix = np.zeros(shape=((k, k)))\n Kriging_Matrix = cov(X_Y[0], X_Y[0], 0, vario)\n \n # Set up Right Hand Side (covariance between data and unknown)\n r = np.zeros(shape=(k))\n k_weights = r\n r = cov(X_Y[0], np.tile(Pred_grid[z], (k, 1)), 1, vario)\n Kriging_Matrix.reshape(((k)), ((k)))\n \n # Calculate Kriging Weights\n k_weights = np.dot(np.linalg.pinv(Kriging_Matrix), r)\n\n # get estimates\n est_SK[z] = k*Mean_1 + np.sum(k_weights*(closematrix_Primary[:] - Mean_1))\n var_SK[z] = Var_1 - np.sum(k_weights*r)\n \n return est_SK, var_SK\n\n\n\n# ordinary kriging\ndef okrige(Pred_grid, df, xx, yy, data, k, vario):\n \n Var_1 = np.var(df[data]); # variance of data \n \n # make KDTree to search data for nearest neighbors\n tree_data = KDTree(df[[xx,yy]].values) \n \n # preallocate space for mean and variance\n est_OK = np.zeros(shape=len(Pred_grid))\n var_OK = np.zeros(shape=len(Pred_grid))\n \n # preallocate space for data\n X_Y = np.zeros((1, k, 2))\n closematrix_Primary = np.zeros((1, k))\n neardistmatrix = np.zeros((1, k))\n \n for z in tqdm(range(0, len(Pred_grid))):\n # find nearest data points\n nearest_dist, nearest_ind = tree_data.query(Pred_grid[z : z + 1, :], k=k)\n a = nearest_ind.ravel()\n group = df.iloc[a, :]\n closematrix_Primary[:] = group[data]\n neardistmatrix[:] = nearest_dist\n X_Y[:, :] = group[[xx, yy]]\n \n # left hand side (covariance between data)\n Kriging_Matrix = np.zeros(shape=((k+1, k+1)))\n Kriging_Matrix[0:k,0:k] = cov(X_Y[0], X_Y[0], 0, vario)\n Kriging_Matrix[k,0:k] = 1\n Kriging_Matrix[0:k,k] = 1\n \n # Set up Right Hand Side (covariance between data and unknown)\n r = np.zeros(shape=(k+1))\n k_weights = r\n r[0:k] = cov(X_Y[0], np.tile(Pred_grid[z], (k, 1)), 1, vario)\n r[k] = 1 # unbiasedness constraint\n Kriging_Matrix.reshape(((k+1)), ((k+1)))\n \n # Calculate Kriging Weights\n k_weights = np.dot(np.linalg.pinv(Kriging_Matrix), r)\n\n # get estimates\n est_OK[z] = np.sum(k_weights[0:k]*closematrix_Primary[:])\n var_OK[z] = Var_1 - np.sum(k_weights[0:k]*r[0:k])\n \n return est_OK, var_OK\n\n\n\n\n# sequential Gaussian simulation\ndef sgsim(Pred_grid, df, xx, yy, data, k, vario):\n \n # generate random array for simulation order\n xyindex = np.arange(len(Pred_grid))\n random.shuffle(xyindex)\n\n Var_1 = np.var(df[data]); # variance of data \n \n # preallocate space for simulation\n sgs = np.zeros(shape=len(Pred_grid))\n \n # preallocate space for data\n X_Y = np.zeros((1, k, 2))\n closematrix_Primary = np.zeros((1, k))\n neardistmatrix = np.zeros((1, k))\n \n with tqdm(total=len(Pred_grid), position=0, leave=True) as pbar:\n for i in tqdm(range(0, len(Pred_grid)), position=0, leave=True):\n pbar.update()\n z = xyindex[i]\n \n # make KDTree to search data for nearest neighbors\n tree_data = KDTree(df[[xx,yy]].values) \n \n # find nearest data points\n nearest_dist, nearest_ind = tree_data.query(Pred_grid[z : z + 1, :], k=k)\n a = nearest_ind.ravel()\n group = df.iloc[a, :]\n closematrix_Primary[:] = group[data]\n neardistmatrix[:] = nearest_dist\n X_Y[:, :] = group[[xx, yy]]\n \n # left hand side 
(covariance between data)\n Kriging_Matrix = np.zeros(shape=((k+1, k+1)))\n Kriging_Matrix[0:k,0:k] = cov(X_Y[0], X_Y[0], 0, vario)\n Kriging_Matrix[k,0:k] = 1\n Kriging_Matrix[0:k,k] = 1\n \n # Set up Right Hand Side (covariance between data and unknown)\n r = np.zeros(shape=(k+1))\n k_weights = r\n r[0:k] = cov(X_Y[0], np.tile(Pred_grid[z], (k, 1)), 1, vario)\n r[k] = 1 # unbiasedness constraint\n Kriging_Matrix.reshape(((k+1)), ((k+1)))\n \n # Calculate Kriging Weights\n k_weights = np.dot(np.linalg.pinv(Kriging_Matrix), r)\n\n # get estimates\n est = np.sum(k_weights[0:k]*closematrix_Primary[:]) # kriging mean\n var = Var_1 - np.sum(k_weights[0:k]*r[0:k]) # kriging variance\n \n if (var < 0): # make sure variances are non-negative\n var = 0 \n \n sgs[z] = np.random.normal(est,math.sqrt(var),1) # simulate by randomly sampling a value\n \n # update the conditioning data\n coords = Pred_grid[z:z+1,:]\n dnew = {xx: [coords[0,0]], yy: [coords[0,1]], data: [sgs[z]]} \n dfnew = pd.DataFrame(data = dnew)\n df = pd.concat([df,dfnew], sort=False) # add new points by concatenating dataframes \n \n return sgs\n\n\n# In[ ]:\n\n\n\n\n",
"import numpy as np\nimport numpy.linalg as linalg\nimport pandas as pd\nimport sklearn as sklearn\nfrom sklearn.neighbors import KDTree\nimport math\nfrom scipy.spatial import distance_matrix\nfrom tqdm import tqdm\nimport random\n\n\ndef covar(t, d, r):\n h = d / r\n if t == 1:\n c = 1 - h * (1.5 - 0.5 * np.square(h))\n c[h > 1] = 0\n elif t == 2:\n c = np.exp(-3 * h)\n elif t == 3:\n c = np.exp(-3 * np.square(h))\n return c\n\n\ndef axis_var(lagh, nug, nstruct, cc, vtype, a):\n lagh = lagh\n nstruct = nstruct\n vtype = vtype\n a = a\n cc = cc\n n = len(lagh)\n gamma_model = np.zeros(shape=n)\n for j in range(0, n):\n c = nug\n c = 0\n h = np.matrix(lagh[j])\n for i in range(nstruct):\n Q = h.copy()\n d = Q / a[i]\n c = c + covar(vtype[i], d, 1) * cc[i]\n gamma_model[j] = 1 + nug - c\n return gamma_model\n\n\ndef pred_grid(xmin, xmax, ymin, ymax, pix):\n cols = (xmax - xmin) / pix\n rows = (ymax - ymin) / pix\n x = np.arange(xmin, xmax, pix)\n y = np.arange(ymin, ymax, pix)\n xx, yy = np.meshgrid(x, y)\n yy = np.flip(yy)\n x = np.reshape(xx, (int(rows) * int(cols), 1))\n y = np.reshape(yy, (int(rows) * int(cols), 1))\n Pred_grid_xy = np.concatenate((x, y), axis=1)\n return Pred_grid_xy\n\n\ndef Rot_Mat(Azimuth, a_max, a_min):\n theta = Azimuth / 180.0 * np.pi\n Rot_Mat = np.dot(np.array([[1 / a_max, 0], [0, 1 / a_min]]), np.array([\n [np.cos(theta), np.sin(theta)], [-np.sin(theta), np.cos(theta)]]))\n return Rot_Mat\n\n\ndef cov(h1, h2, k, vario):\n Azimuth = vario[0]\n nug = vario[1]\n nstruct = vario[2]\n vtype = vario[3]\n cc = vario[4]\n a_max = vario[5]\n a_min = vario[6]\n c = -nug\n for i in range(nstruct):\n Q1 = h1.copy()\n Q2 = h2.copy()\n if k == 0:\n d = distance_matrix(np.matmul(Q1, Rot_Mat(Azimuth, a_max[i],\n a_min[i])), np.matmul(Q2, Rot_Mat(Azimuth, a_max[i], a_min[i]))\n )\n elif k == 1:\n d = np.sqrt(np.square(np.matmul(Q1, Rot_Mat(Azimuth, a_max[i],\n a_min[i])) - np.tile(np.matmul(Q2, Rot_Mat(Azimuth, a_max[i\n ], a_min[i])), (k, 1))).sum(axis=1))\n d = np.asarray(d).reshape(len(d))\n c = c + covar(vtype[i], d, 1) * cc[i]\n return c\n\n\ndef skrige(Pred_grid, df, xx, yy, data, k, vario):\n Mean_1 = np.average(df[data])\n Var_1 = np.var(df[data])\n tree_data = KDTree(df[[xx, yy]].values)\n est_SK = np.zeros(shape=len(Pred_grid))\n var_SK = np.zeros(shape=len(Pred_grid))\n X_Y = np.zeros((1, k, 2))\n closematrix_Primary = np.zeros((1, k))\n neardistmatrix = np.zeros((1, k))\n for z in tqdm(range(0, len(Pred_grid))):\n nearest_dist, nearest_ind = tree_data.query(Pred_grid[z:z + 1, :], k=k)\n a = nearest_ind.ravel()\n group = df.iloc[a, :]\n closematrix_Primary[:] = group[data]\n neardistmatrix[:] = nearest_dist\n X_Y[:, :] = group[[xx, yy]]\n Kriging_Matrix = np.zeros(shape=(k, k))\n Kriging_Matrix = cov(X_Y[0], X_Y[0], 0, vario)\n r = np.zeros(shape=k)\n k_weights = r\n r = cov(X_Y[0], np.tile(Pred_grid[z], (k, 1)), 1, vario)\n Kriging_Matrix.reshape(k, k)\n k_weights = np.dot(np.linalg.pinv(Kriging_Matrix), r)\n est_SK[z] = k * Mean_1 + np.sum(k_weights * (closematrix_Primary[:] -\n Mean_1))\n var_SK[z] = Var_1 - np.sum(k_weights * r)\n return est_SK, var_SK\n\n\ndef okrige(Pred_grid, df, xx, yy, data, k, vario):\n Var_1 = np.var(df[data])\n tree_data = KDTree(df[[xx, yy]].values)\n est_OK = np.zeros(shape=len(Pred_grid))\n var_OK = np.zeros(shape=len(Pred_grid))\n X_Y = np.zeros((1, k, 2))\n closematrix_Primary = np.zeros((1, k))\n neardistmatrix = np.zeros((1, k))\n for z in tqdm(range(0, len(Pred_grid))):\n nearest_dist, nearest_ind = tree_data.query(Pred_grid[z:z 
+ 1, :], k=k)\n a = nearest_ind.ravel()\n group = df.iloc[a, :]\n closematrix_Primary[:] = group[data]\n neardistmatrix[:] = nearest_dist\n X_Y[:, :] = group[[xx, yy]]\n Kriging_Matrix = np.zeros(shape=(k + 1, k + 1))\n Kriging_Matrix[0:k, 0:k] = cov(X_Y[0], X_Y[0], 0, vario)\n Kriging_Matrix[k, 0:k] = 1\n Kriging_Matrix[0:k, k] = 1\n r = np.zeros(shape=k + 1)\n k_weights = r\n r[0:k] = cov(X_Y[0], np.tile(Pred_grid[z], (k, 1)), 1, vario)\n r[k] = 1\n Kriging_Matrix.reshape(k + 1, k + 1)\n k_weights = np.dot(np.linalg.pinv(Kriging_Matrix), r)\n est_OK[z] = np.sum(k_weights[0:k] * closematrix_Primary[:])\n var_OK[z] = Var_1 - np.sum(k_weights[0:k] * r[0:k])\n return est_OK, var_OK\n\n\ndef sgsim(Pred_grid, df, xx, yy, data, k, vario):\n xyindex = np.arange(len(Pred_grid))\n random.shuffle(xyindex)\n Var_1 = np.var(df[data])\n sgs = np.zeros(shape=len(Pred_grid))\n X_Y = np.zeros((1, k, 2))\n closematrix_Primary = np.zeros((1, k))\n neardistmatrix = np.zeros((1, k))\n with tqdm(total=len(Pred_grid), position=0, leave=True) as pbar:\n for i in tqdm(range(0, len(Pred_grid)), position=0, leave=True):\n pbar.update()\n z = xyindex[i]\n tree_data = KDTree(df[[xx, yy]].values)\n nearest_dist, nearest_ind = tree_data.query(Pred_grid[z:z + 1,\n :], k=k)\n a = nearest_ind.ravel()\n group = df.iloc[a, :]\n closematrix_Primary[:] = group[data]\n neardistmatrix[:] = nearest_dist\n X_Y[:, :] = group[[xx, yy]]\n Kriging_Matrix = np.zeros(shape=(k + 1, k + 1))\n Kriging_Matrix[0:k, 0:k] = cov(X_Y[0], X_Y[0], 0, vario)\n Kriging_Matrix[k, 0:k] = 1\n Kriging_Matrix[0:k, k] = 1\n r = np.zeros(shape=k + 1)\n k_weights = r\n r[0:k] = cov(X_Y[0], np.tile(Pred_grid[z], (k, 1)), 1, vario)\n r[k] = 1\n Kriging_Matrix.reshape(k + 1, k + 1)\n k_weights = np.dot(np.linalg.pinv(Kriging_Matrix), r)\n est = np.sum(k_weights[0:k] * closematrix_Primary[:])\n var = Var_1 - np.sum(k_weights[0:k] * r[0:k])\n if var < 0:\n var = 0\n sgs[z] = np.random.normal(est, math.sqrt(var), 1)\n coords = Pred_grid[z:z + 1, :]\n dnew = {xx: [coords[0, 0]], yy: [coords[0, 1]], data: [sgs[z]]}\n dfnew = pd.DataFrame(data=dnew)\n df = pd.concat([df, dfnew], sort=False)\n return sgs\n",
"<import token>\n\n\ndef covar(t, d, r):\n h = d / r\n if t == 1:\n c = 1 - h * (1.5 - 0.5 * np.square(h))\n c[h > 1] = 0\n elif t == 2:\n c = np.exp(-3 * h)\n elif t == 3:\n c = np.exp(-3 * np.square(h))\n return c\n\n\ndef axis_var(lagh, nug, nstruct, cc, vtype, a):\n lagh = lagh\n nstruct = nstruct\n vtype = vtype\n a = a\n cc = cc\n n = len(lagh)\n gamma_model = np.zeros(shape=n)\n for j in range(0, n):\n c = nug\n c = 0\n h = np.matrix(lagh[j])\n for i in range(nstruct):\n Q = h.copy()\n d = Q / a[i]\n c = c + covar(vtype[i], d, 1) * cc[i]\n gamma_model[j] = 1 + nug - c\n return gamma_model\n\n\ndef pred_grid(xmin, xmax, ymin, ymax, pix):\n cols = (xmax - xmin) / pix\n rows = (ymax - ymin) / pix\n x = np.arange(xmin, xmax, pix)\n y = np.arange(ymin, ymax, pix)\n xx, yy = np.meshgrid(x, y)\n yy = np.flip(yy)\n x = np.reshape(xx, (int(rows) * int(cols), 1))\n y = np.reshape(yy, (int(rows) * int(cols), 1))\n Pred_grid_xy = np.concatenate((x, y), axis=1)\n return Pred_grid_xy\n\n\ndef Rot_Mat(Azimuth, a_max, a_min):\n theta = Azimuth / 180.0 * np.pi\n Rot_Mat = np.dot(np.array([[1 / a_max, 0], [0, 1 / a_min]]), np.array([\n [np.cos(theta), np.sin(theta)], [-np.sin(theta), np.cos(theta)]]))\n return Rot_Mat\n\n\ndef cov(h1, h2, k, vario):\n Azimuth = vario[0]\n nug = vario[1]\n nstruct = vario[2]\n vtype = vario[3]\n cc = vario[4]\n a_max = vario[5]\n a_min = vario[6]\n c = -nug\n for i in range(nstruct):\n Q1 = h1.copy()\n Q2 = h2.copy()\n if k == 0:\n d = distance_matrix(np.matmul(Q1, Rot_Mat(Azimuth, a_max[i],\n a_min[i])), np.matmul(Q2, Rot_Mat(Azimuth, a_max[i], a_min[i]))\n )\n elif k == 1:\n d = np.sqrt(np.square(np.matmul(Q1, Rot_Mat(Azimuth, a_max[i],\n a_min[i])) - np.tile(np.matmul(Q2, Rot_Mat(Azimuth, a_max[i\n ], a_min[i])), (k, 1))).sum(axis=1))\n d = np.asarray(d).reshape(len(d))\n c = c + covar(vtype[i], d, 1) * cc[i]\n return c\n\n\ndef skrige(Pred_grid, df, xx, yy, data, k, vario):\n Mean_1 = np.average(df[data])\n Var_1 = np.var(df[data])\n tree_data = KDTree(df[[xx, yy]].values)\n est_SK = np.zeros(shape=len(Pred_grid))\n var_SK = np.zeros(shape=len(Pred_grid))\n X_Y = np.zeros((1, k, 2))\n closematrix_Primary = np.zeros((1, k))\n neardistmatrix = np.zeros((1, k))\n for z in tqdm(range(0, len(Pred_grid))):\n nearest_dist, nearest_ind = tree_data.query(Pred_grid[z:z + 1, :], k=k)\n a = nearest_ind.ravel()\n group = df.iloc[a, :]\n closematrix_Primary[:] = group[data]\n neardistmatrix[:] = nearest_dist\n X_Y[:, :] = group[[xx, yy]]\n Kriging_Matrix = np.zeros(shape=(k, k))\n Kriging_Matrix = cov(X_Y[0], X_Y[0], 0, vario)\n r = np.zeros(shape=k)\n k_weights = r\n r = cov(X_Y[0], np.tile(Pred_grid[z], (k, 1)), 1, vario)\n Kriging_Matrix.reshape(k, k)\n k_weights = np.dot(np.linalg.pinv(Kriging_Matrix), r)\n est_SK[z] = k * Mean_1 + np.sum(k_weights * (closematrix_Primary[:] -\n Mean_1))\n var_SK[z] = Var_1 - np.sum(k_weights * r)\n return est_SK, var_SK\n\n\ndef okrige(Pred_grid, df, xx, yy, data, k, vario):\n Var_1 = np.var(df[data])\n tree_data = KDTree(df[[xx, yy]].values)\n est_OK = np.zeros(shape=len(Pred_grid))\n var_OK = np.zeros(shape=len(Pred_grid))\n X_Y = np.zeros((1, k, 2))\n closematrix_Primary = np.zeros((1, k))\n neardistmatrix = np.zeros((1, k))\n for z in tqdm(range(0, len(Pred_grid))):\n nearest_dist, nearest_ind = tree_data.query(Pred_grid[z:z + 1, :], k=k)\n a = nearest_ind.ravel()\n group = df.iloc[a, :]\n closematrix_Primary[:] = group[data]\n neardistmatrix[:] = nearest_dist\n X_Y[:, :] = group[[xx, yy]]\n Kriging_Matrix = np.zeros(shape=(k + 1, k + 
1))\n Kriging_Matrix[0:k, 0:k] = cov(X_Y[0], X_Y[0], 0, vario)\n Kriging_Matrix[k, 0:k] = 1\n Kriging_Matrix[0:k, k] = 1\n r = np.zeros(shape=k + 1)\n k_weights = r\n r[0:k] = cov(X_Y[0], np.tile(Pred_grid[z], (k, 1)), 1, vario)\n r[k] = 1\n Kriging_Matrix.reshape(k + 1, k + 1)\n k_weights = np.dot(np.linalg.pinv(Kriging_Matrix), r)\n est_OK[z] = np.sum(k_weights[0:k] * closematrix_Primary[:])\n var_OK[z] = Var_1 - np.sum(k_weights[0:k] * r[0:k])\n return est_OK, var_OK\n\n\ndef sgsim(Pred_grid, df, xx, yy, data, k, vario):\n xyindex = np.arange(len(Pred_grid))\n random.shuffle(xyindex)\n Var_1 = np.var(df[data])\n sgs = np.zeros(shape=len(Pred_grid))\n X_Y = np.zeros((1, k, 2))\n closematrix_Primary = np.zeros((1, k))\n neardistmatrix = np.zeros((1, k))\n with tqdm(total=len(Pred_grid), position=0, leave=True) as pbar:\n for i in tqdm(range(0, len(Pred_grid)), position=0, leave=True):\n pbar.update()\n z = xyindex[i]\n tree_data = KDTree(df[[xx, yy]].values)\n nearest_dist, nearest_ind = tree_data.query(Pred_grid[z:z + 1,\n :], k=k)\n a = nearest_ind.ravel()\n group = df.iloc[a, :]\n closematrix_Primary[:] = group[data]\n neardistmatrix[:] = nearest_dist\n X_Y[:, :] = group[[xx, yy]]\n Kriging_Matrix = np.zeros(shape=(k + 1, k + 1))\n Kriging_Matrix[0:k, 0:k] = cov(X_Y[0], X_Y[0], 0, vario)\n Kriging_Matrix[k, 0:k] = 1\n Kriging_Matrix[0:k, k] = 1\n r = np.zeros(shape=k + 1)\n k_weights = r\n r[0:k] = cov(X_Y[0], np.tile(Pred_grid[z], (k, 1)), 1, vario)\n r[k] = 1\n Kriging_Matrix.reshape(k + 1, k + 1)\n k_weights = np.dot(np.linalg.pinv(Kriging_Matrix), r)\n est = np.sum(k_weights[0:k] * closematrix_Primary[:])\n var = Var_1 - np.sum(k_weights[0:k] * r[0:k])\n if var < 0:\n var = 0\n sgs[z] = np.random.normal(est, math.sqrt(var), 1)\n coords = Pred_grid[z:z + 1, :]\n dnew = {xx: [coords[0, 0]], yy: [coords[0, 1]], data: [sgs[z]]}\n dfnew = pd.DataFrame(data=dnew)\n df = pd.concat([df, dfnew], sort=False)\n return sgs\n",
"<import token>\n\n\ndef covar(t, d, r):\n h = d / r\n if t == 1:\n c = 1 - h * (1.5 - 0.5 * np.square(h))\n c[h > 1] = 0\n elif t == 2:\n c = np.exp(-3 * h)\n elif t == 3:\n c = np.exp(-3 * np.square(h))\n return c\n\n\ndef axis_var(lagh, nug, nstruct, cc, vtype, a):\n lagh = lagh\n nstruct = nstruct\n vtype = vtype\n a = a\n cc = cc\n n = len(lagh)\n gamma_model = np.zeros(shape=n)\n for j in range(0, n):\n c = nug\n c = 0\n h = np.matrix(lagh[j])\n for i in range(nstruct):\n Q = h.copy()\n d = Q / a[i]\n c = c + covar(vtype[i], d, 1) * cc[i]\n gamma_model[j] = 1 + nug - c\n return gamma_model\n\n\n<function token>\n\n\ndef Rot_Mat(Azimuth, a_max, a_min):\n theta = Azimuth / 180.0 * np.pi\n Rot_Mat = np.dot(np.array([[1 / a_max, 0], [0, 1 / a_min]]), np.array([\n [np.cos(theta), np.sin(theta)], [-np.sin(theta), np.cos(theta)]]))\n return Rot_Mat\n\n\ndef cov(h1, h2, k, vario):\n Azimuth = vario[0]\n nug = vario[1]\n nstruct = vario[2]\n vtype = vario[3]\n cc = vario[4]\n a_max = vario[5]\n a_min = vario[6]\n c = -nug\n for i in range(nstruct):\n Q1 = h1.copy()\n Q2 = h2.copy()\n if k == 0:\n d = distance_matrix(np.matmul(Q1, Rot_Mat(Azimuth, a_max[i],\n a_min[i])), np.matmul(Q2, Rot_Mat(Azimuth, a_max[i], a_min[i]))\n )\n elif k == 1:\n d = np.sqrt(np.square(np.matmul(Q1, Rot_Mat(Azimuth, a_max[i],\n a_min[i])) - np.tile(np.matmul(Q2, Rot_Mat(Azimuth, a_max[i\n ], a_min[i])), (k, 1))).sum(axis=1))\n d = np.asarray(d).reshape(len(d))\n c = c + covar(vtype[i], d, 1) * cc[i]\n return c\n\n\ndef skrige(Pred_grid, df, xx, yy, data, k, vario):\n Mean_1 = np.average(df[data])\n Var_1 = np.var(df[data])\n tree_data = KDTree(df[[xx, yy]].values)\n est_SK = np.zeros(shape=len(Pred_grid))\n var_SK = np.zeros(shape=len(Pred_grid))\n X_Y = np.zeros((1, k, 2))\n closematrix_Primary = np.zeros((1, k))\n neardistmatrix = np.zeros((1, k))\n for z in tqdm(range(0, len(Pred_grid))):\n nearest_dist, nearest_ind = tree_data.query(Pred_grid[z:z + 1, :], k=k)\n a = nearest_ind.ravel()\n group = df.iloc[a, :]\n closematrix_Primary[:] = group[data]\n neardistmatrix[:] = nearest_dist\n X_Y[:, :] = group[[xx, yy]]\n Kriging_Matrix = np.zeros(shape=(k, k))\n Kriging_Matrix = cov(X_Y[0], X_Y[0], 0, vario)\n r = np.zeros(shape=k)\n k_weights = r\n r = cov(X_Y[0], np.tile(Pred_grid[z], (k, 1)), 1, vario)\n Kriging_Matrix.reshape(k, k)\n k_weights = np.dot(np.linalg.pinv(Kriging_Matrix), r)\n est_SK[z] = k * Mean_1 + np.sum(k_weights * (closematrix_Primary[:] -\n Mean_1))\n var_SK[z] = Var_1 - np.sum(k_weights * r)\n return est_SK, var_SK\n\n\ndef okrige(Pred_grid, df, xx, yy, data, k, vario):\n Var_1 = np.var(df[data])\n tree_data = KDTree(df[[xx, yy]].values)\n est_OK = np.zeros(shape=len(Pred_grid))\n var_OK = np.zeros(shape=len(Pred_grid))\n X_Y = np.zeros((1, k, 2))\n closematrix_Primary = np.zeros((1, k))\n neardistmatrix = np.zeros((1, k))\n for z in tqdm(range(0, len(Pred_grid))):\n nearest_dist, nearest_ind = tree_data.query(Pred_grid[z:z + 1, :], k=k)\n a = nearest_ind.ravel()\n group = df.iloc[a, :]\n closematrix_Primary[:] = group[data]\n neardistmatrix[:] = nearest_dist\n X_Y[:, :] = group[[xx, yy]]\n Kriging_Matrix = np.zeros(shape=(k + 1, k + 1))\n Kriging_Matrix[0:k, 0:k] = cov(X_Y[0], X_Y[0], 0, vario)\n Kriging_Matrix[k, 0:k] = 1\n Kriging_Matrix[0:k, k] = 1\n r = np.zeros(shape=k + 1)\n k_weights = r\n r[0:k] = cov(X_Y[0], np.tile(Pred_grid[z], (k, 1)), 1, vario)\n r[k] = 1\n Kriging_Matrix.reshape(k + 1, k + 1)\n k_weights = np.dot(np.linalg.pinv(Kriging_Matrix), r)\n est_OK[z] = 
np.sum(k_weights[0:k] * closematrix_Primary[:])\n var_OK[z] = Var_1 - np.sum(k_weights[0:k] * r[0:k])\n return est_OK, var_OK\n\n\ndef sgsim(Pred_grid, df, xx, yy, data, k, vario):\n xyindex = np.arange(len(Pred_grid))\n random.shuffle(xyindex)\n Var_1 = np.var(df[data])\n sgs = np.zeros(shape=len(Pred_grid))\n X_Y = np.zeros((1, k, 2))\n closematrix_Primary = np.zeros((1, k))\n neardistmatrix = np.zeros((1, k))\n with tqdm(total=len(Pred_grid), position=0, leave=True) as pbar:\n for i in tqdm(range(0, len(Pred_grid)), position=0, leave=True):\n pbar.update()\n z = xyindex[i]\n tree_data = KDTree(df[[xx, yy]].values)\n nearest_dist, nearest_ind = tree_data.query(Pred_grid[z:z + 1,\n :], k=k)\n a = nearest_ind.ravel()\n group = df.iloc[a, :]\n closematrix_Primary[:] = group[data]\n neardistmatrix[:] = nearest_dist\n X_Y[:, :] = group[[xx, yy]]\n Kriging_Matrix = np.zeros(shape=(k + 1, k + 1))\n Kriging_Matrix[0:k, 0:k] = cov(X_Y[0], X_Y[0], 0, vario)\n Kriging_Matrix[k, 0:k] = 1\n Kriging_Matrix[0:k, k] = 1\n r = np.zeros(shape=k + 1)\n k_weights = r\n r[0:k] = cov(X_Y[0], np.tile(Pred_grid[z], (k, 1)), 1, vario)\n r[k] = 1\n Kriging_Matrix.reshape(k + 1, k + 1)\n k_weights = np.dot(np.linalg.pinv(Kriging_Matrix), r)\n est = np.sum(k_weights[0:k] * closematrix_Primary[:])\n var = Var_1 - np.sum(k_weights[0:k] * r[0:k])\n if var < 0:\n var = 0\n sgs[z] = np.random.normal(est, math.sqrt(var), 1)\n coords = Pred_grid[z:z + 1, :]\n dnew = {xx: [coords[0, 0]], yy: [coords[0, 1]], data: [sgs[z]]}\n dfnew = pd.DataFrame(data=dnew)\n df = pd.concat([df, dfnew], sort=False)\n return sgs\n",
"<import token>\n<function token>\n\n\ndef axis_var(lagh, nug, nstruct, cc, vtype, a):\n lagh = lagh\n nstruct = nstruct\n vtype = vtype\n a = a\n cc = cc\n n = len(lagh)\n gamma_model = np.zeros(shape=n)\n for j in range(0, n):\n c = nug\n c = 0\n h = np.matrix(lagh[j])\n for i in range(nstruct):\n Q = h.copy()\n d = Q / a[i]\n c = c + covar(vtype[i], d, 1) * cc[i]\n gamma_model[j] = 1 + nug - c\n return gamma_model\n\n\n<function token>\n\n\ndef Rot_Mat(Azimuth, a_max, a_min):\n theta = Azimuth / 180.0 * np.pi\n Rot_Mat = np.dot(np.array([[1 / a_max, 0], [0, 1 / a_min]]), np.array([\n [np.cos(theta), np.sin(theta)], [-np.sin(theta), np.cos(theta)]]))\n return Rot_Mat\n\n\ndef cov(h1, h2, k, vario):\n Azimuth = vario[0]\n nug = vario[1]\n nstruct = vario[2]\n vtype = vario[3]\n cc = vario[4]\n a_max = vario[5]\n a_min = vario[6]\n c = -nug\n for i in range(nstruct):\n Q1 = h1.copy()\n Q2 = h2.copy()\n if k == 0:\n d = distance_matrix(np.matmul(Q1, Rot_Mat(Azimuth, a_max[i],\n a_min[i])), np.matmul(Q2, Rot_Mat(Azimuth, a_max[i], a_min[i]))\n )\n elif k == 1:\n d = np.sqrt(np.square(np.matmul(Q1, Rot_Mat(Azimuth, a_max[i],\n a_min[i])) - np.tile(np.matmul(Q2, Rot_Mat(Azimuth, a_max[i\n ], a_min[i])), (k, 1))).sum(axis=1))\n d = np.asarray(d).reshape(len(d))\n c = c + covar(vtype[i], d, 1) * cc[i]\n return c\n\n\ndef skrige(Pred_grid, df, xx, yy, data, k, vario):\n Mean_1 = np.average(df[data])\n Var_1 = np.var(df[data])\n tree_data = KDTree(df[[xx, yy]].values)\n est_SK = np.zeros(shape=len(Pred_grid))\n var_SK = np.zeros(shape=len(Pred_grid))\n X_Y = np.zeros((1, k, 2))\n closematrix_Primary = np.zeros((1, k))\n neardistmatrix = np.zeros((1, k))\n for z in tqdm(range(0, len(Pred_grid))):\n nearest_dist, nearest_ind = tree_data.query(Pred_grid[z:z + 1, :], k=k)\n a = nearest_ind.ravel()\n group = df.iloc[a, :]\n closematrix_Primary[:] = group[data]\n neardistmatrix[:] = nearest_dist\n X_Y[:, :] = group[[xx, yy]]\n Kriging_Matrix = np.zeros(shape=(k, k))\n Kriging_Matrix = cov(X_Y[0], X_Y[0], 0, vario)\n r = np.zeros(shape=k)\n k_weights = r\n r = cov(X_Y[0], np.tile(Pred_grid[z], (k, 1)), 1, vario)\n Kriging_Matrix.reshape(k, k)\n k_weights = np.dot(np.linalg.pinv(Kriging_Matrix), r)\n est_SK[z] = k * Mean_1 + np.sum(k_weights * (closematrix_Primary[:] -\n Mean_1))\n var_SK[z] = Var_1 - np.sum(k_weights * r)\n return est_SK, var_SK\n\n\ndef okrige(Pred_grid, df, xx, yy, data, k, vario):\n Var_1 = np.var(df[data])\n tree_data = KDTree(df[[xx, yy]].values)\n est_OK = np.zeros(shape=len(Pred_grid))\n var_OK = np.zeros(shape=len(Pred_grid))\n X_Y = np.zeros((1, k, 2))\n closematrix_Primary = np.zeros((1, k))\n neardistmatrix = np.zeros((1, k))\n for z in tqdm(range(0, len(Pred_grid))):\n nearest_dist, nearest_ind = tree_data.query(Pred_grid[z:z + 1, :], k=k)\n a = nearest_ind.ravel()\n group = df.iloc[a, :]\n closematrix_Primary[:] = group[data]\n neardistmatrix[:] = nearest_dist\n X_Y[:, :] = group[[xx, yy]]\n Kriging_Matrix = np.zeros(shape=(k + 1, k + 1))\n Kriging_Matrix[0:k, 0:k] = cov(X_Y[0], X_Y[0], 0, vario)\n Kriging_Matrix[k, 0:k] = 1\n Kriging_Matrix[0:k, k] = 1\n r = np.zeros(shape=k + 1)\n k_weights = r\n r[0:k] = cov(X_Y[0], np.tile(Pred_grid[z], (k, 1)), 1, vario)\n r[k] = 1\n Kriging_Matrix.reshape(k + 1, k + 1)\n k_weights = np.dot(np.linalg.pinv(Kriging_Matrix), r)\n est_OK[z] = np.sum(k_weights[0:k] * closematrix_Primary[:])\n var_OK[z] = Var_1 - np.sum(k_weights[0:k] * r[0:k])\n return est_OK, var_OK\n\n\ndef sgsim(Pred_grid, df, xx, yy, data, k, vario):\n xyindex = 
np.arange(len(Pred_grid))\n random.shuffle(xyindex)\n Var_1 = np.var(df[data])\n sgs = np.zeros(shape=len(Pred_grid))\n X_Y = np.zeros((1, k, 2))\n closematrix_Primary = np.zeros((1, k))\n neardistmatrix = np.zeros((1, k))\n with tqdm(total=len(Pred_grid), position=0, leave=True) as pbar:\n for i in tqdm(range(0, len(Pred_grid)), position=0, leave=True):\n pbar.update()\n z = xyindex[i]\n tree_data = KDTree(df[[xx, yy]].values)\n nearest_dist, nearest_ind = tree_data.query(Pred_grid[z:z + 1,\n :], k=k)\n a = nearest_ind.ravel()\n group = df.iloc[a, :]\n closematrix_Primary[:] = group[data]\n neardistmatrix[:] = nearest_dist\n X_Y[:, :] = group[[xx, yy]]\n Kriging_Matrix = np.zeros(shape=(k + 1, k + 1))\n Kriging_Matrix[0:k, 0:k] = cov(X_Y[0], X_Y[0], 0, vario)\n Kriging_Matrix[k, 0:k] = 1\n Kriging_Matrix[0:k, k] = 1\n r = np.zeros(shape=k + 1)\n k_weights = r\n r[0:k] = cov(X_Y[0], np.tile(Pred_grid[z], (k, 1)), 1, vario)\n r[k] = 1\n Kriging_Matrix.reshape(k + 1, k + 1)\n k_weights = np.dot(np.linalg.pinv(Kriging_Matrix), r)\n est = np.sum(k_weights[0:k] * closematrix_Primary[:])\n var = Var_1 - np.sum(k_weights[0:k] * r[0:k])\n if var < 0:\n var = 0\n sgs[z] = np.random.normal(est, math.sqrt(var), 1)\n coords = Pred_grid[z:z + 1, :]\n dnew = {xx: [coords[0, 0]], yy: [coords[0, 1]], data: [sgs[z]]}\n dfnew = pd.DataFrame(data=dnew)\n df = pd.concat([df, dfnew], sort=False)\n return sgs\n",
"<import token>\n<function token>\n\n\ndef axis_var(lagh, nug, nstruct, cc, vtype, a):\n lagh = lagh\n nstruct = nstruct\n vtype = vtype\n a = a\n cc = cc\n n = len(lagh)\n gamma_model = np.zeros(shape=n)\n for j in range(0, n):\n c = nug\n c = 0\n h = np.matrix(lagh[j])\n for i in range(nstruct):\n Q = h.copy()\n d = Q / a[i]\n c = c + covar(vtype[i], d, 1) * cc[i]\n gamma_model[j] = 1 + nug - c\n return gamma_model\n\n\n<function token>\n\n\ndef Rot_Mat(Azimuth, a_max, a_min):\n theta = Azimuth / 180.0 * np.pi\n Rot_Mat = np.dot(np.array([[1 / a_max, 0], [0, 1 / a_min]]), np.array([\n [np.cos(theta), np.sin(theta)], [-np.sin(theta), np.cos(theta)]]))\n return Rot_Mat\n\n\ndef cov(h1, h2, k, vario):\n Azimuth = vario[0]\n nug = vario[1]\n nstruct = vario[2]\n vtype = vario[3]\n cc = vario[4]\n a_max = vario[5]\n a_min = vario[6]\n c = -nug\n for i in range(nstruct):\n Q1 = h1.copy()\n Q2 = h2.copy()\n if k == 0:\n d = distance_matrix(np.matmul(Q1, Rot_Mat(Azimuth, a_max[i],\n a_min[i])), np.matmul(Q2, Rot_Mat(Azimuth, a_max[i], a_min[i]))\n )\n elif k == 1:\n d = np.sqrt(np.square(np.matmul(Q1, Rot_Mat(Azimuth, a_max[i],\n a_min[i])) - np.tile(np.matmul(Q2, Rot_Mat(Azimuth, a_max[i\n ], a_min[i])), (k, 1))).sum(axis=1))\n d = np.asarray(d).reshape(len(d))\n c = c + covar(vtype[i], d, 1) * cc[i]\n return c\n\n\ndef skrige(Pred_grid, df, xx, yy, data, k, vario):\n Mean_1 = np.average(df[data])\n Var_1 = np.var(df[data])\n tree_data = KDTree(df[[xx, yy]].values)\n est_SK = np.zeros(shape=len(Pred_grid))\n var_SK = np.zeros(shape=len(Pred_grid))\n X_Y = np.zeros((1, k, 2))\n closematrix_Primary = np.zeros((1, k))\n neardistmatrix = np.zeros((1, k))\n for z in tqdm(range(0, len(Pred_grid))):\n nearest_dist, nearest_ind = tree_data.query(Pred_grid[z:z + 1, :], k=k)\n a = nearest_ind.ravel()\n group = df.iloc[a, :]\n closematrix_Primary[:] = group[data]\n neardistmatrix[:] = nearest_dist\n X_Y[:, :] = group[[xx, yy]]\n Kriging_Matrix = np.zeros(shape=(k, k))\n Kriging_Matrix = cov(X_Y[0], X_Y[0], 0, vario)\n r = np.zeros(shape=k)\n k_weights = r\n r = cov(X_Y[0], np.tile(Pred_grid[z], (k, 1)), 1, vario)\n Kriging_Matrix.reshape(k, k)\n k_weights = np.dot(np.linalg.pinv(Kriging_Matrix), r)\n est_SK[z] = k * Mean_1 + np.sum(k_weights * (closematrix_Primary[:] -\n Mean_1))\n var_SK[z] = Var_1 - np.sum(k_weights * r)\n return est_SK, var_SK\n\n\n<function token>\n\n\ndef sgsim(Pred_grid, df, xx, yy, data, k, vario):\n xyindex = np.arange(len(Pred_grid))\n random.shuffle(xyindex)\n Var_1 = np.var(df[data])\n sgs = np.zeros(shape=len(Pred_grid))\n X_Y = np.zeros((1, k, 2))\n closematrix_Primary = np.zeros((1, k))\n neardistmatrix = np.zeros((1, k))\n with tqdm(total=len(Pred_grid), position=0, leave=True) as pbar:\n for i in tqdm(range(0, len(Pred_grid)), position=0, leave=True):\n pbar.update()\n z = xyindex[i]\n tree_data = KDTree(df[[xx, yy]].values)\n nearest_dist, nearest_ind = tree_data.query(Pred_grid[z:z + 1,\n :], k=k)\n a = nearest_ind.ravel()\n group = df.iloc[a, :]\n closematrix_Primary[:] = group[data]\n neardistmatrix[:] = nearest_dist\n X_Y[:, :] = group[[xx, yy]]\n Kriging_Matrix = np.zeros(shape=(k + 1, k + 1))\n Kriging_Matrix[0:k, 0:k] = cov(X_Y[0], X_Y[0], 0, vario)\n Kriging_Matrix[k, 0:k] = 1\n Kriging_Matrix[0:k, k] = 1\n r = np.zeros(shape=k + 1)\n k_weights = r\n r[0:k] = cov(X_Y[0], np.tile(Pred_grid[z], (k, 1)), 1, vario)\n r[k] = 1\n Kriging_Matrix.reshape(k + 1, k + 1)\n k_weights = np.dot(np.linalg.pinv(Kriging_Matrix), r)\n est = np.sum(k_weights[0:k] * 
closematrix_Primary[:])\n var = Var_1 - np.sum(k_weights[0:k] * r[0:k])\n if var < 0:\n var = 0\n sgs[z] = np.random.normal(est, math.sqrt(var), 1)\n coords = Pred_grid[z:z + 1, :]\n dnew = {xx: [coords[0, 0]], yy: [coords[0, 1]], data: [sgs[z]]}\n dfnew = pd.DataFrame(data=dnew)\n df = pd.concat([df, dfnew], sort=False)\n return sgs\n",
"<import token>\n<function token>\n\n\ndef axis_var(lagh, nug, nstruct, cc, vtype, a):\n lagh = lagh\n nstruct = nstruct\n vtype = vtype\n a = a\n cc = cc\n n = len(lagh)\n gamma_model = np.zeros(shape=n)\n for j in range(0, n):\n c = nug\n c = 0\n h = np.matrix(lagh[j])\n for i in range(nstruct):\n Q = h.copy()\n d = Q / a[i]\n c = c + covar(vtype[i], d, 1) * cc[i]\n gamma_model[j] = 1 + nug - c\n return gamma_model\n\n\n<function token>\n\n\ndef Rot_Mat(Azimuth, a_max, a_min):\n theta = Azimuth / 180.0 * np.pi\n Rot_Mat = np.dot(np.array([[1 / a_max, 0], [0, 1 / a_min]]), np.array([\n [np.cos(theta), np.sin(theta)], [-np.sin(theta), np.cos(theta)]]))\n return Rot_Mat\n\n\ndef cov(h1, h2, k, vario):\n Azimuth = vario[0]\n nug = vario[1]\n nstruct = vario[2]\n vtype = vario[3]\n cc = vario[4]\n a_max = vario[5]\n a_min = vario[6]\n c = -nug\n for i in range(nstruct):\n Q1 = h1.copy()\n Q2 = h2.copy()\n if k == 0:\n d = distance_matrix(np.matmul(Q1, Rot_Mat(Azimuth, a_max[i],\n a_min[i])), np.matmul(Q2, Rot_Mat(Azimuth, a_max[i], a_min[i]))\n )\n elif k == 1:\n d = np.sqrt(np.square(np.matmul(Q1, Rot_Mat(Azimuth, a_max[i],\n a_min[i])) - np.tile(np.matmul(Q2, Rot_Mat(Azimuth, a_max[i\n ], a_min[i])), (k, 1))).sum(axis=1))\n d = np.asarray(d).reshape(len(d))\n c = c + covar(vtype[i], d, 1) * cc[i]\n return c\n\n\n<function token>\n<function token>\n\n\ndef sgsim(Pred_grid, df, xx, yy, data, k, vario):\n xyindex = np.arange(len(Pred_grid))\n random.shuffle(xyindex)\n Var_1 = np.var(df[data])\n sgs = np.zeros(shape=len(Pred_grid))\n X_Y = np.zeros((1, k, 2))\n closematrix_Primary = np.zeros((1, k))\n neardistmatrix = np.zeros((1, k))\n with tqdm(total=len(Pred_grid), position=0, leave=True) as pbar:\n for i in tqdm(range(0, len(Pred_grid)), position=0, leave=True):\n pbar.update()\n z = xyindex[i]\n tree_data = KDTree(df[[xx, yy]].values)\n nearest_dist, nearest_ind = tree_data.query(Pred_grid[z:z + 1,\n :], k=k)\n a = nearest_ind.ravel()\n group = df.iloc[a, :]\n closematrix_Primary[:] = group[data]\n neardistmatrix[:] = nearest_dist\n X_Y[:, :] = group[[xx, yy]]\n Kriging_Matrix = np.zeros(shape=(k + 1, k + 1))\n Kriging_Matrix[0:k, 0:k] = cov(X_Y[0], X_Y[0], 0, vario)\n Kriging_Matrix[k, 0:k] = 1\n Kriging_Matrix[0:k, k] = 1\n r = np.zeros(shape=k + 1)\n k_weights = r\n r[0:k] = cov(X_Y[0], np.tile(Pred_grid[z], (k, 1)), 1, vario)\n r[k] = 1\n Kriging_Matrix.reshape(k + 1, k + 1)\n k_weights = np.dot(np.linalg.pinv(Kriging_Matrix), r)\n est = np.sum(k_weights[0:k] * closematrix_Primary[:])\n var = Var_1 - np.sum(k_weights[0:k] * r[0:k])\n if var < 0:\n var = 0\n sgs[z] = np.random.normal(est, math.sqrt(var), 1)\n coords = Pred_grid[z:z + 1, :]\n dnew = {xx: [coords[0, 0]], yy: [coords[0, 1]], data: [sgs[z]]}\n dfnew = pd.DataFrame(data=dnew)\n df = pd.concat([df, dfnew], sort=False)\n return sgs\n",
"<import token>\n<function token>\n\n\ndef axis_var(lagh, nug, nstruct, cc, vtype, a):\n lagh = lagh\n nstruct = nstruct\n vtype = vtype\n a = a\n cc = cc\n n = len(lagh)\n gamma_model = np.zeros(shape=n)\n for j in range(0, n):\n c = nug\n c = 0\n h = np.matrix(lagh[j])\n for i in range(nstruct):\n Q = h.copy()\n d = Q / a[i]\n c = c + covar(vtype[i], d, 1) * cc[i]\n gamma_model[j] = 1 + nug - c\n return gamma_model\n\n\n<function token>\n<function token>\n\n\ndef cov(h1, h2, k, vario):\n Azimuth = vario[0]\n nug = vario[1]\n nstruct = vario[2]\n vtype = vario[3]\n cc = vario[4]\n a_max = vario[5]\n a_min = vario[6]\n c = -nug\n for i in range(nstruct):\n Q1 = h1.copy()\n Q2 = h2.copy()\n if k == 0:\n d = distance_matrix(np.matmul(Q1, Rot_Mat(Azimuth, a_max[i],\n a_min[i])), np.matmul(Q2, Rot_Mat(Azimuth, a_max[i], a_min[i]))\n )\n elif k == 1:\n d = np.sqrt(np.square(np.matmul(Q1, Rot_Mat(Azimuth, a_max[i],\n a_min[i])) - np.tile(np.matmul(Q2, Rot_Mat(Azimuth, a_max[i\n ], a_min[i])), (k, 1))).sum(axis=1))\n d = np.asarray(d).reshape(len(d))\n c = c + covar(vtype[i], d, 1) * cc[i]\n return c\n\n\n<function token>\n<function token>\n\n\ndef sgsim(Pred_grid, df, xx, yy, data, k, vario):\n xyindex = np.arange(len(Pred_grid))\n random.shuffle(xyindex)\n Var_1 = np.var(df[data])\n sgs = np.zeros(shape=len(Pred_grid))\n X_Y = np.zeros((1, k, 2))\n closematrix_Primary = np.zeros((1, k))\n neardistmatrix = np.zeros((1, k))\n with tqdm(total=len(Pred_grid), position=0, leave=True) as pbar:\n for i in tqdm(range(0, len(Pred_grid)), position=0, leave=True):\n pbar.update()\n z = xyindex[i]\n tree_data = KDTree(df[[xx, yy]].values)\n nearest_dist, nearest_ind = tree_data.query(Pred_grid[z:z + 1,\n :], k=k)\n a = nearest_ind.ravel()\n group = df.iloc[a, :]\n closematrix_Primary[:] = group[data]\n neardistmatrix[:] = nearest_dist\n X_Y[:, :] = group[[xx, yy]]\n Kriging_Matrix = np.zeros(shape=(k + 1, k + 1))\n Kriging_Matrix[0:k, 0:k] = cov(X_Y[0], X_Y[0], 0, vario)\n Kriging_Matrix[k, 0:k] = 1\n Kriging_Matrix[0:k, k] = 1\n r = np.zeros(shape=k + 1)\n k_weights = r\n r[0:k] = cov(X_Y[0], np.tile(Pred_grid[z], (k, 1)), 1, vario)\n r[k] = 1\n Kriging_Matrix.reshape(k + 1, k + 1)\n k_weights = np.dot(np.linalg.pinv(Kriging_Matrix), r)\n est = np.sum(k_weights[0:k] * closematrix_Primary[:])\n var = Var_1 - np.sum(k_weights[0:k] * r[0:k])\n if var < 0:\n var = 0\n sgs[z] = np.random.normal(est, math.sqrt(var), 1)\n coords = Pred_grid[z:z + 1, :]\n dnew = {xx: [coords[0, 0]], yy: [coords[0, 1]], data: [sgs[z]]}\n dfnew = pd.DataFrame(data=dnew)\n df = pd.concat([df, dfnew], sort=False)\n return sgs\n",
"<import token>\n<function token>\n\n\ndef axis_var(lagh, nug, nstruct, cc, vtype, a):\n lagh = lagh\n nstruct = nstruct\n vtype = vtype\n a = a\n cc = cc\n n = len(lagh)\n gamma_model = np.zeros(shape=n)\n for j in range(0, n):\n c = nug\n c = 0\n h = np.matrix(lagh[j])\n for i in range(nstruct):\n Q = h.copy()\n d = Q / a[i]\n c = c + covar(vtype[i], d, 1) * cc[i]\n gamma_model[j] = 1 + nug - c\n return gamma_model\n\n\n<function token>\n<function token>\n\n\ndef cov(h1, h2, k, vario):\n Azimuth = vario[0]\n nug = vario[1]\n nstruct = vario[2]\n vtype = vario[3]\n cc = vario[4]\n a_max = vario[5]\n a_min = vario[6]\n c = -nug\n for i in range(nstruct):\n Q1 = h1.copy()\n Q2 = h2.copy()\n if k == 0:\n d = distance_matrix(np.matmul(Q1, Rot_Mat(Azimuth, a_max[i],\n a_min[i])), np.matmul(Q2, Rot_Mat(Azimuth, a_max[i], a_min[i]))\n )\n elif k == 1:\n d = np.sqrt(np.square(np.matmul(Q1, Rot_Mat(Azimuth, a_max[i],\n a_min[i])) - np.tile(np.matmul(Q2, Rot_Mat(Azimuth, a_max[i\n ], a_min[i])), (k, 1))).sum(axis=1))\n d = np.asarray(d).reshape(len(d))\n c = c + covar(vtype[i], d, 1) * cc[i]\n return c\n\n\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef cov(h1, h2, k, vario):\n Azimuth = vario[0]\n nug = vario[1]\n nstruct = vario[2]\n vtype = vario[3]\n cc = vario[4]\n a_max = vario[5]\n a_min = vario[6]\n c = -nug\n for i in range(nstruct):\n Q1 = h1.copy()\n Q2 = h2.copy()\n if k == 0:\n d = distance_matrix(np.matmul(Q1, Rot_Mat(Azimuth, a_max[i],\n a_min[i])), np.matmul(Q2, Rot_Mat(Azimuth, a_max[i], a_min[i]))\n )\n elif k == 1:\n d = np.sqrt(np.square(np.matmul(Q1, Rot_Mat(Azimuth, a_max[i],\n a_min[i])) - np.tile(np.matmul(Q2, Rot_Mat(Azimuth, a_max[i\n ], a_min[i])), (k, 1))).sum(axis=1))\n d = np.asarray(d).reshape(len(d))\n c = c + covar(vtype[i], d, 1) * cc[i]\n return c\n\n\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
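The record above bundles simple kriging (skrige), ordinary kriging (okrige) and sequential Gaussian simulation (sgsim) routines that all share one variogram parameter list; a minimal sketch of its layout, with purely illustrative values, is:
# Layout assumed by cov()/skrige()/okrige()/sgsim() above; every value below is an
# illustrative assumption, not data taken from the record.
# [azimuth_deg, nugget, n_structures, [model types], [sills], [major ranges], [minor ranges]]
# Model types follow covar(): 1 = spherical, 2 = exponential, 3 = gaussian.
vario = [45.0, 0.0, 1, [2], [1.0], [500.0], [300.0]]
# Hypothetical call shape (column names 'X', 'Y', 'porosity' are assumptions):
# grid = pred_grid(0, 1000, 0, 1000, 10)
# est, var = skrige(grid, df, 'X', 'Y', 'porosity', 20, vario)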
98,595 |
b5b4b57e95ed1a0c91b14050e168572e470d1549
|
with open('arduino-eepromdump.txt') as f:
for line in f:
for i in line.split(' '):
if i == '':
continue
if int(i) == 0:
print('00', end='')
elif int(i) <= 9:
print('0', end='')
print(int(i), end='')
else:
print('{:02x}'.format(int(i)), end='')
print(' ', end='')
print()
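The branching above always produces zero-padded two-digit hex, so a minimal equivalent sketch (assuming the same space-separated decimal dump format) is:
# str.split() with no argument drops empty/whitespace tokens, and '{:02x}' already
# zero-pads values 0-15, so the 0 and <=9 special cases are unnecessary.
with open('arduino-eepromdump.txt') as f:
    values = [int(tok) for line in f for tok in line.split()]
print(' '.join('{:02x}'.format(v) for v in values))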
|
[
"with open('arduino-eepromdump.txt') as f:\r\n for line in f:\r\n for i in line.split(' '):\r\n if i == '':\r\n continue\r\n if int(i) == 0:\r\n print('00', end='')\r\n elif int(i) <= 9:\r\n print('0', end='')\r\n print(int(i), end='')\r\n else:\r\n print('{:02x}'.format(int(i)), end='')\r\n print(' ', end='')\r\nprint()\r\n",
"with open('arduino-eepromdump.txt') as f:\n for line in f:\n for i in line.split(' '):\n if i == '':\n continue\n if int(i) == 0:\n print('00', end='')\n elif int(i) <= 9:\n print('0', end='')\n print(int(i), end='')\n else:\n print('{:02x}'.format(int(i)), end='')\n print(' ', end='')\nprint()\n",
"<code token>\n"
] | false |
98,596 |
25d43bd88b6d5bcda2f0c9465d4ae534c7ae5733
|
"""This module implements utilities for manipulating text.
* color    add ANSI colors and styles to strings
"""
FG_COLORS = {
'black' : '30',
'red' : '31',
'green' : '32',
'yellow' : '33',
'blue' : '34',
'purple' : '35',
'cyan' : '36',
'white' : '37',
}
FXS = {
'normal' : '0',
'bold' : '1',
'underline': '4',
}
BG_COLORS = {
'black' : '40',
'red' : '41',
'green' : '42',
    'yellow' : '43',
'blue' : '44',
'purple' : '45',
'cyan' : '46',
'white' : '47',
}
ESCAPE = '\033['
def color(string, fg=None, fx=None, bg=None):
"""Changes the color and style of a string to be printed in a terminal.
Parameters
----------
string : str
The text to colorize.
fg : str
The text color (e.g. 'red', 'cyan', 'black', 'yellow', etc.).
    fx : str
The text style (e.g. 'normal', 'bold', and 'underline').
bg : str
The background color (e.g. 'red', 'cyan', 'black', 'yellow', etc.).
"""
keys = (fg, fx, bg)
tables = (FG_COLORS, FXS, BG_COLORS)
codes = [table[key] for table, key in zip(tables, keys) if key is not None]
return ESCAPE + ';'.join(codes) + 'm' + string + ESCAPE + '0m'
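A short usage sketch for the helper above; the emitted escape codes assume an ANSI-capable terminal:
# Bold red text on a white background, then a plain green string.
print(color('error: disk full', fg='red', fx='bold', bg='white'))
print(color('ok', fg='green'))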
|
[
"\"\"\"This module implements utilities for manipulating text.\n\n* color add ansi colors and styles to strings\n\"\"\"\n\n\nFG_COLORS = {\n 'black' : '30',\n 'red' : '31',\n 'green' : '32',\n 'yellow' : '33',\n 'blue' : '34',\n 'purple' : '35',\n 'cyan' : '36',\n 'white' : '37',\n}\n\nFXS = {\n 'normal' : '0',\n 'bold' : '1',\n 'underline': '4',\n}\n\nBG_COLORS = {\n 'black' : '40',\n 'red' : '41',\n 'green' : '42',\n 'yellow' : '44',\n 'blue' : '44',\n 'purple' : '45',\n 'cyan' : '46',\n 'white' : '47',\n}\n\n\nESCAPE = '\\033['\n\n\ndef color(string, fg=None, fx=None, bg=None):\n \"\"\"Changes the color and style of a string to be printed in a terminal.\n\n Parameters\n ----------\n string : str\n The text to colorize.\n fg : str\n The text color (e.g. 'red', 'cyan', 'black', 'yellow', etc.).\n fg : str\n The text style (e.g. 'normal', 'bold', and 'underline').\n bg : str\n The background color (e.g. 'red', 'cyan', 'black', 'yellow', etc.).\n \"\"\"\n keys = (fg, fx, bg)\n tables = (FG_COLORS, FXS, BG_COLORS)\n codes = [table[key] for table, key in zip(tables, keys) if key is not None]\n return ESCAPE + ';'.join(codes) + 'm' + string + ESCAPE + '0m'\n",
"<docstring token>\nFG_COLORS = {'black': '30', 'red': '31', 'green': '32', 'yellow': '33',\n 'blue': '34', 'purple': '35', 'cyan': '36', 'white': '37'}\nFXS = {'normal': '0', 'bold': '1', 'underline': '4'}\nBG_COLORS = {'black': '40', 'red': '41', 'green': '42', 'yellow': '44',\n 'blue': '44', 'purple': '45', 'cyan': '46', 'white': '47'}\nESCAPE = '\\x1b['\n\n\ndef color(string, fg=None, fx=None, bg=None):\n \"\"\"Changes the color and style of a string to be printed in a terminal.\n\n Parameters\n ----------\n string : str\n The text to colorize.\n fg : str\n The text color (e.g. 'red', 'cyan', 'black', 'yellow', etc.).\n fg : str\n The text style (e.g. 'normal', 'bold', and 'underline').\n bg : str\n The background color (e.g. 'red', 'cyan', 'black', 'yellow', etc.).\n \"\"\"\n keys = fg, fx, bg\n tables = FG_COLORS, FXS, BG_COLORS\n codes = [table[key] for table, key in zip(tables, keys) if key is not None]\n return ESCAPE + ';'.join(codes) + 'm' + string + ESCAPE + '0m'\n",
"<docstring token>\n<assignment token>\n\n\ndef color(string, fg=None, fx=None, bg=None):\n \"\"\"Changes the color and style of a string to be printed in a terminal.\n\n Parameters\n ----------\n string : str\n The text to colorize.\n fg : str\n The text color (e.g. 'red', 'cyan', 'black', 'yellow', etc.).\n fg : str\n The text style (e.g. 'normal', 'bold', and 'underline').\n bg : str\n The background color (e.g. 'red', 'cyan', 'black', 'yellow', etc.).\n \"\"\"\n keys = fg, fx, bg\n tables = FG_COLORS, FXS, BG_COLORS\n codes = [table[key] for table, key in zip(tables, keys) if key is not None]\n return ESCAPE + ';'.join(codes) + 'm' + string + ESCAPE + '0m'\n",
"<docstring token>\n<assignment token>\n<function token>\n"
] | false |
98,597 |
2a57dc4d76bdfac255b3f5d61f2b55c6a010e4b1
|
### Helper functions for simulation model
#### Author: Yiran Jing
#### Date: Feb 2020
import pandas as pd
import numpy as np
import operator
import matplotlib.pyplot as plt
import pandas as pd
import pandas
import datetime
import matplotlib.dates as mdates
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
import warnings
warnings.filterwarnings('ignore')
########################
## dataset help function
########################
def get_province_df(df, provinceName: str) -> pandas.core.frame.DataFrame:
"""
Return time series data of given province
"""
return df[(df['province']==provinceName) & (df['city'].isnull())]
def get_China_total(df) -> pandas.core.frame.DataFrame:
"""
Return time series data of China total (including HK and Taiwan)
"""
return df[(df['countryCode']=='CN') & (df['province'].isnull())]
def get_China_exhubei(df) -> pandas.core.frame.DataFrame:
"""
    Return city-level time series data for China excluding Hubei Province
"""
return df[(df['countryCode']=='CN') & (df['province']!='Hubei Province') & ~(df['province'].isnull()) \
& ~(df['city'].isnull())]
##################
## Clean data
##################
def add_days(DXYArea: pandas.core.frame.DataFrame) -> pandas.core.frame.DataFrame:
"""
Create a new column: Days, number of days after 2019-12-08 (detect the first case)
"""
DXYArea['date'] = pd.to_datetime(DXYArea['date'])
first_day = datetime.datetime(2019, 12, 8) # the time when detected the first case (2019-12-08)
DXYArea['Days'] = (DXYArea['date'] - first_day).dt.days
return DXYArea
def split_train_test_by_date(df: pandas.core.frame.DataFrame, splicer):
"""
Separate Train and Test dataset in time series
"""
if type(splicer) == float: ## check if splicer is a float
ndays = 3 if (df['date'].max() - df['date'].min()).days < 3 else splicer * (df['date'].max() - df['date'].min()).days
ndays = np.ceil(ndays)
elif type(splicer) == int:
ndays = splicer
else:
        raise Exception('splicer must be an int (number of days) or a float (fraction of the date range)')
split_date = df['date'].max() - datetime.timedelta(days=ndays)
## Separate Train and Test dataset
Train = df[df['date'] < split_date]
Test = df[df['date'] >= split_date]
print("Train dataset: data before {} \nTest dataset: the last {} days".format(split_date,ndays))
return Train, Test
def data_processing(df, splicer, features_to_engineer):
overall_df = pd.DataFrame(df.groupby(['date']).agg({'confirmed': "sum",
'suspected':'sum',
'cured': "sum",
'dead': 'sum',
'Days': 'mean'})).reset_index()
Train, Test = split_train_test_by_date(overall_df, splicer)
print(Train)
X_train = Train.loc[:,['Days']+[x+'_lag1' for x in features_to_engineer]]
y_train = Train['confirmed']
X_test = Test.loc[:,['Days']+[x+'_lag1' for x in features_to_engineer]]
y_test = Test['confirmed']
return X_train, X_test, y_train, y_test
##################
### feature engineering
##################
def feature_engineering(df:pandas.core.frame.DataFrame, features_to_engineer):
for feature in features_to_engineer:
df[f'{feature}_lag1'] = df[f'{feature}'].shift()
df[f'{feature}_lag1'].fillna(0, inplace = True)
return df
##################
### EDA
##################
def tsplot_conf_dead_cured(df, title_prefix, figsize=(13,10), fontsize=18, logy=False):
fig = plt.figure()
ax1 = fig.add_subplot(211)
plot_df = df.groupby('date').agg('sum')
plot_df.plot(y=['confirmed'], style='-*', ax=ax1, grid=True, figsize=figsize, logy=logy, color='black', marker='o')
if logy:
ax1.set_ylabel("log(confirmed)", color="black", fontsize=14)
else:
ax1.set_ylabel("confirmed", color="black", fontsize=14)
if 'dailyNew_confirmed' in df.columns:
ax11 = ax1.twinx()
ax11.bar(x=plot_df.index, height=plot_df['dailyNew_confirmed'], alpha=0.3, color='blue')
ax11.set_ylabel('dailyNew_confirmed', color='blue', fontsize=14)
ax2 = fig.add_subplot(212)
plot_df.plot(y=['dead', 'cured'], style=':*', grid=True, ax=ax2, figsize=figsize, sharex=False, logy=logy)
ax2.set_ylabel("count")
title = title_prefix + ' Cumulative Confirmed, Death, Cure'
fig.suptitle(title, fontsize=fontsize)
def draw_province_trend(title_prefix: str, df: pandas.core.frame.DataFrame):
"""
df is the daily dataset from DXY
"""
sub_df = df[df['province'] == title_prefix]
tsplot_conf_dead_cured(sub_df, title_prefix)
def draw_city_trend(title_prefix: str, df: pandas.core.frame.DataFrame):
"""
df is the daily dataset from DXY
"""
sub_df = df[df['city'] == title_prefix]
tsplot_conf_dead_cured(sub_df, title_prefix)
###################
## Modelling
###################
### general additive model
def draw_fit_plot(degree: int, area: str, X_train, X_test, y_train, y_test, y_train_predicted, y_test_predict, df):
if len(y_test)>0:
x = pd.Series(np.concatenate((X_train, X_test)))
y = pd.Series(np.concatenate((y_train, y_test)))
else:
x = X_train; y = y_train
fig, ax = plt.subplots()
#fig.canvas.draw()
plt.scatter(x, y, s=10, c = 'black')
plt.plot(X_train, y_train_predicted, color='green')
plt.plot(X_test, y_test_predict, color = 'blue')
plt.title("Polynomial Regression {} with degree = {}".format(area, degree))
plt.ylabel('Confirmed cases')
plt.xlabel('2020 Date')
datemin = df['date'].min()
numdays = len(X_train) + len(X_test)
labels = list((datemin + datetime.timedelta(days=x)).strftime('%m-%d') for x in range(numdays))
x = pd.Series(np.concatenate((X_train, X_test)))
plt.xticks(x, labels,rotation=60)
    #fig.autofmt_xdate() # rotate the date labels and shift the axes up to make room for them
plt.show()
def fit_pygam_model(X_train: pandas.core.frame.DataFrame,
X_test: pandas.core.frame.DataFrame,
y_train: pandas.core.frame.DataFrame,
y_test: pandas.core.frame.DataFrame):
'''
Creates a general additive model LinearGAM (normally distributed errors)
with grid search. Returns the best model with given hyperparameters.
hyperparameters: n_splines and lam regularization parameter.
'''
from pygam import LinearGAM
gam = LinearGAM().gridsearch(X_train.values, y_train, n_splines=np.arange(3,20), lam = np.logspace(-3, 3, 11))
print(gam.summary())
y_train_predicted = gam.predict(X_train)
y_test_predicted = np.floor(gam.predict(X_test))
rmse_train = np.sqrt(mean_squared_error(y_train, y_train_predicted))
mae_train = mean_absolute_error(y_train, y_train_predicted)
r2_train = r2_score(y_train, y_train_predicted)
print("RMSE of training set is {}".format(rmse_train))
print("MAE of testing set is {}".format(mae_train))
print("R2 score of training set is {}\n".format(r2_train))
if len(y_test)>0:
rmse_test = np.sqrt(mean_squared_error(y_test, y_test_predicted))
mae_test = mean_absolute_error(y_test, y_test_predicted)
r2_test = r2_score(y_test, y_test_predicted)
print("RMSE of testing set is {}".format(rmse_test))
print("MAE of testing set is {}".format(mae_test))
print("R2 score of testing set is {}\n".format(r2_test))
'''
Visualize the feature significance and confidence intervals
'''
num_features = len(X_train.columns)
fig = plt.figure(figsize=(18,12))
fig.subplots_adjust(hspace=0.4)
cnt = 1
p_values = gam.statistics_['p_values']
for i in range(num_features):
axs = fig.add_subplot(num_features,1,cnt)
m = gam.generate_X_grid(term=i)
        axs.plot(m[:,i], gam.partial_dependence(term=i, X=m)) # this is the actual coefficients
axs.plot(m[:,i], gam.partial_dependence(term=i, X=m, width=.95)[1],c='r',ls='--') # this plots the confidence intervals
axs.set_title(X_train.columns[i] + ('*' if p_values[cnt]<0.05 else ''))
cnt += 1
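A minimal end-to-end sketch of how these helpers chain together; the CSV path and its column names are assumptions, not part of the original module:
if __name__ == '__main__':
    # Hypothetical DXY-style daily CSV with date/countryCode/province/city/confirmed/... columns.
    DXYArea = pd.read_csv('DXYArea.csv')
    DXYArea = add_days(DXYArea)
    china = get_China_total(DXYArea)
    # data_processing() re-aggregates by date and keeps only 'Days' plus any requested
    # *_lag1 columns, so an empty feature list keeps this sketch runnable with 'Days'
    # as the single regressor.
    X_train, X_test, y_train, y_test = data_processing(china, splicer=3,
                                                       features_to_engineer=[])
    fit_pygam_model(X_train, X_test, y_train, y_test)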
|
[
"### Helper function for simulation model\n\n#### Author: Yiran Jing\n#### Date: Feb 2020\n\nimport pandas as pd\nimport numpy as np\nimport operator\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport pandas\nimport datetime\nimport matplotlib.dates as mdates\nfrom sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error\nimport warnings\nwarnings.filterwarnings('ignore')\n\n########################\n## dataset help function\n########################\n\ndef get_province_df(df, provinceName: str) -> pandas.core.frame.DataFrame:\n \"\"\"\n Return time series data of given province\n \"\"\"\n return df[(df['province']==provinceName) & (df['city'].isnull())]\n\n\ndef get_China_total(df) -> pandas.core.frame.DataFrame:\n \"\"\"\n Return time series data of China total (including HK and Taiwan)\n \"\"\"\n return df[(df['countryCode']=='CN') & (df['province'].isnull())]\n\n\ndef get_China_exhubei(df) -> pandas.core.frame.DataFrame:\n \"\"\"\n Return time series data of China total (including HK and Taiwan)\n \"\"\"\n return df[(df['countryCode']=='CN') & (df['province']!='Hubei Province') & ~(df['province'].isnull()) \\\n & ~(df['city'].isnull())]\n\n##################\n## Clean data\n##################\ndef add_days(DXYArea: pandas.core.frame.DataFrame) -> pandas.core.frame.DataFrame:\n \"\"\"\n Create a new column: Days, number of days after 2019-12-08 (detect the first case)\n \"\"\"\n DXYArea['date'] = pd.to_datetime(DXYArea['date'])\n first_day = datetime.datetime(2019, 12, 8) # the time when detected the first case (2019-12-08)\n DXYArea['Days'] = (DXYArea['date'] - first_day).dt.days\n return DXYArea\n\ndef split_train_test_by_date(df: pandas.core.frame.DataFrame, splicer):\n \"\"\"\n Separate Train and Test dataset in time series\n \"\"\"\n if type(splicer) == float: ## check if splicer is a float\n ndays = 3 if (df['date'].max() - df['date'].min()).days < 3 else splicer * (df['date'].max() - df['date'].min()).days\n ndays = np.ceil(ndays)\n elif type(splicer) == int:\n ndays = splicer \n else:\n raise Exception('split value should not be greater than length of data')\n\n split_date = df['date'].max() - datetime.timedelta(days=ndays)\n \n ## Separate Train and Test dataset\n Train = df[df['date'] < split_date]\n Test = df[df['date'] >= split_date]\n print(\"Train dataset: data before {} \\nTest dataset: the last {} days\".format(split_date,ndays))\n \n return Train, Test\n\ndef data_processing(df, splicer, features_to_engineer):\n\n overall_df = pd.DataFrame(df.groupby(['date']).agg({'confirmed': \"sum\",\n 'suspected':'sum',\n 'cured': \"sum\",\n 'dead': 'sum',\n 'Days': 'mean'})).reset_index()\n \n \n \n Train, Test = split_train_test_by_date(overall_df, splicer)\n print(Train)\n\n X_train = Train.loc[:,['Days']+[x+'_lag1' for x in features_to_engineer]]\n y_train = Train['confirmed']\n X_test = Test.loc[:,['Days']+[x+'_lag1' for x in features_to_engineer]]\n y_test = Test['confirmed']\n \n return X_train, X_test, y_train, y_test\n\n##################\n### feature engineering\n##################\n\ndef feature_engineering(df:pandas.core.frame.DataFrame, features_to_engineer):\n for feature in features_to_engineer:\n df[f'{feature}_lag1'] = df[f'{feature}'].shift()\n df[f'{feature}_lag1'].fillna(0, inplace = True)\n return df\n\n##################\n### EDA\n##################\n\ndef tsplot_conf_dead_cured(df, title_prefix, figsize=(13,10), fontsize=18, logy=False):\n fig = plt.figure()\n ax1 = fig.add_subplot(211)\n plot_df = df.groupby('date').agg('sum')\n 
plot_df.plot(y=['confirmed'], style='-*', ax=ax1, grid=True, figsize=figsize, logy=logy, color='black', marker='o')\n if logy:\n ax1.set_ylabel(\"log(confirmed)\", color=\"black\", fontsize=14)\n else:\n ax1.set_ylabel(\"confirmed\", color=\"black\", fontsize=14)\n if 'dailyNew_confirmed' in df.columns:\n ax11 = ax1.twinx()\n ax11.bar(x=plot_df.index, height=plot_df['dailyNew_confirmed'], alpha=0.3, color='blue')\n ax11.set_ylabel('dailyNew_confirmed', color='blue', fontsize=14)\n ax2 = fig.add_subplot(212)\n plot_df.plot(y=['dead', 'cured'], style=':*', grid=True, ax=ax2, figsize=figsize, sharex=False, logy=logy)\n ax2.set_ylabel(\"count\")\n title = title_prefix + ' Cumulative Confirmed, Death, Cure'\n fig.suptitle(title, fontsize=fontsize)\n \ndef draw_province_trend(title_prefix: str, df: pandas.core.frame.DataFrame):\n \"\"\"\n df is the daily dataset from DXY\n \"\"\"\n sub_df = df[df['province'] == title_prefix]\n tsplot_conf_dead_cured(sub_df, title_prefix)\n \ndef draw_city_trend(title_prefix: str, df: pandas.core.frame.DataFrame):\n \"\"\"\n df is the daily dataset from DXY\n \"\"\"\n sub_df = df[df['city'] == title_prefix]\n tsplot_conf_dead_cured(sub_df, title_prefix)\n\n\n###################\n## Modelling\n###################\n\n### general additive model\n\ndef draw_fit_plot(degree: int, area: str, X_train, X_test, y_train, y_test, y_train_predicted, y_test_predict, df):\n if len(y_test)>0:\n x = pd.Series(np.concatenate((X_train, X_test)))\n y = pd.Series(np.concatenate((y_train, y_test)))\n else:\n x = X_train; y = y_train\n \n fig, ax = plt.subplots()\n #fig.canvas.draw()\n plt.scatter(x, y, s=10, c = 'black')\n plt.plot(X_train, y_train_predicted, color='green')\n plt.plot(X_test, y_test_predict, color = 'blue')\n plt.title(\"Polynomial Regression {} with degree = {}\".format(area, degree))\n plt.ylabel('Confirmed cases')\n plt.xlabel('2020 Date')\n \n datemin = df['date'].min()\n numdays = len(X_train) + len(X_test)\n labels = list((datemin + datetime.timedelta(days=x)).strftime('%m-%d') for x in range(numdays))\n \n x = pd.Series(np.concatenate((X_train, X_test)))\n plt.xticks(x, labels,rotation=60)\n #fig.autofmt_xdate() # axes up to make room for them\n \n plt.show()\n \n\ndef fit_pygam_model(X_train: pandas.core.frame.DataFrame, \n X_test: pandas.core.frame.DataFrame,\n y_train: pandas.core.frame.DataFrame, \n y_test: pandas.core.frame.DataFrame):\n '''\n Creates a general additive model LinearGAM (normally distributed errors)\n with grid search. 
Returns the best model with given hyperparameters.\n hyperparameters: n_splines and lam regularization parameter.\n '''\n from pygam import LinearGAM\n gam = LinearGAM().gridsearch(X_train.values, y_train, n_splines=np.arange(3,20), lam = np.logspace(-3, 3, 11))\n print(gam.summary())\n \n y_train_predicted = gam.predict(X_train)\n y_test_predicted = np.floor(gam.predict(X_test))\n \n rmse_train = np.sqrt(mean_squared_error(y_train, y_train_predicted))\n mae_train = mean_absolute_error(y_train, y_train_predicted)\n r2_train = r2_score(y_train, y_train_predicted)\n print(\"RMSE of training set is {}\".format(rmse_train))\n print(\"MAE of testing set is {}\".format(mae_train))\n print(\"R2 score of training set is {}\\n\".format(r2_train))\n \n if len(y_test)>0:\n rmse_test = np.sqrt(mean_squared_error(y_test, y_test_predicted))\n mae_test = mean_absolute_error(y_test, y_test_predicted)\n r2_test = r2_score(y_test, y_test_predicted)\n print(\"RMSE of testing set is {}\".format(rmse_test))\n print(\"MAE of testing set is {}\".format(mae_test))\n print(\"R2 score of testing set is {}\\n\".format(r2_test))\n \n '''\n Visualize the feature significance and confidence intervals\n '''\n num_features = len(X_train.columns)\n fig = plt.figure(figsize=(18,12))\n fig.subplots_adjust(hspace=0.4)\n\n cnt = 1\n p_values = gam.statistics_['p_values']\n\n for i in range(num_features):\n axs = fig.add_subplot(num_features,1,cnt)\n m = gam.generate_X_grid(term=i)\n axs.plot(m[:,i], gam.partial_dependence(term=i, X=m)) # this is the actual coefficents\n axs.plot(m[:,i], gam.partial_dependence(term=i, X=m, width=.95)[1],c='r',ls='--') # this plots the confidence intervals\n axs.set_title(X_train.columns[i] + ('*' if p_values[cnt]<0.05 else ''))\n cnt += 1\n\n ",
"import pandas as pd\nimport numpy as np\nimport operator\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport pandas\nimport datetime\nimport matplotlib.dates as mdates\nfrom sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\ndef get_province_df(df, provinceName: str) ->pandas.core.frame.DataFrame:\n \"\"\"\n Return time series data of given province\n \"\"\"\n return df[(df['province'] == provinceName) & df['city'].isnull()]\n\n\ndef get_China_total(df) ->pandas.core.frame.DataFrame:\n \"\"\"\n Return time series data of China total (including HK and Taiwan)\n \"\"\"\n return df[(df['countryCode'] == 'CN') & df['province'].isnull()]\n\n\ndef get_China_exhubei(df) ->pandas.core.frame.DataFrame:\n \"\"\"\n Return time series data of China total (including HK and Taiwan)\n \"\"\"\n return df[(df['countryCode'] == 'CN') & (df['province'] !=\n 'Hubei Province') & ~df['province'].isnull() & ~df['city'].isnull()]\n\n\ndef add_days(DXYArea: pandas.core.frame.DataFrame\n ) ->pandas.core.frame.DataFrame:\n \"\"\"\n Create a new column: Days, number of days after 2019-12-08 (detect the first case)\n \"\"\"\n DXYArea['date'] = pd.to_datetime(DXYArea['date'])\n first_day = datetime.datetime(2019, 12, 8)\n DXYArea['Days'] = (DXYArea['date'] - first_day).dt.days\n return DXYArea\n\n\ndef split_train_test_by_date(df: pandas.core.frame.DataFrame, splicer):\n \"\"\"\n Separate Train and Test dataset in time series\n \"\"\"\n if type(splicer) == float:\n ndays = 3 if (df['date'].max() - df['date'].min()\n ).days < 3 else splicer * (df['date'].max() - df['date'].min()\n ).days\n ndays = np.ceil(ndays)\n elif type(splicer) == int:\n ndays = splicer\n else:\n raise Exception('split value should not be greater than length of data'\n )\n split_date = df['date'].max() - datetime.timedelta(days=ndays)\n Train = df[df['date'] < split_date]\n Test = df[df['date'] >= split_date]\n print('Train dataset: data before {} \\nTest dataset: the last {} days'.\n format(split_date, ndays))\n return Train, Test\n\n\ndef data_processing(df, splicer, features_to_engineer):\n overall_df = pd.DataFrame(df.groupby(['date']).agg({'confirmed': 'sum',\n 'suspected': 'sum', 'cured': 'sum', 'dead': 'sum', 'Days': 'mean'})\n ).reset_index()\n Train, Test = split_train_test_by_date(overall_df, splicer)\n print(Train)\n X_train = Train.loc[:, ['Days'] + [(x + '_lag1') for x in\n features_to_engineer]]\n y_train = Train['confirmed']\n X_test = Test.loc[:, ['Days'] + [(x + '_lag1') for x in\n features_to_engineer]]\n y_test = Test['confirmed']\n return X_train, X_test, y_train, y_test\n\n\ndef feature_engineering(df: pandas.core.frame.DataFrame, features_to_engineer):\n for feature in features_to_engineer:\n df[f'{feature}_lag1'] = df[f'{feature}'].shift()\n df[f'{feature}_lag1'].fillna(0, inplace=True)\n return df\n\n\ndef tsplot_conf_dead_cured(df, title_prefix, figsize=(13, 10), fontsize=18,\n logy=False):\n fig = plt.figure()\n ax1 = fig.add_subplot(211)\n plot_df = df.groupby('date').agg('sum')\n plot_df.plot(y=['confirmed'], style='-*', ax=ax1, grid=True, figsize=\n figsize, logy=logy, color='black', marker='o')\n if logy:\n ax1.set_ylabel('log(confirmed)', color='black', fontsize=14)\n else:\n ax1.set_ylabel('confirmed', color='black', fontsize=14)\n if 'dailyNew_confirmed' in df.columns:\n ax11 = ax1.twinx()\n ax11.bar(x=plot_df.index, height=plot_df['dailyNew_confirmed'],\n alpha=0.3, color='blue')\n ax11.set_ylabel('dailyNew_confirmed', 
color='blue', fontsize=14)\n ax2 = fig.add_subplot(212)\n plot_df.plot(y=['dead', 'cured'], style=':*', grid=True, ax=ax2,\n figsize=figsize, sharex=False, logy=logy)\n ax2.set_ylabel('count')\n title = title_prefix + ' Cumulative Confirmed, Death, Cure'\n fig.suptitle(title, fontsize=fontsize)\n\n\ndef draw_province_trend(title_prefix: str, df: pandas.core.frame.DataFrame):\n \"\"\"\n df is the daily dataset from DXY\n \"\"\"\n sub_df = df[df['province'] == title_prefix]\n tsplot_conf_dead_cured(sub_df, title_prefix)\n\n\ndef draw_city_trend(title_prefix: str, df: pandas.core.frame.DataFrame):\n \"\"\"\n df is the daily dataset from DXY\n \"\"\"\n sub_df = df[df['city'] == title_prefix]\n tsplot_conf_dead_cured(sub_df, title_prefix)\n\n\ndef draw_fit_plot(degree: int, area: str, X_train, X_test, y_train, y_test,\n y_train_predicted, y_test_predict, df):\n if len(y_test) > 0:\n x = pd.Series(np.concatenate((X_train, X_test)))\n y = pd.Series(np.concatenate((y_train, y_test)))\n else:\n x = X_train\n y = y_train\n fig, ax = plt.subplots()\n plt.scatter(x, y, s=10, c='black')\n plt.plot(X_train, y_train_predicted, color='green')\n plt.plot(X_test, y_test_predict, color='blue')\n plt.title('Polynomial Regression {} with degree = {}'.format(area, degree))\n plt.ylabel('Confirmed cases')\n plt.xlabel('2020 Date')\n datemin = df['date'].min()\n numdays = len(X_train) + len(X_test)\n labels = list((datemin + datetime.timedelta(days=x)).strftime('%m-%d') for\n x in range(numdays))\n x = pd.Series(np.concatenate((X_train, X_test)))\n plt.xticks(x, labels, rotation=60)\n plt.show()\n\n\ndef fit_pygam_model(X_train: pandas.core.frame.DataFrame, X_test: pandas.\n core.frame.DataFrame, y_train: pandas.core.frame.DataFrame, y_test:\n pandas.core.frame.DataFrame):\n \"\"\"\n Creates a general additive model LinearGAM (normally distributed errors)\n with grid search. 
Returns the best model with given hyperparameters.\n hyperparameters: n_splines and lam regularization parameter.\n \"\"\"\n from pygam import LinearGAM\n gam = LinearGAM().gridsearch(X_train.values, y_train, n_splines=np.\n arange(3, 20), lam=np.logspace(-3, 3, 11))\n print(gam.summary())\n y_train_predicted = gam.predict(X_train)\n y_test_predicted = np.floor(gam.predict(X_test))\n rmse_train = np.sqrt(mean_squared_error(y_train, y_train_predicted))\n mae_train = mean_absolute_error(y_train, y_train_predicted)\n r2_train = r2_score(y_train, y_train_predicted)\n print('RMSE of training set is {}'.format(rmse_train))\n print('MAE of testing set is {}'.format(mae_train))\n print('R2 score of training set is {}\\n'.format(r2_train))\n if len(y_test) > 0:\n rmse_test = np.sqrt(mean_squared_error(y_test, y_test_predicted))\n mae_test = mean_absolute_error(y_test, y_test_predicted)\n r2_test = r2_score(y_test, y_test_predicted)\n print('RMSE of testing set is {}'.format(rmse_test))\n print('MAE of testing set is {}'.format(mae_test))\n print('R2 score of testing set is {}\\n'.format(r2_test))\n \"\"\"\n Visualize the feature significance and confidence intervals\n \"\"\"\n num_features = len(X_train.columns)\n fig = plt.figure(figsize=(18, 12))\n fig.subplots_adjust(hspace=0.4)\n cnt = 1\n p_values = gam.statistics_['p_values']\n for i in range(num_features):\n axs = fig.add_subplot(num_features, 1, cnt)\n m = gam.generate_X_grid(term=i)\n axs.plot(m[:, i], gam.partial_dependence(term=i, X=m))\n axs.plot(m[:, i], gam.partial_dependence(term=i, X=m, width=0.95)[1\n ], c='r', ls='--')\n axs.set_title(X_train.columns[i] + ('*' if p_values[cnt] < 0.05 else\n ''))\n cnt += 1\n",
"<import token>\nwarnings.filterwarnings('ignore')\n\n\ndef get_province_df(df, provinceName: str) ->pandas.core.frame.DataFrame:\n \"\"\"\n Return time series data of given province\n \"\"\"\n return df[(df['province'] == provinceName) & df['city'].isnull()]\n\n\ndef get_China_total(df) ->pandas.core.frame.DataFrame:\n \"\"\"\n Return time series data of China total (including HK and Taiwan)\n \"\"\"\n return df[(df['countryCode'] == 'CN') & df['province'].isnull()]\n\n\ndef get_China_exhubei(df) ->pandas.core.frame.DataFrame:\n \"\"\"\n Return time series data of China total (including HK and Taiwan)\n \"\"\"\n return df[(df['countryCode'] == 'CN') & (df['province'] !=\n 'Hubei Province') & ~df['province'].isnull() & ~df['city'].isnull()]\n\n\ndef add_days(DXYArea: pandas.core.frame.DataFrame\n ) ->pandas.core.frame.DataFrame:\n \"\"\"\n Create a new column: Days, number of days after 2019-12-08 (detect the first case)\n \"\"\"\n DXYArea['date'] = pd.to_datetime(DXYArea['date'])\n first_day = datetime.datetime(2019, 12, 8)\n DXYArea['Days'] = (DXYArea['date'] - first_day).dt.days\n return DXYArea\n\n\ndef split_train_test_by_date(df: pandas.core.frame.DataFrame, splicer):\n \"\"\"\n Separate Train and Test dataset in time series\n \"\"\"\n if type(splicer) == float:\n ndays = 3 if (df['date'].max() - df['date'].min()\n ).days < 3 else splicer * (df['date'].max() - df['date'].min()\n ).days\n ndays = np.ceil(ndays)\n elif type(splicer) == int:\n ndays = splicer\n else:\n raise Exception('split value should not be greater than length of data'\n )\n split_date = df['date'].max() - datetime.timedelta(days=ndays)\n Train = df[df['date'] < split_date]\n Test = df[df['date'] >= split_date]\n print('Train dataset: data before {} \\nTest dataset: the last {} days'.\n format(split_date, ndays))\n return Train, Test\n\n\ndef data_processing(df, splicer, features_to_engineer):\n overall_df = pd.DataFrame(df.groupby(['date']).agg({'confirmed': 'sum',\n 'suspected': 'sum', 'cured': 'sum', 'dead': 'sum', 'Days': 'mean'})\n ).reset_index()\n Train, Test = split_train_test_by_date(overall_df, splicer)\n print(Train)\n X_train = Train.loc[:, ['Days'] + [(x + '_lag1') for x in\n features_to_engineer]]\n y_train = Train['confirmed']\n X_test = Test.loc[:, ['Days'] + [(x + '_lag1') for x in\n features_to_engineer]]\n y_test = Test['confirmed']\n return X_train, X_test, y_train, y_test\n\n\ndef feature_engineering(df: pandas.core.frame.DataFrame, features_to_engineer):\n for feature in features_to_engineer:\n df[f'{feature}_lag1'] = df[f'{feature}'].shift()\n df[f'{feature}_lag1'].fillna(0, inplace=True)\n return df\n\n\ndef tsplot_conf_dead_cured(df, title_prefix, figsize=(13, 10), fontsize=18,\n logy=False):\n fig = plt.figure()\n ax1 = fig.add_subplot(211)\n plot_df = df.groupby('date').agg('sum')\n plot_df.plot(y=['confirmed'], style='-*', ax=ax1, grid=True, figsize=\n figsize, logy=logy, color='black', marker='o')\n if logy:\n ax1.set_ylabel('log(confirmed)', color='black', fontsize=14)\n else:\n ax1.set_ylabel('confirmed', color='black', fontsize=14)\n if 'dailyNew_confirmed' in df.columns:\n ax11 = ax1.twinx()\n ax11.bar(x=plot_df.index, height=plot_df['dailyNew_confirmed'],\n alpha=0.3, color='blue')\n ax11.set_ylabel('dailyNew_confirmed', color='blue', fontsize=14)\n ax2 = fig.add_subplot(212)\n plot_df.plot(y=['dead', 'cured'], style=':*', grid=True, ax=ax2,\n figsize=figsize, sharex=False, logy=logy)\n ax2.set_ylabel('count')\n title = title_prefix + ' Cumulative Confirmed, Death, Cure'\n 
fig.suptitle(title, fontsize=fontsize)\n\n\ndef draw_province_trend(title_prefix: str, df: pandas.core.frame.DataFrame):\n \"\"\"\n df is the daily dataset from DXY\n \"\"\"\n sub_df = df[df['province'] == title_prefix]\n tsplot_conf_dead_cured(sub_df, title_prefix)\n\n\ndef draw_city_trend(title_prefix: str, df: pandas.core.frame.DataFrame):\n \"\"\"\n df is the daily dataset from DXY\n \"\"\"\n sub_df = df[df['city'] == title_prefix]\n tsplot_conf_dead_cured(sub_df, title_prefix)\n\n\ndef draw_fit_plot(degree: int, area: str, X_train, X_test, y_train, y_test,\n y_train_predicted, y_test_predict, df):\n if len(y_test) > 0:\n x = pd.Series(np.concatenate((X_train, X_test)))\n y = pd.Series(np.concatenate((y_train, y_test)))\n else:\n x = X_train\n y = y_train\n fig, ax = plt.subplots()\n plt.scatter(x, y, s=10, c='black')\n plt.plot(X_train, y_train_predicted, color='green')\n plt.plot(X_test, y_test_predict, color='blue')\n plt.title('Polynomial Regression {} with degree = {}'.format(area, degree))\n plt.ylabel('Confirmed cases')\n plt.xlabel('2020 Date')\n datemin = df['date'].min()\n numdays = len(X_train) + len(X_test)\n labels = list((datemin + datetime.timedelta(days=x)).strftime('%m-%d') for\n x in range(numdays))\n x = pd.Series(np.concatenate((X_train, X_test)))\n plt.xticks(x, labels, rotation=60)\n plt.show()\n\n\ndef fit_pygam_model(X_train: pandas.core.frame.DataFrame, X_test: pandas.\n core.frame.DataFrame, y_train: pandas.core.frame.DataFrame, y_test:\n pandas.core.frame.DataFrame):\n \"\"\"\n Creates a general additive model LinearGAM (normally distributed errors)\n with grid search. Returns the best model with given hyperparameters.\n hyperparameters: n_splines and lam regularization parameter.\n \"\"\"\n from pygam import LinearGAM\n gam = LinearGAM().gridsearch(X_train.values, y_train, n_splines=np.\n arange(3, 20), lam=np.logspace(-3, 3, 11))\n print(gam.summary())\n y_train_predicted = gam.predict(X_train)\n y_test_predicted = np.floor(gam.predict(X_test))\n rmse_train = np.sqrt(mean_squared_error(y_train, y_train_predicted))\n mae_train = mean_absolute_error(y_train, y_train_predicted)\n r2_train = r2_score(y_train, y_train_predicted)\n print('RMSE of training set is {}'.format(rmse_train))\n print('MAE of testing set is {}'.format(mae_train))\n print('R2 score of training set is {}\\n'.format(r2_train))\n if len(y_test) > 0:\n rmse_test = np.sqrt(mean_squared_error(y_test, y_test_predicted))\n mae_test = mean_absolute_error(y_test, y_test_predicted)\n r2_test = r2_score(y_test, y_test_predicted)\n print('RMSE of testing set is {}'.format(rmse_test))\n print('MAE of testing set is {}'.format(mae_test))\n print('R2 score of testing set is {}\\n'.format(r2_test))\n \"\"\"\n Visualize the feature significance and confidence intervals\n \"\"\"\n num_features = len(X_train.columns)\n fig = plt.figure(figsize=(18, 12))\n fig.subplots_adjust(hspace=0.4)\n cnt = 1\n p_values = gam.statistics_['p_values']\n for i in range(num_features):\n axs = fig.add_subplot(num_features, 1, cnt)\n m = gam.generate_X_grid(term=i)\n axs.plot(m[:, i], gam.partial_dependence(term=i, X=m))\n axs.plot(m[:, i], gam.partial_dependence(term=i, X=m, width=0.95)[1\n ], c='r', ls='--')\n axs.set_title(X_train.columns[i] + ('*' if p_values[cnt] < 0.05 else\n ''))\n cnt += 1\n",
"<import token>\n<code token>\n\n\ndef get_province_df(df, provinceName: str) ->pandas.core.frame.DataFrame:\n \"\"\"\n Return time series data of given province\n \"\"\"\n return df[(df['province'] == provinceName) & df['city'].isnull()]\n\n\ndef get_China_total(df) ->pandas.core.frame.DataFrame:\n \"\"\"\n Return time series data of China total (including HK and Taiwan)\n \"\"\"\n return df[(df['countryCode'] == 'CN') & df['province'].isnull()]\n\n\ndef get_China_exhubei(df) ->pandas.core.frame.DataFrame:\n \"\"\"\n Return time series data of China total (including HK and Taiwan)\n \"\"\"\n return df[(df['countryCode'] == 'CN') & (df['province'] !=\n 'Hubei Province') & ~df['province'].isnull() & ~df['city'].isnull()]\n\n\ndef add_days(DXYArea: pandas.core.frame.DataFrame\n ) ->pandas.core.frame.DataFrame:\n \"\"\"\n Create a new column: Days, number of days after 2019-12-08 (detect the first case)\n \"\"\"\n DXYArea['date'] = pd.to_datetime(DXYArea['date'])\n first_day = datetime.datetime(2019, 12, 8)\n DXYArea['Days'] = (DXYArea['date'] - first_day).dt.days\n return DXYArea\n\n\ndef split_train_test_by_date(df: pandas.core.frame.DataFrame, splicer):\n \"\"\"\n Separate Train and Test dataset in time series\n \"\"\"\n if type(splicer) == float:\n ndays = 3 if (df['date'].max() - df['date'].min()\n ).days < 3 else splicer * (df['date'].max() - df['date'].min()\n ).days\n ndays = np.ceil(ndays)\n elif type(splicer) == int:\n ndays = splicer\n else:\n raise Exception('split value should not be greater than length of data'\n )\n split_date = df['date'].max() - datetime.timedelta(days=ndays)\n Train = df[df['date'] < split_date]\n Test = df[df['date'] >= split_date]\n print('Train dataset: data before {} \\nTest dataset: the last {} days'.\n format(split_date, ndays))\n return Train, Test\n\n\ndef data_processing(df, splicer, features_to_engineer):\n overall_df = pd.DataFrame(df.groupby(['date']).agg({'confirmed': 'sum',\n 'suspected': 'sum', 'cured': 'sum', 'dead': 'sum', 'Days': 'mean'})\n ).reset_index()\n Train, Test = split_train_test_by_date(overall_df, splicer)\n print(Train)\n X_train = Train.loc[:, ['Days'] + [(x + '_lag1') for x in\n features_to_engineer]]\n y_train = Train['confirmed']\n X_test = Test.loc[:, ['Days'] + [(x + '_lag1') for x in\n features_to_engineer]]\n y_test = Test['confirmed']\n return X_train, X_test, y_train, y_test\n\n\ndef feature_engineering(df: pandas.core.frame.DataFrame, features_to_engineer):\n for feature in features_to_engineer:\n df[f'{feature}_lag1'] = df[f'{feature}'].shift()\n df[f'{feature}_lag1'].fillna(0, inplace=True)\n return df\n\n\ndef tsplot_conf_dead_cured(df, title_prefix, figsize=(13, 10), fontsize=18,\n logy=False):\n fig = plt.figure()\n ax1 = fig.add_subplot(211)\n plot_df = df.groupby('date').agg('sum')\n plot_df.plot(y=['confirmed'], style='-*', ax=ax1, grid=True, figsize=\n figsize, logy=logy, color='black', marker='o')\n if logy:\n ax1.set_ylabel('log(confirmed)', color='black', fontsize=14)\n else:\n ax1.set_ylabel('confirmed', color='black', fontsize=14)\n if 'dailyNew_confirmed' in df.columns:\n ax11 = ax1.twinx()\n ax11.bar(x=plot_df.index, height=plot_df['dailyNew_confirmed'],\n alpha=0.3, color='blue')\n ax11.set_ylabel('dailyNew_confirmed', color='blue', fontsize=14)\n ax2 = fig.add_subplot(212)\n plot_df.plot(y=['dead', 'cured'], style=':*', grid=True, ax=ax2,\n figsize=figsize, sharex=False, logy=logy)\n ax2.set_ylabel('count')\n title = title_prefix + ' Cumulative Confirmed, Death, Cure'\n fig.suptitle(title, 
fontsize=fontsize)\n\n\ndef draw_province_trend(title_prefix: str, df: pandas.core.frame.DataFrame):\n \"\"\"\n df is the daily dataset from DXY\n \"\"\"\n sub_df = df[df['province'] == title_prefix]\n tsplot_conf_dead_cured(sub_df, title_prefix)\n\n\ndef draw_city_trend(title_prefix: str, df: pandas.core.frame.DataFrame):\n \"\"\"\n df is the daily dataset from DXY\n \"\"\"\n sub_df = df[df['city'] == title_prefix]\n tsplot_conf_dead_cured(sub_df, title_prefix)\n\n\ndef draw_fit_plot(degree: int, area: str, X_train, X_test, y_train, y_test,\n y_train_predicted, y_test_predict, df):\n if len(y_test) > 0:\n x = pd.Series(np.concatenate((X_train, X_test)))\n y = pd.Series(np.concatenate((y_train, y_test)))\n else:\n x = X_train\n y = y_train\n fig, ax = plt.subplots()\n plt.scatter(x, y, s=10, c='black')\n plt.plot(X_train, y_train_predicted, color='green')\n plt.plot(X_test, y_test_predict, color='blue')\n plt.title('Polynomial Regression {} with degree = {}'.format(area, degree))\n plt.ylabel('Confirmed cases')\n plt.xlabel('2020 Date')\n datemin = df['date'].min()\n numdays = len(X_train) + len(X_test)\n labels = list((datemin + datetime.timedelta(days=x)).strftime('%m-%d') for\n x in range(numdays))\n x = pd.Series(np.concatenate((X_train, X_test)))\n plt.xticks(x, labels, rotation=60)\n plt.show()\n\n\ndef fit_pygam_model(X_train: pandas.core.frame.DataFrame, X_test: pandas.\n core.frame.DataFrame, y_train: pandas.core.frame.DataFrame, y_test:\n pandas.core.frame.DataFrame):\n \"\"\"\n Creates a general additive model LinearGAM (normally distributed errors)\n with grid search. Returns the best model with given hyperparameters.\n hyperparameters: n_splines and lam regularization parameter.\n \"\"\"\n from pygam import LinearGAM\n gam = LinearGAM().gridsearch(X_train.values, y_train, n_splines=np.\n arange(3, 20), lam=np.logspace(-3, 3, 11))\n print(gam.summary())\n y_train_predicted = gam.predict(X_train)\n y_test_predicted = np.floor(gam.predict(X_test))\n rmse_train = np.sqrt(mean_squared_error(y_train, y_train_predicted))\n mae_train = mean_absolute_error(y_train, y_train_predicted)\n r2_train = r2_score(y_train, y_train_predicted)\n print('RMSE of training set is {}'.format(rmse_train))\n print('MAE of testing set is {}'.format(mae_train))\n print('R2 score of training set is {}\\n'.format(r2_train))\n if len(y_test) > 0:\n rmse_test = np.sqrt(mean_squared_error(y_test, y_test_predicted))\n mae_test = mean_absolute_error(y_test, y_test_predicted)\n r2_test = r2_score(y_test, y_test_predicted)\n print('RMSE of testing set is {}'.format(rmse_test))\n print('MAE of testing set is {}'.format(mae_test))\n print('R2 score of testing set is {}\\n'.format(r2_test))\n \"\"\"\n Visualize the feature significance and confidence intervals\n \"\"\"\n num_features = len(X_train.columns)\n fig = plt.figure(figsize=(18, 12))\n fig.subplots_adjust(hspace=0.4)\n cnt = 1\n p_values = gam.statistics_['p_values']\n for i in range(num_features):\n axs = fig.add_subplot(num_features, 1, cnt)\n m = gam.generate_X_grid(term=i)\n axs.plot(m[:, i], gam.partial_dependence(term=i, X=m))\n axs.plot(m[:, i], gam.partial_dependence(term=i, X=m, width=0.95)[1\n ], c='r', ls='--')\n axs.set_title(X_train.columns[i] + ('*' if p_values[cnt] < 0.05 else\n ''))\n cnt += 1\n",
"<import token>\n<code token>\n\n\ndef get_province_df(df, provinceName: str) ->pandas.core.frame.DataFrame:\n \"\"\"\n Return time series data of given province\n \"\"\"\n return df[(df['province'] == provinceName) & df['city'].isnull()]\n\n\ndef get_China_total(df) ->pandas.core.frame.DataFrame:\n \"\"\"\n Return time series data of China total (including HK and Taiwan)\n \"\"\"\n return df[(df['countryCode'] == 'CN') & df['province'].isnull()]\n\n\ndef get_China_exhubei(df) ->pandas.core.frame.DataFrame:\n \"\"\"\n Return time series data of China total (including HK and Taiwan)\n \"\"\"\n return df[(df['countryCode'] == 'CN') & (df['province'] !=\n 'Hubei Province') & ~df['province'].isnull() & ~df['city'].isnull()]\n\n\ndef add_days(DXYArea: pandas.core.frame.DataFrame\n ) ->pandas.core.frame.DataFrame:\n \"\"\"\n Create a new column: Days, number of days after 2019-12-08 (detect the first case)\n \"\"\"\n DXYArea['date'] = pd.to_datetime(DXYArea['date'])\n first_day = datetime.datetime(2019, 12, 8)\n DXYArea['Days'] = (DXYArea['date'] - first_day).dt.days\n return DXYArea\n\n\ndef split_train_test_by_date(df: pandas.core.frame.DataFrame, splicer):\n \"\"\"\n Separate Train and Test dataset in time series\n \"\"\"\n if type(splicer) == float:\n ndays = 3 if (df['date'].max() - df['date'].min()\n ).days < 3 else splicer * (df['date'].max() - df['date'].min()\n ).days\n ndays = np.ceil(ndays)\n elif type(splicer) == int:\n ndays = splicer\n else:\n raise Exception('split value should not be greater than length of data'\n )\n split_date = df['date'].max() - datetime.timedelta(days=ndays)\n Train = df[df['date'] < split_date]\n Test = df[df['date'] >= split_date]\n print('Train dataset: data before {} \\nTest dataset: the last {} days'.\n format(split_date, ndays))\n return Train, Test\n\n\n<function token>\n\n\ndef feature_engineering(df: pandas.core.frame.DataFrame, features_to_engineer):\n for feature in features_to_engineer:\n df[f'{feature}_lag1'] = df[f'{feature}'].shift()\n df[f'{feature}_lag1'].fillna(0, inplace=True)\n return df\n\n\ndef tsplot_conf_dead_cured(df, title_prefix, figsize=(13, 10), fontsize=18,\n logy=False):\n fig = plt.figure()\n ax1 = fig.add_subplot(211)\n plot_df = df.groupby('date').agg('sum')\n plot_df.plot(y=['confirmed'], style='-*', ax=ax1, grid=True, figsize=\n figsize, logy=logy, color='black', marker='o')\n if logy:\n ax1.set_ylabel('log(confirmed)', color='black', fontsize=14)\n else:\n ax1.set_ylabel('confirmed', color='black', fontsize=14)\n if 'dailyNew_confirmed' in df.columns:\n ax11 = ax1.twinx()\n ax11.bar(x=plot_df.index, height=plot_df['dailyNew_confirmed'],\n alpha=0.3, color='blue')\n ax11.set_ylabel('dailyNew_confirmed', color='blue', fontsize=14)\n ax2 = fig.add_subplot(212)\n plot_df.plot(y=['dead', 'cured'], style=':*', grid=True, ax=ax2,\n figsize=figsize, sharex=False, logy=logy)\n ax2.set_ylabel('count')\n title = title_prefix + ' Cumulative Confirmed, Death, Cure'\n fig.suptitle(title, fontsize=fontsize)\n\n\ndef draw_province_trend(title_prefix: str, df: pandas.core.frame.DataFrame):\n \"\"\"\n df is the daily dataset from DXY\n \"\"\"\n sub_df = df[df['province'] == title_prefix]\n tsplot_conf_dead_cured(sub_df, title_prefix)\n\n\ndef draw_city_trend(title_prefix: str, df: pandas.core.frame.DataFrame):\n \"\"\"\n df is the daily dataset from DXY\n \"\"\"\n sub_df = df[df['city'] == title_prefix]\n tsplot_conf_dead_cured(sub_df, title_prefix)\n\n\ndef draw_fit_plot(degree: int, area: str, X_train, X_test, y_train, y_test,\n 
y_train_predicted, y_test_predict, df):\n if len(y_test) > 0:\n x = pd.Series(np.concatenate((X_train, X_test)))\n y = pd.Series(np.concatenate((y_train, y_test)))\n else:\n x = X_train\n y = y_train\n fig, ax = plt.subplots()\n plt.scatter(x, y, s=10, c='black')\n plt.plot(X_train, y_train_predicted, color='green')\n plt.plot(X_test, y_test_predict, color='blue')\n plt.title('Polynomial Regression {} with degree = {}'.format(area, degree))\n plt.ylabel('Confirmed cases')\n plt.xlabel('2020 Date')\n datemin = df['date'].min()\n numdays = len(X_train) + len(X_test)\n labels = list((datemin + datetime.timedelta(days=x)).strftime('%m-%d') for\n x in range(numdays))\n x = pd.Series(np.concatenate((X_train, X_test)))\n plt.xticks(x, labels, rotation=60)\n plt.show()\n\n\ndef fit_pygam_model(X_train: pandas.core.frame.DataFrame, X_test: pandas.\n core.frame.DataFrame, y_train: pandas.core.frame.DataFrame, y_test:\n pandas.core.frame.DataFrame):\n \"\"\"\n Creates a general additive model LinearGAM (normally distributed errors)\n with grid search. Returns the best model with given hyperparameters.\n hyperparameters: n_splines and lam regularization parameter.\n \"\"\"\n from pygam import LinearGAM\n gam = LinearGAM().gridsearch(X_train.values, y_train, n_splines=np.\n arange(3, 20), lam=np.logspace(-3, 3, 11))\n print(gam.summary())\n y_train_predicted = gam.predict(X_train)\n y_test_predicted = np.floor(gam.predict(X_test))\n rmse_train = np.sqrt(mean_squared_error(y_train, y_train_predicted))\n mae_train = mean_absolute_error(y_train, y_train_predicted)\n r2_train = r2_score(y_train, y_train_predicted)\n print('RMSE of training set is {}'.format(rmse_train))\n print('MAE of testing set is {}'.format(mae_train))\n print('R2 score of training set is {}\\n'.format(r2_train))\n if len(y_test) > 0:\n rmse_test = np.sqrt(mean_squared_error(y_test, y_test_predicted))\n mae_test = mean_absolute_error(y_test, y_test_predicted)\n r2_test = r2_score(y_test, y_test_predicted)\n print('RMSE of testing set is {}'.format(rmse_test))\n print('MAE of testing set is {}'.format(mae_test))\n print('R2 score of testing set is {}\\n'.format(r2_test))\n \"\"\"\n Visualize the feature significance and confidence intervals\n \"\"\"\n num_features = len(X_train.columns)\n fig = plt.figure(figsize=(18, 12))\n fig.subplots_adjust(hspace=0.4)\n cnt = 1\n p_values = gam.statistics_['p_values']\n for i in range(num_features):\n axs = fig.add_subplot(num_features, 1, cnt)\n m = gam.generate_X_grid(term=i)\n axs.plot(m[:, i], gam.partial_dependence(term=i, X=m))\n axs.plot(m[:, i], gam.partial_dependence(term=i, X=m, width=0.95)[1\n ], c='r', ls='--')\n axs.set_title(X_train.columns[i] + ('*' if p_values[cnt] < 0.05 else\n ''))\n cnt += 1\n",
"<import token>\n<code token>\n\n\ndef get_province_df(df, provinceName: str) ->pandas.core.frame.DataFrame:\n \"\"\"\n Return time series data of given province\n \"\"\"\n return df[(df['province'] == provinceName) & df['city'].isnull()]\n\n\ndef get_China_total(df) ->pandas.core.frame.DataFrame:\n \"\"\"\n Return time series data of China total (including HK and Taiwan)\n \"\"\"\n return df[(df['countryCode'] == 'CN') & df['province'].isnull()]\n\n\ndef get_China_exhubei(df) ->pandas.core.frame.DataFrame:\n \"\"\"\n Return time series data of China total (including HK and Taiwan)\n \"\"\"\n return df[(df['countryCode'] == 'CN') & (df['province'] !=\n 'Hubei Province') & ~df['province'].isnull() & ~df['city'].isnull()]\n\n\ndef add_days(DXYArea: pandas.core.frame.DataFrame\n ) ->pandas.core.frame.DataFrame:\n \"\"\"\n Create a new column: Days, number of days after 2019-12-08 (detect the first case)\n \"\"\"\n DXYArea['date'] = pd.to_datetime(DXYArea['date'])\n first_day = datetime.datetime(2019, 12, 8)\n DXYArea['Days'] = (DXYArea['date'] - first_day).dt.days\n return DXYArea\n\n\n<function token>\n<function token>\n\n\ndef feature_engineering(df: pandas.core.frame.DataFrame, features_to_engineer):\n for feature in features_to_engineer:\n df[f'{feature}_lag1'] = df[f'{feature}'].shift()\n df[f'{feature}_lag1'].fillna(0, inplace=True)\n return df\n\n\ndef tsplot_conf_dead_cured(df, title_prefix, figsize=(13, 10), fontsize=18,\n logy=False):\n fig = plt.figure()\n ax1 = fig.add_subplot(211)\n plot_df = df.groupby('date').agg('sum')\n plot_df.plot(y=['confirmed'], style='-*', ax=ax1, grid=True, figsize=\n figsize, logy=logy, color='black', marker='o')\n if logy:\n ax1.set_ylabel('log(confirmed)', color='black', fontsize=14)\n else:\n ax1.set_ylabel('confirmed', color='black', fontsize=14)\n if 'dailyNew_confirmed' in df.columns:\n ax11 = ax1.twinx()\n ax11.bar(x=plot_df.index, height=plot_df['dailyNew_confirmed'],\n alpha=0.3, color='blue')\n ax11.set_ylabel('dailyNew_confirmed', color='blue', fontsize=14)\n ax2 = fig.add_subplot(212)\n plot_df.plot(y=['dead', 'cured'], style=':*', grid=True, ax=ax2,\n figsize=figsize, sharex=False, logy=logy)\n ax2.set_ylabel('count')\n title = title_prefix + ' Cumulative Confirmed, Death, Cure'\n fig.suptitle(title, fontsize=fontsize)\n\n\ndef draw_province_trend(title_prefix: str, df: pandas.core.frame.DataFrame):\n \"\"\"\n df is the daily dataset from DXY\n \"\"\"\n sub_df = df[df['province'] == title_prefix]\n tsplot_conf_dead_cured(sub_df, title_prefix)\n\n\ndef draw_city_trend(title_prefix: str, df: pandas.core.frame.DataFrame):\n \"\"\"\n df is the daily dataset from DXY\n \"\"\"\n sub_df = df[df['city'] == title_prefix]\n tsplot_conf_dead_cured(sub_df, title_prefix)\n\n\ndef draw_fit_plot(degree: int, area: str, X_train, X_test, y_train, y_test,\n y_train_predicted, y_test_predict, df):\n if len(y_test) > 0:\n x = pd.Series(np.concatenate((X_train, X_test)))\n y = pd.Series(np.concatenate((y_train, y_test)))\n else:\n x = X_train\n y = y_train\n fig, ax = plt.subplots()\n plt.scatter(x, y, s=10, c='black')\n plt.plot(X_train, y_train_predicted, color='green')\n plt.plot(X_test, y_test_predict, color='blue')\n plt.title('Polynomial Regression {} with degree = {}'.format(area, degree))\n plt.ylabel('Confirmed cases')\n plt.xlabel('2020 Date')\n datemin = df['date'].min()\n numdays = len(X_train) + len(X_test)\n labels = list((datemin + datetime.timedelta(days=x)).strftime('%m-%d') for\n x in range(numdays))\n x = pd.Series(np.concatenate((X_train, 
X_test)))\n plt.xticks(x, labels, rotation=60)\n plt.show()\n\n\ndef fit_pygam_model(X_train: pandas.core.frame.DataFrame, X_test: pandas.\n core.frame.DataFrame, y_train: pandas.core.frame.DataFrame, y_test:\n pandas.core.frame.DataFrame):\n \"\"\"\n Creates a general additive model LinearGAM (normally distributed errors)\n with grid search. Returns the best model with given hyperparameters.\n hyperparameters: n_splines and lam regularization parameter.\n \"\"\"\n from pygam import LinearGAM\n gam = LinearGAM().gridsearch(X_train.values, y_train, n_splines=np.\n arange(3, 20), lam=np.logspace(-3, 3, 11))\n print(gam.summary())\n y_train_predicted = gam.predict(X_train)\n y_test_predicted = np.floor(gam.predict(X_test))\n rmse_train = np.sqrt(mean_squared_error(y_train, y_train_predicted))\n mae_train = mean_absolute_error(y_train, y_train_predicted)\n r2_train = r2_score(y_train, y_train_predicted)\n print('RMSE of training set is {}'.format(rmse_train))\n print('MAE of testing set is {}'.format(mae_train))\n print('R2 score of training set is {}\\n'.format(r2_train))\n if len(y_test) > 0:\n rmse_test = np.sqrt(mean_squared_error(y_test, y_test_predicted))\n mae_test = mean_absolute_error(y_test, y_test_predicted)\n r2_test = r2_score(y_test, y_test_predicted)\n print('RMSE of testing set is {}'.format(rmse_test))\n print('MAE of testing set is {}'.format(mae_test))\n print('R2 score of testing set is {}\\n'.format(r2_test))\n \"\"\"\n Visualize the feature significance and confidence intervals\n \"\"\"\n num_features = len(X_train.columns)\n fig = plt.figure(figsize=(18, 12))\n fig.subplots_adjust(hspace=0.4)\n cnt = 1\n p_values = gam.statistics_['p_values']\n for i in range(num_features):\n axs = fig.add_subplot(num_features, 1, cnt)\n m = gam.generate_X_grid(term=i)\n axs.plot(m[:, i], gam.partial_dependence(term=i, X=m))\n axs.plot(m[:, i], gam.partial_dependence(term=i, X=m, width=0.95)[1\n ], c='r', ls='--')\n axs.set_title(X_train.columns[i] + ('*' if p_values[cnt] < 0.05 else\n ''))\n cnt += 1\n",
"<import token>\n<code token>\n\n\ndef get_province_df(df, provinceName: str) ->pandas.core.frame.DataFrame:\n \"\"\"\n Return time series data of given province\n \"\"\"\n return df[(df['province'] == provinceName) & df['city'].isnull()]\n\n\ndef get_China_total(df) ->pandas.core.frame.DataFrame:\n \"\"\"\n Return time series data of China total (including HK and Taiwan)\n \"\"\"\n return df[(df['countryCode'] == 'CN') & df['province'].isnull()]\n\n\ndef get_China_exhubei(df) ->pandas.core.frame.DataFrame:\n \"\"\"\n Return time series data of China total (including HK and Taiwan)\n \"\"\"\n return df[(df['countryCode'] == 'CN') & (df['province'] !=\n 'Hubei Province') & ~df['province'].isnull() & ~df['city'].isnull()]\n\n\ndef add_days(DXYArea: pandas.core.frame.DataFrame\n ) ->pandas.core.frame.DataFrame:\n \"\"\"\n Create a new column: Days, number of days after 2019-12-08 (detect the first case)\n \"\"\"\n DXYArea['date'] = pd.to_datetime(DXYArea['date'])\n first_day = datetime.datetime(2019, 12, 8)\n DXYArea['Days'] = (DXYArea['date'] - first_day).dt.days\n return DXYArea\n\n\n<function token>\n<function token>\n\n\ndef feature_engineering(df: pandas.core.frame.DataFrame, features_to_engineer):\n for feature in features_to_engineer:\n df[f'{feature}_lag1'] = df[f'{feature}'].shift()\n df[f'{feature}_lag1'].fillna(0, inplace=True)\n return df\n\n\ndef tsplot_conf_dead_cured(df, title_prefix, figsize=(13, 10), fontsize=18,\n logy=False):\n fig = plt.figure()\n ax1 = fig.add_subplot(211)\n plot_df = df.groupby('date').agg('sum')\n plot_df.plot(y=['confirmed'], style='-*', ax=ax1, grid=True, figsize=\n figsize, logy=logy, color='black', marker='o')\n if logy:\n ax1.set_ylabel('log(confirmed)', color='black', fontsize=14)\n else:\n ax1.set_ylabel('confirmed', color='black', fontsize=14)\n if 'dailyNew_confirmed' in df.columns:\n ax11 = ax1.twinx()\n ax11.bar(x=plot_df.index, height=plot_df['dailyNew_confirmed'],\n alpha=0.3, color='blue')\n ax11.set_ylabel('dailyNew_confirmed', color='blue', fontsize=14)\n ax2 = fig.add_subplot(212)\n plot_df.plot(y=['dead', 'cured'], style=':*', grid=True, ax=ax2,\n figsize=figsize, sharex=False, logy=logy)\n ax2.set_ylabel('count')\n title = title_prefix + ' Cumulative Confirmed, Death, Cure'\n fig.suptitle(title, fontsize=fontsize)\n\n\ndef draw_province_trend(title_prefix: str, df: pandas.core.frame.DataFrame):\n \"\"\"\n df is the daily dataset from DXY\n \"\"\"\n sub_df = df[df['province'] == title_prefix]\n tsplot_conf_dead_cured(sub_df, title_prefix)\n\n\ndef draw_city_trend(title_prefix: str, df: pandas.core.frame.DataFrame):\n \"\"\"\n df is the daily dataset from DXY\n \"\"\"\n sub_df = df[df['city'] == title_prefix]\n tsplot_conf_dead_cured(sub_df, title_prefix)\n\n\n<function token>\n\n\ndef fit_pygam_model(X_train: pandas.core.frame.DataFrame, X_test: pandas.\n core.frame.DataFrame, y_train: pandas.core.frame.DataFrame, y_test:\n pandas.core.frame.DataFrame):\n \"\"\"\n Creates a general additive model LinearGAM (normally distributed errors)\n with grid search. 
Returns the best model with given hyperparameters.\n hyperparameters: n_splines and lam regularization parameter.\n \"\"\"\n from pygam import LinearGAM\n gam = LinearGAM().gridsearch(X_train.values, y_train, n_splines=np.\n arange(3, 20), lam=np.logspace(-3, 3, 11))\n print(gam.summary())\n y_train_predicted = gam.predict(X_train)\n y_test_predicted = np.floor(gam.predict(X_test))\n rmse_train = np.sqrt(mean_squared_error(y_train, y_train_predicted))\n mae_train = mean_absolute_error(y_train, y_train_predicted)\n r2_train = r2_score(y_train, y_train_predicted)\n print('RMSE of training set is {}'.format(rmse_train))\n print('MAE of testing set is {}'.format(mae_train))\n print('R2 score of training set is {}\\n'.format(r2_train))\n if len(y_test) > 0:\n rmse_test = np.sqrt(mean_squared_error(y_test, y_test_predicted))\n mae_test = mean_absolute_error(y_test, y_test_predicted)\n r2_test = r2_score(y_test, y_test_predicted)\n print('RMSE of testing set is {}'.format(rmse_test))\n print('MAE of testing set is {}'.format(mae_test))\n print('R2 score of testing set is {}\\n'.format(r2_test))\n \"\"\"\n Visualize the feature significance and confidence intervals\n \"\"\"\n num_features = len(X_train.columns)\n fig = plt.figure(figsize=(18, 12))\n fig.subplots_adjust(hspace=0.4)\n cnt = 1\n p_values = gam.statistics_['p_values']\n for i in range(num_features):\n axs = fig.add_subplot(num_features, 1, cnt)\n m = gam.generate_X_grid(term=i)\n axs.plot(m[:, i], gam.partial_dependence(term=i, X=m))\n axs.plot(m[:, i], gam.partial_dependence(term=i, X=m, width=0.95)[1\n ], c='r', ls='--')\n axs.set_title(X_train.columns[i] + ('*' if p_values[cnt] < 0.05 else\n ''))\n cnt += 1\n",
"<import token>\n<code token>\n\n\ndef get_province_df(df, provinceName: str) ->pandas.core.frame.DataFrame:\n \"\"\"\n Return time series data of given province\n \"\"\"\n return df[(df['province'] == provinceName) & df['city'].isnull()]\n\n\ndef get_China_total(df) ->pandas.core.frame.DataFrame:\n \"\"\"\n Return time series data of China total (including HK and Taiwan)\n \"\"\"\n return df[(df['countryCode'] == 'CN') & df['province'].isnull()]\n\n\ndef get_China_exhubei(df) ->pandas.core.frame.DataFrame:\n \"\"\"\n Return time series data of China total (including HK and Taiwan)\n \"\"\"\n return df[(df['countryCode'] == 'CN') & (df['province'] !=\n 'Hubei Province') & ~df['province'].isnull() & ~df['city'].isnull()]\n\n\ndef add_days(DXYArea: pandas.core.frame.DataFrame\n ) ->pandas.core.frame.DataFrame:\n \"\"\"\n Create a new column: Days, number of days after 2019-12-08 (detect the first case)\n \"\"\"\n DXYArea['date'] = pd.to_datetime(DXYArea['date'])\n first_day = datetime.datetime(2019, 12, 8)\n DXYArea['Days'] = (DXYArea['date'] - first_day).dt.days\n return DXYArea\n\n\n<function token>\n<function token>\n\n\ndef feature_engineering(df: pandas.core.frame.DataFrame, features_to_engineer):\n for feature in features_to_engineer:\n df[f'{feature}_lag1'] = df[f'{feature}'].shift()\n df[f'{feature}_lag1'].fillna(0, inplace=True)\n return df\n\n\ndef tsplot_conf_dead_cured(df, title_prefix, figsize=(13, 10), fontsize=18,\n logy=False):\n fig = plt.figure()\n ax1 = fig.add_subplot(211)\n plot_df = df.groupby('date').agg('sum')\n plot_df.plot(y=['confirmed'], style='-*', ax=ax1, grid=True, figsize=\n figsize, logy=logy, color='black', marker='o')\n if logy:\n ax1.set_ylabel('log(confirmed)', color='black', fontsize=14)\n else:\n ax1.set_ylabel('confirmed', color='black', fontsize=14)\n if 'dailyNew_confirmed' in df.columns:\n ax11 = ax1.twinx()\n ax11.bar(x=plot_df.index, height=plot_df['dailyNew_confirmed'],\n alpha=0.3, color='blue')\n ax11.set_ylabel('dailyNew_confirmed', color='blue', fontsize=14)\n ax2 = fig.add_subplot(212)\n plot_df.plot(y=['dead', 'cured'], style=':*', grid=True, ax=ax2,\n figsize=figsize, sharex=False, logy=logy)\n ax2.set_ylabel('count')\n title = title_prefix + ' Cumulative Confirmed, Death, Cure'\n fig.suptitle(title, fontsize=fontsize)\n\n\n<function token>\n\n\ndef draw_city_trend(title_prefix: str, df: pandas.core.frame.DataFrame):\n \"\"\"\n df is the daily dataset from DXY\n \"\"\"\n sub_df = df[df['city'] == title_prefix]\n tsplot_conf_dead_cured(sub_df, title_prefix)\n\n\n<function token>\n\n\ndef fit_pygam_model(X_train: pandas.core.frame.DataFrame, X_test: pandas.\n core.frame.DataFrame, y_train: pandas.core.frame.DataFrame, y_test:\n pandas.core.frame.DataFrame):\n \"\"\"\n Creates a general additive model LinearGAM (normally distributed errors)\n with grid search. 
Returns the best model with given hyperparameters.\n hyperparameters: n_splines and lam regularization parameter.\n \"\"\"\n from pygam import LinearGAM\n gam = LinearGAM().gridsearch(X_train.values, y_train, n_splines=np.\n arange(3, 20), lam=np.logspace(-3, 3, 11))\n print(gam.summary())\n y_train_predicted = gam.predict(X_train)\n y_test_predicted = np.floor(gam.predict(X_test))\n rmse_train = np.sqrt(mean_squared_error(y_train, y_train_predicted))\n mae_train = mean_absolute_error(y_train, y_train_predicted)\n r2_train = r2_score(y_train, y_train_predicted)\n print('RMSE of training set is {}'.format(rmse_train))\n print('MAE of testing set is {}'.format(mae_train))\n print('R2 score of training set is {}\\n'.format(r2_train))\n if len(y_test) > 0:\n rmse_test = np.sqrt(mean_squared_error(y_test, y_test_predicted))\n mae_test = mean_absolute_error(y_test, y_test_predicted)\n r2_test = r2_score(y_test, y_test_predicted)\n print('RMSE of testing set is {}'.format(rmse_test))\n print('MAE of testing set is {}'.format(mae_test))\n print('R2 score of testing set is {}\\n'.format(r2_test))\n \"\"\"\n Visualize the feature significance and confidence intervals\n \"\"\"\n num_features = len(X_train.columns)\n fig = plt.figure(figsize=(18, 12))\n fig.subplots_adjust(hspace=0.4)\n cnt = 1\n p_values = gam.statistics_['p_values']\n for i in range(num_features):\n axs = fig.add_subplot(num_features, 1, cnt)\n m = gam.generate_X_grid(term=i)\n axs.plot(m[:, i], gam.partial_dependence(term=i, X=m))\n axs.plot(m[:, i], gam.partial_dependence(term=i, X=m, width=0.95)[1\n ], c='r', ls='--')\n axs.set_title(X_train.columns[i] + ('*' if p_values[cnt] < 0.05 else\n ''))\n cnt += 1\n",
"<import token>\n<code token>\n\n\ndef get_province_df(df, provinceName: str) ->pandas.core.frame.DataFrame:\n \"\"\"\n Return time series data of given province\n \"\"\"\n return df[(df['province'] == provinceName) & df['city'].isnull()]\n\n\ndef get_China_total(df) ->pandas.core.frame.DataFrame:\n \"\"\"\n Return time series data of China total (including HK and Taiwan)\n \"\"\"\n return df[(df['countryCode'] == 'CN') & df['province'].isnull()]\n\n\ndef get_China_exhubei(df) ->pandas.core.frame.DataFrame:\n \"\"\"\n Return time series data of China total (including HK and Taiwan)\n \"\"\"\n return df[(df['countryCode'] == 'CN') & (df['province'] !=\n 'Hubei Province') & ~df['province'].isnull() & ~df['city'].isnull()]\n\n\ndef add_days(DXYArea: pandas.core.frame.DataFrame\n ) ->pandas.core.frame.DataFrame:\n \"\"\"\n Create a new column: Days, number of days after 2019-12-08 (detect the first case)\n \"\"\"\n DXYArea['date'] = pd.to_datetime(DXYArea['date'])\n first_day = datetime.datetime(2019, 12, 8)\n DXYArea['Days'] = (DXYArea['date'] - first_day).dt.days\n return DXYArea\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef tsplot_conf_dead_cured(df, title_prefix, figsize=(13, 10), fontsize=18,\n logy=False):\n fig = plt.figure()\n ax1 = fig.add_subplot(211)\n plot_df = df.groupby('date').agg('sum')\n plot_df.plot(y=['confirmed'], style='-*', ax=ax1, grid=True, figsize=\n figsize, logy=logy, color='black', marker='o')\n if logy:\n ax1.set_ylabel('log(confirmed)', color='black', fontsize=14)\n else:\n ax1.set_ylabel('confirmed', color='black', fontsize=14)\n if 'dailyNew_confirmed' in df.columns:\n ax11 = ax1.twinx()\n ax11.bar(x=plot_df.index, height=plot_df['dailyNew_confirmed'],\n alpha=0.3, color='blue')\n ax11.set_ylabel('dailyNew_confirmed', color='blue', fontsize=14)\n ax2 = fig.add_subplot(212)\n plot_df.plot(y=['dead', 'cured'], style=':*', grid=True, ax=ax2,\n figsize=figsize, sharex=False, logy=logy)\n ax2.set_ylabel('count')\n title = title_prefix + ' Cumulative Confirmed, Death, Cure'\n fig.suptitle(title, fontsize=fontsize)\n\n\n<function token>\n\n\ndef draw_city_trend(title_prefix: str, df: pandas.core.frame.DataFrame):\n \"\"\"\n df is the daily dataset from DXY\n \"\"\"\n sub_df = df[df['city'] == title_prefix]\n tsplot_conf_dead_cured(sub_df, title_prefix)\n\n\n<function token>\n\n\ndef fit_pygam_model(X_train: pandas.core.frame.DataFrame, X_test: pandas.\n core.frame.DataFrame, y_train: pandas.core.frame.DataFrame, y_test:\n pandas.core.frame.DataFrame):\n \"\"\"\n Creates a general additive model LinearGAM (normally distributed errors)\n with grid search. 
Returns the best model with given hyperparameters.\n hyperparameters: n_splines and lam regularization parameter.\n \"\"\"\n from pygam import LinearGAM\n gam = LinearGAM().gridsearch(X_train.values, y_train, n_splines=np.\n arange(3, 20), lam=np.logspace(-3, 3, 11))\n print(gam.summary())\n y_train_predicted = gam.predict(X_train)\n y_test_predicted = np.floor(gam.predict(X_test))\n rmse_train = np.sqrt(mean_squared_error(y_train, y_train_predicted))\n mae_train = mean_absolute_error(y_train, y_train_predicted)\n r2_train = r2_score(y_train, y_train_predicted)\n print('RMSE of training set is {}'.format(rmse_train))\n print('MAE of testing set is {}'.format(mae_train))\n print('R2 score of training set is {}\\n'.format(r2_train))\n if len(y_test) > 0:\n rmse_test = np.sqrt(mean_squared_error(y_test, y_test_predicted))\n mae_test = mean_absolute_error(y_test, y_test_predicted)\n r2_test = r2_score(y_test, y_test_predicted)\n print('RMSE of testing set is {}'.format(rmse_test))\n print('MAE of testing set is {}'.format(mae_test))\n print('R2 score of testing set is {}\\n'.format(r2_test))\n \"\"\"\n Visualize the feature significance and confidence intervals\n \"\"\"\n num_features = len(X_train.columns)\n fig = plt.figure(figsize=(18, 12))\n fig.subplots_adjust(hspace=0.4)\n cnt = 1\n p_values = gam.statistics_['p_values']\n for i in range(num_features):\n axs = fig.add_subplot(num_features, 1, cnt)\n m = gam.generate_X_grid(term=i)\n axs.plot(m[:, i], gam.partial_dependence(term=i, X=m))\n axs.plot(m[:, i], gam.partial_dependence(term=i, X=m, width=0.95)[1\n ], c='r', ls='--')\n axs.set_title(X_train.columns[i] + ('*' if p_values[cnt] < 0.05 else\n ''))\n cnt += 1\n",
"<import token>\n<code token>\n\n\ndef get_province_df(df, provinceName: str) ->pandas.core.frame.DataFrame:\n \"\"\"\n Return time series data of given province\n \"\"\"\n return df[(df['province'] == provinceName) & df['city'].isnull()]\n\n\ndef get_China_total(df) ->pandas.core.frame.DataFrame:\n \"\"\"\n Return time series data of China total (including HK and Taiwan)\n \"\"\"\n return df[(df['countryCode'] == 'CN') & df['province'].isnull()]\n\n\ndef get_China_exhubei(df) ->pandas.core.frame.DataFrame:\n \"\"\"\n Return time series data of China total (including HK and Taiwan)\n \"\"\"\n return df[(df['countryCode'] == 'CN') & (df['province'] !=\n 'Hubei Province') & ~df['province'].isnull() & ~df['city'].isnull()]\n\n\ndef add_days(DXYArea: pandas.core.frame.DataFrame\n ) ->pandas.core.frame.DataFrame:\n \"\"\"\n Create a new column: Days, number of days after 2019-12-08 (detect the first case)\n \"\"\"\n DXYArea['date'] = pd.to_datetime(DXYArea['date'])\n first_day = datetime.datetime(2019, 12, 8)\n DXYArea['Days'] = (DXYArea['date'] - first_day).dt.days\n return DXYArea\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef draw_city_trend(title_prefix: str, df: pandas.core.frame.DataFrame):\n \"\"\"\n df is the daily dataset from DXY\n \"\"\"\n sub_df = df[df['city'] == title_prefix]\n tsplot_conf_dead_cured(sub_df, title_prefix)\n\n\n<function token>\n\n\ndef fit_pygam_model(X_train: pandas.core.frame.DataFrame, X_test: pandas.\n core.frame.DataFrame, y_train: pandas.core.frame.DataFrame, y_test:\n pandas.core.frame.DataFrame):\n \"\"\"\n Creates a general additive model LinearGAM (normally distributed errors)\n with grid search. Returns the best model with given hyperparameters.\n hyperparameters: n_splines and lam regularization parameter.\n \"\"\"\n from pygam import LinearGAM\n gam = LinearGAM().gridsearch(X_train.values, y_train, n_splines=np.\n arange(3, 20), lam=np.logspace(-3, 3, 11))\n print(gam.summary())\n y_train_predicted = gam.predict(X_train)\n y_test_predicted = np.floor(gam.predict(X_test))\n rmse_train = np.sqrt(mean_squared_error(y_train, y_train_predicted))\n mae_train = mean_absolute_error(y_train, y_train_predicted)\n r2_train = r2_score(y_train, y_train_predicted)\n print('RMSE of training set is {}'.format(rmse_train))\n print('MAE of testing set is {}'.format(mae_train))\n print('R2 score of training set is {}\\n'.format(r2_train))\n if len(y_test) > 0:\n rmse_test = np.sqrt(mean_squared_error(y_test, y_test_predicted))\n mae_test = mean_absolute_error(y_test, y_test_predicted)\n r2_test = r2_score(y_test, y_test_predicted)\n print('RMSE of testing set is {}'.format(rmse_test))\n print('MAE of testing set is {}'.format(mae_test))\n print('R2 score of testing set is {}\\n'.format(r2_test))\n \"\"\"\n Visualize the feature significance and confidence intervals\n \"\"\"\n num_features = len(X_train.columns)\n fig = plt.figure(figsize=(18, 12))\n fig.subplots_adjust(hspace=0.4)\n cnt = 1\n p_values = gam.statistics_['p_values']\n for i in range(num_features):\n axs = fig.add_subplot(num_features, 1, cnt)\n m = gam.generate_X_grid(term=i)\n axs.plot(m[:, i], gam.partial_dependence(term=i, X=m))\n axs.plot(m[:, i], gam.partial_dependence(term=i, X=m, width=0.95)[1\n ], c='r', ls='--')\n axs.set_title(X_train.columns[i] + ('*' if p_values[cnt] < 0.05 else\n ''))\n cnt += 1\n",
"<import token>\n<code token>\n\n\ndef get_province_df(df, provinceName: str) ->pandas.core.frame.DataFrame:\n \"\"\"\n Return time series data of given province\n \"\"\"\n return df[(df['province'] == provinceName) & df['city'].isnull()]\n\n\ndef get_China_total(df) ->pandas.core.frame.DataFrame:\n \"\"\"\n Return time series data of China total (including HK and Taiwan)\n \"\"\"\n return df[(df['countryCode'] == 'CN') & df['province'].isnull()]\n\n\ndef get_China_exhubei(df) ->pandas.core.frame.DataFrame:\n \"\"\"\n Return time series data of China total (including HK and Taiwan)\n \"\"\"\n return df[(df['countryCode'] == 'CN') & (df['province'] !=\n 'Hubei Province') & ~df['province'].isnull() & ~df['city'].isnull()]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef draw_city_trend(title_prefix: str, df: pandas.core.frame.DataFrame):\n \"\"\"\n df is the daily dataset from DXY\n \"\"\"\n sub_df = df[df['city'] == title_prefix]\n tsplot_conf_dead_cured(sub_df, title_prefix)\n\n\n<function token>\n\n\ndef fit_pygam_model(X_train: pandas.core.frame.DataFrame, X_test: pandas.\n core.frame.DataFrame, y_train: pandas.core.frame.DataFrame, y_test:\n pandas.core.frame.DataFrame):\n \"\"\"\n Creates a general additive model LinearGAM (normally distributed errors)\n with grid search. Returns the best model with given hyperparameters.\n hyperparameters: n_splines and lam regularization parameter.\n \"\"\"\n from pygam import LinearGAM\n gam = LinearGAM().gridsearch(X_train.values, y_train, n_splines=np.\n arange(3, 20), lam=np.logspace(-3, 3, 11))\n print(gam.summary())\n y_train_predicted = gam.predict(X_train)\n y_test_predicted = np.floor(gam.predict(X_test))\n rmse_train = np.sqrt(mean_squared_error(y_train, y_train_predicted))\n mae_train = mean_absolute_error(y_train, y_train_predicted)\n r2_train = r2_score(y_train, y_train_predicted)\n print('RMSE of training set is {}'.format(rmse_train))\n print('MAE of testing set is {}'.format(mae_train))\n print('R2 score of training set is {}\\n'.format(r2_train))\n if len(y_test) > 0:\n rmse_test = np.sqrt(mean_squared_error(y_test, y_test_predicted))\n mae_test = mean_absolute_error(y_test, y_test_predicted)\n r2_test = r2_score(y_test, y_test_predicted)\n print('RMSE of testing set is {}'.format(rmse_test))\n print('MAE of testing set is {}'.format(mae_test))\n print('R2 score of testing set is {}\\n'.format(r2_test))\n \"\"\"\n Visualize the feature significance and confidence intervals\n \"\"\"\n num_features = len(X_train.columns)\n fig = plt.figure(figsize=(18, 12))\n fig.subplots_adjust(hspace=0.4)\n cnt = 1\n p_values = gam.statistics_['p_values']\n for i in range(num_features):\n axs = fig.add_subplot(num_features, 1, cnt)\n m = gam.generate_X_grid(term=i)\n axs.plot(m[:, i], gam.partial_dependence(term=i, X=m))\n axs.plot(m[:, i], gam.partial_dependence(term=i, X=m, width=0.95)[1\n ], c='r', ls='--')\n axs.set_title(X_train.columns[i] + ('*' if p_values[cnt] < 0.05 else\n ''))\n cnt += 1\n",
"<import token>\n<code token>\n\n\ndef get_province_df(df, provinceName: str) ->pandas.core.frame.DataFrame:\n \"\"\"\n Return time series data of given province\n \"\"\"\n return df[(df['province'] == provinceName) & df['city'].isnull()]\n\n\n<function token>\n\n\ndef get_China_exhubei(df) ->pandas.core.frame.DataFrame:\n \"\"\"\n Return time series data of China total (including HK and Taiwan)\n \"\"\"\n return df[(df['countryCode'] == 'CN') & (df['province'] !=\n 'Hubei Province') & ~df['province'].isnull() & ~df['city'].isnull()]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef draw_city_trend(title_prefix: str, df: pandas.core.frame.DataFrame):\n \"\"\"\n df is the daily dataset from DXY\n \"\"\"\n sub_df = df[df['city'] == title_prefix]\n tsplot_conf_dead_cured(sub_df, title_prefix)\n\n\n<function token>\n\n\ndef fit_pygam_model(X_train: pandas.core.frame.DataFrame, X_test: pandas.\n core.frame.DataFrame, y_train: pandas.core.frame.DataFrame, y_test:\n pandas.core.frame.DataFrame):\n \"\"\"\n Creates a general additive model LinearGAM (normally distributed errors)\n with grid search. Returns the best model with given hyperparameters.\n hyperparameters: n_splines and lam regularization parameter.\n \"\"\"\n from pygam import LinearGAM\n gam = LinearGAM().gridsearch(X_train.values, y_train, n_splines=np.\n arange(3, 20), lam=np.logspace(-3, 3, 11))\n print(gam.summary())\n y_train_predicted = gam.predict(X_train)\n y_test_predicted = np.floor(gam.predict(X_test))\n rmse_train = np.sqrt(mean_squared_error(y_train, y_train_predicted))\n mae_train = mean_absolute_error(y_train, y_train_predicted)\n r2_train = r2_score(y_train, y_train_predicted)\n print('RMSE of training set is {}'.format(rmse_train))\n print('MAE of testing set is {}'.format(mae_train))\n print('R2 score of training set is {}\\n'.format(r2_train))\n if len(y_test) > 0:\n rmse_test = np.sqrt(mean_squared_error(y_test, y_test_predicted))\n mae_test = mean_absolute_error(y_test, y_test_predicted)\n r2_test = r2_score(y_test, y_test_predicted)\n print('RMSE of testing set is {}'.format(rmse_test))\n print('MAE of testing set is {}'.format(mae_test))\n print('R2 score of testing set is {}\\n'.format(r2_test))\n \"\"\"\n Visualize the feature significance and confidence intervals\n \"\"\"\n num_features = len(X_train.columns)\n fig = plt.figure(figsize=(18, 12))\n fig.subplots_adjust(hspace=0.4)\n cnt = 1\n p_values = gam.statistics_['p_values']\n for i in range(num_features):\n axs = fig.add_subplot(num_features, 1, cnt)\n m = gam.generate_X_grid(term=i)\n axs.plot(m[:, i], gam.partial_dependence(term=i, X=m))\n axs.plot(m[:, i], gam.partial_dependence(term=i, X=m, width=0.95)[1\n ], c='r', ls='--')\n axs.set_title(X_train.columns[i] + ('*' if p_values[cnt] < 0.05 else\n ''))\n cnt += 1\n",
"<import token>\n<code token>\n<function token>\n<function token>\n\n\ndef get_China_exhubei(df) ->pandas.core.frame.DataFrame:\n \"\"\"\n Return time series data of China total (including HK and Taiwan)\n \"\"\"\n return df[(df['countryCode'] == 'CN') & (df['province'] !=\n 'Hubei Province') & ~df['province'].isnull() & ~df['city'].isnull()]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef draw_city_trend(title_prefix: str, df: pandas.core.frame.DataFrame):\n \"\"\"\n df is the daily dataset from DXY\n \"\"\"\n sub_df = df[df['city'] == title_prefix]\n tsplot_conf_dead_cured(sub_df, title_prefix)\n\n\n<function token>\n\n\ndef fit_pygam_model(X_train: pandas.core.frame.DataFrame, X_test: pandas.\n core.frame.DataFrame, y_train: pandas.core.frame.DataFrame, y_test:\n pandas.core.frame.DataFrame):\n \"\"\"\n Creates a general additive model LinearGAM (normally distributed errors)\n with grid search. Returns the best model with given hyperparameters.\n hyperparameters: n_splines and lam regularization parameter.\n \"\"\"\n from pygam import LinearGAM\n gam = LinearGAM().gridsearch(X_train.values, y_train, n_splines=np.\n arange(3, 20), lam=np.logspace(-3, 3, 11))\n print(gam.summary())\n y_train_predicted = gam.predict(X_train)\n y_test_predicted = np.floor(gam.predict(X_test))\n rmse_train = np.sqrt(mean_squared_error(y_train, y_train_predicted))\n mae_train = mean_absolute_error(y_train, y_train_predicted)\n r2_train = r2_score(y_train, y_train_predicted)\n print('RMSE of training set is {}'.format(rmse_train))\n print('MAE of testing set is {}'.format(mae_train))\n print('R2 score of training set is {}\\n'.format(r2_train))\n if len(y_test) > 0:\n rmse_test = np.sqrt(mean_squared_error(y_test, y_test_predicted))\n mae_test = mean_absolute_error(y_test, y_test_predicted)\n r2_test = r2_score(y_test, y_test_predicted)\n print('RMSE of testing set is {}'.format(rmse_test))\n print('MAE of testing set is {}'.format(mae_test))\n print('R2 score of testing set is {}\\n'.format(r2_test))\n \"\"\"\n Visualize the feature significance and confidence intervals\n \"\"\"\n num_features = len(X_train.columns)\n fig = plt.figure(figsize=(18, 12))\n fig.subplots_adjust(hspace=0.4)\n cnt = 1\n p_values = gam.statistics_['p_values']\n for i in range(num_features):\n axs = fig.add_subplot(num_features, 1, cnt)\n m = gam.generate_X_grid(term=i)\n axs.plot(m[:, i], gam.partial_dependence(term=i, X=m))\n axs.plot(m[:, i], gam.partial_dependence(term=i, X=m, width=0.95)[1\n ], c='r', ls='--')\n axs.set_title(X_train.columns[i] + ('*' if p_values[cnt] < 0.05 else\n ''))\n cnt += 1\n",
"<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef draw_city_trend(title_prefix: str, df: pandas.core.frame.DataFrame):\n \"\"\"\n df is the daily dataset from DXY\n \"\"\"\n sub_df = df[df['city'] == title_prefix]\n tsplot_conf_dead_cured(sub_df, title_prefix)\n\n\n<function token>\n\n\ndef fit_pygam_model(X_train: pandas.core.frame.DataFrame, X_test: pandas.\n core.frame.DataFrame, y_train: pandas.core.frame.DataFrame, y_test:\n pandas.core.frame.DataFrame):\n \"\"\"\n Creates a general additive model LinearGAM (normally distributed errors)\n with grid search. Returns the best model with given hyperparameters.\n hyperparameters: n_splines and lam regularization parameter.\n \"\"\"\n from pygam import LinearGAM\n gam = LinearGAM().gridsearch(X_train.values, y_train, n_splines=np.\n arange(3, 20), lam=np.logspace(-3, 3, 11))\n print(gam.summary())\n y_train_predicted = gam.predict(X_train)\n y_test_predicted = np.floor(gam.predict(X_test))\n rmse_train = np.sqrt(mean_squared_error(y_train, y_train_predicted))\n mae_train = mean_absolute_error(y_train, y_train_predicted)\n r2_train = r2_score(y_train, y_train_predicted)\n print('RMSE of training set is {}'.format(rmse_train))\n print('MAE of testing set is {}'.format(mae_train))\n print('R2 score of training set is {}\\n'.format(r2_train))\n if len(y_test) > 0:\n rmse_test = np.sqrt(mean_squared_error(y_test, y_test_predicted))\n mae_test = mean_absolute_error(y_test, y_test_predicted)\n r2_test = r2_score(y_test, y_test_predicted)\n print('RMSE of testing set is {}'.format(rmse_test))\n print('MAE of testing set is {}'.format(mae_test))\n print('R2 score of testing set is {}\\n'.format(r2_test))\n \"\"\"\n Visualize the feature significance and confidence intervals\n \"\"\"\n num_features = len(X_train.columns)\n fig = plt.figure(figsize=(18, 12))\n fig.subplots_adjust(hspace=0.4)\n cnt = 1\n p_values = gam.statistics_['p_values']\n for i in range(num_features):\n axs = fig.add_subplot(num_features, 1, cnt)\n m = gam.generate_X_grid(term=i)\n axs.plot(m[:, i], gam.partial_dependence(term=i, X=m))\n axs.plot(m[:, i], gam.partial_dependence(term=i, X=m, width=0.95)[1\n ], c='r', ls='--')\n axs.set_title(X_train.columns[i] + ('*' if p_values[cnt] < 0.05 else\n ''))\n cnt += 1\n",
"<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef fit_pygam_model(X_train: pandas.core.frame.DataFrame, X_test: pandas.\n core.frame.DataFrame, y_train: pandas.core.frame.DataFrame, y_test:\n pandas.core.frame.DataFrame):\n \"\"\"\n Creates a general additive model LinearGAM (normally distributed errors)\n with grid search. Returns the best model with given hyperparameters.\n hyperparameters: n_splines and lam regularization parameter.\n \"\"\"\n from pygam import LinearGAM\n gam = LinearGAM().gridsearch(X_train.values, y_train, n_splines=np.\n arange(3, 20), lam=np.logspace(-3, 3, 11))\n print(gam.summary())\n y_train_predicted = gam.predict(X_train)\n y_test_predicted = np.floor(gam.predict(X_test))\n rmse_train = np.sqrt(mean_squared_error(y_train, y_train_predicted))\n mae_train = mean_absolute_error(y_train, y_train_predicted)\n r2_train = r2_score(y_train, y_train_predicted)\n print('RMSE of training set is {}'.format(rmse_train))\n print('MAE of testing set is {}'.format(mae_train))\n print('R2 score of training set is {}\\n'.format(r2_train))\n if len(y_test) > 0:\n rmse_test = np.sqrt(mean_squared_error(y_test, y_test_predicted))\n mae_test = mean_absolute_error(y_test, y_test_predicted)\n r2_test = r2_score(y_test, y_test_predicted)\n print('RMSE of testing set is {}'.format(rmse_test))\n print('MAE of testing set is {}'.format(mae_test))\n print('R2 score of testing set is {}\\n'.format(r2_test))\n \"\"\"\n Visualize the feature significance and confidence intervals\n \"\"\"\n num_features = len(X_train.columns)\n fig = plt.figure(figsize=(18, 12))\n fig.subplots_adjust(hspace=0.4)\n cnt = 1\n p_values = gam.statistics_['p_values']\n for i in range(num_features):\n axs = fig.add_subplot(num_features, 1, cnt)\n m = gam.generate_X_grid(term=i)\n axs.plot(m[:, i], gam.partial_dependence(term=i, X=m))\n axs.plot(m[:, i], gam.partial_dependence(term=i, X=m, width=0.95)[1\n ], c='r', ls='--')\n axs.set_title(X_train.columns[i] + ('*' if p_values[cnt] < 0.05 else\n ''))\n cnt += 1\n",
"<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
98,598 |
4fa043369912d84a77bdeb2e3d9619cdc3fd5c02
|
m = float(input("digite o metro a se convertido: "))
cm = m*100
mm = m*1000
print("{} metro(s) para cm é {} cm(s)".format(m,cm))
print("{} metro(s) para mm é {} mm(s)".format(m,mm))
|
[
"m = float(input(\"digite o metro a se convertido: \")) \n\ncm = m*100\nmm = m*1000\n\nprint(\"{} metro(s) para cm é {} cm(s)\".format(m,cm))\nprint(\"{} metro(s) para mm é {} mm(s)\".format(m,mm))\n",
"m = float(input('digite o metro a se convertido: '))\ncm = m * 100\nmm = m * 1000\nprint('{} metro(s) para cm é {} cm(s)'.format(m, cm))\nprint('{} metro(s) para mm é {} mm(s)'.format(m, mm))\n",
"<assignment token>\nprint('{} metro(s) para cm é {} cm(s)'.format(m, cm))\nprint('{} metro(s) para mm é {} mm(s)'.format(m, mm))\n",
"<assignment token>\n<code token>\n"
] | false |
98,599 |
e60c41144b46730a16d95ece3eaf2bd35d6f7988
|
#guess my mood program
#made by ikon beth
#on my path to mastering Python
import random
print("Do you know that I can measure your mood from your energy? Let's get started then!")
mood = int(random.randrange(4))
if mood ==int(("0")):
print ("You are:" + " Happy!")
elif mood == int(("1")):
print("You are:"+ " neutral!")
elif mood == int(("2")):
print ("You are really:"+ " sad!")
else:
print("Are you kidding me! Your mood swings like a gyroscope!")
input("Press the enter key to exit:")
|
[
"#guess my mood program\r\n#made by ikon beth\r\n#on my path to mastering Python\r\n\r\n\r\nimport random\r\nprint(\"Do you know that I can measure your mood from your energy? Let's get started then!\")\r\nmood = int(random.randrange(4))\r\nif mood ==int((\"0\")):\r\n print (\"You are:\" + \" Happy!\")\r\nelif mood == int((\"1\")):\r\n print(\"You are:\"+ \" neutral!\")\r\nelif mood == int((\"2\")):\r\n print (\"You are really:\"+ \" sad!\")\r\nelse:\r\n print(\"Are you kidding me! Your mood swings like a gyroscope!\")\r\n\r\ninput(\"Press the enter key to exit:\")\r\n",
"import random\nprint(\n \"Do you know that I can measure your mood from your energy? Let's get started then!\"\n )\nmood = int(random.randrange(4))\nif mood == int('0'):\n print('You are:' + ' Happy!')\nelif mood == int('1'):\n print('You are:' + ' neutral!')\nelif mood == int('2'):\n print('You are really:' + ' sad!')\nelse:\n print('Are you kidding me! Your mood swings like a gyroscope!')\ninput('Press the enter key to exit:')\n",
"<import token>\nprint(\n \"Do you know that I can measure your mood from your energy? Let's get started then!\"\n )\nmood = int(random.randrange(4))\nif mood == int('0'):\n print('You are:' + ' Happy!')\nelif mood == int('1'):\n print('You are:' + ' neutral!')\nelif mood == int('2'):\n print('You are really:' + ' sad!')\nelse:\n print('Are you kidding me! Your mood swings like a gyroscope!')\ninput('Press the enter key to exit:')\n",
"<import token>\nprint(\n \"Do you know that I can measure your mood from your energy? Let's get started then!\"\n )\n<assignment token>\nif mood == int('0'):\n print('You are:' + ' Happy!')\nelif mood == int('1'):\n print('You are:' + ' neutral!')\nelif mood == int('2'):\n print('You are really:' + ' sad!')\nelse:\n print('Are you kidding me! Your mood swings like a gyroscope!')\ninput('Press the enter key to exit:')\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |